hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795bcf8de903b746352efa85f5d1c578c613d02f | 2,209 | py | Python | samples/cli/accelbyte_py_sdk_cli/iam/_admin_reset_input_validations.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | samples/cli/accelbyte_py_sdk_cli/iam/_admin_reset_input_validations.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | samples/cli/accelbyte_py_sdk_cli/iam/_admin_reset_input_validations.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-iam-service (5.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.iam import admin_reset_input_validations as admin_reset_input_validations_internal
from accelbyte_py_sdk.api.iam.models import RestErrorResponse
@click.command()
@click.argument("field", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_reset_input_validations(
    field: str,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # CLI wrapper around the generated AdminResetInputValidations SDK call.
    # NOTE: deliberately no docstring here — click would expose it as the
    # command's help text and this file is code-generated ("DO NOT EDIT").
    if doc:
        # --doc: print the wrapped SDK operation's docstring and exit.
        click.echo(admin_reset_input_validations_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # Caller supplied a raw Authorization header value; forward it as-is.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        # Otherwise authenticate as the chosen principal ("client" or "user").
        login_as_internal(login_as)
    result, error = admin_reset_input_validations_internal(
        field=field,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"AdminResetInputValidations failed: {str(error)}")
    # On success, pretty-print the response as YAML (insertion order kept).
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_reset_input_validations.operation_id = "AdminResetInputValidations"
admin_reset_input_validations.is_deprecated = False
| 32.014493 | 108 | 0.751471 |
795bd02d4f9c57d6697c7f3adc0447d0f2d75a75 | 2,541 | py | Python | main/sentry_commands/project_registration.py | farhannysf/sentry-bot | df8ebe362592ee24793a9665c43c7f0cfc86a031 | [
"MIT"
] | 1 | 2021-03-08T21:40:37.000Z | 2021-03-08T21:40:37.000Z | main/sentry_commands/project_registration.py | farhannysf/sentry-bot | df8ebe362592ee24793a9665c43c7f0cfc86a031 | [
"MIT"
] | null | null | null | main/sentry_commands/project_registration.py | farhannysf/sentry-bot | df8ebe362592ee24793a9665c43c7f0cfc86a031 | [
"MIT"
] | null | null | null | from utility import checkChannel, retrieveDB_data, checkUser
from settings import unauthorized_channelMessage, glyph_URL
async def project_registrationLogic(
    ctx, discordEmbed, firestore, db, operation, project_name
):
    """Handle the `!sentry project` register/revoke sub-commands for the caller."""
    channel_id = str(ctx.message.channel.id)
    guild_id = str(ctx.message.guild.id)
    user_id = str(ctx.author.id)
    # Commands are only honoured in a channel registered for this guild.
    if not await checkChannel(db, firestore, channel_id, guild_id):
        return await ctx.send(unauthorized_channelMessage)
    user_projects = retrieveDB_data(db, option="user-projects", title=guild_id)
    user_projectsDB = db.collection("user-projects").document(str(guild_id))
    userVerify = await checkUser(db, firestore, str(user_id), str(guild_id))
    # Project currently bound to the caller (empty string when none).
    registered_project = user_projects[str(user_id)] if userVerify else ""
    usageMessage = (
        "`!sentry project register project-name-slug`\n------\n"
        "`!sentry project revoke`\n------"
    )
    embed = discordEmbed(
        title="***SENTRY PROJECT REGISTRATION***",
        description="Register a Sentry project to your Discord user ID and enable mention alert",
        color=0xE74C3C,
    )
    embed.set_thumbnail(url=glyph_URL)
    embed.add_field(name="Usage", value=usageMessage)
    embed.add_field(name="Registered Project", value=f"{registered_project}\n------")
    if operation == "revoke":
        if not userVerify:
            return await ctx.send("You have no project to be revoked")
        # Delete the caller's mapping from the guild's user-projects document.
        user_projectsDB.update({str(user_id): firestore.DELETE_FIELD})
        return await ctx.send(f"Revoked Sentry project: `{user_projects[user_id]}`")
    if operation == "register":
        if project_name is None:
            return await ctx.send(embed=embed)
        # A project slug may belong to at most one user in the guild.
        current_mappings = user_projectsDB.get().to_dict()
        owners = [k for k, v in current_mappings.items() if v == str(project_name)]
        if owners:
            return await ctx.send(
                f"Sentry project: `{project_name}` is already registered to <@!{owners[0]}>"
            )
        user_projectsDB.update({str(user_id): str(project_name)})
        return await ctx.send(f"Registered Sentry project: `{project_name}`")
    # Any other sub-command: show the usage embed.
    return await ctx.send(embed=embed)
| 38.5 | 112 | 0.61708 |
795bd031eee0779dd719e1cc079bcd8bb53f6372 | 10,743 | py | Python | dso/dso/task/regression/dataset.py | ryok/deep-symbolic-optimization | 9dc2086f5d219fdfab5aaae2485e11b693da4d4a | [
"BSD-3-Clause"
] | 65 | 2020-03-27T06:20:04.000Z | 2021-05-28T11:30:18.000Z | dso/dso/task/regression/dataset.py | ryok/deep-symbolic-optimization | 9dc2086f5d219fdfab5aaae2485e11b693da4d4a | [
"BSD-3-Clause"
] | 9 | 2020-02-20T04:00:50.000Z | 2021-06-04T18:16:29.000Z | dso/dso/task/regression/dataset.py | ryok/deep-symbolic-optimization | 9dc2086f5d219fdfab5aaae2485e11b693da4d4a | [
"BSD-3-Clause"
] | 18 | 2020-05-01T11:45:38.000Z | 2021-06-10T13:27:51.000Z | """Class for deterministically generating a benchmark dataset from benchmark specifications."""
import os
import ast
import itertools
from pkg_resources import resource_filename
import zlib
import click
import pandas as pd
import numpy as np
from dso.functions import function_map
class BenchmarkDataset(object):
    """
    Class used to generate (X, y) data from a named benchmark expression.

    Parameters
    ----------
    name : str
        Name of benchmark expression.

    benchmark_source : str, optional
        Filename of CSV describing benchmark expressions.

    root : str, optional
        Directory containing benchmark_source and function_sets.csv.

    noise : float, optional
        If not None, Gaussian noise is added to the y values with standard
        deviation = noise * RMS of the noiseless y training values.

    dataset_size_multiplier : float, optional
        Multiplier for size of the dataset.

    seed : int, optional
        Random number seed used to generate data. Checksum on name is added to
        seed.

    logdir : str, optional
        Directory where experiment logfiles are saved.

    backup : bool, optional
        Save generated dataset in logdir if logdir is provided.
    """

    def __init__(self, name, benchmark_source="benchmarks.csv", root=None, noise=0.0,
                 dataset_size_multiplier=1.0, seed=0, logdir=None,
                 backup=False):
        # Set class variables
        self.name = name
        self.seed = seed
        self.noise = noise if noise is not None else 0.0
        self.dataset_size_multiplier = dataset_size_multiplier if dataset_size_multiplier is not None else 1.0

        # Set random number generator used for sampling X values. A checksum
        # of the name is mixed into the seed; otherwise two benchmarks with
        # the same domain would always draw identical X values.
        seed += zlib.adler32(name.encode("utf-8"))
        self.rng = np.random.RandomState(seed)

        # Load the CSV row describing this benchmark.
        if root is None:
            root = resource_filename("dso.task", "regression")
        benchmark_path = os.path.join(root, benchmark_source)
        benchmark_df = pd.read_csv(benchmark_path, index_col=0, encoding="ISO-8859-1")
        row = benchmark_df.loc[name]
        self.n_input_var = row["variables"]

        # Create symbolic expression
        self.numpy_expr = self.make_numpy_expr(row["expression"])

        # Create X values; the test domain falls back to the training domain.
        train_spec = ast.literal_eval(row["train_spec"])
        test_spec = ast.literal_eval(row["test_spec"])
        if test_spec is None:
            test_spec = train_spec
        self.X_train = self.make_X(train_spec)
        self.X_test = self.make_X(test_spec)
        self.train_spec = train_spec
        self.test_spec = test_spec

        # Compute y values
        self.y_train = self.numpy_expr(self.X_train)
        self.y_test = self.numpy_expr(self.X_test)
        self.y_train_noiseless = self.y_train.copy()
        self.y_test_noiseless = self.y_test.copy()

        # Add Gaussian noise scaled by the RMS of the noiseless training y.
        if self.noise > 0:
            y_rms = np.sqrt(np.mean(self.y_train**2))
            scale = self.noise * y_rms
            self.y_train += self.rng.normal(loc=0, scale=scale, size=self.y_train.shape)
            self.y_test += self.rng.normal(loc=0, scale=scale, size=self.y_test.shape)
        elif self.noise < 0:
            print('WARNING: Ignoring negative noise value: {}'.format(self.noise))

        # Load default function set
        function_set_path = os.path.join(root, "function_sets.csv")
        function_set_df = pd.read_csv(function_set_path, index_col=0)
        function_set_name = row["function_set"]
        self.function_set = function_set_df.loc[function_set_name].tolist()[0].strip().split(',')

        # Prepare status output
        output_message = '\n-- BUILDING DATASET -----------------\n'
        output_message += 'Benchmark path                 : {}\n'.format(benchmark_path)
        output_message += 'Generated data for benchmark   : {}\n'.format(name)
        output_message += 'Function set path              : {}\n'.format(function_set_path)
        output_message += 'Function set                   : {} --> {}\n'.format(function_set_name, self.function_set)
        if backup and logdir is not None:
            output_message += self.save(logdir)
        output_message += '-------------------------------------\n'
        print(output_message)

    def make_X(self, spec):
        """Creates X values based on specification"""
        features = []
        for i in range(1, self.n_input_var + 1):

            # Hierarchy: "all" --> "x{}".format(i); unknown vars fall back to x1.
            input_var = "x{}".format(i)
            if "all" in spec:
                input_var = "all"
            elif input_var not in spec:
                input_var = "x1"

            if "U" in spec[input_var]:
                # Uniform random sampling: [low, high) with n points.
                low, high, n = spec[input_var]["U"]
                n = int(n * self.dataset_size_multiplier)
                feature = self.rng.uniform(low=low, high=high, size=n)
            elif "E" in spec[input_var]:
                # Evenly spaced grid from start to stop (inclusive).
                start, stop, step = spec[input_var]["E"]
                if step > stop - start:
                    n = step
                else:
                    n = int((stop - start)/step) + 1
                n = int(n * self.dataset_size_multiplier)
                feature = np.linspace(start=start, stop=stop, num=n, endpoint=True)
            else:
                raise ValueError("Did not recognize specification for {}: {}.".format(input_var, spec[input_var]))
            features.append(feature)

        # Do multivariable combinations: grids use the Cartesian product,
        # random samples are simply stacked column-wise.
        if "E" in spec[input_var] and self.n_input_var > 1:
            X = np.array(list(itertools.product(*features)))
        else:
            X = np.column_stack(features)
        return X

    def make_numpy_expr(self, s):
        """Convert a benchmark expression string into a callable f(X) -> y.

        This isn't pretty, but unlike sympy's lambdify, this ensures we use
        our protected functions. Otherwise, some expressions may have large
        error even if the functional form is correct due to the training set
        not using protected functions.
        """
        # Replace function names with protected implementations.
        s = s.replace("ln(", "log(")
        s = s.replace("pi", "np.pi")
        s = s.replace("pow", "np.power")
        for k in function_map.keys():
            s = s.replace(k + '(', "function_map['{}'].function(".format(k))

        # Replace variable names x1..xN with column slices (reversed so that
        # e.g. "x10" is substituted before "x1").
        for i in reversed(range(self.n_input_var)):
            old = "x{}".format(i+1)
            new = "x[:, {}]".format(i)
            s = s.replace(old, new)

        # NOTE: eval on the expression string from the benchmark CSV. The CSV
        # ships with the package and is trusted; do not point this at
        # untrusted benchmark files.
        numpy_expr = lambda x : eval(s)

        return numpy_expr

    def save(self, logdir='./'):
        """Save train and test data as one CSV; always returns a status string.

        Bug fix: previously a bare `except:` swallowed the error details and
        the method implicitly returned None on failure, which crashed
        __init__'s `output_message += self.save(logdir)` when backup=True.
        """
        save_path = os.path.join(logdir,'data_{}_n{:.2f}_d{:.0f}_s{}.csv'.format(
            self.name, self.noise, self.dataset_size_multiplier, self.seed))
        try:
            os.makedirs(logdir, exist_ok=True)
            np.savetxt(
                save_path,
                np.concatenate(
                    (
                        np.hstack((self.X_train, self.y_train[..., np.newaxis])),
                        np.hstack((self.X_test, self.y_test[..., np.newaxis]))
                    ), axis=0),
                delimiter=',', fmt='%1.5f'
            )
            return 'Saved dataset to               : {}\n'.format(save_path)
        except Exception as e:
            message = "WARNING: Could not save dataset: {}".format(e)
            print(message)
            return message + '\n'

    def plot(self, logdir='./'):
        """Plot Dataset with underlying ground truth (1-input benchmarks only)."""
        if self.X_train.shape[1] == 1:
            from matplotlib import pyplot as plt
            save_path = os.path.join(logdir,'plot_{}_n{:.2f}_d{:.0f}_s{}.png'.format(
                self.name, self.noise, self.dataset_size_multiplier, self.seed))

            # Draw ground truth expression over the (first) training domain.
            bounds = list(list(self.train_spec.values())[0].values())[0][:2]
            x = np.linspace(bounds[0], bounds[1], endpoint=True, num=100)
            y = self.numpy_expr(x[:, None])
            plt.plot(x, y, color='red', linestyle='dashed')

            # Draw the actual points
            plt.scatter(self.X_train, self.y_train)

            # Add a title
            plt.title(
                "{} N:{} M:{} S:{}".format(
                    self.name, self.noise, self.dataset_size_multiplier, self.seed),
                fontsize=7)
            try:
                os.makedirs(logdir, exist_ok=True)
                plt.savefig(save_path)
                print('Saved plot to                  : {}'.format(save_path))
            except Exception as e:
                # Bug fix: the bare `except:` printed only the exception class
                # via sys.exc_info()[0]; report the actual message instead.
                print("WARNING: Could not plot dataset: {}".format(e))
            plt.close()
        else:
            print("WARNING: Plotting only supported for 2D datasets.")
@click.command()
@click.argument("benchmark_source", default="benchmarks.csv")
@click.option('--plot', is_flag=True)
@click.option('--save_csv', is_flag=True)
@click.option('--sweep', is_flag=True)
def main(benchmark_source, plot, save_csv, sweep):
    """Plots all benchmark expressions."""
    regression_path = resource_filename("dso.task", "regression/")
    benchmark_path = os.path.join(regression_path, benchmark_source)
    save_dir = os.path.join(regression_path, 'log')
    benchmark_df = pd.read_csv(benchmark_path, encoding="ISO-8859-1")
    # Only these benchmark families are generated here.
    wanted_prefixes = ("Nguyen", "Constant", "Custom")
    for name in benchmark_df["name"].to_list():
        if not name.startswith(wanted_prefixes):
            continue
        # Always build the noiseless dataset first.
        datasets = [BenchmarkDataset(name=name, benchmark_source=benchmark_source)]
        # Optionally sweep all noise level x dataset size combinations
        # (Nguyen benchmarks only).
        if sweep and name.startswith("Nguyen"):
            noises = [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10]
            for noise in noises:
                for multiplier in (1.0, 10.0):
                    datasets.append(
                        BenchmarkDataset(
                            name=name,
                            benchmark_source=benchmark_source,
                            noise=noise,
                            dataset_size_multiplier=multiplier,
                            backup=save_csv,
                            logdir=save_dir))
        # Plot the single-input datasets when requested.
        for dataset in datasets:
            if plot and dataset.X_train.shape[1] == 1:
                dataset.plot(save_dir)


if __name__ == "__main__":
    main()
| 39.065455 | 163 | 0.578609 |
795bd063e45d16ef36efa0c19a79aa5fca148473 | 1,356 | py | Python | tests/tools/cisco_san_fabric_lib.py | akshatha-s13/mdsLib-1 | 339a5fb9606d4cac85055c97a229c690b4438d42 | [
"Apache-2.0"
] | null | null | null | tests/tools/cisco_san_fabric_lib.py | akshatha-s13/mdsLib-1 | 339a5fb9606d4cac85055c97a229c690b4438d42 | [
"Apache-2.0"
] | null | null | null | tests/tools/cisco_san_fabric_lib.py | akshatha-s13/mdsLib-1 | 339a5fb9606d4cac85055c97a229c690b4438d42 | [
"Apache-2.0"
] | 1 | 2020-05-05T06:56:22.000Z | 2020-05-05T06:56:22.000Z | import logging
import pprint

from mdslib._fabric import Fabric

# Log everything (DEBUG) to a file and the important bits (INFO) to the
# console, both using the same format.
logFormatter = logging.Formatter("[%(asctime)s] [%(module)-14.14s] [%(levelname)-5.5s] %(message)s")
log = logging.getLogger()
fileHandler = logging.FileHandler("cisco_san_fabric_lib.log")
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
log.addHandler(fileHandler)
log.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
log.addHandler(consoleHandler)
log.info("Starting fabric discovery...")

# NOTE(review): credentials for the seed switch are hard-coded below (test
# tool). Do not reuse this pattern in production code; read them from the
# environment or a secrets store instead.
seed_ip = '10.126.94.175'
username = 'admin'
password = 'nbv!2345'
port = 8443

# Discover every switch reachable from the seed switch over HTTPS.
# NOTE(review): verify_ssl=False disables TLS certificate validation.
f = Fabric("new")
if f.discover_all_switches_in_fabric(seed_switch_ip=seed_ip,
                                     username=username,
                                     password=password,
                                     connection_type='https',
                                     port=port,
                                     timeout=30,
                                     verify_ssl=False,
                                     discover_npv=True):
    # Discovery succeeded: dump the ip -> switch-object map and versions.
    swlist = f.discovered_switches
    print("List of switch ip and switch object")
    pprint.pprint(swlist)
    print("List of switch ip and switch version")
    for ip, swobj in swlist.items():
        print(ip, swobj.version)
| 30.818182 | 100 | 0.619469 |
795bd106a95bd0876255a2584d2cb8ed3c20874e | 374 | py | Python | factory_boss/errors.py | mariushelf/factory_boss | b9f9c99ea731e2af1c6951f734e20ffe05ba1693 | [
"MIT"
] | 1 | 2021-06-03T12:47:17.000Z | 2021-06-03T12:47:17.000Z | factory_boss/errors.py | mariushelf/factory_boss | b9f9c99ea731e2af1c6951f734e20ffe05ba1693 | [
"MIT"
] | 5 | 2021-05-25T12:46:00.000Z | 2021-05-27T18:32:37.000Z | factory_boss/errors.py | mariushelf/factory_boss | b9f9c99ea731e2af1c6951f734e20ffe05ba1693 | [
"MIT"
] | null | null | null | class FactoryBossError(Exception):
pass
class UndefinedValueError(FactoryBossError):
    """Raised when a value is undefined."""
    pass
class ConfigurationError(FactoryBossError):
    """Raised for an invalid configuration."""
    pass
class InvalidReferenceError(FactoryBossError):
    """Raised when a reference is invalid."""
    pass
class UnresolvedReferenceError(FactoryBossError):
    """A reference has not been resolved yet."""
    pass
class ResolveError(FactoryBossError):
    """Raised when resolution fails."""
    pass
| 14.96 | 49 | 0.759358 |
795bd141430035407a763d5610a6745ba0a19097 | 10,205 | py | Python | ilqr_utils.py | tungnd1705/PC3-pytorch | e1ed5f475da387cb92dd1e3830e7d195562b4b64 | [
"MIT"
] | 19 | 2020-02-18T04:02:09.000Z | 2022-02-27T19:40:14.000Z | ilqr_utils.py | tungnd1705/PC3-pytorch | e1ed5f475da387cb92dd1e3830e7d195562b4b64 | [
"MIT"
] | null | null | null | ilqr_utils.py | tungnd1705/PC3-pytorch | e1ed5f475da387cb92dd1e3830e7d195562b4b64 | [
"MIT"
] | 2 | 2021-07-15T15:04:15.000Z | 2021-11-27T15:24:23.000Z | import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.animation import FuncAnimation, writers
np.random.seed(0)
def cost_dz(R_z, z, z_goal):
    """Gradient of the quadratic latent-state cost w.r.t. z: 2 * R_z @ (z - z_goal)."""
    delta = (z - z_goal)[..., None]
    return np.squeeze(2 * (R_z @ delta))
def cost_du(R_u, u):
    """Gradient of the quadratic control cost w.r.t. u: 2 * R_u @ u, always at least 1-D."""
    grad = 2 * (R_u @ u[..., None])
    return np.atleast_1d(np.squeeze(grad))
def cost_dzz(R_z):
    """Hessian of the latent-state cost w.r.t. z (constant for a quadratic cost)."""
    return R_z * 2
def cost_duu(R_u):
    """Hessian of the control cost w.r.t. u (constant for a quadratic cost)."""
    return R_u * 2
def cost_duz(z, u):
    """Mixed second derivative d2c/(du dz) -- identically zero for this separable cost."""
    return np.zeros((np.shape(u)[-1], np.shape(z)[-1]))
def latent_cost(R_z, R_u, z_seq, z_goal, u_seq):
    """Total quadratic trajectory cost: sum_t (z_t - z*)^T R_z (z_t - z*) + u_t^T R_u u_t."""
    dz = z_seq - z_goal
    state_cost = np.einsum('ti,ij,tj->', dz, R_z, dz)
    control_cost = np.einsum('ti,ij,tj->', u_seq, R_u, u_seq)
    return state_cost + control_cost
def one_step_back(R_z, R_u, z, u, z_goal, A, B, V_prime_next_z, V_prime_next_zz, mu_inv_regulator):
    """One backward-recursion step of iLQR.

    V_prime_next_z / V_prime_next_zz are the first/second derivatives of the
    value function at time t+1; A and B are the derivatives of F(z, u) w.r.t.
    z and u at (z_bar_t, u_bar_t). Returns the feedforward gain k, feedback
    gain K, and the value-function derivatives backed up to time t.
    """
    # First/second-order expansion of the Q function around (z, u).
    Q_z = cost_dz(R_z, z, z_goal) + A.T @ V_prime_next_z
    Q_u = cost_du(R_u, u) + B.T @ V_prime_next_z
    Q_zz = cost_dzz(R_z) + A.T @ V_prime_next_zz @ A
    Q_uz = cost_duz(z, u) + B.T @ V_prime_next_zz @ A
    Q_uu = cost_duu(R_u) + B.T @ V_prime_next_zz @ B

    # Regularize Q_uu so the inverse stays well conditioned, then form gains.
    Q_uu_reg = Q_uu + mu_inv_regulator * np.eye(Q_uu.shape[0])
    Q_uu_inv = np.linalg.inv(Q_uu_reg)
    k = -(Q_uu_inv @ Q_u)
    K = -(Q_uu_inv @ Q_uz)

    # Back up the value-function derivatives to time t.
    V_prime_z = Q_z + Q_uz.T @ k
    V_prime_zz = Q_zz + Q_uz.T @ K
    return k, K, V_prime_z, V_prime_zz
def backward(R_z, R_u, z_seq, u_seq, z_goal, A_seq, B_seq, inv_regulator):
    """iLQR backward pass: return the lists of per-step gains (k, K)."""
    # Terminal boundary condition: the value function equals the final state cost.
    V_z = cost_dz(R_z, z_seq[-1], z_goal)
    V_zz = cost_dzz(R_z)
    k_seq, K_seq = [], []
    for t in reversed(range(len(u_seq))):
        k_t, K_t, V_z, V_zz = one_step_back(
            R_z, R_u, z_seq[t], u_seq[t], z_goal, A_seq[t], B_seq[t], V_z, V_zz, inv_regulator
        )
        k_seq.append(k_t)
        K_seq.append(K_t)
    # Gains were accumulated back-to-front; flip them into time order.
    k_seq.reverse()
    K_seq.reverse()
    return k_seq, K_seq
def forward(z_seq, u_seq, k, K, dynamics, alpha):
    """iLQR forward pass: roll out the latent dynamics under the updated controls.

    Returns the new latent trajectory (len(u_seq) + 1 states) and the new
    control sequence as numpy arrays.
    """
    new_z = [z_seq[0]]
    new_u = []
    for t, u_nom in enumerate(u_seq):
        # Updated control = nominal + scaled feedforward + feedback on deviation.
        u_t = u_nom + alpha * k[t] + K[t] @ (new_z[t] - z_seq[t])
        new_u.append(u_t)
        z_t = torch.from_numpy(new_z[t]).unsqueeze(0)
        a_t = torch.from_numpy(u_t).unsqueeze(0)
        with torch.no_grad():
            z_next = dynamics(z_t, a_t)[0].mean
        new_z.append(z_next.squeeze().numpy())
    return np.array(new_z), np.array(new_u)
def get_x_data(mdp, state, config):
    """Render `state` via the MDP and pack it into the observation tensor layout for the task."""
    frame = mdp.render(state).squeeze()
    task = config["task"]
    if task == "plane":
        # Single frame, flattened to a (1, prod(obs_shape)) double tensor.
        flat = np.prod(config["obs_shape"])
        x_data = torch.from_numpy(frame).double().view(flat).unsqueeze(0)
    elif task in ["swing", "balance"]:
        # Two stacked copies of the same frame, flattened.
        flat = np.prod(config["obs_shape"])
        stacked = np.vstack((frame, frame))
        x_data = torch.from_numpy(stacked).double().view(flat).unsqueeze(0)
    elif task in ["cartpole", "threepole"]:
        # Two-channel 80x80 image, both channels holding the same frame.
        x_data = torch.zeros(size=(2, 80, 80))
        x_data[0, :, :] = torch.from_numpy(frame)
        x_data[1, :, :] = torch.from_numpy(frame)
        x_data = x_data.unsqueeze(0)
    return x_data
def update_horizon_start(mdp, s, u, encoder, config):
    """Advance the true state one step and encode the resulting observation.

    Returns (s_next, z_next) where z_next is the numpy latent code of the new
    observation (frames before/after the transition are stacked for the
    image-based tasks).
    """
    s_next = mdp.transition_function(s, u)
    task = config["task"]
    if task == "plane":
        x_next = get_x_data(mdp, s_next, config)
    elif task in ["swing", "balance"]:
        # Flatten the pre/post-transition frames into one observation vector.
        frame = mdp.render(s).squeeze()
        frame_next = mdp.render(s_next).squeeze()
        pair = np.vstack((frame, frame_next))
        flat = np.prod(config["obs_shape"])
        x_next = torch.from_numpy(pair).view(flat).unsqueeze(0).double()
    elif task in ["cartpole", "threepole"]:
        # Two-channel image: previous frame and next frame.
        frame = mdp.render(s).squeeze()
        frame_next = mdp.render(s_next).squeeze()
        x_next = torch.zeros(size=config["obs_shape"])
        x_next[0, :, :] = torch.from_numpy(frame)
        x_next[1, :, :] = torch.from_numpy(frame_next)
        x_next = x_next.unsqueeze(0)
    with torch.no_grad():
        z_next = encoder(x_next)
    return s_next, z_next.squeeze().numpy()
def random_uniform_actions(mdp, plan_len):
    """Sample a length-`plan_len` sequence of uniformly random actions from the MDP."""
    return np.array([mdp.sample_random_action() for _ in range(plan_len)])
def random_extreme_actions(mdp, plan_len):
    """Sample a length-`plan_len` sequence of extreme (bang-bang) actions from the MDP."""
    return np.array([mdp.sample_extreme_action() for _ in range(plan_len)])
def random_actions_trajs(mdp, num_uniform, num_extreme, plan_len):
    """Build `num_uniform` uniform plus `num_extreme` extreme random action trajectories."""
    trajs = [random_uniform_actions(mdp, plan_len) for _ in range(num_uniform)]
    trajs.extend(random_extreme_actions(mdp, plan_len) for _ in range(num_extreme))
    return trajs
def refresh_actions_trajs(actions_trajs, traj_opt_id, mdp, length, num_uniform, num_extreme):
    """Refresh the candidate trajectories between planning steps (in place).

    The trajectory that was executed (traj_opt_id) is shifted left by one
    step, padding with its last action if it falls below `length`; every
    other candidate is resampled from scratch (uniform for indices below
    num_uniform, extreme otherwise).
    """
    for idx, traj in enumerate(actions_trajs):
        if idx == traj_opt_id:
            shifted = traj[1:]
            if len(shifted) < length:
                # Pad by duplicating the last remaining action.
                shifted = np.append(shifted, shifted[-1].reshape(1, -1), axis=0)
            actions_trajs[idx] = shifted
        elif idx < num_uniform:
            actions_trajs[idx] = random_uniform_actions(mdp, length)
        else:
            actions_trajs[idx] = random_extreme_actions(mdp, length)
    return actions_trajs
def update_seq_act(z_seq, z_start, u_seq, k, K, dynamics):
    """Re-plan the control sequence from z_start with gains (k, K), rolling the latent dynamics forward."""
    z_cur = z_start
    updated = []
    for t, u_nom in enumerate(u_seq):
        u_t = u_nom + k[t] + K[t] @ (z_cur - z_seq[t])
        updated.append(u_t)
        zt = torch.from_numpy(z_cur).view(1, -1)
        ut = torch.from_numpy(u_t).view(1, -1)
        with torch.no_grad():
            z_cur = dynamics(zt, ut)[0].mean.squeeze().numpy()
    return np.array(updated)
def compute_latent_traj(z_start, u_seq, dynamics):
    """Roll the latent dynamics forward from z_start under u_seq.

    Returns a list of len(u_seq) + 1 latent states as numpy arrays.
    """
    traj = [z_start]
    for u in u_seq:
        z_t = torch.from_numpy(traj[-1]).view(1, -1).double()
        u_t = torch.from_numpy(u).view(1, -1).double()
        with torch.no_grad():
            nxt = dynamics(z_t, u_t)[0].mean
        traj.append(nxt.squeeze().numpy())
    return traj
def jacobian(dynamics, z, u):
    """
    compute the jacobian of F(z,u) w.r.t z, u

    `z` and `u` are 1-D numpy arrays; `dynamics` is the latent transition
    model, whose first return value is assumed distribution-like with a
    `.mean` tensor (as used below). Returns (A, B) = (dF/dz, dF/du) as
    numpy arrays of shapes (z_dim, z_dim) and (z_dim, u_dim).
    """
    z_dim = z.shape[0]
    u_dim = u.shape[0]
    z_tensor = torch.from_numpy(z).view(1, -1).double()
    u_tensor = torch.from_numpy(u).view(1, -1).double()
    if dynamics.armotized:
        # The model predicts its own linearization; just evaluate and reshape it.
        _, A, B = dynamics(z_tensor, u_tensor)
        return A.squeeze().view(z_dim, z_dim).numpy(), B.squeeze().view(z_dim, u_dim).numpy()
    # Batching trick: repeat (z, u) z_dim times so that one autograd call with
    # an identity grad_outputs matrix produces every Jacobian row at once
    # (row i is d z_next[i] / d input, selected by row i of the identity).
    z_tensor, u_tensor = z_tensor.squeeze().repeat(z_dim, 1), u_tensor.squeeze().repeat(z_dim, 1)
    z_tensor = z_tensor.detach().requires_grad_(True)
    u_tensor = u_tensor.detach().requires_grad_(True)
    z_next = dynamics(z_tensor, u_tensor)[0].mean
    grad_inp = torch.eye(z_dim)
    A, B = torch.autograd.grad(z_next, [z_tensor, u_tensor], [grad_inp, grad_inp])
    return A.numpy(), B.numpy()
def seq_jacobian(dynamics, z_seq, u_seq):
    """Linearize the dynamics along a trajectory: (A_t, B_t) for every (z_t, u_t) pair."""
    pairs = [jacobian(dynamics, z_seq[t], u_seq[t]) for t in range(len(u_seq))]
    A_seq = [A for A, _ in pairs]
    B_seq = [B for _, B in pairs]
    return A_seq, B_seq
def save_traj(images, image_goal, gif_path, task):
    """Save a rollout as an animated GIF: each frame on the left, the goal image on the right.

    Parameters
    ----------
    images : sequence of 2-D arrays, grayscale frames with values in [0, 1]
    image_goal : 2-D array, static goal frame for the right panel
    gif_path : str, output file path (written via the imagemagick writer)
    task : str, task name; only used to choose the frame rate
    """
    fig, aa = plt.subplots(1, 2)
    m1 = aa[0].matshow(images[0], cmap=plt.cm.gray, vmin=0.0, vmax=1.0)
    aa[0].set_title("Time step 0")
    aa[0].set_yticklabels([])
    aa[0].set_xticklabels([])
    m2 = aa[1].matshow(image_goal, cmap=plt.cm.gray, vmin=0.0, vmax=1.0)
    aa[1].set_title("goal")
    aa[1].set_yticklabels([])
    aa[1].set_xticklabels([])
    fig.tight_layout()

    def updatemat2(t):
        # Swap in frame t on the left; the goal panel stays constant.
        m1.set_data(images[t])
        aa[0].set_title("Time step " + str(t))
        m2.set_data(image_goal)
        return m1, m2

    frames = len(images)
    # Slow tasks get a low frame rate so individual steps remain visible.
    if task in ["plane", "cartpole"]:
        fps = 2
    else:
        fps = 20

    anim = FuncAnimation(fig, updatemat2, frames=frames, interval=200, blit=True, repeat=True)
    Writer = writers["imagemagick"]  # animation.writers.avail
    writer = Writer(fps=fps, metadata=dict(artist="Me"), bitrate=1800)
    anim.save(gif_path, writer=writer)

    # Bug fix: plt.clf()/plt.cla() only cleared the *current* figure and left
    # the figure object alive, leaking memory across repeated calls. Close
    # this function's figure explicitly instead.
    plt.close(fig)
| 35.933099 | 119 | 0.64243 |
795bd16fb8d48a598a9cf03a5e48ba8122975729 | 118,133 | py | Python | AtomPos/tifffile.py | rhambach/TEMimage | 436c9d8912db481185d09d9d70c4827c87cbd8a5 | [
"BSD-3-Clause"
] | 2 | 2020-10-05T06:12:43.000Z | 2022-03-15T04:15:16.000Z | AtomPos/tifffile.py | rhambach/TEMimage | 436c9d8912db481185d09d9d70c4827c87cbd8a5 | [
"BSD-3-Clause"
] | null | null | null | AtomPos/tifffile.py | rhambach/TEMimage | 436c9d8912db481185d09d9d70c4827c87cbd8a5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
#
# Slightly modified version of original tifffile from Chritoph Gohlke.
#
# Copyright (c) 2013, rhambach.
# This file is part of the FitAtomPos package and released
# under the MIT-Licence. See LICENCE file for details.
#
# Copyright (c) 2008-2012, Christoph Gohlke
# Copyright (c) 2008-2012, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
ImageJ, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG/CCITT compressed image data or EXIF/IPTC/GPS/XMP
meta-data is not implemented. Only primary info records are read for STK,
FluoView, and NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SEQ, GEL,
and OME-TIFF, are custom extensions defined by MetaMorph, Carl Zeiss
MicroImaging, Olympus, Media Cybernetics, Molecular Dynamics, and the Open
Microscopy Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2012.08.14
Requirements
------------
* `CPython 2.7 or 3.2 <http://www.python.org>`__
* `Numpy 1.6.2 <http://numpy.scipy.org>`__
* `Matplotlib 1.1.1 <http://matplotlib.sourceforge.net>`__
(optional for plotting)
* `tifffile.c 2012.08.03 <http://www.lfd.uci.edu/~gohlke/>`__
(optional for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis, for a bug fix and some read_cz_lsm functions.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) TiffDecoder.java
http://rsbweb.nih.gov/ij/developer/source/ij/io/TiffDecoder.java.html
(8) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
Examples
--------
>>> data = numpy.random.rand(301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> assert numpy.all(image == data)
>>> tif = TiffFile('test.tif')
>>> images = tif.asarray()
>>> image0 = tif[0].asarray()
>>> for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
... if page.is_rgb: pass
... if page.is_palette:
... t = page.color_map
... if page.is_stk:
... t = page.mm_uic_tags.number_planes
... if page.is_lsm:
... t = page.cz_lsm_info
>>> tif.close()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import struct
import warnings
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as ElementTree
import numpy
__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence']
def imsave(filename, data, photometric=None, planarconfig=None,
resolution=None, description=None, software='tifffile.py',
byteorder=None, bigtiff=False):
"""Write image data to TIFF file.
Image data are written uncompressed in one stripe per plane.
Dimensions larger than 2 or 3 (depending on photometric mode and
planar configuration) are flattened and saved as separate pages.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image height,
width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
software : str
Name of the software used to create the image.
Saved with the first page only.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
bigtiff : bool
If True the BigTIFF format is used.
By default the standard TIFF format is used for data less than 2040 MB.
Examples
--------
>>> data = numpy.random.rand(10, 3, 301, 219)
>>> imsave('temp.tif', data)
"""
assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb'))
assert(planarconfig in (None, 'contig', 'planar'))
assert(byteorder in (None, '<', '>'))
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
if not bigtiff and data.size * data.dtype.itemsize < 2040*2**20:
bigtiff = False
offset_size = 4
tag_size = 12
numtag_format = 'H'
offset_format = 'I'
val_format = '4s'
else:
bigtiff = True
offset_size = 8
tag_size = 20
numtag_format = 'Q'
offset_format = 'Q'
val_format = '8s'
# unify shape of data
samplesperpixel = 1
extrasamples = 0
if photometric is None:
if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)):
photometric = 'rgb'
else:
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if planarconfig is None:
planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig'
if planarconfig == 'contig':
if shape[-1] not in (3, 4):
raise ValueError("not a contiguous RGB(A) image")
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
if shape[-3] not in (3, 4):
raise ValueError("not a planar RGB(A) image")
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
if samplesperpixel == 4:
extrasamples = 1
elif planarconfig and len(shape) > 2:
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
data = data.reshape((-1, 1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else lambda x: bytes(x, 'ascii')
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = []
tag_data = []
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def tag(name, dtype, number, value, offset=[0]):
# append tag binary string to tags list
# append (offset, value as binary string) to tag_data list
# increment offset by tag_size
if dtype == 's':
value = bytestr(value) + b'\0'
number = len(value)
value = (value, )
t = [pack('HH', tifftags[name], tifftypes[dtype]),
pack(offset_format, number)]
if len(dtype) > 1:
number *= int(dtype[:-1])
dtype = dtype[-1]
if number == 1:
if isinstance(value, (tuple, list)):
value = value[0]
t.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * number <= offset_size:
t.append(pack(val_format, pack(str(number)+dtype, *value)))
else:
t.append(pack(offset_format, 0))
tag_data.append((offset[0] + offset_size + 4,
pack(str(number)+dtype, *value)))
tags.append(b''.join(t))
offset[0] += tag_size
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if software:
tag('software', 's', 0, software)
if description:
tag('image_description', 's', 0, description)
elif shape != data_shape:
tag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)))
tag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"))
# write previous tags only once
writeonce = (len(tags), len(tag_data)) if shape[0] > 1 else None
tag('compression', 'H', 1, 1)
tag('orientation', 'H', 1, 1)
tag('image_width', 'I', 1, shape[-2])
tag('image_length', 'I', 1, shape[-3])
tag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
tag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
tag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
tag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
tag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' else 2)
tag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
tag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
tag('extra_samples', 'H', 1, 1) # alpha channel
else:
tag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
tag('x_resolution', '2I', 1, rational(resolution[0]))
tag('y_resolution', '2I', 1, rational(resolution[1]))
tag('resolution_unit', 'H', 1, 2)
tag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
tag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
# strip_offsets must be the last tag; will be updated later
tag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
fd = open(filename, 'wb')
seek = fd.seek
tell = fd.tell
def write(arg, *args):
fd.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for i in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write tags
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next ifd
# write extra tag data and update pointers
for off, dat in tag_data:
pos = tell()
seek(tag_offset + off)
write(offset_format, pos)
seek(pos)
write(dat)
# update strip_offsets
pos = tell()
if len(strip_byte_counts) == 1:
seek(ifd_offset - offset_size)
write(offset_format, pos)
else:
seek(pos - offset_size*shape[1])
strip_offset = pos
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
seek(pos)
# write data
data[i].tofile(fd) # if this fails, try update Python and numpy
fd.flush()
# remove tags that should be written only once
if writeonce:
tags = tags[writeonce[0]:]
d = writeonce[0] * tag_size
tag_data = [(o-d, v) for (o, v) in tag_data[writeonce[1]:]]
writeonce = None
fd.close()
def imread(files, *args, **kwargs):
    """Return image data from TIFF file(s) as numpy array.

    The first image series is returned if no arguments are provided.

    Parameters
    ----------
    files : str or list
        File name, glob pattern, or list of file names.
    key : int, slice, or sequence of page indices
        Defines which pages to return as array.
    series : int
        Defines which series of pages in file to return as array.
    multifile : bool
        If True (default), OME-TIFF data may include pages from multiple files.
    pattern : str
        Regular expression pattern that matches axes names and indices in
        file names.

    Examples
    --------
    >>> im = imread('test.tif', 0)
    >>> im.shape
    (256, 256, 4)
    >>> ims = imread(['test.tif', 'test.tif'])
    >>> ims.shape
    (2, 256, 256, 4)

    """
    # 'basestring' exists only in Python 2; fall back to 'str' on Python 3
    # so string arguments are recognized on both versions
    try:
        string_types = basestring
    except NameError:
        string_types = str
    # split keyword arguments between TiffFile and TiffSequence
    kwargs_file = {'multifile': kwargs.pop('multifile', True)}
    kwargs_seq = {}
    if 'pattern' in kwargs:
        kwargs_seq['pattern'] = kwargs.pop('pattern')
    # expand glob patterns to a list of file names
    if isinstance(files, string_types) and any(i in files for i in '?*'):
        files = glob.glob(files)
        if not files:
            raise ValueError('no files found')
        if len(files) == 1:
            files = files[0]
    if isinstance(files, string_types):
        with TiffFile(files, **kwargs_file) as tif:
            return tif.asarray(*args, **kwargs)
    else:
        with TiffSequence(files, **kwargs_seq) as imseq:
            return imseq.asarray(*args, **kwargs)
class lazyattr(object):
    """Non-data descriptor that caches the result of the first access.

    The wrapped function is called at most once per instance; its return
    value is then stored as an instance attribute of the same name, which
    shadows the descriptor on subsequent lookups.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        # class-level access returns the descriptor object itself
        if instance is None:
            return self
        value = self.func(instance)
        if value is NotImplemented:
            # fall back to the attribute defined on a base class
            return getattr(super(owner, instance), self.func.__name__)
        setattr(instance, self.func.__name__, value)
        return value
class TiffFile(object):
    """Read image and meta-data from TIFF, STK, LSM, and FluoView files.

    TiffFile instances must be closed using the close method.

    Attributes
    ----------
    pages : list
        All TIFF pages in file.
    series : list of Records(shape, dtype, axes, TIFFpages)
        TIFF pages with compatible shapes and types.

    All attributes are read-only.

    Examples
    --------
    >>> tif = TiffFile('test.tif')
    >>> try:
    ...     images = tif.asarray()
    ... finally:
    ...     tif.close()

    """
    def __init__(self, filename, multifile=False):
        """Initialize instance from file.

        Parameters
        ----------
        filename : str
            Name of file to open.
        multifile : bool
            If True, series may include pages from multiple files.

        """
        filename = os.path.abspath(filename)
        self._fd = open(filename, 'rb')
        self.fname = os.path.basename(filename)
        self.fpath = os.path.dirname(filename)
        self._tiffs = {self.fname: self}  # cache of TIFFfiles
        self.offset_size = None
        self.pages = []
        self._multifile = bool(multifile)
        try:
            self._fromfile()
        except Exception:
            # release the file handle if parsing the header/pages fails
            self._fd.close()
            raise

    def close(self):
        """Close open file handle(s)."""
        # guard against partially initialized instances; note the attribute
        # is '_tiffs' (checking 'tiffs' made this method a silent no-op and
        # leaked every file handle)
        if not hasattr(self, '_tiffs'):
            return
        for tif in self._tiffs.values():
            if tif._fd:
                tif._fd.close()
                tif._fd = None

    def _fromfile(self):
        """Read TIFF header and all page records from file."""
        self._fd.seek(0)
        try:
            self.byteorder = {b'II': '<', b'MM': '>'}[self._fd.read(2)]
        except KeyError:
            raise ValueError("not a valid TIFF file")
        version = struct.unpack(self.byteorder+'H', self._fd.read(2))[0]
        if version == 43:  # BigTiff
            self.offset_size, zero = struct.unpack(self.byteorder+'HH',
                                                   self._fd.read(4))
            if zero or self.offset_size != 8:
                raise ValueError("not a valid BigTIFF file")
        elif version == 42:
            self.offset_size = 4
        else:
            raise ValueError("not a TIFF file")
        self.pages = []
        while True:
            try:
                page = TiffPage(self)
                self.pages.append(page)
            except StopIteration:  # raised when the next-IFD offset is 0
                break
        if not self.pages:
            raise ValueError("empty TIFF file")

    @lazyattr
    def series(self):
        """Return series of TiffPage with compatible shape and properties."""
        series = []
        if self.is_ome:
            series = self._omeseries()
        elif self.is_fluoview:
            # map FluoView dimension names to single-letter axes codes
            dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
                    b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
                    b'EVENT': 'V', b'EXPOSURE': 'L'}
            mmhd = list(reversed(self.pages[0].mm_header.dimensions))
            series = [Record(
                axes=''.join(dims.get(i[0].strip().upper(), 'O')
                             for i in mmhd if i[1] > 1),
                shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
                pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))]
        elif self.is_lsm:
            lsmi = self.pages[0].cz_lsm_info
            axes = CZ_SCAN_TYPES[lsmi.scan_type]
            if self.pages[0].is_rgb:
                axes = axes.replace('C', '').replace('XY', 'XYC')
            axes = axes[::-1]
            shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
            pages = [p for p in self.pages if not p.is_reduced]
            series = [Record(axes=axes, shape=shape, pages=pages,
                             dtype=numpy.dtype(pages[0].dtype))]
            if len(pages) != len(self.pages):  # reduced RGB pages
                pages = [p for p in self.pages if p.is_reduced]
                cp = 1
                i = 0
                while cp < len(pages) and i < len(shape)-2:
                    cp *= shape[i]
                    i += 1
                shape = shape[:i] + list(pages[0].shape)
                axes = axes[:i] + 'CYX'
                series.append(Record(axes=axes, shape=shape, pages=pages,
                                     dtype=numpy.dtype(pages[0].dtype)))
        elif self.is_imagej:
            # assemble shape/axes from the consolidated ImageJ tags
            shape = []
            axes = []
            ij = self.pages[0].imagej_tags
            if 'frames' in ij:
                shape.append(ij['frames'])
                axes.append('T')
            if 'slices' in ij:
                shape.append(ij['slices'])
                axes.append('Z')
            if 'channels' in ij and not self.is_rgb:
                shape.append(ij['channels'])
                axes.append('C')
            remain = len(self.pages) // (numpy.prod(shape) if shape else 1)
            if remain > 1:
                shape.append(remain)
                axes.append('I')
            shape.extend(self.pages[0].shape)
            axes.extend(self.pages[0].axes)
            series = [Record(pages=self.pages, shape=shape, axes=axes,
                             dtype=numpy.dtype(self.pages[0].dtype))]
        elif self.is_nih:
            series = [Record(pages=self.pages,
                             shape=(len(self.pages),) + self.pages[0].shape,
                             axes='I' + self.pages[0].axes,
                             dtype=numpy.dtype(self.pages[0].dtype))]
        elif self.pages[0].is_shaped:
            # original array shape is encoded as "shape=(...)" in the
            # image_description tag written by imsave
            shape = self.pages[0].tags['image_description'].value[7:-1]
            shape = tuple(int(i) for i in shape.split(b','))
            series = [Record(pages=self.pages, shape=shape,
                             axes='O' * len(shape),
                             dtype=numpy.dtype(self.pages[0].dtype))]
        if not series:
            # fallback: group pages by shape, axes and decompressibility
            shapes = []
            pages = {}
            for page in self.pages:
                if not page.shape:
                    continue
                shape = page.shape + (page.axes,
                                      page.compression in TIFF_DECOMPESSORS)
                if shape not in pages:
                    shapes.append(shape)
                    pages[shape] = [page]
                else:
                    pages[shape].append(page)
            series = [Record(pages=pages[s],
                             axes=(('I' + s[-2])
                                   if len(pages[s]) > 1 else s[-2]),
                             dtype=numpy.dtype(pages[s][0].dtype),
                             shape=((len(pages[s]), ) + s[:-2]
                                    if len(pages[s]) > 1 else s[:-2]))
                      for s in shapes]
        return series

    def asarray(self, key=None, series=None, **kwargs):
        """Return image data of multiple TIFF pages as numpy array.

        By default the first image series is returned.

        Parameters
        ----------
        key : int, slice, or sequence of page indices
            Defines which pages to return as array.
        series : int
            Defines which series of pages to return as array.

        """
        if key is None and series is None:
            series = 0
        if series is not None:
            pages = self.series[series].pages
        else:
            pages = self.pages
        if key is None:
            pass
        elif isinstance(key, int):
            pages = [pages[key]]
        elif isinstance(key, slice):
            pages = pages[key]
        elif isinstance(key, collections.Iterable):
            # NOTE(review): collections.Iterable is valid on the targeted
            # Python 2.7/3.2; it moved to collections.abc in later versions
            pages = [pages[k] for k in key]
        else:
            raise TypeError("key must be an int, slice, or sequence")
        if len(pages) == 1:
            return pages[0].asarray(**kwargs)
        elif self.is_nih:
            result = numpy.vstack(p.asarray(colormapped=False,
                                            squeeze=False, **kwargs)
                                  for p in pages)
            if pages[0].is_palette:
                # apply the shared NIH color map to the stacked result
                result = numpy.take(pages[0].color_map, result, axis=1)
                result = numpy.swapaxes(result, 0, 1)
        else:
            if self.is_ome and any(p is None for p in pages):
                # some pages of the OME series are missing; fill with zeros
                firstpage = next(p for p in pages if p)
                nopage = numpy.zeros_like(
                    firstpage.asarray(**kwargs))
            result = numpy.vstack((p.asarray(**kwargs) if p else nopage)
                                  for p in pages)
            if key is None:
                try:
                    result.shape = self.series[series].shape
                except ValueError:
                    warnings.warn("failed to reshape %s to %s" % (
                        result.shape, self.series[series].shape))
                    result.shape = (-1,) + pages[0].shape
            else:
                result.shape = (-1,) + pages[0].shape
        return result

    def _omeseries(self):
        """Return image series in OME-TIFF file(s)."""
        root = ElementTree.XML(self.pages[0].tags['image_description'].value)
        uuid = root.attrib.get('UUID', None)
        self._tiffs = {uuid: self}
        modulo = {}
        result = []
        for element in root:
            if element.tag.endswith('BinaryOnly'):
                warnings.warn("not an OME-TIFF master file")
                break
            if element.tag.endswith('StructuredAnnotations'):
                # collect modulo-along annotations that split axes
                for annot in element:
                    if not annot.attrib.get('Namespace',
                                            '').endswith('modulo'):
                        continue
                    for value in annot:
                        for modul in value:
                            for along in modul:
                                if not along.tag[:-1].endswith('Along'):
                                    continue
                                axis = along.tag[-1]
                                newaxis = along.attrib.get('Type', 'other')
                                newaxis = AXES_LABELS[newaxis]
                                if 'Start' in along.attrib:
                                    labels = range(
                                        int(along.attrib['Start']),
                                        int(along.attrib['End']) + 1,
                                        int(along.attrib.get('Step', 1)))
                                else:
                                    labels = [label.text for label in along
                                              if label.tag.endswith('Label')]
                                modulo[axis] = (newaxis, labels)
            if not element.tag.endswith('Image'):
                continue
            for pixels in element:
                if not pixels.tag.endswith('Pixels'):
                    continue
                atr = pixels.attrib
                axes = "".join(reversed(atr['DimensionOrder']))
                shape = list(int(atr['Size'+ax]) for ax in axes)
                size = numpy.prod(shape[:-2])
                ifds = [None] * size
                for data in pixels:
                    if not data.tag.endswith('TiffData'):
                        continue
                    atr = data.attrib
                    ifd = int(atr.get('IFD', 0))
                    num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
                    num = int(atr.get('PlaneCount', num))
                    idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
                    idx = numpy.ravel_multi_index(idx, shape[:-2])
                    # 'uuid' here shadows the outer uuid on purpose: it is
                    # the UUID element of this TiffData, if any
                    for uuid in data:
                        if uuid.tag.endswith('UUID'):
                            if uuid.text not in self._tiffs:
                                if not self._multifile:
                                    # abort reading multi file OME series
                                    return []
                                fn = uuid.attrib['FileName']
                                try:
                                    tf = TiffFile(os.path.join(self.fpath,
                                                               fn))
                                except (IOError, ValueError):
                                    warnings.warn("failed to read %s" % fn)
                                    break
                                self._tiffs[uuid.text] = tf
                            pages = self._tiffs[uuid.text].pages
                            try:
                                for i in range(num if num else len(pages)):
                                    ifds[idx + i] = pages[ifd + i]
                            except IndexError:
                                warnings.warn("ome-xml: index out of range")
                            break
                    else:
                        # no UUID element: planes are in this file
                        pages = self.pages
                        try:
                            for i in range(num if num else len(pages)):
                                ifds[idx + i] = pages[ifd + i]
                        except IndexError:
                            warnings.warn("ome-xml: index out of range")
                result.append(Record(axes=axes, shape=shape, pages=ifds,
                                     dtype=numpy.dtype(ifds[0].dtype)))
        # apply modulo annotations: rename or split the affected axes
        for record in result:
            for axis, (newaxis, labels) in modulo.items():
                i = record.axes.index(axis)
                size = len(labels)
                if record.shape[i] == size:
                    record.axes = record.axes.replace(axis, newaxis, 1)
                else:
                    record.shape[i] //= size
                    record.shape.insert(i+1, size)
                    record.axes = record.axes.replace(axis, axis+newaxis, 1)
        return result

    def __len__(self):
        """Return number of image pages in file."""
        return len(self.pages)

    def __getitem__(self, key):
        """Return specified page."""
        return self.pages[key]

    def __iter__(self):
        """Return iterator over pages."""
        return iter(self.pages)

    def __str__(self):
        """Return string containing information about file."""
        result = [self.fname.capitalize(),
                  "%.2f MB" % (self.fstat[6] / 1048576),  # st_size
                  {'<': 'little endian', '>': 'big endian'}[self.byteorder]]
        if self.is_bigtiff:
            result.append("bigtiff")
        if len(self.pages) > 1:
            result.append("%i pages" % len(self.pages))
        if len(self.series) > 1:
            result.append("%i series" % len(self.series))
        if len(self._tiffs) > 1:
            result.append("%i files" % (len(self._tiffs)))
        return ", ".join(result)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    @lazyattr
    def fstat(self):
        """Return stat result of the open file."""
        return os.fstat(self._fd.fileno())

    @lazyattr
    def is_bigtiff(self):
        """True if file uses the BigTIFF format."""
        return self.offset_size != 4

    @lazyattr
    def is_rgb(self):
        """True if all pages contain RGB images."""
        return all(p.is_rgb for p in self.pages)

    @lazyattr
    def is_palette(self):
        """True if all pages contain palette-colored images."""
        return all(p.is_palette for p in self.pages)

    @lazyattr
    def is_mdgel(self):
        """True if any page contains MD-GEL tags."""
        return any(p.is_mdgel for p in self.pages)

    @lazyattr
    def is_mediacy(self):
        """True if any page contains Media Cybernetics tags."""
        return any(p.is_mediacy for p in self.pages)

    @lazyattr
    def is_stk(self):
        """True if all pages contain MetaMorph STK tags."""
        return all(p.is_stk for p in self.pages)

    @lazyattr
    def is_lsm(self):
        """True if first page contains LSM CZ_LSM_INFO tag."""
        return self.pages[0].is_lsm

    @lazyattr
    def is_imagej(self):
        """True if first page contains ImageJ tags."""
        return self.pages[0].is_imagej

    @lazyattr
    def is_nih(self):
        """True if first page contains NIH image header."""
        return self.pages[0].is_nih

    @lazyattr
    def is_fluoview(self):
        """True if first page contains FluoView tags."""
        return self.pages[0].is_fluoview

    @lazyattr
    def is_ome(self):
        """True if first page contains OME-XML description."""
        return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'P' plane, 'I' image series,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'F' phase, 'H' lifetime,
'L' exposure, 'V' event, 'O' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table if exists.
mm_uic_tags: Record(dict)
Consolidated MetaMorph mm_uic# tags, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and meta_data tags, if exists.
All attributes are read-only.
"""
    def __init__(self, parent):
        """Initialize instance from file.

        Parameters
        ----------
        parent : TiffFile
            File object this page belongs to; provides the open file
            handle, byte order, and offset size.

        """
        self.parent = parent
        self.index = len(parent.pages)  # position of this page in the file
        self.shape = self._shape = ()
        self.dtype = self._dtype = None
        self.axes = ""
        self.tags = TiffTags()
        self._fromfile()  # read the IFD structure and its tags
        self._process_tags()  # validate tags; derive shape/dtype/axes
    def _fromfile(self):
        """Read TIFF IFD structure and its tags from file.

        File cursor must be at storage position of IFD offset and is left at
        offset to next IFD.

        Raises StopIteration if offset (first bytes read) is 0, i.e. there
        is no further IFD in the file.

        """
        fd = self.parent._fd
        byteorder = self.parent.byteorder
        offset_size = self.parent.offset_size
        # IFD offsets are 4 bytes in classic TIFF, 8 bytes in BigTIFF
        fmt = {4: 'I', 8: 'Q'}[offset_size]
        offset = struct.unpack(byteorder + fmt, fd.read(offset_size))[0]
        if not offset:
            raise StopIteration()
        # read standard tags
        tags = self.tags
        fd.seek(offset)
        # tag count field: 2 bytes in classic TIFF, 8 bytes in BigTIFF
        fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
        try:
            numtags = struct.unpack(byteorder + fmt, fd.read(size))[0]
        except Exception:
            warnings.warn("corrupted page list")
            raise StopIteration()
        for _ in range(numtags):
            # TiffTag reads itself from the current file position
            tag = TiffTag(self.parent)
            tags[tag.name] = tag
        # read LSM info subrecords
        if self.is_lsm:
            pos = fd.tell()
            for name, reader in CZ_LSM_INFO_READERS.items():
                try:
                    offset = self.cz_lsm_info['offset_'+name]
                except KeyError:
                    continue
                if not offset:
                    continue
                fd.seek(offset)
                try:
                    setattr(self, 'cz_lsm_'+name, reader(fd, byteorder))
                except ValueError:
                    # best effort: skip subrecords that fail to parse
                    pass
            fd.seek(pos)  # restore cursor for the next-IFD offset read
    def _process_tags(self):
        """Validate standard tags and initialize attributes.

        Raise ValueError if tag values are not supported.

        """
        tags = self.tags
        # apply defaults and value validators from the TIFF_TAGS registry
        for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
            if not (name in tags or default is None):
                tags[name] = TiffTag(code, dtype=dtype, count=count,
                                     value=default, name=name)
            if name in tags and validate:
                try:
                    if tags[name].count == 1:
                        setattr(self, name, validate[tags[name].value])
                    else:
                        setattr(self, name, tuple(
                            validate[value] for value in tags[name].value))
                except KeyError:
                    raise ValueError("%s.value (%s) not supported" %
                                     (name, tags[name].value))
        # collapse per-sample bits_per_sample to a scalar when all equal
        tag = tags['bits_per_sample']
        if tag.count == 1:
            self.bits_per_sample = tag.value
        else:
            value = tag.value[:self.samples_per_pixel]
            if any((v-value[0] for v in value)):
                self.bits_per_sample = value
            else:
                self.bits_per_sample = value[0]
        # collapse per-sample sample_format likewise
        tag = tags['sample_format']
        if tag.count == 1:
            self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
        else:
            value = tag.value[:self.samples_per_pixel]
            if any((v-value[0] for v in value)):
                self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
            else:
                self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
        if not 'photometric' in self.tags:
            self.photometric = None
        if 'image_length' in self.tags:
            # ceil(image_length / rows_per_strip)
            self.strips_per_image = int(math.floor(float(self.image_length +
                self.rows_per_strip - 1) / self.rows_per_strip))
        else:
            self.strips_per_image = 0
        # numpy dtype string for one sample; None if format unsupported
        key = (self.sample_format, self.bits_per_sample)
        self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
        if self.is_imagej:
            # consolidate imagej meta data
            adict = imagej_description(tags['image_description'].value)
            try:
                adict.update(imagej_meta_data(
                    tags['imagej_meta_data'].value,
                    tags['imagej_byte_counts'].value,
                    self.parent.byteorder))
            except Exception:
                # meta data block is optional; ignore parse failures
                pass
            self.imagej_tags = Record(adict)
        # derive the normalized 5D _shape, public shape, and axes labels
        if not 'image_length' in self.tags or not 'image_width' in self.tags:
            # some GEL file pages are missing image data
            self.image_length = 0
            self.image_width = 0
            self._shape = ()
            self.shape = ()
            self.axes = ''
        elif self.is_stk:
            # consolidate mm_uci tags
            planes = tags['mm_uic2'].count
            self.mm_uic_tags = Record(tags['mm_uic2'].value)
            for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'):
                if key in tags:
                    self.mm_uic_tags.update(tags[key].value)
            if self.planar_configuration == 'contig':
                self._shape = (planes, 1, self.image_length,
                               self.image_width, self.samples_per_pixel)
                self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4))
                self.axes = 'PYXS'
            else:
                self._shape = (planes, self.samples_per_pixel,
                               self.image_length, self.image_width, 1)
                self.shape = self._shape[:4]
                self.axes = 'PSYX'
        elif self.is_palette:
            self.dtype = self.tags['color_map'].dtype[1]
            self.color_map = numpy.array(self.color_map, self.dtype)
            dmax = self.color_map.max()
            if dmax < 256:
                self.dtype = numpy.uint8
                self.color_map = self.color_map.astype(self.dtype)
            #else:
            #    self.dtype = numpy.uint8
            #    self.color_map >>= 8
            #    self.color_map = self.color_map.astype(self.dtype)
            self.color_map.shape = (3, -1)
            self._shape = (1, 1, self.image_length, self.image_width, 1)
            if self.color_map.shape[1] >= 2**self.bits_per_sample:
                # complete palette: result will be color-mapped to 3 samples
                self.shape = (3, self.image_length, self.image_width)
                self.axes = 'SYX'
            else:
                # LSM and FluoView
                self.shape = (self.image_length, self.image_width)
                self.axes = 'YX'
        elif self.is_rgb or self.samples_per_pixel > 1:
            if self.planar_configuration == 'contig':
                self._shape = (1, 1, self.image_length, self.image_width,
                               self.samples_per_pixel)
                self.shape = (self.image_length, self.image_width,
                              self.samples_per_pixel)
                self.axes = 'YXS'
            else:
                self._shape = (1, self.samples_per_pixel, self.image_length,
                               self.image_width, 1)
                self.shape = self._shape[1:-1]
                self.axes = 'SYX'
            if self.is_rgb and 'extra_samples' in self.tags:
                # keep at most one alpha channel in the reported shape
                extra_samples = self.extra_samples
                if self.tags['extra_samples'].count == 1:
                    extra_samples = (extra_samples, )
                for exs in extra_samples:
                    if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                        if self.planar_configuration == 'contig':
                            self.shape = self.shape[:2] + (4,)
                        else:
                            self.shape = (4,) + self.shape[1:]
                        break
        else:
            # plain grayscale image
            self._shape = (1, 1, self.image_length, self.image_width, 1)
            self.shape = self._shape[2:4]
            self.axes = 'YX'
        if not self.compression and not 'strip_byte_counts' in tags:
            self.strip_byte_counts = numpy.prod(self.shape) * (
                self.bits_per_sample // 8)
    def asarray(self, squeeze=True, colormapped=True, rgbonly=True, verbosity=0):
        """Read image data from file and return as numpy array.

        Raise ValueError if format is unsupported.
        If any argument is False, the shape of the returned array might be
        different from the page shape.

        Parameters
        ----------
        squeeze : bool
            If True all length-1 dimensions (except X and Y) are
            squeezed out from result.
        colormapped : bool
            If True color mapping is applied for palette-indexed images.
        rgbonly : bool
            If True return RGB(A) image without additional extra samples.
        verbosity : int
            0: silent; 1 and above: print name of file being read;
            3 and above: also print this page's summary.

        """
        if verbosity>0: print("reading file '%s'"%self.parent._fd.name)
        if verbosity>2: print("(%s)"% str(self));
        fd = self.parent._fd
        if not fd:
            raise IOError("TIFF file is not open")
        if self.dtype is None:
            raise ValueError("data type not supported: %s%i" % (
                self.sample_format, self.bits_per_sample))
        if self.compression not in TIFF_DECOMPESSORS:
            raise ValueError("cannot decompress %s" % self.compression)
        if ('ycbcr_subsampling' in self.tags and
            self.tags['ycbcr_subsampling'].value not in (1, (1, 1))):
            raise ValueError("YCbCr subsampling not supported")
        tag = self.tags['sample_format']
        if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
            raise ValueError("sample formats don't match %s" % str(tag.value))
        dtype = self._dtype
        shape = self._shape
        if not shape:
            return None
        image_width = self.image_width
        image_length = self.image_length
        typecode = self.parent.byteorder + dtype
        bits_per_sample = self.bits_per_sample
        if self.is_tiled:
            # older files may store tile data under the strip tags
            if 'tile_offsets' in self.tags:
                byte_counts = self.tile_byte_counts
                offsets = self.tile_offsets
            else:
                byte_counts = self.strip_byte_counts
                offsets = self.strip_offsets
            tile_width = self.tile_width
            tile_length = self.tile_length
            # number of tiles per row/column, rounded up
            tw = (image_width + tile_width - 1) // tile_width
            tl = (image_length + tile_length - 1) // tile_length
            # pad shape to full tiles; cropped back after assembly
            shape = shape[:-3] + (tl*tile_length, tw*tile_width, shape[-1])
            tile_shape = (tile_length, tile_width, shape[-1])
            runlen = tile_width
        else:
            byte_counts = self.strip_byte_counts
            offsets = self.strip_offsets
            runlen = image_width
        try:
            offsets[0]
        except TypeError:
            # single strip/tile: scalar offsets become one-element tuples
            offsets = (offsets, )
            byte_counts = (byte_counts, )
        if any(o < 2 for o in offsets):
            raise ValueError("corrupted file")
        if (not self.is_tiled and (self.is_stk or (not self.compression
            and bits_per_sample in (8, 16, 32, 64)
            and all(offsets[i] == offsets[i+1] - byte_counts[i]
                    for i in range(len(offsets)-1))))):
            # contiguous data: all strips adjacent, read in one go
            fd.seek(offsets[0])
            result = numpy.fromfile(fd, typecode, numpy.prod(shape))
            result = result.astype('=' + dtype)
        else:
            if self.planar_configuration == 'contig':
                runlen *= self.samples_per_pixel
            # choose the decoder for one strip/tile of raw bytes
            if bits_per_sample in (8, 16, 32, 64, 128):
                if (bits_per_sample * runlen) % 8:
                    raise ValueError("data and sample size mismatch")
                unpack = lambda x: numpy.fromstring(x, typecode)
            elif isinstance(bits_per_sample, tuple):
                # e.g. RGB565-style packed samples
                unpack = lambda x: unpackrgb(x, typecode, bits_per_sample)
            else:
                # arbitrary bit depth
                unpack = lambda x: unpackints(x, typecode, bits_per_sample,
                                              runlen)
            decompress = TIFF_DECOMPESSORS[self.compression]
            if self.is_tiled:
                result = numpy.empty(shape, dtype)
                tw, tl, pl = 0, 0, 0  # current tile column, row, plane
                for offset, bytecount in zip(offsets, byte_counts):
                    fd.seek(offset)
                    tile = unpack(decompress(fd.read(bytecount)))
                    tile.shape = tile_shape
                    result[0, pl, tl:tl+tile_length,
                           tw:tw+tile_width, :] = tile
                    del tile
                    tw += tile_width
                    if tw >= shape[-2]:
                        tw, tl = 0, tl + tile_length
                        if tl >= shape[-3]:
                            tl, pl = 0, pl + 1
                # crop the tile padding back to the true image size
                result = result[..., :image_length, :image_width, :]
            else:
                strip_size = (self.rows_per_strip * self.image_width *
                              self.samples_per_pixel)
                result = numpy.empty(shape, dtype).reshape(-1)
                index = 0
                for offset, bytecount in zip(offsets, byte_counts):
                    fd.seek(offset)
                    strip = unpack(decompress(fd.read(bytecount)))
                    # clip to remaining space; last strip may be short
                    size = min(result.size, strip.size, strip_size,
                               result.size - index)
                    result[index:index+size] = strip[:size]
                    del strip
                    index += size
        result.shape = self._shape
        if self.predictor == 'horizontal':
            # workaround bug in LSM510 software
            if not (self.parent.is_lsm and not self.compression):
                # undo horizontal differencing along the width axis
                numpy.cumsum(result, axis=3, dtype=dtype, out=result)
        if colormapped and self.is_palette:
            if self.color_map.shape[1] >= 2**bits_per_sample:
                # FluoView and LSM might fail here
                result = numpy.take(self.color_map, result, axis=1)
        elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
            # return only RGB and first alpha channel if exists
            extra_samples = self.extra_samples
            if self.tags['extra_samples'].count == 1:
                extra_samples = (extra_samples, )
            for i, exs in enumerate(extra_samples):
                if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                    if self.planar_configuration == 'contig':
                        result = result[..., [0, 1, 2, 3+i]]
                    else:
                        result = result[:, [0, 1, 2, 3+i]]
                    break
            else:
                # no alpha channel found: keep only the RGB samples
                if self.planar_configuration == 'contig':
                    result = result[..., :3]
                else:
                    result = result[:, :3]
        if squeeze:
            try:
                result.shape = self.shape
            except ValueError:
                warnings.warn("failed to reshape from %s to %s" % (
                    str(result.shape), str(self.shape)))
        return result
def __str__(self):
    """Return string containing information about page."""
    # Join the non-empty descriptive fields: image shape, numpy dtype,
    # bit depth, photometric interpretation (if tagged), compression
    # scheme ('raw' when uncompressed), and the set is_* format flags
    # with their 'is_' prefix stripped.  Empty fields are dropped.
    s = ', '.join(s for s in (
        ' x '.join(str(i) for i in self.shape),
        str(numpy.dtype(self.dtype)),
        '%s bit' % str(self.bits_per_sample),
        self.photometric if 'photometric' in self.tags else '',
        self.compression if self.compression else 'raw',
        ','.join(t[3:] for t in ('is_stk', 'is_lsm', 'is_nih', 'is_ome',
                                 'is_imagej', 'is_fluoview', 'is_mdgel',
                                 'is_mediacy', 'is_reduced', 'is_tiled')
                 if getattr(self, t))) if s)
    return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
    """Return tag value, caching it as an instance attribute."""
    try:
        tag = self.tags[name]
    except KeyError:
        raise AttributeError(name)
    value = tag.value
    # cache so subsequent lookups bypass __getattr__
    setattr(self, name, value)
    return value
@lazyattr
def is_rgb(self):
    """True if page contains a RGB image."""
    # photometric interpretation code 2 means RGB
    photometric = self.tags['photometric']
    return photometric.value == 2
@lazyattr
def is_palette(self):
    """True if page contains a palette-colored image."""
    tags = self.tags
    if 'photometric' not in tags:
        return False
    # photometric interpretation code 3 means palette color
    return tags['photometric'].value == 3
@lazyattr
def is_tiled(self):
    """True if page contains tiled image."""
    # tiled pages carry a tile_width tag instead of strip offsets
    has_tile_tag = 'tile_width' in self.tags
    return has_tile_tag
@lazyattr
def is_reduced(self):
    """True if page is a reduced image of another image."""
    # bit 0 of new_subfile_type marks a reduced-resolution image
    subfile_type = self.tags['new_subfile_type'].value
    return (subfile_type & 1) == 1
@lazyattr
def is_mdgel(self):
    """True if page contains md_file_tag tag."""
    tags = self.tags
    return 'md_file_tag' in tags
@lazyattr
def is_mediacy(self):
    """True if page contains Media Cybernetics Id tag."""
    tags = self.tags
    if 'mc_id' not in tags:
        return False
    return tags['mc_id'].value.startswith(b'MC TIFF')
@lazyattr
def is_stk(self):
    """True if page contains MM_UIC2 tag."""
    # MetaMorph STK files store per-plane data in the MM_UIC2 tag
    tags = self.tags
    return 'mm_uic2' in tags
@lazyattr
def is_lsm(self):
    """True if page contains LSM CZ_LSM_INFO tag."""
    tags = self.tags
    return 'cz_lsm_info' in tags
@lazyattr
def is_fluoview(self):
    """True if page contains FluoView MM_STAMP tag."""
    tags = self.tags
    return 'mm_stamp' in tags
@lazyattr
def is_nih(self):
    """True if page contains NIH image header."""
    tags = self.tags
    return 'nih_image_header' in tags
@lazyattr
def is_ome(self):
    """True if page contains OME-XML in image_description tag."""
    tags = self.tags
    if 'image_description' not in tags:
        return False
    return tags['image_description'].value.startswith(b'<?xml version=')
@lazyattr
def is_shaped(self):
    """True if page contains shape in image_description tag."""
    tags = self.tags
    if 'image_description' not in tags:
        return False
    return tags['image_description'].value.startswith(b'shape=(')
@lazyattr
def is_imagej(self):
    """True if page contains ImageJ description."""
    tags = self.tags
    if 'image_description' not in tags:
        return False
    return tags['image_description'].value.startswith(b'ImageJ=')
class TiffTag(object):
    """A TIFF tag structure.

    Attributes
    ----------
    name : string
        Attribute name of tag.
    code : int
        Decimal code of tag.
    dtype : str
        Datatype of tag data. One of TIFF_DATA_TYPES.
    count : int
        Number of values.
    value : various types
        Tag data. For codes in CUSTOM_TAGS the 4 bytes file content.
    value_offset : int
        Location of value in file

    All attributes are read-only.
    """
    __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
                 '_offset')

    def __init__(self, arg, **kwargs):
        """Initialize instance from file or arguments."""
        self._offset = None
        if hasattr(arg, '_fd'):
            # arg is an open TIFF file object: parse tag at the file cursor
            self._fromfile(arg, **kwargs)
        else:
            self._fromdata(arg, **kwargs)

    def _fromdata(self, code, dtype, count, value, name=None):
        """Initialize instance from arguments."""
        self.code = int(code)
        self.name = name if name else str(code)
        self.dtype = TIFF_DATA_TYPES[dtype]
        self.count = int(count)
        self.value = value

    def _fromfile(self, parent):
        """Read tag structure from open file. Advance file cursor."""
        fd = parent._fd
        byteorder = parent.byteorder
        self._offset = fd.tell()
        self.value_offset = self._offset + parent.offset_size + 4
        # classic TIFF uses 12 byte tag entries, BigTIFF 20 byte entries
        fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
        data = fd.read(size)
        code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
        count, value = struct.unpack(byteorder + fmt[2:], data[4:])
        if code in TIFF_TAGS:
            name = TIFF_TAGS[code][0]
        elif code in CUSTOM_TAGS:
            name = CUSTOM_TAGS[code][0]
        else:
            name = str(code)
        try:
            dtype = TIFF_DATA_TYPES[dtype]
        except KeyError:
            raise ValueError("unknown TIFF tag data type %i" % dtype)
        fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
        size = struct.calcsize(fmt)
        if size > parent.offset_size or code in CUSTOM_TAGS:
            # value does not fit into the tag entry itself; the entry
            # holds an offset to where the value is stored in the file
            pos = fd.tell()
            tof = {4: 'I', 8: 'Q'}[parent.offset_size]
            self.value_offset = struct.unpack(byteorder+tof, value)[0]
            fd.seek(self.value_offset)
            if code in CUSTOM_TAGS:
                readfunc = CUSTOM_TAGS[code][1]
                value = readfunc(fd, byteorder, dtype, count)
                fd.seek(0, 2)  # bug in numpy/Python 3.x ?
                if isinstance(value, dict):  # numpy.core.records.record
                    value = Record(value)
            elif code in TIFF_TAGS or dtype[-1] == 's':
                value = struct.unpack(fmt, fd.read(size))
            else:
                value = read_numpy(fd, byteorder, dtype, count)
                fd.seek(0, 2)  # bug in numpy/Python 3.x ?
            fd.seek(pos)
        else:
            value = struct.unpack(fmt, value[:size])
        # fixed idiom: 'code not in' instead of 'not code in'
        if code not in CUSTOM_TAGS:
            # unwrap single values and strip trailing NULLs from strings
            if len(value) == 1:
                value = value[0]
            if dtype.endswith('s'):
                value = stripnull(value)
        self.code = code
        self.name = name
        self.dtype = dtype
        self.count = count
        self.value = value

    def __str__(self):
        """Return string containing information about tag."""
        return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
    """Sequence of image files.

    Properties
    ----------
    files : list
        List of file names.
    shape : tuple
        Shape of image sequence.
    axes : str
        Labels of axes in shape.

    Examples
    --------
    >>> ims = TiffSequence("test.oif.files/*.tif")
    >>> ims = ims.asarray()
    >>> ims.shape
    (2, 100, 256, 256)
    """
    # raw string so the regex escapes are not subject to string escaping
    _axes_pattern = r"""
        # matches Olympus OIF and Leica TIFF series
        _?(?:(c|t|x|y|z|ch|tp)(\d{1,4}))
        _?(?:(c|t|x|y|z|ch|tp)(\d{1,4}))?
        _?(?:(c|t|x|y|z|ch|tp)(\d{1,4}))?
        _?(?:(c|t|x|y|z|ch|tp)(\d{1,4}))?
        _?(?:(c|t|x|y|z|ch|tp)(\d{1,4}))?
        """

    class _ParseError(Exception):
        # raised internally when file names do not match the pattern
        pass

    def __init__(self, files, imread=TiffFile, pattern=''):
        """Initialize instance from multiple files.

        Parameters
        ----------
        files : str, or sequence of str
            Glob pattern or sequence of file names.
        imread : function or class
            Image read function or class with asarray function returning numpy
            array from single file.
        pattern : str
            Regular expression pattern that matches axes names and sequence
            indices in file names.
        """
        if isinstance(files, basestring):
            files = sorted(glob.glob(files))
        if not files:
            raise ValueError("no files found")
        if not os.path.isfile(files[0]):
            raise ValueError("file not found")
        self.files = files
        if hasattr(imread, 'asarray'):
            # wrap a file-reader class into a plain read function
            _imread = imread

            def imread(fname, *args, **kwargs):
                with _imread(fname) as im:
                    return im.asarray(*args, **kwargs)

        self.imread = imread
        self.pattern = self._axes_pattern if pattern == 'axes' else pattern
        try:
            self._parse()
            if not self.axes:
                self.axes = 'I'
        except self._ParseError:
            # fall back to a flat sequence over the file list
            self.axes = 'I'
            self.shape = (len(files),)
            self._start_index = (0,)
            self._indices = ((i,) for i in range(len(files)))

    def __str__(self):
        """Return string with information about image sequence."""
        return "\n".join([
            self.files[0],
            '* files: %i' % len(self.files),
            '* axes: %s' % self.axes,
            '* shape: %s' % str(self.shape)])

    def __len__(self):
        return len(self.files)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        pass

    def asarray(self, *args, **kwargs):
        """Read image data from all files and return as single numpy array.

        Raise IndexError if image shapes don't match.
        """
        im = self.imread(self.files[0])
        result_shape = self.shape + im.shape
        result = numpy.zeros(result_shape, dtype=im.dtype)
        result = result.reshape(-1, *im.shape)
        for index, fname in zip(self._indices, self.files):
            index = [i-j for i, j in zip(index, self._start_index)]
            index = numpy.ravel_multi_index(index, self.shape)
            # BUGFIX: use the configured reader (self.imread); previously
            # the module level 'imread' function was used, silently
            # ignoring a custom 'imread' argument passed to __init__
            im = self.imread(fname, *args, **kwargs)
            result[index] = im
        result.shape = result_shape
        return result

    def _parse(self):
        """Get axes and shape from file names."""
        if not self.pattern:
            raise self._ParseError("invalid pattern")
        pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
        matches = pattern.findall(self.files[0])
        if not matches:
            raise self._ParseError("pattern doesn't match file names")
        matches = matches[-1]
        if len(matches) % 2:
            raise self._ParseError("pattern doesn't match axis name and index")
        axes = ''.join(m for m in matches[::2] if m)
        if not axes:
            raise self._ParseError("pattern doesn't match file names")
        indices = []
        for fname in self.files:
            matches = pattern.findall(fname)[-1]
            if axes != ''.join(m for m in matches[::2] if m):
                raise ValueError("axes don't match within the image sequence")
            indices.append([int(m) for m in matches[1::2] if m])
        shape = tuple(numpy.max(indices, axis=0))
        start_index = tuple(numpy.min(indices, axis=0))
        shape = tuple(i-j+1 for i, j in zip(shape, start_index))
        if numpy.prod(shape) != len(self.files):
            warnings.warn("files are missing. Missing data are zeroed")
        self.axes = axes.upper()
        self.shape = shape
        self._indices = indices
        self._start_index = start_index
class Record(dict):
    """Dictionary with attribute access.

    Can also be initialized with numpy.core.records.record.
    """
    __slots__ = ()

    def __init__(self, arg=None, **kwargs):
        if kwargs:
            arg = kwargs
        elif arg is None:
            arg = {}
        try:
            dict.__init__(self, arg)
        except (TypeError, ValueError):
            # arg is a numpy record: copy its fields one by one,
            # stripping trailing NULLs from byte-string fields
            for index, field in enumerate(arg.dtype.names):
                value = arg[index]
                if value.dtype.char == 'S':
                    value = stripnull(value)
                self[field] = value

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __str__(self):
        """Pretty print Record."""
        lines = []
        nested = []
        for key in sorted(self):
            if key.startswith('_'):  # does not work with byte
                continue
            value = self[key]
            if isinstance(value, (list, tuple)) and len(value):
                if isinstance(value[0], Record):
                    # defer nested records so they are printed last
                    nested.append((key, value))
                    continue
                elif isinstance(value[0], TiffPage):
                    value = [page.index for page in value if page]
            text = "* %s: %s" % (key, str(value))
            lines.append(text.split("\n", 1)[0][:PRINT_LINE_LEN])
        for key, value in nested:
            entries = []
            for index, item in enumerate(value):
                entries.append("* %s[%i]\n %s" % (
                    key, index, str(item).replace("\n", "\n ")))
            lines.append('\n'.join(entries))
        return '\n'.join(lines)
class TiffTags(Record):
    """Dictionary of TIFFtags with attribute access."""

    def __str__(self):
        """Return string with information about all tags."""
        lines = []
        for tag in sorted(self.values(), key=lambda tag: tag.code):
            # e.g. '4I' for a tag holding four 32-bit unsigned integers
            typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
            line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode,
                                        str(tag.value).split('\n', 1)[0])
            lines.append(line[:PRINT_LINE_LEN])
        return '\n'.join(lines)
def read_bytes(fd, byteorder, dtype, count):
    """Read tag data from file and return as byte string.

    Parameters
    ----------
    fd : file object
        Open file positioned at the tag data.
    byteorder : str
        '<' or '>'.
    dtype : str
        TIFF_DATA_TYPES format string; only the last character is used.
    count : int
        Number of items to read.
    """
    # tobytes() replaces ndarray.tostring(), which is deprecated and
    # removed in recent numpy releases
    return numpy.fromfile(fd, byteorder+dtype[-1], count).tobytes()
def read_numpy(fd, byteorder, dtype, count):
    """Read tag data from file and return as numpy array."""
    # only the last character of the TIFF_DATA_TYPES format is the
    # numpy type character; prefix it with the byte order
    return numpy.fromfile(fd, dtype=byteorder+dtype[-1], count=count)
def read_mm_header(fd, byteorder, dtype, count):
    """Read MM_HEADER tag from file and return as numpy.rec.array."""
    records = numpy.rec.fromfile(fd, MM_HEADER, 1, byteorder=byteorder)
    return records[0]
def read_mm_stamp(fd, byteorder, dtype, count):
    """Read MM_STAMP tag from file and return as numpy.array."""
    # one record of eight float64 values
    stamps = numpy.fromfile(fd, dtype=byteorder+'8f8', count=1)
    return stamps[0]
def read_mm_uic1(fd, byteorder, dtype, count):
    """Read MM_UIC1 tag from file and return as dictionary."""
    raw = fd.read(8*count)
    values = struct.unpack('%s%iI' % (byteorder, 2*count), raw)
    # values are (tag-id, value) pairs; keep only the known tag ids
    result = {}
    for tag, val in zip(values[::2], values[1::2]):
        if tag in MM_TAG_IDS:
            result[MM_TAG_IDS[tag]] = val
    return result
def read_mm_uic2(fd, byteorder, dtype, count):
    """Read MM_UIC2 tag from file and return as dictionary."""
    result = {'number_planes': count}
    # each plane stores six uint32: z-distance as a rational
    # (numerator, denominator), followed by creation and modification
    # date/time pairs, which are currently not used
    values = numpy.fromfile(fd, byteorder+'I', 6*count)
    result['z_distance'] = values[0::6] // values[1::6]
    return result
def read_mm_uic3(fd, byteorder, dtype, count):
    """Read MM_UIC3 tag from file and return as dictionary."""
    # wavelengths are stored as rationals (numerator, denominator)
    values = numpy.fromfile(fd, byteorder+'I', 2*count)
    return {'wavelengths': values[0::2] // values[1::2]}
def read_mm_uic4(fd, byteorder, dtype, count):
    """Read MM_UIC4 tag from file and return as dictionary."""
    # each entry is an int16 tag id followed by a uint32 value (6 bytes)
    values = struct.unpack(byteorder + 'hI'*count, fd.read(6*count))
    result = {}
    for tag, val in zip(values[::2], values[1::2]):
        if tag in MM_TAG_IDS:
            result[MM_TAG_IDS[tag]] = val
    return result
def read_cz_lsm_info(fd, byteorder, dtype, count):
    """Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
    record = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1, byteorder=byteorder)[0]
    # validate the magic number (LSM versions 1.3 and 2.0);
    # raise KeyError for anything else, as the original dict lookup did
    if record.magic_number not in (50350412, 67127628):
        raise KeyError(record.magic_number)
    return record
def read_cz_lsm_time_stamps(fd, byteorder):
    """Read LSM time stamps from file and return as tuple of floats."""
    size, count = struct.unpack(byteorder+'II', fd.read(8))
    # the block holds the 8 byte header plus one float64 per stamp
    expected = 8 + 8 * count
    if size != expected:
        raise ValueError("lsm_time_stamps block is too short")
    return struct.unpack('%s%dd' % (byteorder, count), fd.read(8*count))
def read_cz_lsm_event_list(fd, byteorder):
    """Read LSM events from file and return as list of (time, type, text)."""
    count = struct.unpack(byteorder+'II', fd.read(8))[1]
    events = []
    for _ in range(count):
        # each event: uint32 size, float64 time, uint32 type, then text
        esize, etime, etype = struct.unpack(byteorder+'IdI', fd.read(16))
        etext = stripnull(fd.read(esize - 16))
        events.append((etime, etype, etext))
    return events
def read_cz_lsm_scan_info(fd, byteorder):
    """Read LSM scan information from file and return as Record.

    The scan information is a tree of nested blocks; a stack of the
    enclosing blocks is kept while entries are parsed sequentially.
    """
    block = Record()
    blocks = [block]
    unpack = struct.unpack
    # the structure must start with this magic entry id
    if 0x10000000 != struct.unpack(byteorder+"I", fd.read(4))[0]:
        raise ValueError("not a lsm_scan_info structure")
    fd.read(8)
    while True:
        # each entry: uint32 id, uint32 data type, uint32 data size
        entry, dtype, size = unpack(byteorder+"III", fd.read(12))
        if dtype == 2:
            value = stripnull(fd.read(size))  # NULL terminated string
        elif dtype == 4:
            value = unpack(byteorder+"i", fd.read(4))[0]  # int32
        elif dtype == 5:
            value = unpack(byteorder+"d", fd.read(8))[0]  # float64
        else:
            value = 0
        if entry in CZ_LSM_SCAN_INFO_ARRAYS:
            # start of a list-valued sub block: push current block
            blocks.append(block)
            name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
            newobj = []
            setattr(block, name, newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
            # start of a record-valued sub block appended to current list
            blocks.append(block)
            newobj = Record()
            block.append(newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
            # plain attribute of the current block
            name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
            setattr(block, name, value)
        elif entry == 0xffffffff:
            # end of sub block: pop back to the parent block
            block = blocks.pop()
        else:
            # unknown entry: keep it under a generated attribute name
            setattr(block, "unknown_%x" % entry, value)
        if not blocks:
            break
    return block
def read_nih_image_header(fd, byteorder, dtype, count):
    """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
    header = numpy.rec.fromfile(fd, NIH_IMAGE_HEADER, 1,
                                byteorder=byteorder)[0]
    header = header.newbyteorder(byteorder)
    # truncate the Pascal style strings to their stored lengths
    header.xunit = header.xunit[:header._xunit_len]
    header.um = header.um[:header._um_len]
    return header
def imagej_meta_data(data, bytecounts, byteorder):
    """Return dict from ImageJ meta data tag value.

    Parameters
    ----------
    data : byte str
        Content of the ImageJ meta data tag ('IJIJ' header plus blocks).
    bytecounts : sequence of int
        Byte count of the header followed by one count per data block.
    byteorder : str
        '<' or '>'.
    """
    if sys.version_info[0] > 2:
        _str = lambda x: str(x, 'cp1252')
    else:
        _str = str

    def _string(block, byteorder):
        # strings are stored two bytes per character; keep every 2nd byte
        return _str(block[1::2])

    def _doubles(block, byteorder):
        return struct.unpack(byteorder + ('d' * (len(block)//8)), block)

    def _bytes(block, byteorder):
        return numpy.fromstring(block, 'uint8')

    readers = {
        b'info': ('info', _string),
        b'labl': ('labels', _string),
        b'rang': ('ranges', _doubles),
        b'luts': ('luts', _bytes),
        b'roi ': ('roi', _bytes),
        b'over': ('overlays', _bytes)}

    if not bytecounts:
        raise ValueError("no ImageJ meta data")
    if not data.startswith(b'IJIJ'):
        raise ValueError("invalid ImageJ meta data")
    header_size = bytecounts[0]
    if not 12 <= header_size <= 804:
        raise ValueError("invalid ImageJ meta data header size")

    # the header lists (type, count) pairs describing the data blocks
    ntypes = (header_size - 4) // 8
    header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        name, reader = readers.get(mtype, (_str(mtype), _bytes))
        values = []
        for _ in range(count):
            counter += 1
            end = pos + bytecounts[counter]
            values.append(reader(data[pos:end], byteorder))
            pos = end
        result[name.strip()] = values[0] if count == 1 else values
    return result
def imagej_description(description):
    """Return dict from ImageJ image_description tag.

    Each line of the byte-string description holds a 'key=value' pair.
    Values are converted to int, float or bool when possible, otherwise
    kept as (cp1252 decoded) strings.
    """
    def _bool(val):
        return {b'true': True, b'false': False}[val.lower()]

    if sys.version_info[0] > 2:
        _str = lambda x: str(x, 'cp1252')
    else:
        _str = str

    converters = (int, float, _bool, _str)
    result = {}
    for line in description.splitlines():
        parts = line.split(b'=')
        if len(parts) != 2:
            # skip lines without exactly one '=' separator
            continue
        key = parts[0].strip()
        val = parts[1].strip()
        for convert in converters:
            try:
                val = convert(val)
                break
            except Exception:
                pass
        result[_str(key)] = val
    return result
def _replace_by(module_function, warn=True):
    """Try replace decorated function by module.function.

    Used to substitute pure Python implementations with versions from a
    compiled extension module (e.g. '_tifffile.decodelzw') when the
    extension is importable.  The replaced Python function is kept in the
    module globals under the name '__old_<name>'.
    """
    def decorate(func, module_function=module_function, warn=warn):
        # temporarily extend sys.path so the sibling extension module can
        # be imported regardless of the current working directory
        sys.path.append(os.path.dirname(__file__))
        try:
            module, function = module_function.split('.')
            func, oldfunc = getattr(__import__(module), function), func
            globals()['__old_' + func.__name__] = oldfunc
        except Exception:
            if warn:
                warnings.warn("failed to import %s" % module_function)
        sys.path.pop()
        return func
    return decorate
#@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
    """Decompress PackBits encoded byte string.

    PackBits is a simple byte-oriented run-length compression scheme.
    """
    # on Python 2 indexing bytes yields 1-char strings; convert via ord()
    if sys.version[0] == '2':
        tobyte = ord
    else:
        tobyte = lambda x: x
    out = []
    extend = out.extend
    pos = 0
    try:
        while True:
            n = tobyte(encoded[pos]) + 1
            pos += 1
            if n < 129:
                # literal run of n bytes
                extend(encoded[pos:pos+n])
                pos += n
            elif n > 129:
                # repeat the next byte 258-n times
                extend(encoded[pos:pos+1] * (258-n))
                pos += 1
            # n == 129 (source byte 0x80) is a no-op
    except IndexError:
        # end of input reached
        pass
    return b''.join(out) if sys.version[0] == '2' else bytes(out)
#@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
    """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).

    The strip must begin with a CLEAR code and end with an EOI code.

    This is an implementation of the LZW decoding algorithm described in (1).
    It is not compatible with old style LZW compressed files like quad-lzw.tif.
    """
    len_encoded = len(encoded)
    bitcount_max = len_encoded * 8
    unpack = struct.unpack

    # initial string table: all 256 single-byte strings
    if sys.version[0] == '2':
        newtable = [chr(i) for i in range(256)]
    else:
        newtable = [bytes([i]) for i in range(256)]
    # placeholders for the CLEAR (256) and EOI (257) codes
    newtable.extend((0, 0))

    def next_code():
        """Return integer of `bitw` bits at `bitcount` position in encoded."""
        start = bitcount // 8
        s = encoded[start:start+4]
        try:
            code = unpack('>I', s)[0]
        except Exception:
            # near the end of the strip: pad the read to 4 bytes
            code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
        code = code << (bitcount % 8)
        code = code & mask
        return code >> shr

    # table length at which to widen codes -> (bit-width, shr-bits, bit-mask)
    switchbitch = {  # code: bit-width, shr-bits, bit-mask
        255: (9, 23, int(9*'1'+'0'*23, 2)),
        511: (10, 22, int(10*'1'+'0'*22, 2)),
        1023: (11, 21, int(11*'1'+'0'*21, 2)),
        2047: (12, 20, int(12*'1'+'0'*20, 2)), }
    bitw, shr, mask = switchbitch[255]
    bitcount = 0

    if len_encoded < 4:
        raise ValueError("strip must be at least 4 characters long")

    if next_code() != 256:
        raise ValueError("strip must begin with CLEAR code")

    code = 0
    oldcode = 0
    result = []
    result_append = result.append
    while True:
        code = next_code()  # ~5% faster when inlining this function
        bitcount += bitw
        if code == 257 or bitcount >= bitcount_max:  # EOI
            break
        if code == 256:  # CLEAR
            # reset the string table to its initial 258 entries
            table = newtable[:]
            table_append = table.append
            lentable = 258
            bitw, shr, mask = switchbitch[255]
            code = next_code()
            bitcount += bitw
            if code == 257:  # EOI
                break
            result_append(table[code])
        else:
            if code < lentable:
                decoded = table[code]
                newcode = table[oldcode] + decoded[:1]
            else:
                # code not yet in the table: the KwKwK special case
                newcode = table[oldcode]
                newcode += newcode[:1]
                decoded = newcode
            result_append(decoded)
            table_append(newcode)
            lentable += 1
        oldcode = code
        if lentable in switchbitch:
            # table reached a power-of-two boundary: widen the codes
            bitw, shr, mask = switchbitch[lentable]

    if code != 257:
        warnings.warn(
            "decodelzw encountered unexpected end of stream (code %i)" % code)

    return b''.join(result)
#@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
    """Decompress byte string to array of integers of any bit size <= 32.

    Parameters
    ----------
    data : byte str
        Data to decompress.
    dtype : numpy.dtype or str
        A numpy boolean or integer type.
    itemsize : int
        Number of bits per integer.
    runlen : int
        Number of consecutive integers, after which to start at next byte.
    """
    if itemsize == 1:  # bitarray
        data = numpy.fromstring(data, '|B')
        data = numpy.unpackbits(data)
        if runlen % 8:
            # drop the padding bits at the end of each row
            data = data.reshape(-1, runlen+(8-runlen%8))
            data = data[:, :runlen].reshape(-1)
        return data.astype(dtype)

    dtype = numpy.dtype(dtype)
    if itemsize in (8, 16, 32, 64):
        # natively aligned sizes can be read directly
        return numpy.fromstring(data, dtype)
    if itemsize < 1 or itemsize > 32:
        raise ValueError("itemsize out of range: %i" % itemsize)
    if dtype.kind not in "biu":
        raise ValueError("invalid dtype")

    # smallest power-of-two byte size that can hold one item
    itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
    if itembytes != dtype.itemsize:
        raise ValueError("dtype.itemsize too small")
    if runlen == 0:
        runlen = len(data) // itembytes
    # bits to skip so each run starts on a byte boundary
    skipbits = runlen*itemsize % 8
    if skipbits:
        skipbits = 8 - skipbits
    shrbits = itembytes*8 - itemsize
    bitmask = int(itemsize*'1'+'0'*shrbits, 2)
    dtypestr = '>' + dtype.char  # dtype always big endian?

    unpack = struct.unpack
    # total number of items in the output
    l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
    result = numpy.empty((l, ), dtype)
    bitcount = 0
    for i in range(len(result)):
        start = bitcount // 8
        s = data[start:start+itembytes]
        try:
            code = unpack(dtypestr, s)[0]
        except Exception:
            # pad short reads at the end of the buffer with zero bytes
            code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
        code = code << (bitcount % 8)
        code = code & bitmask
        result[i] = code >> shrbits
        bitcount += itemsize
        if (i+1) % runlen == 0:
            bitcount += skipbits
    return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
    """Return array from byte string containing packed samples.

    Use to unpack RGB565 or RGB555 to RGB888 format.

    Parameters
    ----------
    data : byte str
        The data to be decoded. Samples in each pixel are stored consecutively.
        Pixels are aligned to 8, 16, or 32 bit boundaries.
    dtype : numpy.dtype
        The sample data type. The byteorder applies also to the data stream.
    bitspersample : tuple
        Number of bits for each sample in a pixel.
    rescale : bool
        Upscale samples to the number of bits in dtype.

    Returns
    -------
    result : ndarray
        Flattened array of unpacked samples of native dtype.

    Examples
    --------
    >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
    >>> print(unpackrgb(data, '<B', (5, 6, 5), False))
    [ 1  1  1 31 63 31]
    >>> print(unpackrgb(data, '<B', (5, 6, 5)))
    [  8   4   8 255 255 255]
    >>> print(unpackrgb(data, '<B', (5, 5, 5)))
    [ 16   8   8 255 255 255]
    """
    dtype = numpy.dtype(dtype)
    bits = int(numpy.sum(bitspersample))
    if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
        raise ValueError("sample size not supported %s" % str(bitspersample))
    # smallest unsigned integer type that can hold one packed pixel
    dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
    data = numpy.fromstring(data, dtype.byteorder+dt)
    result = numpy.empty((data.size, len(bitspersample)), dtype.char)
    for i, bps in enumerate(bitspersample):
        # shift this sample down to the least significant bits and mask it
        t = data >> int(numpy.sum(bitspersample[i+1:]))
        t &= int('0b'+'1'*bps, 2)
        if rescale:
            # scale the sample up to the full range of the output dtype
            o = ((dtype.itemsize * 8) // bps + 1) * bps
            if o > data.dtype.itemsize * 8:
                t = t.astype('I')
            t *= (2**o - 1) // (2**bps - 1)
            t //= 2**(o - (dtype.itemsize * 8))
        result[:, i] = t
    return result.reshape(-1)
def reorient(image, orientation):
    """Return reoriented view of image array.

    Parameters
    ----------
    image : numpy array
        Non-squeezed output of asarray() functions.
        Axes -3 and -2 must be image length and width respectively.
    orientation : int or str
        One of TIFF_ORIENTATIONS keys or values.
    """
    key = TIFF_ORIENTATIONS.get(orientation, orientation)
    # lazy dispatch table: each entry builds a (cheap) view on demand
    views = {
        'top_left': lambda im: im,
        'top_right': lambda im: im[..., ::-1, :],
        'bottom_left': lambda im: im[..., ::-1, :, :],
        'bottom_right': lambda im: im[..., ::-1, ::-1, :],
        'left_top': lambda im: numpy.swapaxes(im, -3, -2),
        'right_top': lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, :],
        'left_bottom': (
            lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, :, :]),
        'right_bottom': (
            lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, ::-1, :]),
    }
    try:
        view = views[key]
    except KeyError:
        # unknown orientation: mimic the original if/elif fall-through
        return None
    return view(image)
def stripnull(string):
    """Return string truncated at first null character."""
    # split once at the first NUL; without a NUL this returns the input
    return string.split(b'\x00', 1)[0]
def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)):
    """Return datetime object from timestamp in Excel serial format.

    Examples
    --------
    >>> datetime_from_timestamp(40237.029999999795)
    datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
    """
    # n is a number of days (possibly fractional) since the Excel epoch
    delta = datetime.timedelta(n)
    return epoch + delta
def test_tifffile(directory='testimages', verbose=True):
    """Read all images in directory. Print error message on failure.

    Examples
    --------
    >>> test_tifffile(verbose=False)
    """
    successful = 0
    failed = 0
    start = time.time()
    for f in glob.glob(os.path.join(directory, '*.*')):
        if verbose:
            print("\n%s>\n" % f.lower(), end='')
        t0 = time.time()
        try:
            tif = TiffFile(f, multifile=True)
        except Exception as e:
            # file could not be opened/parsed at all
            if not verbose:
                print(f, end=' ')
            print("ERROR:", e)
            failed += 1
            continue
        try:
            img = tif.asarray()
        except ValueError:
            try:
                # pages do not combine into one array; read first page only
                img = tif[0].asarray()
            except Exception as e:
                if not verbose:
                    print(f, end=' ')
                print("ERROR:", e)
                failed += 1
                continue
        finally:
            tif.close()
        successful += 1
        if verbose:
            print("%s, %s %s, %s, %.0f ms" % (str(tif), str(img.shape),
                img.dtype, tif[0].compression, (time.time()-t0) * 1e3))
    if verbose:
        print("\nSuccessfully read %i of %i files in %.3f s\n" % (
            successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
    """Map a new_subfile_type bitmask to a tuple of flag names."""

    def __getitem__(self, key):
        # bit 0: reduced image, bit 1: one page of many, bit 2: mask
        flags = ((1, 'reduced_image'), (2, 'page'), (4, 'mask'))
        return tuple(name for bit, name in flags if key & bit)
# TIFF photometric interpretation codes (tag 262)
TIFF_PHOTOMETRICS = {
    0: 'miniswhite',
    1: 'minisblack',
    2: 'rgb',
    3: 'palette',
    4: 'mask',
    5: 'separated',
    6: 'cielab',
    7: 'icclab',
    8: 'itulab',
    32844: 'logl',
    32845: 'logluv',
}
# TIFF compression scheme codes (tag 259).
# NOTE: name is misspelled ("COMPESSIONS") but kept for compatibility.
TIFF_COMPESSIONS = {
    1: None,
    2: 'ccittrle',
    3: 'ccittfax3',
    4: 'ccittfax4',
    5: 'lzw',
    6: 'ojpeg',
    7: 'jpeg',
    8: 'adobe_deflate',
    9: 't85',
    10: 't43',
    32766: 'next',
    32771: 'ccittrlew',
    32773: 'packbits',
    32809: 'thunderscan',
    32895: 'it8ctpad',
    32896: 'it8lw',
    32897: 'it8mp',
    32898: 'it8bl',
    32908: 'pixarfilm',
    32909: 'pixarlog',
    32946: 'deflate',
    32947: 'dcs',
    34661: 'jbig',
    34676: 'sgilog',
    34677: 'sgilog24',
    34712: 'jp2000',
    34713: 'nef',
}
# decompression functions for the compression schemes supported here
TIFF_DECOMPESSORS = {
    None: lambda x: x,
    'adobe_deflate': zlib.decompress,
    'deflate': zlib.decompress,
    'packbits': decodepackbits,
    'lzw': decodelzw,
}
# TIFF field type codes -> struct format strings (count + type character)
TIFF_DATA_TYPES = {
    1: '1B',   # BYTE 8-bit unsigned integer.
    2: '1s',   # ASCII 8-bit byte that contains a 7-bit ASCII code;
               # the last byte must be NULL (binary zero).
    3: '1H',   # SHORT 16-bit (2-byte) unsigned integer
    4: '1I',   # LONG 32-bit (4-byte) unsigned integer.
    5: '2I',   # RATIONAL Two LONGs: the first represents the numerator of
               # a fraction; the second, the denominator.
    6: '1b',   # SBYTE An 8-bit signed (twos-complement) integer.
    7: '1B',   # UNDEFINED An 8-bit byte that may contain anything,
               # depending on the definition of the field.
    8: '1h',   # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
    9: '1i',   # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
    10: '2i',  # SRATIONAL Two SLONGs: the first represents the numerator
               # of a fraction, the second the denominator.
    11: '1f',  # FLOAT Single precision (4-byte) IEEE format.
    12: '1d',  # DOUBLE Double precision (8-byte) IEEE format.
    13: '1I',  # IFD unsigned 4 byte IFD offset.
    #14: '',   # UNICODE
    #15: '',   # COMPLEX
    16: '1Q',  # LONG8 unsigned 8 byte integer (BigTiff)
    17: '1q',  # SLONG8 signed 8 byte integer (BigTiff)
    18: '1Q',  # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
# sample_format tag values -> sample kind names
TIFF_SAMPLE_FORMATS = {
    1: 'uint',
    2: 'int',
    3: 'float',
    #4: 'void',
    #5: 'complex_int',
    6: 'complex',
}
# (sample kind, bits per sample) -> numpy dtype character
TIFF_SAMPLE_DTYPES = {
    ('uint', 1): '?',  # bitmap
    ('uint', 2): 'B',
    ('uint', 3): 'B',
    ('uint', 4): 'B',
    ('uint', 5): 'B',
    ('uint', 6): 'B',
    ('uint', 7): 'B',
    ('uint', 8): 'B',
    ('uint', 9): 'H',
    ('uint', 10): 'H',
    ('uint', 11): 'H',
    ('uint', 12): 'H',
    ('uint', 13): 'H',
    ('uint', 14): 'H',
    ('uint', 15): 'H',
    ('uint', 16): 'H',
    ('uint', 17): 'I',
    ('uint', 18): 'I',
    ('uint', 19): 'I',
    ('uint', 20): 'I',
    ('uint', 21): 'I',
    ('uint', 22): 'I',
    ('uint', 23): 'I',
    ('uint', 24): 'I',
    ('uint', 25): 'I',
    ('uint', 26): 'I',
    ('uint', 27): 'I',
    ('uint', 28): 'I',
    ('uint', 29): 'I',
    ('uint', 30): 'I',
    ('uint', 31): 'I',
    ('uint', 32): 'I',
    ('uint', 64): 'Q',
    ('int', 8): 'b',
    ('int', 16): 'h',
    ('int', 32): 'i',
    ('int', 64): 'q',
    ('float', 16): 'e',
    ('float', 32): 'f',
    ('float', 64): 'd',
    ('complex', 64): 'F',
    ('complex', 128): 'D',
    ('uint', (5, 6, 5)): 'B',  # packed RGB565
}
# orientation tag values -> orientation names (see reorient())
TIFF_ORIENTATIONS = {
    1: 'top_left',
    2: 'top_right',
    3: 'bottom_right',
    4: 'bottom_left',
    5: 'left_top',
    6: 'right_top',
    7: 'right_bottom',
    8: 'left_bottom',
}
# single-character axis codes -> human readable axis names
AXES_LABELS = {
    'X': 'width',
    'Y': 'height',
    'Z': 'depth',
    'S': 'sample',  # rgba
    'P': 'plane',
    'T': 'time',
    'C': 'channel',  # color, emission wavelength
    'A': 'angle',
    'F': 'phase',
    'R': 'tile',  # region
    'H': 'lifetime',  # histogram
    'E': 'lambda',  # excitation wavelength
    'L': 'exposure',  # lux
    'V': 'event',
    'O': 'other',
}
# make the mapping bidirectional: names map back to their codes
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# MetaMorph STK tags
# tag ids used in MM_UIC1/MM_UIC4 key-value pairs; commented-out ids
# are known but not handled by this module
MM_TAG_IDS = {
    0: 'auto_scale',
    1: 'min_scale',
    2: 'max_scale',
    3: 'spatial_calibration',
    #4: 'x_calibration',
    #5: 'y_calibration',
    #6: 'calibration_units',
    #7: 'name',
    8: 'thresh_state',
    9: 'thresh_state_red',
    11: 'thresh_state_green',
    12: 'thresh_state_blue',
    13: 'thresh_state_lo',
    14: 'thresh_state_hi',
    15: 'zoom',
    #16: 'create_time',
    #17: 'last_saved_time',
    18: 'current_buffer',
    19: 'gray_fit',
    20: 'gray_point_count',
    #21: 'gray_x',
    #22: 'gray_y',
    #23: 'gray_min',
    #24: 'gray_max',
    #25: 'gray_unit_name',
    26: 'standard_lut',
    27: 'wavelength',
    #28: 'stage_position',
    #29: 'camera_chip_offset',
    #30: 'overlay_mask',
    #31: 'overlay_compress',
    #32: 'overlay',
    #33: 'special_overlay_mask',
    #34: 'special_overlay_compress',
    #35: 'special_overlay',
    36: 'image_property',
    #37: 'stage_label',
    #38: 'autoscale_lo_info',
    #39: 'autoscale_hi_info',
    #40: 'absolute_z',
    #41: 'absolute_z_valid',
    #42: 'gamma',
    #43: 'gamma_red',
    #44: 'gamma_green',
    #45: 'gamma_blue',
    #46: 'camera_bin',
    47: 'new_lut',
    #48: 'image_property_ex',
    49: 'plane_property',
    #50: 'user_lut_table',
    51: 'red_autoscale_info',
    #52: 'red_autoscale_lo_info',
    #53: 'red_autoscale_hi_info',
    54: 'red_minscale_info',
    55: 'red_maxscale_info',
    56: 'green_autoscale_info',
    #57: 'green_autoscale_lo_info',
    #58: 'green_autoscale_hi_info',
    59: 'green_minscale_info',
    60: 'green_maxscale_info',
    61: 'blue_autoscale_info',
    #62: 'blue_autoscale_lo_info',
    #63: 'blue_autoscale_hi_info',
    64: 'blue_min_scale_info',
    65: 'blue_max_scale_info',
    #66: 'overlay_plane_color'
}
# NIH Image PicHeader v1.63
# numpy structured dtype describing the NIH image header record
NIH_IMAGE_HEADER = [
    ('fileid', 'a8'),
    ('nlines', 'i2'),
    ('pixelsperline', 'i2'),
    ('version', 'i2'),
    ('oldlutmode', 'i2'),
    ('oldncolors', 'i2'),
    ('colors', 'u1', (3, 32)),
    ('oldcolorstart', 'i2'),
    ('colorwidth', 'i2'),
    ('extracolors', 'u2', (6, 3)),
    ('nextracolors', 'i2'),
    ('foregroundindex', 'i2'),
    ('backgroundindex', 'i2'),
    ('xscale', 'f8'),
    ('_x0', 'i2'),
    ('_x1', 'i2'),
    ('units_t', 'i2'),
    ('p1', [('x', 'i2'), ('y', 'i2')]),
    ('p2', [('x', 'i2'), ('y', 'i2')]),
    ('curvefit_t', 'i2'),
    ('ncoefficients', 'i2'),
    ('coeff', 'f8', 6),
    ('_um_len', 'u1'),  # length of the Pascal-style 'um' string
    ('um', 'a15'),
    ('_x2', 'u1'),
    ('binarypic', 'b1'),
    ('slicestart', 'i2'),
    ('sliceend', 'i2'),
    ('scalemagnification', 'f4'),
    ('nslices', 'i2'),
    ('slicespacing', 'f4'),
    ('currentslice', 'i2'),
    ('frameinterval', 'f4'),
    ('pixelaspectratio', 'f4'),
    ('colorstart', 'i2'),
    ('colorend', 'i2'),
    ('ncolors', 'i2'),
    ('fill1', '3u2'),
    ('fill2', '3u2'),
    ('colortable_t', 'u1'),
    ('lutmode_t', 'u1'),
    ('invertedtable', 'b1'),
    ('zeroclip', 'b1'),
    ('_xunit_len', 'u1'),  # length of the Pascal-style 'xunit' string
    ('xunit', 'a11'),
    ('stacktype_t', 'i2'),
]

#NIH_COLORTABLE_TYPE = (
#    'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
#    'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
#NIH_LUTMODE_TYPE = (
#    'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
#    'ColorLut', 'CustomGrayscale')
#NIH_CURVEFIT_TYPE = (
#    'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
#    'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
#    'UncalibratedOD')
#NIH_UNITS_TYPE = (
#    'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
#    'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
#NIH_STACKTYPE_TYPE = (
#    'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Olympus FluoView
# numpy structured-dtype descriptor for one FluoView dimension record.
MM_DIMENSION = [
    ('name', 'a16'),
    ('size', 'i4'),
    ('origin', 'f8'),
    ('resolution', 'f8'),
    ('unit', 'a64'),
]

# Olympus FluoView file header; embeds up to 10 MM_DIMENSION sub-records.
# 'offset_*' fields are byte offsets to other data blocks in the file.
MM_HEADER = [
    ('header_flag', 'i2'),
    ('image_type', 'u1'),
    ('image_name', 'a257'),
    ('offset_data', 'u4'),
    ('palette_size', 'i4'),
    ('offset_palette0', 'u4'),
    ('offset_palette1', 'u4'),
    ('comment_size', 'i4'),
    ('offset_comment', 'u4'),
    ('dimensions', MM_DIMENSION, 10),
    ('offset_position', 'u4'),
    ('map_type', 'i2'),
    ('map_min', 'f8'),
    ('map_max', 'f8'),
    ('min_value', 'f8'),
    ('max_value', 'f8'),
    ('offset_map', 'u4'),
    ('gamma', 'f8'),
    ('offset', 'f8'),
    ('gray_channel', MM_DIMENSION),
    ('offset_thumbnail', 'u4'),
    ('voice_field', 'i4'),
    ('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
# numpy structured-dtype descriptor for the CZ_LSM_INFO TIFF tag payload.
# 'offset_*' fields are byte offsets to optional sub-blocks within the file.
CZ_LSM_INFO = [
    ('magic_number', 'i4'),
    ('structure_size', 'i4'),
    ('dimension_x', 'i4'),
    ('dimension_y', 'i4'),
    ('dimension_z', 'i4'),
    ('dimension_channels', 'i4'),
    ('dimension_time', 'i4'),
    ('dimension_data_type', 'i4'),
    ('thumbnail_x', 'i4'),
    ('thumbnail_y', 'i4'),
    ('voxel_size_x', 'f8'),
    ('voxel_size_y', 'f8'),
    ('voxel_size_z', 'f8'),
    ('origin_x', 'f8'),
    ('origin_y', 'f8'),
    ('origin_z', 'f8'),
    ('scan_type', 'u2'),
    ('spectral_scan', 'u2'),
    ('data_type', 'u4'),
    ('offset_vector_overlay', 'u4'),
    ('offset_input_lut', 'u4'),
    ('offset_output_lut', 'u4'),
    ('offset_channel_colors', 'u4'),
    ('time_interval', 'f8'),
    ('offset_channel_data_types', 'u4'),
    ('offset_scan_information', 'u4'),
    ('offset_ks_data', 'u4'),
    ('offset_time_stamps', 'u4'),
    ('offset_event_list', 'u4'),
    ('offset_roi', 'u4'),
    ('offset_bleach_roi', 'u4'),
    ('offset_next_recording', 'u4'),
    ('display_aspect_x', 'f8'),
    ('display_aspect_y', 'f8'),
    ('display_aspect_z', 'f8'),
    ('display_aspect_time', 'f8'),
    ('offset_mean_of_roi_overlay', 'u4'),
    ('offset_topo_isoline_overlay', 'u4'),
    ('offset_topo_profile_overlay', 'u4'),
    ('offset_linescan_overlay', 'u4'),
    ('offset_toolbar_flags', 'u4'),
]

# Import functions for LSM_INFO subrecords.
# Keys are the CZ_LSM_INFO 'offset_<key>' field suffixes; values are the
# reader functions (defined earlier in this module) used to parse them.
CZ_LSM_INFO_READERS = {
    'scan_information': read_cz_lsm_scan_info,
    'time_stamps': read_cz_lsm_time_stamps,
    'event_list': read_cz_lsm_event_list,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
    0: 'XYZCT',  # x-y-z scan
    1: 'XYZCT',  # z scan (x-z plane)
    2: 'XYZCT',  # line scan
    3: 'XYTCZ',  # time series x-y
    4: 'XYZTC',  # time series x-z
    5: 'XYTCZ',  # time series 'Mean of ROIs'
    6: 'XYZTC',  # time series x-y-z
    7: 'XYCTZ',  # spline scan
    8: 'XYCZT',  # spline scan x-z
    9: 'XYTCZ',  # time series spline plane x-z
    10: 'XYZCT',  # point mode
}

# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
    'X': 'dimension_x',
    'Y': 'dimension_y',
    'Z': 'dimension_z',
    'C': 'dimension_channels',
    'T': 'dimension_time',
}

# Descriptions of cz_lsm_info.data_type
CZ_DATA_TYPES = {
    0: 'varying data types',
    2: '12 bit unsigned integer',
    5: '32 bit float',
}
# Entry codes that open an array container in the LSM scan-information
# block, mapped to the attribute name the array is stored under.
CZ_LSM_SCAN_INFO_ARRAYS = {
    0x20000000: "tracks",
    0x30000000: "lasers",
    0x60000000: "detectionchannels",
    0x80000000: "illuminationchannels",
    0xa0000000: "beamsplitters",
    0xc0000000: "datachannels",
    0x13000000: "markers",
    0x11000000: "timers",
}

# Entry codes that open one struct (array element) in the LSM
# scan-information block, mapped to the parent array name.
CZ_LSM_SCAN_INFO_STRUCTS = {
    0x40000000: "tracks",
    0x50000000: "lasers",
    0x70000000: "detectionchannels",
    0x90000000: "illuminationchannels",
    0xb0000000: "beamsplitters",
    0xd0000000: "datachannels",
    0x14000000: "markers",
    0x12000000: "timers",
}
# Map LSM scan-information attribute entry codes to attribute names.
# The high byte of the code selects the sub-struct the attribute belongs to
# (compare CZ_LSM_SCAN_INFO_STRUCTS); the 0x10000000 group applies to the
# top-level recording settings.
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
    0x10000001: "name",
    0x10000002: "description",
    0x10000003: "notes",
    0x10000004: "objective",
    0x10000005: "processing_summary",
    0x10000006: "special_scan_mode",
    0x10000007: "oledb_recording_scan_type",
    0x10000008: "oledb_recording_scan_mode",
    0x10000009: "number_of_stacks",
    0x1000000a: "lines_per_plane",
    0x1000000b: "samples_per_line",
    0x1000000c: "planes_per_volume",
    0x1000000d: "images_width",
    0x1000000e: "images_height",
    0x1000000f: "images_number_planes",
    0x10000010: "images_number_stacks",
    0x10000011: "images_number_channels",
    0x10000012: "linscan_xy_size",
    0x10000013: "scan_direction",
    0x10000014: "time_series",
    0x10000015: "original_scan_data",
    0x10000016: "zoom_x",
    0x10000017: "zoom_y",
    0x10000018: "zoom_z",
    0x10000019: "sample_0x",
    0x1000001a: "sample_0y",
    0x1000001b: "sample_0z",
    0x1000001c: "sample_spacing",
    0x1000001d: "line_spacing",
    0x1000001e: "plane_spacing",
    0x1000001f: "plane_width",
    0x10000020: "plane_height",
    0x10000021: "volume_depth",
    0x10000023: "nutation",
    0x10000034: "rotation",
    0x10000035: "precession",
    0x10000036: "sample_0time",
    0x10000037: "start_scan_trigger_in",
    0x10000038: "start_scan_trigger_out",
    0x10000039: "start_scan_event",
    0x10000040: "start_scan_time",
    0x10000041: "stop_scan_trigger_in",
    0x10000042: "stop_scan_trigger_out",
    0x10000043: "stop_scan_event",
    0x10000044: "stop_scan_time",
    0x10000045: "use_rois",
    0x10000046: "use_reduced_memory_rois",
    0x10000047: "user",
    0x10000048: "use_bccorrection",
    0x10000049: "position_bccorrection1",
    0x10000050: "position_bccorrection2",
    0x10000051: "interpolation_y",
    0x10000052: "camera_binning",
    0x10000053: "camera_supersampling",
    0x10000054: "camera_frame_width",
    0x10000055: "camera_frame_height",
    0x10000056: "camera_offset_x",
    0x10000057: "camera_offset_y",
    # lasers
    0x50000001: "name",
    0x50000002: "acquire",
    0x50000003: "power",
    # tracks
    0x40000001: "multiplex_type",
    0x40000002: "multiplex_order",
    0x40000003: "sampling_mode",
    0x40000004: "sampling_method",
    0x40000005: "sampling_number",
    0x40000006: "acquire",
    0x40000007: "sample_observation_time",
    0x4000000b: "time_between_stacks",
    0x4000000c: "name",
    0x4000000d: "collimator1_name",
    0x4000000e: "collimator1_position",
    0x4000000f: "collimator2_name",
    0x40000010: "collimator2_position",
    0x40000011: "is_bleach_track",
    0x40000012: "is_bleach_after_scan_number",
    0x40000013: "bleach_scan_number",
    0x40000014: "trigger_in",
    0x40000015: "trigger_out",
    0x40000016: "is_ratio_track",
    0x40000017: "bleach_count",
    0x40000018: "spi_center_wavelength",
    0x40000019: "pixel_time",
    0x40000021: "condensor_frontlens",
    0x40000023: "field_stop_value",
    0x40000024: "id_condensor_aperture",
    0x40000025: "condensor_aperture",
    0x40000026: "id_condensor_revolver",
    0x40000027: "condensor_filter",
    0x40000028: "id_transmission_filter1",
    0x40000029: "id_transmission1",
    0x40000030: "id_transmission_filter2",
    0x40000031: "id_transmission2",
    0x40000032: "repeat_bleach",
    0x40000033: "enable_spot_bleach_pos",
    0x40000034: "spot_bleach_posx",
    0x40000035: "spot_bleach_posy",
    0x40000036: "spot_bleach_posz",
    0x40000037: "id_tubelens",
    0x40000038: "id_tubelens_position",
    0x40000039: "transmitted_light",
    0x4000003a: "reflected_light",
    0x4000003b: "simultan_grab_and_bleach",
    0x4000003c: "bleach_pixel_time",
    # detection_channels
    0x70000001: "integration_mode",
    0x70000002: "special_mode",
    0x70000003: "detector_gain_first",
    0x70000004: "detector_gain_last",
    0x70000005: "amplifier_gain_first",
    0x70000006: "amplifier_gain_last",
    0x70000007: "amplifier_offs_first",
    0x70000008: "amplifier_offs_last",
    0x70000009: "pinhole_diameter",
    0x7000000a: "counting_trigger",
    0x7000000b: "acquire",
    0x7000000c: "point_detector_name",
    0x7000000d: "amplifier_name",
    0x7000000e: "pinhole_name",
    0x7000000f: "filter_set_name",
    0x70000010: "filter_name",
    0x70000013: "integrator_name",
    0x70000014: "detection_channel_name",
    0x70000015: "detection_detector_gain_bc1",
    0x70000016: "detection_detector_gain_bc2",
    0x70000017: "detection_amplifier_gain_bc1",
    0x70000018: "detection_amplifier_gain_bc2",
    0x70000019: "detection_amplifier_offset_bc1",
    0x70000020: "detection_amplifier_offset_bc2",
    0x70000021: "detection_spectral_scan_channels",
    0x70000022: "detection_spi_wavelength_start",
    0x70000023: "detection_spi_wavelength_stop",
    0x70000026: "detection_dye_name",
    0x70000027: "detection_dye_folder",
    # illumination_channels
    0x90000001: "name",
    0x90000002: "power",
    0x90000003: "wavelength",
    0x90000004: "aquire",
    0x90000005: "detchannel_name",
    0x90000006: "power_bc1",
    0x90000007: "power_bc2",
    # beam_splitters
    0xb0000001: "filter_set",
    0xb0000002: "filter",
    0xb0000003: "name",
    # data_channels
    0xd0000001: "name",
    0xd0000003: "acquire",
    0xd0000004: "color",
    0xd0000005: "sample_type",
    0xd0000006: "bits_per_sample",
    0xd0000007: "ratio_type",
    0xd0000008: "ratio_track1",
    0xd0000009: "ratio_track2",
    0xd000000a: "ratio_channel1",
    0xd000000b: "ratio_channel2",
    0xd000000c: "ratio_const1",
    0xd000000d: "ratio_const2",
    0xd000000e: "ratio_const3",
    0xd000000f: "ratio_const4",
    0xd0000010: "ratio_const5",
    0xd0000011: "ratio_const6",
    0xd0000012: "ratio_first_images1",
    0xd0000013: "ratio_first_images2",
    0xd0000014: "dye_name",
    0xd0000015: "dye_folder",
    0xd0000016: "spectrum",
    0xd0000017: "acquire",
    # markers
    0x14000001: "name",
    0x14000002: "description",
    0x14000003: "trigger_in",
    0x14000004: "trigger_out",
    # timers
    0x12000001: "name",
    0x12000002: "description",
    0x12000003: "interval",
    0x12000004: "trigger_in",
    0x12000005: "trigger_out",
    0x12000006: "activation_time",
    0x12000007: "activation_number",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
# code: (attribute name, default value, type, count, validator)
# 'type' is the TIFF field type code; 'count' None means variable length;
# 'validator' is either None or a mapping of raw values to symbolic names.
TIFF_TAGS = {
    254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
    255: ('subfile_type', None, 3, 1,
          {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
    256: ('image_width', None, 4, 1, None),
    257: ('image_length', None, 4, 1, None),
    258: ('bits_per_sample', 1, 3, 1, None),
    259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
    262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
    266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
    269: ('document_name', None, 2, None, None),
    270: ('image_description', None, 2, None, None),
    271: ('make', None, 2, None, None),
    272: ('model', None, 2, None, None),
    273: ('strip_offsets', None, 4, None, None),
    274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
    277: ('samples_per_pixel', 1, 3, 1, None),
    278: ('rows_per_strip', 2**32-1, 4, 1, None),
    279: ('strip_byte_counts', None, 4, None, None),
    280: ('min_sample_value', None, 3, None, None),
    281: ('max_sample_value', None, 3, None, None),  # 2**bits_per_sample
    282: ('x_resolution', None, 5, 1, None),
    283: ('y_resolution', None, 5, 1, None),
    284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
    285: ('page_name', None, 2, None, None),
    286: ('x_position', None, 5, 1, None),
    287: ('y_position', None, 5, 1, None),
    296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
    297: ('page_number', None, 3, 2, None),
    305: ('software', None, 2, None, None),
    306: ('datetime', None, 2, None, None),
    315: ('artist', None, 2, None, None),
    316: ('host_computer', None, 2, None, None),
    317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
    320: ('color_map', None, 3, None, None),
    322: ('tile_width', None, 4, 1, None),
    323: ('tile_length', None, 4, 1, None),
    324: ('tile_offsets', None, 4, None, None),
    325: ('tile_byte_counts', None, 4, None, None),
    338: ('extra_samples', None, 3, None,
          {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
    339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
    530: ('ycbcr_subsampling', 1, 3, 2, None),
    531: ('ycbcr_positioning', 1, 3, 1, None),
    32997: ('image_depth', None, 4, 1, None),
    32998: ('tile_depth', None, 4, 1, None),
    33432: ('copyright', None, 1, None, None),
    33445: ('md_file_tag', None, 4, 1, None),
    33446: ('md_scale_pixel', None, 5, 1, None),
    33447: ('md_color_table', None, 3, None, None),
    33448: ('md_lab_name', None, 2, None, None),
    33449: ('md_sample_info', None, 2, None, None),
    33450: ('md_prep_date', None, 2, None, None),
    33451: ('md_prep_time', None, 2, None, None),
    33452: ('md_file_units', None, 2, None, None),
    33550: ('model_pixel_scale', None, 12, 3, None),
    33922: ('model_tie_point', None, 12, None, None),
    37510: ('user_comment', None, None, None, None),
    34665: ('exif_ifd', None, None, 1, None),
    34735: ('geo_key_directory', None, 3, None, None),
    34736: ('geo_double_params', None, 12, None, None),
    34737: ('geo_ascii_params', None, 2, None, None),
    34853: ('gps_ifd', None, None, 1, None),
    42112: ('gdal_metadata', None, 2, None, None),
    42113: ('gdal_nodata', None, 2, None, None),
    50838: ('imagej_byte_counts', None, None, None, None),
    50289: ('mc_xy_position', None, 12, 2, None),
    50290: ('mc_z_position', None, 12, 1, None),
    50291: ('mc_xy_calibration', None, 12, 3, None),
    50292: ('mc_lens_lem_na_n', None, 12, 3, None),
    50293: ('mc_channel_name', None, 1, None, None),
    50294: ('mc_ex_wavelength', None, 12, 1, None),
    50295: ('mc_time_stamp', None, 12, 1, None),
}
# Map custom TIFF tag codes to attribute names and import functions.
# Each value is (attribute name, reader function defined earlier in this
# module); the reader parses the raw tag bytes into a Python object.
CUSTOM_TAGS = {
    700: ('xmp', read_bytes),
    34377: ('photoshop', read_numpy),
    33723: ('iptc', read_bytes),
    34675: ('icc_profile', read_numpy),
    33628: ('mm_uic1', read_mm_uic1),
    33629: ('mm_uic2', read_mm_uic2),
    33630: ('mm_uic3', read_mm_uic3),
    33631: ('mm_uic4', read_mm_uic4),
    34361: ('mm_header', read_mm_header),
    34362: ('mm_stamp', read_mm_stamp),
    34386: ('mm_user_block', read_bytes),
    34412: ('cz_lsm_info', read_cz_lsm_info),
    43314: ('nih_image_header', read_nih_image_header),
    # 40001: ('mc_ipwinscal', read_bytes),
    40100: ('mc_id_old', read_bytes),
    50288: ('mc_id', read_bytes),
    50296: ('mc_frame_properties', read_bytes),
    50839: ('imagej_meta_data', read_bytes),
}

# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
           bitspersample=None, photometric='rgb', interpolation='nearest',
           dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
    """Plot n-dimensional images using matplotlib.pyplot.

    Return figure, subplot and plot axis.
    Requires pyplot already imported ``from matplotlib import pyplot``.

    Parameters
    ----------
    bitspersample : int or None
        Number of bits per channel in integer RGB images.
    photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
        The color space of the image data.
    title : str
        Window and subplot title.
    figure : matplotlib.figure.Figure (optional).
        Matplotlib to use for plotting.
    subplot : int
        A matplotlib.pyplot.subplot axis.
    maxdim : int
        maximum image size in any dimension.
    kwargs : optional
        Arguments for matplotlib.pyplot.imshow.

    """
    #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
    #    raise ValueError("Can't handle %s photometrics" % photometric)
    isrgb = photometric in ('rgb', 'palette')
    data = numpy.atleast_2d(data.squeeze())
    # Clip every axis to maxdim so huge images stay plottable.
    data = data[(slice(0, maxdim), ) * len(data.shape)]
    dims = data.ndim
    if dims < 2:
        raise ValueError("not an image")
    elif dims == 2:
        dims = 0
        isrgb = False
    else:
        # Move a 3/4 sample color axis to the last position if needed.
        if (isrgb and data.shape[-3] in (3, 4)):
            data = numpy.swapaxes(data, -3, -2)
            data = numpy.swapaxes(data, -2, -1)
        elif (not isrgb and data.shape[-1] in (3, 4)):
            data = numpy.swapaxes(data, -3, -1)
            data = numpy.swapaxes(data, -2, -1)
        isrgb = isrgb and data.shape[-1] in (3, 4)
        # Remaining leading axes are browsed with sliders below.
        dims -= 3 if isrgb else 2

    # Normalize sample values / determine the data maximum per sample kind.
    if photometric == 'palette':
        datamax = data.max()
        if datamax > 255:
            data >>= 8  # possible precision loss
            data = data.astype('B')
    elif data.dtype.kind in 'ui':
        if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
            try:
                bitspersample = int(math.ceil(math.log(data.max(), 2)))
            except Exception:
                bitspersample = data.dtype.itemsize * 8
        elif not isinstance(bitspersample, int):
            # bitspersample can be tuple, e.g. (5, 6, 5)
            bitspersample = data.dtype.itemsize * 8
        datamax = 2**bitspersample
        if isrgb:
            if bitspersample < 8:
                data <<= 8 - bitspersample
            elif bitspersample > 8:
                data >>= bitspersample - 8  # precision loss
                data = data.astype('B')
    elif data.dtype.kind == 'f':
        datamax = data.max()
        if isrgb and datamax > 1.0:
            if data.dtype.char == 'd':
                data = data.astype('f')
            data /= datamax
    elif data.dtype.kind == 'b':
        datamax = 1

    if not isrgb:
        if vmax is None:
            vmax = datamax
        if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # FIX: mask out nodata values; previously this took the
                    # minimum of the boolean array (data > dtmin).
                    vmin = numpy.min(data[data > dtmin])
            # FIX: 'elif' so the signed-integer result above is not
            # clobbered by the 'else: vmin = 0' fallback below.
            elif data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            else:
                vmin = 0

    pyplot = sys.modules['matplotlib.pyplot']

    if figure is None:
        pyplot.rc('font', family='sans-serif', weight='normal', size=8)
        figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
                               facecolor='1.0', edgecolor='w')
        try:
            figure.canvas.manager.window.title(title)
        except Exception:
            pass
        pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
                               left=0.1, right=0.95, hspace=0.05, wspace=0.0)
    subplot = pyplot.subplot(subplot)

    if title:
        pyplot.title(title, size=11)

    if cmap is None:
        if data.dtype.kind in 'ub' and vmin == 0:
            cmap = 'gray'
        else:
            cmap = 'coolwarm'
        if photometric == 'miniswhite':
            cmap += '_r'

    image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
                          cmap=cmap, interpolation=interpolation, **kwargs)

    if not isrgb:
        pyplot.colorbar()  # panchor=(0.55, 0.5), fraction=0.05

    def format_coord(x, y):
        # callback function to format coordinate display in toolbar
        x = int(x + 0.5)
        y = int(y + 0.5)
        try:
            if dims:
                return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
                                               current, x, y)
            else:
                return "%s @ [%4i, %4i]" % (data[y, x], x, y)
        except IndexError:
            return ""

    pyplot.gca().format_coord = format_coord

    if dims:
        # One slider per leading (non-image) axis to browse the hyperstack.
        current = list((0, ) * dims)
        cur_ax_dat = [0, data[tuple(current)].squeeze()]
        sliders = [pyplot.Slider(
            pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
            'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
            valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
        for slider in sliders:
            slider.drawon = False

        def set_image(current, sliders=sliders, data=data):
            # change image and redraw canvas
            cur_ax_dat[1] = data[tuple(current)].squeeze()
            image.set_data(cur_ax_dat[1])
            for ctrl, index in zip(sliders, current):
                ctrl.eventson = False
                ctrl.set_val(index)
                ctrl.eventson = True
            figure.canvas.draw()

        def on_changed(index, axis, data=data, current=current):
            # callback function for slider change event
            index = int(round(index))
            cur_ax_dat[0] = axis
            if index == current[axis]:
                return
            # Wrap around at both ends of the axis.
            if index >= data.shape[axis]:
                index = 0
            elif index < 0:
                index = data.shape[axis] - 1
            current[axis] = index
            set_image(current)

        def on_keypressed(event, data=data, current=current):
            # callback function for key press event
            key = event.key
            axis = cur_ax_dat[0]
            if str(key) in '0123456789':
                on_changed(key, axis)
            elif key == 'right':
                on_changed(current[axis] + 1, axis)
            elif key == 'left':
                on_changed(current[axis] - 1, axis)
            elif key == 'up':
                cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
            elif key == 'down':
                cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
            elif key == 'end':
                on_changed(data.shape[axis] - 1, axis)
            elif key == 'home':
                on_changed(0, axis)

        figure.canvas.mpl_connect('key_press_event', on_keypressed)
        for axis, ctrl in enumerate(sliders):
            ctrl.on_changed(lambda k, a=axis: on_changed(k, a))

    return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
    """Command line usage main function.

    Parses options, opens the TIFF file(s) given on the command line,
    prints their structure, and optionally displays the image data.
    """
    if float(sys.version[0:3]) < 2.6:
        print("This script requires Python version 2.6 or better.")
        print("This is Python version %s" % sys.version)
        return 0
    if argv is None:
        argv = sys.argv

    import re
    import optparse

    # Pull usage/version strings out of the module docstring when present.
    search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d
    parser = optparse.OptionParser(
        usage="usage: %prog [options] path",
        description=search_doc("\n\n([^|]*?)\n\n", ''),
        version="%%prog %s" % search_doc(":Version: (.*)", "Unknown"))
    opt = parser.add_option
    opt('-p', '--page', dest='page', type='int', default=-1,
        help="display single page")
    opt('-s', '--series', dest='series', type='int', default=-1,
        help="display series of pages of same shape")
    opt('--nomultifile', dest='nomultifile', action='store_true',
        default=False, help="don't read OME series from multiple files")
    opt('--noplot', dest='noplot', action='store_true', default=False,
        help="don't display images")
    opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
        help="image interpolation method")
    opt('--dpi', dest='dpi', type='int', default=96,
        help="set plot resolution")
    opt('--debug', dest='debug', action='store_true', default=False,
        help="raise exception on failures")
    opt('--test', dest='test', action='store_true', default=False,
        help="try read all images in path")
    opt('--doctest', dest='doctest', action='store_true', default=False,
        help="runs the internal tests")
    opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
    opt('-q', '--quiet', dest='verbose', action='store_false')

    settings, path = parser.parse_args()
    path = ' '.join(path)

    if settings.doctest:
        import doctest
        doctest.testmod()
        return 0
    if not path:
        parser.error("No file specified")
    if settings.test:
        test_tifffile(path, settings.verbose)
        return 0

    # Expand shell-style wildcards; only the first match is used.
    if any(i in path for i in '?*'):
        path = glob.glob(path)
        if not path:
            print('no files match the pattern')
            return 0
        # TODO: handle image sequences
        #if len(path) == 1:
        path = path[0]

    print("Reading file structure...", end=' ')
    start = time.time()
    try:
        tif = TiffFile(path, multifile=not settings.nomultifile)
    except Exception as e:
        if settings.debug:
            raise
        else:
            print("\n", e)
            sys.exit(0)
    print("%.3f ms" % ((time.time()-start) * 1e3))

    if tif.is_ome:
        settings.norgb = True

    # Default to the requested (or first) page without reading pixel data.
    images = [(None, tif[0 if settings.page < 0 else settings.page])]
    if not settings.noplot:
        print("Reading image data... ", end=' ')
        notnone = lambda x: next(i for i in x if i is not None)
        start = time.time()
        try:
            if settings.page >= 0:
                images = [(tif.asarray(key=settings.page),
                           tif[settings.page])]
            elif settings.series >= 0:
                images = [(tif.asarray(series=settings.series),
                           notnone(tif.series[settings.series].pages))]
            else:
                # Read every series; keep going when one of them fails.
                images = []
                for i, s in enumerate(tif.series):
                    try:
                        images.append(
                            (tif.asarray(series=i), notnone(s.pages)))
                    except ValueError as e:
                        images.append((None, notnone(s.pages)))
                        if settings.debug:
                            raise
                        else:
                            print("\n* series %i failed: %s... " % (i, e),
                                  end='')
            print("%.3f ms" % ((time.time()-start) * 1e3))
        except Exception as e:
            if settings.debug:
                raise
            else:
                print(e)

    tif.close()

    # Print file, series, page and tag information.
    print("\nTIFF file:", tif)
    print()
    for i, s in enumerate(tif.series):
        print("Series %i" % i)
        print(s)
        print()

    for i, page in images:
        print(page)
        print(page.tags)
        if page.is_palette:
            print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
        for attr in ('cz_lsm_info', 'cz_lsm_scan_information', 'mm_uic_tags',
                     'mm_header', 'imagej_tags', 'nih_image_header'):
            if hasattr(page, attr):
                print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
        print()

    # Display the image data with matplotlib, if available.
    if images and not settings.noplot:
        try:
            import matplotlib
            matplotlib.use('TkAgg')
            from matplotlib import pyplot
        except ImportError as e:
            warnings.warn("failed to import matplotlib.\n%s" % e)
        else:
            for img, page in images:
                if img is None:
                    continue
                vmin, vmax = None, None
                if 'gdal_nodata' in page.tags:
                    # Ignore nodata values when scaling the display range.
                    vmin = numpy.min(img[img > float(page.gdal_nodata)])
                if page.is_stk:
                    try:
                        vmin = page.mm_uic_tags['min_scale']
                        vmax = page.mm_uic_tags['max_scale']
                    except KeyError:
                        pass
                    else:
                        if vmax <= vmin:
                            vmin, vmax = None, None
                title = "%s\n %s" % (str(tif), str(page))
                imshow(img, title=title, vmin=vmin, vmax=vmax,
                       bitspersample=page.bits_per_sample,
                       photometric=page.photometric,
                       interpolation=settings.interpol,
                       dpi=settings.dpi)
                pyplot.show()
TIFFfile = TiffFile  # backwards compatibility

# Python 3 removed 'basestring'; alias it so isinstance checks keep working.
if sys.version_info[0] > 2:
    basestring = str

__version__ = '2012.08.14'
__docformat__ = 'restructuredtext en'

if __name__ == "__main__":
    sys.exit(main())
| 35.00237 | 83 | 0.555636 |
795bd2ab23e5e08a0b9b3895ae3f9762aade6ef7 | 4,365 | py | Python | cifar100.py | TheSimoms/hovden-2022-demo | 051d98b4fb1b3a6d5a9325d472894183711d4532 | [
"MIT"
] | 1 | 2022-03-24T16:56:43.000Z | 2022-03-24T16:56:43.000Z | cifar100.py | TheSimoms/hovden-2022-demo | 051d98b4fb1b3a6d5a9325d472894183711d4532 | [
"MIT"
] | null | null | null | cifar100.py | TheSimoms/hovden-2022-demo | 051d98b4fb1b3a6d5a9325d472894183711d4532 | [
"MIT"
] | 1 | 2022-03-23T11:20:15.000Z | 2022-03-23T11:20:15.000Z | import argparse
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
# Describe the model's input and label dimensions.
INPUT_SHAPE = (32, 32, 3)  # CIFAR images: 32x32 pixels, 3 color channels
NUMBER_OF_CLASSES = 100    # CIFAR-100 fine-grained labels
def train(args):
    """Train the CIFAR-100 classifier and report its test accuracy.

    Returns the trained Keras model.
    """
    # Fetch the fine-grained (100 class) train/test split.
    (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')

    def prepare(images, labels):
        # Scale pixels from [0, 255] to [0, 1] and one-hot encode labels.
        return (images.astype('float32') / 255.0,
                to_categorical(labels, NUMBER_OF_CLASSES))

    x_train, y_train = prepare(x_train, y_train)
    x_test, y_test = prepare(x_test, y_test)

    model = build_model()

    # Stop training automatically once validation loss stops improving.
    early_stopping = EarlyStopping(
        monitor='val_loss',
        mode='min',
        verbose=1,
        patience=args.patience,
        restore_best_weights=True,
    )

    model.fit(
        x_train,
        y_train,
        batch_size=args.batch_size,
        epochs=args.epochs,
        verbose=1,
        validation_split=0.2,
        callbacks=[early_stopping],
    )

    # Evaluate on the held-out test data.
    score = model.evaluate(x_test, y_test, verbose=0)

    print(f'Accuracy: {score[1]}')

    return model
def build_model():
    """Assemble and compile the convolutional CIFAR-100 classifier."""
    def conv_block(filters, first=False):
        # conv-conv-pool-dropout stage; the first stage declares the input.
        kwargs = {'input_shape': INPUT_SHAPE} if first else {}
        return [
            Conv2D(filters, (3, 3), padding='same', **kwargs),
            Activation('relu'),
            Conv2D(filters, (3, 3), padding='same'),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.5),
        ]

    # Dense classification head over the flattened feature maps.
    head = [
        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(100, activation='softmax'),
    ]

    model = Sequential(conv_block(32, first=True) + conv_block(64) + head)
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )
    return model
def save(model, args):
    """Persist the trained model to args.model when --save was requested."""
    if not args.save:
        return
    model.save(args.model)
def predict(args):
    """Load the saved model and display its predictions for one image."""
    model = load_model(args.model)

    image = Image.open(args.image, 'r')
    # The network expects 32x32 RGB inputs in [0, 1] with a batch axis.
    scaled = np.asarray(image.resize((32, 32))).astype('float32') / 255.0
    batch = np.expand_dims(scaled, axis=0)

    plot_result(image, model.predict(batch)[0])
def plot_result(image, result):
    """Show the image next to its five most probable class labels."""
    with open('cifar100-labels.txt', 'r', encoding='utf-8') as file:
        labels = file.read().splitlines()

    # Rank every class probability, best first.
    ranked = sorted(enumerate(result), key=lambda pair: pair[1], reverse=True)

    lines = []
    for index, probability in ranked[:5]:
        lines.append(f'{labels[index]}: {(probability * 100):.2f}%')
    result_text = '\n'.join(lines)

    # Top panel: the image; bottom panel: the textual prediction summary.
    _, axs = plt.subplots(nrows=2, sharex=True, figsize=(3, 5))
    image_axis, text_axis = axs
    image_axis.imshow(image, origin='upper')
    image_axis.axis('off')
    text_axis.text(0, 0, result_text)
    text_axis.axis('off')

    plt.show()
def run():
    """Parse the command line and dispatch to training and/or prediction."""
    parser = argparse.ArgumentParser()

    # (flag, argparse keyword arguments) in CLI help order.
    options = [
        ('--train', dict(action='store_true')),
        ('--save', dict(action='store_true')),
        ('--batch-size', dict(type=int, default=100)),
        ('--epochs', dict(type=int, default=1000)),
        ('--patience', dict(type=int, default=50)),
        ('--predict', dict(action='store_true')),
        ('--model', dict(type=str, default='model')),
        ('--image', dict(type=str, default=None)),
    ]
    for name, kwargs in options:
        parser.add_argument(name, **kwargs)

    args = parser.parse_args()

    if args.train:
        save(train(args), args)
    if args.predict:
        predict(args)
# Script entry point.
if __name__ == '__main__':
    run()
| 24.522472 | 93 | 0.627721 |
795bd2d9179430998a4d1ed6249289bbc394c32a | 92,497 | py | Python | autotest/ogr/ogr_mitab.py | sharkAndshark/gdal | 0cf797ecaa5d1d3312f3a9f51a266cd3f9d02fb5 | [
"Apache-2.0"
] | 3 | 2017-05-06T11:43:08.000Z | 2017-07-19T15:27:06.000Z | autotest/ogr/ogr_mitab.py | sharkAndshark/gdal | 0cf797ecaa5d1d3312f3a9f51a266cd3f9d02fb5 | [
"Apache-2.0"
] | 29 | 2017-03-17T23:55:49.000Z | 2018-03-13T09:27:01.000Z | autotest/ogr/ogr_mitab.py | sharkAndshark/gdal | 0cf797ecaa5d1d3312f3a9f51a266cd3f9d02fb5 | [
"Apache-2.0"
] | 1 | 2017-10-12T05:49:01.000Z | 2017-10-12T05:49:01.000Z | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: MapInfo driver testing.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2012-2014, Even Rouault <even dot rouault at spatialys.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import random
import sys
import shutil
import time
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import test_cli_utilities
import pytest
pytestmark = pytest.mark.require_driver('MapInfo File')
###############################################################################
@pytest.fixture(autouse=True, scope='module')
def startup_and_cleanup():
    """Create the 'tmp' MapInfo datasource before the module's tests and
    delete it afterwards; also report any leaked /vsimem/ files."""
    gdaltest.mapinfo_drv = ogr.GetDriverByName('MapInfo File')
    gdaltest.mapinfo_ds = gdaltest.mapinfo_drv.CreateDataSource('tmp')

    assert gdaltest.mapinfo_ds is not None

    yield

    # Any file left in /vsimem/ indicates a leak in one of the tests.
    fl = gdal.ReadDir('/vsimem/')
    if fl is not None:
        print(fl)

    gdaltest.mapinfo_ds = None
    gdaltest.mapinfo_drv.DeleteDataSource('tmp')
###############################################################################
# Create table from data/poly.shp
def test_ogr_mitab_2():
    """Create a 'tpoly' layer with a custom SRS and copy data/poly.shp
    into it; the source features are kept in gdaltest.poly_feat for
    verification by later tests."""
    # This should convert to MapInfo datum name 'New_Zealand_GD49'
    WEIRD_SRS = 'PROJCS["NZGD49 / UTM zone 59S",GEOGCS["NZGD49",DATUM["NZGD49",SPHEROID["International 1924",6378388,297,AUTHORITY["EPSG","7022"]],TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],AUTHORITY["EPSG","6272"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4272"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",171],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","27259"]]'
    gdaltest.mapinfo_srs = osr.SpatialReference()
    gdaltest.mapinfo_srs.ImportFromWkt(WEIRD_SRS)

    #######################################################
    # Create memory Layer
    gdaltest.mapinfo_lyr = gdaltest.mapinfo_ds.CreateLayer('tpoly', gdaltest.mapinfo_srs)

    #######################################################
    # Setup Schema
    ogrtest.quick_create_layer_def(gdaltest.mapinfo_lyr,
                                   [('AREA', ogr.OFTReal),
                                    ('EAS_ID', ogr.OFTInteger),
                                    ('PRFEDEA', ogr.OFTString)])

    #######################################################
    # Copy in poly.shp

    dst_feat = ogr.Feature(feature_def=gdaltest.mapinfo_lyr.GetLayerDefn())

    shp_ds = ogr.Open('data/poly.shp')
    gdaltest.shp_ds = shp_ds
    shp_lyr = shp_ds.GetLayer(0)

    feat = shp_lyr.GetNextFeature()
    gdaltest.poly_feat = []

    while feat is not None:
        gdaltest.poly_feat.append(feat)

        dst_feat.SetFrom(feat)
        gdaltest.mapinfo_lyr.CreateFeature(dst_feat)

        feat = shp_lyr.GetNextFeature()

    #######################################################
    # Close file.

    gdaltest.mapinfo_ds = None
###############################################################################
# Verify that stuff we just wrote is still OK.
#
# Note that we allow a fairly significant error since projected
# coordinates are not stored with much precision in Mapinfo format.
def test_ogr_mitab_3():
    """Re-open the TAB output and verify the features written earlier.

    A fairly large geometry tolerance is used because projected coordinates
    are not stored with much precision in the MapInfo format.
    """
    gdaltest.mapinfo_ds = ogr.Open('tmp')
    gdaltest.mapinfo_lyr = gdaltest.mapinfo_ds.GetLayer(0)

    # An attribute-filtered scan must yield the expected EAS_ID sequence.
    gdaltest.mapinfo_lyr.SetAttributeFilter('EAS_ID < 170')
    tr = ogrtest.check_features_against_list(gdaltest.mapinfo_lyr, 'EAS_ID',
                                             [168, 169, 166, 158, 165])
    gdaltest.mapinfo_lyr.SetAttributeFilter(None)

    # Compare every feature read back against the original shapefile feature.
    for idx, src_feat in enumerate(gdaltest.poly_feat):
        back_feat = gdaltest.mapinfo_lyr.GetNextFeature()
        assert ogrtest.check_feature_geometry(back_feat,
                                              src_feat.GetGeometryRef(),
                                              max_error=0.02) == 0, \
            ('Geometry check fail. i=%d' % idx)
        for fld in range(3):
            assert src_feat.GetField(fld) == back_feat.GetField(fld), \
                ('Attribute %d does not match' % fld)

    gdaltest.poly_feat = None
    gdaltest.shp_ds = None
    assert tr
###############################################################################
# Test ExecuteSQL() results layers with geometry.
def test_ogr_mitab_4():
    """ExecuteSQL() result layers must carry geometry."""
    sql_lyr = gdaltest.mapinfo_ds.ExecuteSQL(
        "select * from tpoly where prfedea = '35043413'")
    ok = ogrtest.check_features_against_list(sql_lyr, 'prfedea', ['35043413'])
    if ok:
        # Re-read the single selected feature and check its geometry.
        sql_lyr.ResetReading()
        first = sql_lyr.GetNextFeature()
        if ogrtest.check_feature_geometry(first, 'POLYGON ((479750.688 4764702.000,479658.594 4764670.000,479640.094 4764721.000,479735.906 4764752.000,479750.688 4764702.000))', max_error=0.02) != 0:
            ok = 0
    gdaltest.mapinfo_ds.ReleaseResultSet(sql_lyr)
    assert ok
###############################################################################
# Test spatial filtering.
def test_ogr_mitab_5():
    """Spatial filtering must select exactly the feature inside the rect."""
    gdaltest.mapinfo_lyr.SetAttributeFilter(None)
    gdaltest.mapinfo_lyr.SetSpatialFilterRect(479505, 4763195,
                                              480526, 4762819)
    ok = ogrtest.check_features_against_list(gdaltest.mapinfo_lyr, 'eas_id',
                                             [158])
    # Clear the filter so later tests see all features again.
    gdaltest.mapinfo_lyr.SetSpatialFilter(None)
    assert ok
###############################################################################
# Verify that Non-WGS84 datums are populated correctly
def test_ogr_mitab_6():
    """Non-WGS84 datum names must round-trip through the MapInfo writer."""
    srs = gdaltest.mapinfo_lyr.GetSpatialRef()
    got = srs.GetAttrValue('PROJCS|GEOGCS|DATUM')
    assert got == "New_Zealand_GD49", \
        ("Datum name does not match (expected 'New_Zealand_GD49', got '%s')" % got)
###############################################################################
# Create MIF file.
def test_ogr_mitab_7():
    """Recreate the scratch datasource as a MIF file."""
    # Release and delete the previous TAB datasource first.
    gdaltest.mapinfo_ds = None
    gdaltest.mapinfo_drv.DeleteDataSource('tmp')
    new_ds = gdaltest.mapinfo_drv.CreateDataSource('tmp/wrk.mif')
    gdaltest.mapinfo_ds = new_ds
    assert new_ds is not None
###############################################################################
# Create table from data/poly.shp
def test_ogr_mitab_8():
    """Copy data/poly.shp into a 'tpoly' layer of the MIF datasource."""
    # Create the layer and its schema.
    gdaltest.mapinfo_lyr = gdaltest.mapinfo_ds.CreateLayer('tpoly')
    ogrtest.quick_create_layer_def(gdaltest.mapinfo_lyr,
                                   [('AREA', ogr.OFTReal),
                                    ('EAS_ID', ogr.OFTInteger),
                                    ('PRFEDEA', ogr.OFTString)])

    # Copy every feature from poly.shp, keeping the originals around for
    # later comparison in test_ogr_mitab_9.
    dst_feat = ogr.Feature(feature_def=gdaltest.mapinfo_lyr.GetLayerDefn())
    gdaltest.shp_ds = ogr.Open('data/poly.shp')
    shp_lyr = gdaltest.shp_ds.GetLayer(0)
    gdaltest.poly_feat = []
    src = shp_lyr.GetNextFeature()
    while src is not None:
        gdaltest.poly_feat.append(src)
        dst_feat.SetFrom(src)
        gdaltest.mapinfo_lyr.CreateFeature(dst_feat)
        src = shp_lyr.GetNextFeature()

    # Close the datasource to flush everything to disk.
    gdaltest.mapinfo_ds = None
###############################################################################
# Verify that stuff we just wrote is still OK.
def test_ogr_mitab_9():
    """Verify the MIF output written by test_ogr_mitab_8."""
    gdaltest.mapinfo_ds = ogr.Open('tmp')
    gdaltest.mapinfo_lyr = gdaltest.mapinfo_ds.GetLayer(0)

    gdaltest.mapinfo_lyr.SetAttributeFilter('eas_id < 170')
    ok = ogrtest.check_features_against_list(gdaltest.mapinfo_lyr, 'eas_id',
                                             [168, 169, 166, 158, 165])
    gdaltest.mapinfo_lyr.SetAttributeFilter(None)

    # MIF keeps full precision, hence the tight geometry tolerance here.
    for src_feat in gdaltest.poly_feat:
        back_feat = gdaltest.mapinfo_lyr.GetNextFeature()
        assert ogrtest.check_feature_geometry(back_feat, src_feat.GetGeometryRef(),
                                              max_error=0.000000001) == 0
        for fld in range(3):
            assert src_feat.GetField(fld) == back_feat.GetField(fld), \
                ('Attribute %d does not match' % fld)

    gdaltest.poly_feat = None
    gdaltest.shp_ds = None
    assert ok
###############################################################################
# Read mif file with 2 character .mid delimiter and verify operation.
def test_ogr_mitab_10():
    """Read a MIF whose .mid file uses a two-character delimiter."""
    ds = ogr.Open('data/mitab/small.mif')
    lyr = ds.GetLayer(0)

    f = lyr.GetNextFeature()
    assert f.NAME == " S. 11th St.", 'name attribute wrong.'
    assert f.FLOODZONE == 10, 'FLOODZONE attribute wrong.'
    assert ogrtest.check_feature_geometry(f,
                                          'POLYGON ((407131.721 155322.441,407134.468 155329.616,407142.741 155327.242,407141.503 155322.467,407140.875 155320.049,407131.721 155322.441))',
                                          max_error=0.000000001) == 0

    # Second feature: attribute value containing embedded double quotes.
    f = lyr.GetNextFeature()
    assert f.OWNER == 'Guarino "Chucky" Sandra', 'owner attribute wrong.'

    lyr = None
    ds = None
###############################################################################
# Verify support for NTF datum with non-greenwich datum per
# http://trac.osgeo.org/gdal/ticket/1416
#
# This test also exercises SRS reference counting as described in issue:
# http://trac.osgeo.org/gdal/ticket/1680
def test_ogr_mitab_11():
    """NTF datum with a non-Greenwich (Paris) prime meridian (#1416, #1680)."""
    ds = ogr.Open('data/mitab/small_ntf.mif')
    srs = ds.GetLayer(0).GetSpatialRef()
    # Closing the datasource here also exercises SRS reference counting.
    ds = None

    pm = srs.GetAttrValue('PROJCS|GEOGCS|PRIMEM', 1)
    assert pm.startswith('2.3372'), \
        ('got unexpected prime meridian, not paris: ' + pm)
###############################################################################
# Verify that a newly created mif layer returns a non null layer definition
def test_ogr_mitab_12():
    """A freshly created MIF layer must return a non-null layer definition."""
    ds = gdaltest.mapinfo_drv.CreateDataSource('tmp', options=['FORMAT=MIF'])
    lyr = ds.CreateLayer('testlyrdef')
    assert lyr.GetLayerDefn() is not None
    ogrtest.quick_create_layer_def(lyr, [('AREA', ogr.OFTReal)])
    ds = None
###############################################################################
# Verify that field widths and precisions are propagated correctly in TAB.
def test_ogr_mitab_13():
    """Field widths and precisions must be preserved when converting to TAB."""
    ds = ogr.Open('../ogr/data/mitab/testlyrdef.gml')
    if ds is None:
        pytest.skip()
    if test_cli_utilities.get_ogr2ogr_path() is None:
        pytest.skip()

    # Remove any leftover output from a previous run.
    try:
        os.stat('tmp/testlyrdef.tab')
        ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/testlyrdef.tab')
    except (OSError, AttributeError):
        pass

    gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f "MapInfo File" tmp/testlyrdef.tab ../ogr/data/mitab/testlyrdef.gml')

    ds = ogr.Open('tmp/testlyrdef.tab')
    lyr = ds.GetLayer('testlyrdef')
    assert lyr is not None, 'Layer missing.'
    defn = lyr.GetLayerDefn()

    # (name, type, width, precision) expected for every field.
    expected = [('AREA', ogr.OFTReal, 7, 4),
                ('VOLUME', ogr.OFTReal, 0, 0),
                ('LENGTH', ogr.OFTInteger, 10, 0),
                ('WIDTH', ogr.OFTInteger, 4, 0)]
    for name, ftype, width, precision in expected:
        fld = defn.GetFieldDefn(defn.GetFieldIndex(name))
        assert fld.GetType() == ftype and fld.GetWidth() == width and fld.GetPrecision() == precision, \
            (name + ' field definition wrong.')

    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/testlyrdef.tab')
###############################################################################
# Verify that field widths and precisions are propagated correctly in MIF.
def test_ogr_mitab_14():
    """Field widths and precisions must be preserved when converting to MIF."""
    ds = ogr.Open('../ogr/data/mitab/testlyrdef.gml')
    if ds is None:
        pytest.skip()
    if test_cli_utilities.get_ogr2ogr_path() is None:
        pytest.skip()

    # Remove any leftover output from a previous run.
    try:
        os.stat('tmp/testlyrdef.mif')
        ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/testlyrdef.mif')
    except (OSError, AttributeError):
        pass

    gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f "MapInfo File" -dsco FORMAT=MIF tmp/testlyrdef.mif ../ogr/data/mitab/testlyrdef.gml')

    ds = ogr.Open('tmp/testlyrdef.mif')
    lyr = ds.GetLayer('testlyrdef')
    assert lyr is not None, 'Layer missing.'
    defn = lyr.GetLayerDefn()

    # (name, type, width, precision) expected for every field.
    expected = [('AREA', ogr.OFTReal, 7, 4),
                ('VOLUME', ogr.OFTReal, 0, 0),
                ('LENGTH', ogr.OFTInteger, 254, 0),
                ('WIDTH', ogr.OFTInteger, 254, 0)]
    for name, ftype, width, precision in expected:
        fld = defn.GetFieldDefn(defn.GetFieldIndex(name))
        # Integer fields report a width of 0 when read back from MIF.
        expected_width = 0 if fld.GetType() == ogr.OFTInteger else width
        assert fld.GetType() == ftype and fld.GetWidth() == expected_width and fld.GetPrecision() == precision, \
            (name + ' field definition wrong.')

    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/testlyrdef.mif')
###############################################################################
# Test .mif without .mid (#5141)
def test_ogr_mitab_15():
    """A .mif without its .mid must still open and yield features (#5141)."""
    ds = ogr.Open('data/mitab/nomid.mif')
    lyr = ds.GetLayer(0)
    assert lyr.GetNextFeature() is not None
    ds = None

    # Same check when the .mif declares attributes: write a file, delete
    # its .mid, then make sure opening still works.
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/nomid.mif')
    lyr = ds.CreateLayer('empty')
    lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField(0, 1)
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(1 2)'))
    lyr.CreateFeature(f)
    ds = None

    gdal.Unlink('/vsimem/nomid.mid')
    ds = ogr.Open('/vsimem/nomid.mif')
    f = ds.GetLayer(0).GetNextFeature()
    # Without the .mid the attribute must be unset, but geometry present.
    if f.IsFieldSet(0) or f.GetGeometryRef() is None:
        f.DumpReadable()
        pytest.fail()
    gdal.Unlink('/vsimem/nomid.mif')
###############################################################################
# Test empty .mif
def test_ogr_mitab_16():
    """An empty MIF file must reopen cleanly."""
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('tmp/empty.mif')
    lyr = ds.CreateLayer('empty')
    lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
    ds = None

    reopened = ogr.Open('tmp/empty.mif')
    assert reopened is not None
    reopened = None
###############################################################################
# Run test_ogrsf
def test_ogr_mitab_17():
    """Run test_ogrsf against both the TAB directory and the MIF file."""
    if test_cli_utilities.get_test_ogrsf_path() is None:
        pytest.skip()

    for target in ('tmp', 'tmp/wrk.mif'):
        ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' ' + target)
        assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
###############################################################################
# Test EPSG:2154
# (https://github.com/mapgears/mitab/issues/1)
def test_ogr_mitab_18():
    """EPSG:2154 must round-trip (https://github.com/mapgears/mitab/issues/1)."""
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/ogr_mitab_18.tab')
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(2154)
    lyr = ds.CreateLayer('test', srs=sr)
    lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
    ds = None

    # Check both our generated file and one generated by MapInfo itself.
    for filename in ('/vsimem/ogr_mitab_18.tab', 'data/mitab/lambert93_francais.TAB'):
        ds = ogr.Open(filename, update=1)
        sr_got = ds.GetLayer(0).GetSpatialRef()
        if '2154' not in sr_got.ExportToWkt():
            print(filename)
            pytest.fail(sr_got)
        proj4 = sr_got.ExportToProj4()
        assert proj4.startswith('+proj=lcc +lat_0=46.5 +lon_0=3 +lat_1=49 +lat_2=44 +x_0=700000 +y_0=6600000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs')
        ds = None

    ogr.GetDriverByName('MapInfo File').DeleteDataSource('/vsimem/ogr_mitab_18.tab')
###############################################################################
# Check that we correctly round coordinate to the appropriate precision
# (https://github.com/mapgears/mitab/issues/2)
def test_ogr_mitab_19():
    """Coordinates must be rounded to the file precision on read
    (https://github.com/mapgears/mitab/issues/2)."""
    ds = ogr.Open('data/mitab/utm31.TAB')
    feat = ds.GetLayer(0).GetNextFeature()
    # Strict text comparison so rounding differences are caught.
    if feat.GetGeometryRef().ExportToWkt() != 'POINT (485248.12 2261.45)':
        feat.DumpReadable()
        pytest.fail()
###############################################################################
# Check that we take into account the user defined bound file
# (https://github.com/mapgears/mitab/issues/3)
# Also test BOUNDS layer creation option (http://trac.osgeo.org/gdal/ticket/5642)
def test_ogr_mitab_20():
    """Check that the user defined bounds file (MITAB_BOUNDS_FILE config
    option) and the BOUNDS layer creation option are taken into account,
    and that an on-disk change of the bounds file triggers a reload.

    https://github.com/mapgears/mitab/issues/3
    http://trac.osgeo.org/gdal/ticket/5642
    """
    # Pass i==0: without MITAB_BOUNDS_FILE
    # Pass i==1: with MITAB_BOUNDS_FILE and French bounds : first load
    # Pass i==2: with MITAB_BOUNDS_FILE and French bounds : should use already loaded file
    # Pass i==3: without MITAB_BOUNDS_FILE : should unload the file
    # Pass i==4: use BOUNDS layer creation option
    # Pass i==5: with MITAB_BOUNDS_FILE and European bounds
    # Pass i==6: with MITAB_BOUNDS_FILE and generic EPSG:2154 (Europe bounds expected)
    for fmt in ['tab', 'mif']:
        for i in range(7):
            if i == 1 or i == 2 or i == 5 or i == 6:
                gdal.SetConfigOption('MITAB_BOUNDS_FILE', 'data/mitab/mitab_bounds.txt')
            ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/ogr_mitab_20.' + fmt)
            sr = osr.SpatialReference()
            if i == 1 or i == 2:  # French bounds
                # standard_parallel_1 of ...002 matches the "French bounds"
                # entry of the bounds file.
                sr.SetFromUserInput("""PROJCS["RGF93 / Lambert-93",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 80",6378137,298.257222101],
            TOWGS84[0,0,0,0,0,0,0]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",49.00000000002],
    PARAMETER["standard_parallel_2",44],
    PARAMETER["latitude_of_origin",46.5],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["Meter",1.0],
    AUTHORITY["EPSG","2154"]]""")
            elif i == 5:  # European bounds
                # standard_parallel_1 of ...001 matches the "European bounds"
                # entry of the bounds file.
                sr.SetFromUserInput("""PROJCS["RGF93 / Lambert-93",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 80",6378137,298.257222101],
            TOWGS84[0,0,0,0,0,0,0]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",49.00000000001],
    PARAMETER["standard_parallel_2",44],
    PARAMETER["latitude_of_origin",46.5],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["Meter",1.0],
    AUTHORITY["EPSG","2154"]]""")
            else:
                sr.ImportFromEPSG(2154)
            if i == 4:
                lyr = ds.CreateLayer('test', srs=sr, options=['BOUNDS=75000,6000000,1275000,7200000'])
            else:
                lyr = ds.CreateLayer('test', srs=sr)
            lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (700000.001 6600000.001)"))
            lyr.CreateFeature(feat)
            ds = None
            gdal.SetConfigOption('MITAB_BOUNDS_FILE', None)
            ds = ogr.Open('/vsimem/ogr_mitab_20.' + fmt)
            lyr = ds.GetLayer(0)
            feat = lyr.GetNextFeature()
            # i == 6 (generic EPSG:2154) is expected to pick up the European
            # bounds entry from the bounds file.
            assert not (i == 6 and lyr.GetSpatialRef().ExportToWkt().find('49.00000000001') < 0), \
                fmt
            # Strict text comparison to check precision
            if fmt == 'tab':
                # With the proper (tighter) bounds the .001 mm-level value
                # survives; with default bounds the quantization loses it.
                if i == 1 or i == 2 or i == 4:
                    if feat.GetGeometryRef().ExportToWkt() != 'POINT (700000.001 6600000.001)':
                        print(i)
                        feat.DumpReadable()
                        pytest.fail(fmt)
                else:
                    if feat.GetGeometryRef().ExportToWkt() == 'POINT (700000.001 6600000.001)':
                        print(i)
                        feat.DumpReadable()
                        pytest.fail(fmt)
            ds = None
            ogr.GetDriverByName('MapInfo File').DeleteDataSource('/vsimem/ogr_mitab_20.' + fmt)
    # Second part: verify that the bounds file is re-read when its content
    # changes on disk (mtime is backdated / delayed to make that detectable).
    gdal.SetConfigOption('MITAB_BOUNDS_FILE', 'tmp/mitab_bounds.txt')
    for i in range(2):
        if i == 1 and not sys.platform.startswith('linux'):
            time.sleep(1)
        f = open('tmp/mitab_bounds.txt', 'wb')
        if i == 0:
            f.write(
                """Source = CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49, 700000, 6600000
Destination=CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49.00000000001, 700000, 6600000 Bounds (-792421, 5278231) (3520778, 9741029)""".encode('ascii'))
        else:
            f.write(
                """Source = CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49, 700000, 6600000
Destination=CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49.00000000002, 700000, 6600000 Bounds (75000, 6000000) (1275000, 7200000)""".encode('ascii'))
        f.close()
        if i == 1 and sys.platform.startswith('linux'):
            os.system('touch -d "1 minute ago" tmp/mitab_bounds.txt')
        ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/ogr_mitab_20.tab')
        sr = osr.SpatialReference()
        sr.ImportFromEPSG(2154)
        lyr = ds.CreateLayer('test', srs=sr)
        lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (700000.001 6600000.001)"))
        lyr.CreateFeature(feat)
        ds = None
        ds = ogr.Open('/vsimem/ogr_mitab_20.tab')
        lyr = ds.GetLayer(0)
        if i == 0:
            expected = '49.00000000001'
        else:
            expected = '49.00000000002'
        if lyr.GetSpatialRef().ExportToWkt().find(expected) < 0:
            print(i)
            gdal.SetConfigOption('MITAB_BOUNDS_FILE', None)
            os.unlink('tmp/mitab_bounds.txt')
            pytest.fail(lyr.GetSpatialRef().ExportToWkt())
        ds = None
        ogr.GetDriverByName('MapInfo File').DeleteDataSource('/vsimem/ogr_mitab_20.tab')
    gdal.SetConfigOption('MITAB_BOUNDS_FILE', None)
    os.unlink('tmp/mitab_bounds.txt')
###############################################################################
# Create .tab without explicit field
def test_ogr_mitab_21():
    """A TAB created without any explicit field gets an implicit FID field."""
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/ogr_mitab_21.tab')
    lyr = ds.CreateLayer('test')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (0 0)"))
    # The driver emits a warning about the missing field; silence it.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    lyr.CreateFeature(f)
    gdal.PopErrorHandler()
    ds = None

    ds = ogr.Open('/vsimem/ogr_mitab_21.tab')
    f = ds.GetLayer(0).GetNextFeature()
    if f.GetField('FID') != 1:
        f.DumpReadable()
        pytest.fail()
    ds = None

    ogr.GetDriverByName('MapInfo File').DeleteDataSource('/vsimem/ogr_mitab_21.tab')
###############################################################################
# Test append in update mode
def test_ogr_mitab_22():
    """Test appending features in update mode, one reopen per feature."""
    filename = '/vsimem/ogr_mitab_22.tab'
    for nb_features in (2, 1000):
        if nb_features == 2:
            nb_runs = 2
        else:
            nb_runs = 1
        # When doing 2 runs, in the second one, we create an empty
        # .tab and then open it for update. This can trigger specific bugs
        for j in range(nb_runs):
            ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
            lyr = ds.CreateLayer('test')
            lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
            if j == 0:
                # First run seeds the file with one feature at creation time.
                i = 0
                feat = ogr.Feature(lyr.GetLayerDefn())
                feat.SetField('ID', i + 1)
                feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (%d %d)" % (i, i)))
                if lyr.CreateFeature(feat) != 0:
                    print(i)
                    pytest.fail(nb_features)
            ds = None
            # Append the remaining features, reopening the datasource in
            # update mode for each one.  The (1 - j) offset accounts for the
            # feature already written when j == 0.
            for i in range(nb_features - (1 - j)):
                ds = ogr.Open(filename, update=1)
                lyr = ds.GetLayer(0)
                feat = ogr.Feature(lyr.GetLayerDefn())
                feat.SetField('ID', i + 1 + (1 - j))
                feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (%d %d)" % (i + (1 - j), i + (1 - j))))
                if lyr.CreateFeature(feat) != 0:
                    print(i)
                    pytest.fail(nb_features)
                ds = None
            # Re-read and check IDs come back as 1..nb_features in order.
            ds = ogr.Open(filename)
            lyr = ds.GetLayer(0)
            for i in range(nb_features):
                f = lyr.GetNextFeature()
                assert f is not None and f.GetField('ID') == i + 1, nb_features
            ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test creating features then reading
def test_ogr_mitab_23():
    """Create features then read them back within the same session."""
    filename = '/vsimem/ogr_mitab_23.tab'
    for nb_features in (0, 1, 2, 100, 1000):
        ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
        lyr = ds.CreateLayer('test')
        lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
        for i in range(nb_features):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('ID', i + 1)
            feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (0 0)"))
            lyr.CreateFeature(feat)

        # Reading right after writing must return every feature, in order.
        lyr.ResetReading()
        for expected_id in range(1, nb_features + 1):
            f = lyr.GetNextFeature()
            assert f is not None and f.GetField('ID') == expected_id, nb_features
        assert lyr.GetNextFeature() is None
        ds = None

    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test creating features then reading then creating again then reading
def test_ogr_mitab_24():
    """Create features, read, create again, then read the whole set.

    Fix: the second creation pass used ``nb_features / 2 + i + 1`` which is
    a float under Python 3 true division; integer IDs are now computed with
    floor division.
    """
    filename = '/vsimem/ogr_mitab_24.tab'
    for nb_features in (2, 100, 1000):
        half = nb_features // 2
        ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
        lyr = ds.CreateLayer('test')
        lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))

        # First half of the features.
        for i in range(half):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('ID', i + 1)
            feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (0 0)"))
            lyr.CreateFeature(feat)

        lyr.ResetReading()
        for i in range(half):
            f = lyr.GetNextFeature()
            assert f is not None and f.GetField('ID') == i + 1, nb_features
        assert lyr.GetNextFeature() is None

        # Second half, created after the read pass.
        for i in range(half):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('ID', half + i + 1)
            feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (0 0)"))
            lyr.CreateFeature(feat)

        lyr.ResetReading()
        for i in range(nb_features):
            f = lyr.GetNextFeature()
            assert f is not None and f.GetField('ID') == i + 1, nb_features
        assert lyr.GetNextFeature() is None
        ds = None

    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test that opening in update mode without doing any change does not alter
# file
def test_ogr_mitab_25():
    """Opening in update mode without any change must not rewrite the
    .map/.tab/.dat/.id files (checked via file mtimes)."""
    filename = 'tmp/ogr_mitab_25.tab'
    for nb_features in (2, 1000):
        ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
        lyr = ds.CreateLayer('test')
        lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
        for i in range(int(nb_features / 2)):
            feat = ogr.Feature(lyr.GetLayerDefn())
            feat.SetField('ID', i + 1)
            feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (%d %d)" % (i, i)))
            lyr.CreateFeature(feat)
        ds = None
        # Backdate the files on Linux so any rewrite shows up as a newer
        # mtime; elsewhere a 1-second sleep below serves the same purpose.
        if sys.platform.startswith('linux'):
            for ext in ('map', 'tab', 'dat', 'id'):
                os.system('touch -d "1 minute ago" %s' % filename[0:-3] + ext)
        mtime_dict = {}
        for ext in ('map', 'tab', 'dat', 'id'):
            mtime_dict[ext] = os.stat(filename[0:-3] + ext).st_mtime
        if not sys.platform.startswith('linux'):
            time.sleep(1)
        # Try without doing anything
        ds = ogr.Open(filename, update=1)
        ds = None
        for ext in ('map', 'tab', 'dat', 'id'):
            mtime = os.stat(filename[0:-3] + ext).st_mtime
            assert mtime_dict[ext] == mtime, ('mtime of .%s has changed !' % ext)
        # Try by reading all features
        ds = ogr.Open(filename, update=1)
        lyr = ds.GetLayer(0)
        lyr.GetFeatureCount(1)
        ds = None
        for ext in ('map', 'tab', 'dat', 'id'):
            mtime = os.stat(filename[0:-3] + ext).st_mtime
            assert mtime_dict[ext] == mtime, ('mtime of .%s has changed !' % ext)
        # Try by reading all features with a spatial index
        ds = ogr.Open(filename, update=1)
        lyr = ds.GetLayer(0)
        lyr.SetSpatialFilterRect(0.5, 0.5, 1.5, 1.5)
        lyr.GetFeatureCount(1)
        ds = None
        for ext in ('map', 'tab', 'dat', 'id'):
            mtime = os.stat(filename[0:-3] + ext).st_mtime
            assert mtime_dict[ext] == mtime, ('mtime of .%s has changed !' % ext)
        if test_cli_utilities.get_test_ogrsf_path() is not None:
            ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro -fsf ' + filename)
            assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test DeleteFeature()
def test_ogr_mitab_26():
    """Test DeleteFeature(): single/bulk deletes, double-delete, illegal
    FIDs, and the feature count after reopening."""
    filename = '/vsimem/ogr_mitab_26.tab'
    for nb_features in (2, 1000):
        if nb_features == 2:
            nb_runs = 2
        else:
            nb_runs = 1
        for j in range(nb_runs):
            ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
            lyr = ds.CreateLayer('test')
            lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
            for i in range(nb_features):
                feat = ogr.Feature(lyr.GetLayerDefn())
                feat.SetField('ID', i + 1)
                feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (%d %d)" % (i, i)))
                lyr.CreateFeature(feat)
            # Delete the middle feature (small case) or the middle half of
            # the features (large case).
            if nb_features == 2:
                assert lyr.DeleteFeature(int(nb_features / 2)) == 0, j
            else:
                for k in range(int(nb_features / 2)):
                    assert lyr.DeleteFeature(int(nb_features / 4) + k) == 0, j
            if j == 1:
                # Expected failure : already deleted feature
                ret = lyr.DeleteFeature(int(nb_features / 2))
                if ret != ogr.OGRERR_NON_EXISTING_FEATURE:
                    print(j)
                    pytest.fail(nb_features)
                # A deleted feature must no longer be fetchable.
                feat = lyr.GetFeature(int(nb_features / 2))
                if feat is not None:
                    print(j)
                    pytest.fail(nb_features)
                # Expected failure : illegal feature id
                ret = lyr.DeleteFeature(nb_features + 1)
                if ret != ogr.OGRERR_NON_EXISTING_FEATURE:
                    print(j)
                    pytest.fail(nb_features)
            ds = None
            # Reopen and make sure the count reflects the deletions.
            ds = ogr.Open(filename)
            lyr = ds.GetLayer(0)
            assert lyr.GetFeatureCount() == nb_features / 2
            ds = None
        # This used to trigger a bug in DAT record deletion during implementation...
        if nb_features == 1000:
            ds = ogr.Open(filename, update=1)
            lyr = ds.GetLayer(0)
            lyr.DeleteFeature(245)
            ds = None
            ds = ogr.Open(filename)
            lyr = ds.GetLayer(0)
            assert lyr.GetFeatureCount() == nb_features / 2 - 1
            ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test SetFeature()
def test_ogr_mitab_27():
    """Test SetFeature(): invalid FIDs, blank overwrite, rewrite of a
    deleted feature, no-op rewrites, and attribute-only rewrites."""
    filename = '/vsimem/ogr_mitab_27.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('realfield', ogr.OFTReal))
    lyr.CreateField(ogr.FieldDefn('stringfield', ogr.OFTString))
    # Invalid call : feature without FID
    f = ogr.Feature(lyr.GetLayerDefn())
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = lyr.SetFeature(f)
    gdal.PopErrorHandler()
    assert ret != 0
    # Invalid call : feature with FID <= 0
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetFID(0)
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = lyr.SetFeature(f)
    gdal.PopErrorHandler()
    assert ret != 0
    # Create one real feature to operate on.
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField('intfield', 1)
    f.SetField('realfield', 2.34)
    f.SetField('stringfield', "foo")
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (1 2)'))
    lyr.CreateFeature(f)
    fid = f.GetFID()
    # Invalid call : feature with FID > feature_count
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetFID(2)
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ret = lyr.SetFeature(f)
    gdal.PopErrorHandler()
    assert ret != 0
    # Update previously created object with blank feature
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetFID(fid)
    lyr.SetFeature(f)
    ds = None
    # The blank rewrite must have cleared both attributes and geometry.
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f.GetField('intfield') != 0 or f.GetField('realfield') != 0 or f.GetField('stringfield') != '' or \
       f.GetGeometryRef() is not None:
        f.DumpReadable()
        pytest.fail()
    # Refill the feature and write it back.
    f.SetField('intfield', 1)
    f.SetField('realfield', 2.34)
    f.SetField('stringfield', "foo")
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (2 3)'))
    lyr.SetFeature(f)
    ds = None
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f.GetField('intfield') != 1 or f.GetField('realfield') != 2.34 or f.GetField('stringfield') != 'foo' or \
       f.GetGeometryRef() is None:
        f.DumpReadable()
        pytest.fail()
    lyr.DeleteFeature(f.GetFID())
    ds = None
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    # SetFeature() on a deleted feature
    lyr.SetFeature(f)
    f = lyr.GetFeature(1)
    if f.GetField('intfield') != 1 or f.GetField('realfield') != 2.34 or f.GetField('stringfield') != 'foo' or \
       f.GetGeometryRef() is None:
        f.DumpReadable()
        pytest.fail()
    ds = None
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(1)
    # SetFeature() with identical feature : no-op
    assert lyr.SetFeature(f) == 0
    ds = None
    stat = gdal.VSIStatL(filename[0:-3] + "map")
    old_size = stat.size
    # This used to trigger a bug: when using SetFeature() repeatedly, we
    # can create object blocks in the .map that are made only of deleted
    # objects.
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(1)
    for _ in range(100):
        f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (2 3)'))
        assert lyr.SetFeature(f) == 0
    ds = None
    # The repeated rewrites must not have grown the .map file.
    stat = gdal.VSIStatL(filename[0:-3] + "map")
    assert stat.size == old_size
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(1)
    # SetFeature() with identical geometry : rewrite only attributes
    f.SetField('intfield', -1)
    assert lyr.SetFeature(f) == 0
    f = lyr.GetFeature(1)
    if f.GetField('intfield') != -1 or f.GetField('realfield') != 2.34 or f.GetField('stringfield') != 'foo' or \
       f.GetGeometryRef() is None:
        f.DumpReadable()
        pytest.fail()
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
def generate_permutation(n):
    """Return a pseudo-random permutation of range(n).

    Uses random.shuffle() (Fisher-Yates) instead of the previous ad-hoc
    scheme that only ever swapped index 0 with a random index, which did
    not sample permutations uniformly.
    """
    tab = list(range(n))
    random.shuffle(tab)
    return tab
###############################################################################
# Test updating object blocks with deleted objects
def test_ogr_mitab_28():
    """Test rewriting (SetFeature) every record of a file after deleting
    them all, exercising object blocks made only of deleted objects."""
    filename = '/vsimem/ogr_mitab_28.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
    ds = None
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    # Generate 10x10 grid
    N2 = 10
    N = N2 * N2
    for n in generate_permutation(N):
        x = int(n / N2)
        y = n % N2
        f = ogr.Feature(lyr.GetLayerDefn())
        # f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d %d)' % (x,y)))
        f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(%d %d,%f %f,%f %f)' % (x, y, x + 0.1, y, x + 0.2, y)))
        lyr.CreateFeature(f)
    # Delete all features
    for i in range(N):
        lyr.DeleteFeature(i + 1)
    # Set deleted features
    i = 0
    permutation = generate_permutation(N)
    for n in permutation:
        x = int(n / N2)
        y = n % N2
        f = ogr.Feature(lyr.GetLayerDefn())
        # f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d %d)' % (x,y)))
        f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(%d %d,%f %f,%f %f)' % (x, y, x + 0.1, y, x + 0.2, y)))
        f.SetFID(i + 1)
        i = i + 1
        lyr.SetFeature(f)
    ds = None
    ds = ogr.Open(filename)
    lyr = ds.GetLayer(0)
    i = 0
    # Check sequential enumeration
    for f in lyr:
        g = f.GetGeometryRef()
        (x, y, _) = g.GetPoint(0)
        n = permutation[i]
        x_ref = int(n / N2)
        y_ref = n % N2
        assert abs(x - x_ref) + abs(y - y_ref) <= 0.1
        i = i + 1
    # Check spatial index integrity
    for n in range(N):
        x = int(n / N2)
        y = n % N2
        lyr.SetSpatialFilterRect(x - 0.5, y - 0.5, x + 0.5, y + 0.5)
        assert lyr.GetFeatureCount() == 1
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test updating a file with compressed geometries.
def test_ogr_mitab_29():
    """Test updating a file with compressed geometries.

    Fix: the failure path called ``pytest.fail(x0, y0, x1, y1)`` with four
    positional arguments; ``pytest.fail`` takes a single message string (the
    second positional parameter is ``pytrace``), so the old call raised
    TypeError instead of reporting the failing rectangle.
    """
    # Make sure the zipped test dataset is extracted into tmp/cache.
    try:
        os.stat('tmp/cache/compr_symb_deleted_records.tab')
    except OSError:
        try:
            gdaltest.unzip('tmp/cache', 'data/mitab/compr_symb_deleted_records.zip')
            try:
                os.stat('tmp/cache/compr_symb_deleted_records.tab')
            except OSError:
                pytest.skip()
        except OSError:
            pytest.skip()

    # Work on a copy, the test modifies the file.
    for ext in ('tab', 'dat', 'id', 'map'):
        shutil.copy('tmp/cache/compr_symb_deleted_records.' + ext, 'tmp')

    # Is a 100x100 point grid with only the 4 edge lines left
    # (compressed points).
    ds = ogr.Open('tmp/compr_symb_deleted_records.tab', update=1)
    lyr = ds.GetLayer(0)

    # Re-add the 98x98 interior points in random order.
    N2 = 98
    N = N2 * N2
    for n in generate_permutation(N):
        x = 1 + n // N2
        y = 1 + n % N2
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d %d)' % (x, y)))
        lyr.CreateFeature(f)
    ds = None

    # Check grid integrity after reopening: exactly one point per cell.
    ds = ogr.Open('tmp/compr_symb_deleted_records.tab')
    lyr = ds.GetLayer(0)
    N2 = 100
    N = N2 * N2
    for n in range(N):
        x = n // N2
        y = n % N2
        lyr.SetSpatialFilterRect(x - 0.01, y - 0.01, x + 0.01, y + 0.01)
        if lyr.GetFeatureCount() != 1:
            print(n)
            pytest.fail('%f %f %f %f' % (x - 0.01, y - 0.01, x + 0.01, y + 0.01))
    ds = None

    ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/compr_symb_deleted_records.tab')
###############################################################################
# Test SyncToDisk() in create mode
def test_ogr_mitab_30(update=0):
    """Test SyncToDisk() in create mode (or in update mode when update=1).

    Verifies that SyncToDisk() makes data visible to a second dataset handle,
    and that calling it again with no intervening writes leaves every
    component file (.map/.tab/.dat/.id) untouched (size and mtime compared).
    """
    filename = 'tmp/ogr_mitab_30.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('test', options=['BOUNDS=0,0,100,100'])
    lyr.CreateField(ogr.FieldDefn('ID', ogr.OFTInteger))
    assert lyr.SyncToDisk() == 0
    # A second handle must see the synced schema even with zero features.
    ds2 = ogr.Open(filename)
    lyr2 = ds2.GetLayer(0)
    assert lyr2.GetFeatureCount() == 0 and lyr2.GetLayerDefn().GetFieldCount() == 1
    ds2 = None
    # Check that the files are not updated in between
    if sys.platform.startswith('linux'):
        # Backdate mtimes so an unexpected rewrite would be detectable.
        for ext in ('map', 'tab', 'dat', 'id'):
            os.system('touch -d "1 minute ago" %s' % filename[0:-3] + ext)
    stat = {}
    for ext in ('map', 'tab', 'dat', 'id'):
        stat[ext] = gdal.VSIStatL(filename[0:-3] + ext)
    if not sys.platform.startswith('linux'):
        # Without 'touch', rely on a 1 s sleep to get distinct mtimes.
        time.sleep(1)
    assert lyr.SyncToDisk() == 0
    # A no-op SyncToDisk() must not rewrite any component file.
    for ext in ('map', 'tab', 'dat', 'id'):
        stat2 = gdal.VSIStatL(filename[0:-3] + ext)
        assert stat[ext].size == stat2.size and stat[ext].mtime == stat2.mtime
    if update == 1:
        ds = None
        ds = ogr.Open(filename, update=1)
        lyr = ds.GetLayer(0)
    for j in range(100):
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetField('ID', j + 1)
        feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (%d %d)' % (j, j)))
        lyr.CreateFeature(feat)
        feat = None
        # Only verify on the first few features and every 5th afterwards.
        if not (j <= 10 or (j % 5) == 0):
            continue
        # Sync twice: first pass records file stats, second must change nothing.
        for i in range(2):
            ret = lyr.SyncToDisk()
            assert ret == 0
            if i == 0:
                for ext in ('map', 'tab', 'dat', 'id'):
                    stat[ext] = gdal.VSIStatL(filename[0:-3] + ext)
            else:
                for ext in ('map', 'tab', 'dat', 'id'):
                    stat2 = gdal.VSIStatL(filename[0:-3] + ext)
                    if stat[ext].size != stat2.size:
                        print(j)
                        pytest.fail(i)
            # After each sync a fresh handle must see all features so far,
            # both by FID lookup and by sequential read.
            ds2 = ogr.Open(filename)
            lyr2 = ds2.GetLayer(0)
            assert lyr2.GetFeatureCount() == j + 1, i
            feat2 = lyr2.GetFeature(j + 1)
            if feat2.GetField('ID') != j + 1 or feat2.GetGeometryRef().ExportToWkt() != 'POINT (%d %d)' % (j, j):
                print(i)
                feat2.DumpReadable()
                pytest.fail(j)
            lyr2.ResetReading()
            for _ in range(j + 1):
                feat2 = lyr2.GetNextFeature()
            if feat2.GetField('ID') != j + 1 or feat2.GetGeometryRef().ExportToWkt() != 'POINT (%d %d)' % (j, j):
                print(i)
                feat2.DumpReadable()
                pytest.fail(j)
            ds2 = None
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test SyncToDisk() in update mode
def test_ogr_mitab_31():
    """Test SyncToDisk() in update mode (re-runs test_ogr_mitab_30 with update=1)."""
    return test_ogr_mitab_30(update=1)
###############################################################################
# Check read support of non-spatial .tab/.data without .map or .id (#5718)
# We only check read-only behaviour though.
def test_ogr_mitab_32():
    """Check read support of non-spatial .tab/.dat without .map or .id (#5718).

    The dataset is also opened with update=1, but only read behaviour is
    exercised.
    """
    for update_mode in (0, 1):
        ds = ogr.Open('data/mitab/aspatial-table.tab', update=update_mode)
        layer = ds.GetLayer(0)
        assert layer.GetFeatureCount() == 2, update_mode
        feat = layer.GetNextFeature()
        assert feat.GetField('a') == 1, update_mode
        assert feat.GetField('b') == 2, update_mode
        assert feat.GetField('d') == 'hello', update_mode
        # Random access by FID must work as well.
        feat = layer.GetFeature(2)
        assert feat.GetField('a') == 4, update_mode
        ds = None
###############################################################################
# Test opening and modifying a file created with MapInfo that consists of
# a single object block, without index block
def test_ogr_mitab_33():
    """Test opening and modifying a file created with MapInfo that consists
    of a single object block, without index block.
    """
    # Read-only and update opens must both see the single feature.
    for update in (0, 1):
        ds = ogr.Open('data/mitab/single_point_mapinfo.tab', update=update)
        lyr = ds.GetLayer(0)
        assert lyr.GetFeatureCount() == 1, update
        f = lyr.GetNextFeature()
        assert f.GetField('toto') == '', update
        ds = None
    # Test adding a new object
    shutil.copy('data/mitab/single_point_mapinfo.tab', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.dat', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.id', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.map', 'tmp')
    ds = ogr.Open('tmp/single_point_mapinfo.tab', update=1)
    lyr = ds.GetLayer(0)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(1363180 7509810)'))
    lyr.CreateFeature(f)
    f = None
    ds = None
    # Reopen: both the original and the newly added feature must be present.
    ds = ogr.Open('tmp/single_point_mapinfo.tab')
    lyr = ds.GetLayer(0)
    assert lyr.GetFeatureCount() == 2
    f = lyr.GetNextFeature()
    assert f is not None
    f = lyr.GetNextFeature()
    assert f is not None
    ds = None
    # Test replacing the existing object
    shutil.copy('data/mitab/single_point_mapinfo.tab', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.dat', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.id', 'tmp')
    shutil.copy('data/mitab/single_point_mapinfo.map', 'tmp')
    ds = ogr.Open('tmp/single_point_mapinfo.tab', update=1)
    lyr = ds.GetLayer(0)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetFID(1)
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(1363180 7509810)'))
    lyr.SetFeature(f)
    f = None
    ds = None
    # Reopen: still exactly one feature after the in-place replacement.
    ds = ogr.Open('tmp/single_point_mapinfo.tab')
    lyr = ds.GetLayer(0)
    assert lyr.GetFeatureCount() == 1
    f = lyr.GetNextFeature()
    assert f is not None
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/single_point_mapinfo.tab')
###############################################################################
# Test updating a line that spans over several coordinate blocks
def test_ogr_mitab_34():
    """Test updating a line that spans over several coordinate blocks."""
    filename = '/vsimem/ogr_mitab_34.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('ogr_mitab_34', options=['BOUNDS=-1000,0,1000,3000'])
    lyr.CreateField(ogr.FieldDefn('dummy', ogr.OFTString))
    # 1000-vertex line, written twice: large enough that each line's
    # coordinates span several coordinate blocks in the .map file.
    geom = ogr.Geometry(ogr.wkbLineString)
    for i in range(1000):
        geom.AddPoint_2D(i, i)
    for _ in range(2):
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometry(geom)
        lyr.CreateFeature(f)
    f = None
    ds = None
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    lyr.GetNextFeature()  # seek to another object
    # Move the first vertex of the first line and rewrite the feature.
    geom = f.GetGeometryRef()
    geom.SetPoint_2D(0, -1000, 3000)
    lyr.SetFeature(f)
    ds = None
    ds = ogr.Open(filename)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    geom = f.GetGeometryRef()
    # First line: vertex 0 moved, remaining 999 vertices untouched.
    assert geom.GetX(0) == pytest.approx(-1000, abs=1e-2) and geom.GetY(0) == pytest.approx(3000, abs=1e-2)
    for i in range(999):
        assert geom.GetX(i + 1) == pytest.approx((i + 1), abs=1e-2) and geom.GetY(i + 1) == pytest.approx((i + 1), abs=1e-2)
    # Second line must be fully unmodified by the rewrite of the first.
    f = lyr.GetNextFeature()
    geom = f.GetGeometryRef()
    for i in range(1000):
        assert geom.GetX(i) == pytest.approx((i), abs=1e-2) and geom.GetY(i) == pytest.approx((i), abs=1e-2)
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test SRS support
def get_srs_from_coordsys(coordsys):
    """Return the SpatialReference OGR parses from a MapInfo CoordSys line.

    Builds a minimal in-memory MIF/MID dataset embedding *coordsys* and reads
    back the layer's SRS. Returns None when OGR cannot derive one.
    """
    mif_filename = '/vsimem/foo.mif'
    f = gdal.VSIFOpenL(mif_filename, "wb")
    content = """Version 300
Charset "Neutral"
Delimiter ","
%s
Columns 1
  foo Char(254)
Data
NONE
""" % coordsys
    content = content.encode('ascii')
    gdal.VSIFWriteL(content, 1, len(content), f)
    gdal.VSIFCloseL(f)
    # A .mid companion with a single empty record is needed for a valid dataset.
    f = gdal.VSIFOpenL(mif_filename[0:-3] + "mid", "wb")
    content = '""\n'
    content = content.encode('ascii')
    gdal.VSIFWriteL(content, 1, len(content), f)
    gdal.VSIFCloseL(f)
    ds = ogr.Open(mif_filename)
    srs = ds.GetLayer(0).GetSpatialRef()
    if srs is not None:
        # Clone so the SRS outlives the dataset being closed/unlinked.
        srs = srs.Clone()
    gdal.Unlink(mif_filename)
    gdal.Unlink(mif_filename[0:-3] + "mid")
    return srs
def get_coordsys_from_srs(srs):
    """Write *srs* to a temporary in-memory MIF dataset and return the
    'CoordSys ...' line the MapInfo driver emitted for it."""
    mif_filename = '/vsimem/foo.mif'
    drv = ogr.GetDriverByName('MapInfo File')
    ds = drv.CreateDataSource(mif_filename)
    lyr = ds.CreateLayer('foo', srs=srs)
    lyr.CreateField(ogr.FieldDefn('foo'))
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(0 0)'))
    lyr.CreateFeature(feat)
    ds = None
    # Read the generated .mif header back and extract the CoordSys line.
    fp = gdal.VSIFOpenL(mif_filename, "rb")
    data = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
    gdal.VSIFCloseL(fp)
    gdal.Unlink(mif_filename)
    gdal.Unlink(mif_filename[0:-3] + "mid")
    data = data[data.find('CoordSys'):]
    return data[:data.find('\n')]
def test_ogr_mitab_35():
    """Test SRS support: round-trips between OGR SpatialReference objects and
    MapInfo CoordSys strings (via the get_*_from_* helper functions).
    """
    # Local/non-earth
    srs = osr.SpatialReference()
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys NonEarth Units "m"'
    srs = osr.SpatialReference('LOCAL_CS["foo"]')
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys NonEarth Units "m"'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt in ('LOCAL_CS["Nonearth",UNIT["Meter",1]]', 'LOCAL_CS["Nonearth",UNIT["Meter",1],AXIS["Easting",EAST],AXIS["Northing",NORTH]]')
    # Test units
    for mif_unit in ['mi', 'km', 'in', 'ft', 'yd', 'mm', 'cm', 'm', 'survey ft', 'nmi', 'li', 'ch', 'rd']:
        coordsys = 'CoordSys NonEarth Units "%s"' % mif_unit
        srs = get_srs_from_coordsys(coordsys)
        # print(srs)
        got_coordsys = get_coordsys_from_srs(srs)
        assert coordsys == got_coordsys, srs
    # Geographic
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 104'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt == 'GEOGCS["unnamed",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]'
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 104'
    # Projected
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32631)
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 8, 104, "m", 3, 0, 0.9996, 500000, 0'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt == 'PROJCS["unnamed",GEOGCS["unnamed",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",3],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 8, 104, "m", 3, 0, 0.9996, 500000, 0'
    # Test round-tripping of projection methods and a few units
    for coordsys in ['CoordSys Earth Projection 1, 104',
                     'CoordSys Earth Projection 2, 104, "survey ft", 1, 2',
                     'CoordSys Earth Projection 3, 104, "ft", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 4, 104, "m", 1, 90, 90',
                     'CoordSys Earth Projection 5, 104, "m", 1, 90, 90',
                     'CoordSys Earth Projection 6, 104, "m", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 7, 104, "m", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 8, 104, "m", 1, 2, 3, 4, 5',
                     'CoordSys Earth Projection 9, 104, "m", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 10, 104, "m", 1',
                     'CoordSys Earth Projection 11, 104, "m", 1',
                     'CoordSys Earth Projection 12, 104, "m", 1',
                     'CoordSys Earth Projection 13, 104, "m", 1',
                     'CoordSys Earth Projection 14, 104, "m", 1',
                     'CoordSys Earth Projection 15, 104, "m", 1',
                     'CoordSys Earth Projection 16, 104, "m", 1',
                     'CoordSys Earth Projection 17, 104, "m", 1',
                     'CoordSys Earth Projection 18, 104, "m", 1, 2, 3, 4',
                     'CoordSys Earth Projection 19, 104, "m", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 20, 104, "m", 1, 2, 3, 4, 5',
                     #'CoordSys Earth Projection 21, 104, "m", 1, 2, 3, 4, 5',
                     #'CoordSys Earth Projection 22, 104, "m", 1, 2, 3, 4, 5',
                     #'CoordSys Earth Projection 23, 104, "m", 1, 2, 3, 4, 5',
                     #'CoordSys Earth Projection 24, 104, "m", 1, 2, 3, 4, 5',
                     'CoordSys Earth Projection 25, 104, "m", 1, 2, 3, 4',
                     'CoordSys Earth Projection 26, 104, "m", 1, 2',
                     'CoordSys Earth Projection 27, 104, "m", 1, 2, 3, 4',
                     'CoordSys Earth Projection 28, 104, "m", 1, 2, 90',
                     # 'CoordSys Earth Projection 29, 104, "m", 1, 90, 90', # alias of 4
                     'CoordSys Earth Projection 30, 104, "m", 1, 2, 3, 4',
                     'CoordSys Earth Projection 31, 104, "m", 1, 2, 3, 4, 5',
                     'CoordSys Earth Projection 32, 104, "m", 1, 2, 3, 4, 5, 6',
                     'CoordSys Earth Projection 33, 104, "m", 1, 2, 3, 4',
                     ]:
        srs = get_srs_from_coordsys(coordsys)
        # print(srs)
        got_coordsys = get_coordsys_from_srs(srs)
        # if got_coordsys.find(' Bounds') >= 0:
        #    got_coordsys = got_coordsys[0:got_coordsys.find(' Bounds')]
        assert coordsys == got_coordsys, srs
    # Test TOWGS84
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4322)
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 103'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt in ('GEOGCS["unnamed",DATUM["WGS_1972",SPHEROID["WGS 72",6378135,298.26]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]', 'GEOGCS["unnamed",DATUM["World_Geodetic_System_1972",SPHEROID["WGS 72",6378135,298.26]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]')
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 103'
    # Test Lambert 93
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(2154)
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49, 700000, 6600000'
    srs = get_srs_from_coordsys(coordsys)
    assert srs.GetAuthorityCode(None) == '2154'
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49, 700000, 6600000'
    srs = osr.SpatialReference('PROJCS["RGF93 / Lambert-93",GEOGCS["RGF93",DATUM["Reseau_Geodesique_Francais_1993",SPHEROID["GRS 80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",49.00000000002],PARAMETER["standard_parallel_2",44],PARAMETER["latitude_of_origin",46.5],PARAMETER["central_meridian",3],PARAMETER["false_easting",700000],PARAMETER["false_northing",6600000],UNIT["Meter",1.0],AUTHORITY["EPSG","2154"]]')
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49.00000000002, 700000, 6600000'
    # With a bounds file configured, the CoordSys gains a Bounds clause.
    gdal.SetConfigOption('MITAB_BOUNDS_FILE', 'data/mitab/mitab_bounds.txt')
    coordsys = get_coordsys_from_srs(srs)
    gdal.SetConfigOption('MITAB_BOUNDS_FILE', None)
    assert coordsys == 'CoordSys Earth Projection 3, 33, "m", 3, 46.5, 44, 49.00000000002, 700000, 6600000 Bounds (75000, 6000000) (1275000, 7200000)'
    # http://trac.osgeo.org/gdal/ticket/4115
    srs = get_srs_from_coordsys('CoordSys Earth Projection 10, 157, "m", 0')
    wkt = srs.ExportToWkt()
    assert wkt == 'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0 +lon_0=0 +x_0=0 +y_0=0 +k=1 +units=m +nadgrids=@null +wktext +no_defs"]]'
    # We don't round-trip currently
    # MIF 999
    srs = osr.SpatialReference("""GEOGCS["unnamed",
    DATUM["MIF 999,1,1,2,3",
        SPHEROID["WGS 72",6378135,298.26]],
    PRIMEM["Greenwich",0],
    UNIT["degree",0.0174532925199433]]""")
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 999, 1, 1, 2, 3'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt == 'GEOGCS["unnamed",DATUM["MIF 999,1,1,2,3",SPHEROID["WGS 72",6378135,298.26],TOWGS84[1,2,3,0,0,0,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]'
    # MIF 9999
    srs = osr.SpatialReference("""GEOGCS["unnamed",
    DATUM["MIF 9999,1,1,2,3,4,5,6,7,3",
        SPHEROID["WGS 72",6378135,298.26]],
    PRIMEM["Greenwich",0],
    UNIT["degree",0.0174532925199433]]""")
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 1, 9999, 1, 1, 2, 3, 4, 5, 6, 7, 3'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt == 'GEOGCS["unnamed",DATUM["MIF 9999,1,1,2,3,4,5,6,7,3",SPHEROID["WGS 72",6378135,298.26],TOWGS84[1,2,3,-4,-5,-6,7]],PRIMEM["non-Greenwich",3],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]'
    # Test EPSG:2393 / KKJ
    srs = osr.SpatialReference("""PROJCS["KKJ / Finland Uniform Coordinate System",GEOGCS["KKJ",DATUM["Kartastokoordinaattijarjestelma_1966",SPHEROID["International 1924",6378388,297,AUTHORITY["EPSG","7022"]],AUTHORITY["EPSG","6123"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4123"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",27],PARAMETER["scale_factor",1],PARAMETER["false_easting",3500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Northing",NORTH],AXIS["Easting",EAST],AUTHORITY["EPSG","2393"]]""")
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 24, 1016, "m", 27, 0, 1, 3500000, 0'
    srs = get_srs_from_coordsys(coordsys)
    wkt = srs.ExportToWkt()
    assert wkt == 'PROJCS["unnamed",GEOGCS["unnamed",DATUM["Kartastokoordinaattijarjestelma_1966",SPHEROID["International 1924",6378388,297]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",27],PARAMETER["scale_factor",1],PARAMETER["false_easting",3500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
    coordsys = get_coordsys_from_srs(srs)
    assert coordsys == 'CoordSys Earth Projection 24, 1016, "m", 27, 0, 1, 3500000, 0'
###############################################################################
# Test opening and modifying a file with polygons created with MapInfo that consists of
# a single object block, without index block
def test_ogr_mitab_36():
    """Test opening and modifying a file with polygons created with MapInfo
    that consists of a single object block, without index block.
    """
    # Test modifying a new object
    shutil.copy('data/mitab/polygon_without_index.tab', 'tmp')
    shutil.copy('data/mitab/polygon_without_index.dat', 'tmp')
    shutil.copy('data/mitab/polygon_without_index.id', 'tmp')
    shutil.copy('data/mitab/polygon_without_index.map', 'tmp')
    ds = ogr.Open('tmp/polygon_without_index.tab', update=1)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    g = f.GetGeometryRef()
    ring = g.GetGeometryRef(0)
    # Shift one vertex to force the object to be rewritten.
    # NOTE(review): GetY() with no argument reads vertex 0, not vertex 1 —
    # possibly intentional (any Y works for this test); kept as-is.
    ring.SetPoint_2D(1, ring.GetX(1) + 100, ring.GetY())
    g = g.Clone()
    f.SetGeometry(g)
    lyr.SetFeature(f)
    f = None
    ds = None
    ds = ogr.Open('tmp/polygon_without_index.tab')
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    # BUG FIX: the original compared the feature against its own geometry
    # (f vs f.GetGeometryRef()), which can never fail. Compare the re-read
    # feature against the modified geometry 'g' that was written.
    if ogrtest.check_feature_geometry(f, g, max_error=0.1):
        f.DumpReadable()
        pytest.fail(str(g))
    # Iterate through the remaining features to make sure reading completes.
    while True:
        f = lyr.GetNextFeature()
        if f is None:
            break
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/polygon_without_index.tab')
###############################################################################
# Simple testing of Seamless tables
def test_ogr_mitab_37():
    """Simple testing of Seamless tables: FIDs encode (table id << 32) | local id."""
    ds = ogr.Open('data/mitab/seamless.tab')
    lyr = ds.GetLayer(0)
    assert lyr.GetFeatureCount() == 4
    expected = [(4294967297, '1'),
                (4294967298, '2'),
                (8589934593, '3'),
                (8589934594, '4')]
    # Sequential read yields all four features in order.
    for fid, ident in expected:
        feat = lyr.GetNextFeature()
        assert feat.GetFID() == fid
        assert feat.id == ident
    # Random access by the composite FID works too.
    for fid, ident in (expected[0], expected[3]):
        feat = lyr.GetFeature(fid)
        assert feat.GetFID() == fid
        assert feat.id == ident
    # FIDs past the end of a component table yield no feature.
    assert lyr.GetFeature(8589934594 + 1) is None
    assert lyr.GetFeature(4294967297 * 2 + 1) is None
###############################################################################
# Open MIF with MID with TAB delimiter and empty first field (#5405)
def test_ogr_mitab_38():
    """Open MIF with MID using TAB delimiter and empty first field (#5405)."""
    ds = ogr.Open('data/mitab/empty_first_field_with_tab_delimiter.mif')
    feat = ds.GetLayer(0).GetNextFeature()
    if not (feat['field1'] == '' and feat['field2'] == 'foo'):
        feat.DumpReadable()
        pytest.fail()
###############################################################################
# Read various geometry types from .mif
def test_ogr_mitab_39():
    """Read various geometry types from .mif, comparing against the golden CSV."""
    ds = ogr.Open('data/mitab/all_geoms.mif')
    ds_ref = ogr.Open('data/mitab/all_geoms.mif.golden.csv')
    lyr = ds.GetLayer(0)
    lyr_ref = ds_ref.GetLayer(0)
    for feat in lyr:
        feat_ref = lyr_ref.GetNextFeature()
        geom_ok = ogrtest.check_feature_geometry(feat, feat_ref.GetGeometryRef()) == 0
        style_ok = feat.GetStyleString() == feat_ref.GetStyleString()
        if not (geom_ok and style_ok):
            feat.DumpReadable()
            feat_ref.DumpReadable()
            pytest.fail()
    # Both layers must be exhausted at the same time.
    assert lyr_ref.GetNextFeature() is None
###############################################################################
# Read various geometry types from .mif but potentially truncated
def test_ogr_mitab_40():
    """Read all_geoms.mif truncated at every possible offset; must not crash."""
    content = open('data/mitab/all_geoms.mif', 'rt').read()
    for cut in range(len(content)):
        gdal.FileFromMemBuffer('/vsimem/ogr_mitab_40.mif', content[:cut])
        # Errors are expected on truncated input; suppress them.
        with gdaltest.error_handler():
            ds = ogr.Open('/vsimem/ogr_mitab_40.mif')
        if ds is None:
            continue
        lyr = ds.GetLayer(0)
        for _ in lyr:
            pass
    gdal.Unlink('/vsimem/ogr_mitab_40.mif')
###############################################################################
# Read various geometry types from .tab
def test_ogr_mitab_41():
    """Read various geometry types from .tab, comparing against the golden CSV."""
    ds = ogr.Open('data/mitab/all_geoms.tab')
    lyr = ds.GetLayer(0)
    ds_ref = ogr.Open('data/mitab/all_geoms.mif.golden.csv')
    lyr_ref = ds_ref.GetLayer(0)
    while True:
        f = lyr.GetNextFeature()
        f_ref = lyr_ref.GetNextFeature()
        if f is None:
            # Both layers must be exhausted at the same time.
            assert f_ref is None
            break
        if ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef()) != 0 or \
           f.GetStyleString() != f_ref.GetStyleString():
            f.DumpReadable()
            f_ref.DumpReadable()
            pytest.fail()
###############################################################################
# Read various geometry types from .tab with block size = 32256
def test_ogr_mitab_42():
    """Read various geometry types from .tab with block size = 32256."""
    ds = ogr.Open('/vsizip/data/mitab/all_geoms_block_32256.zip')
    lyr = ds.GetLayer(0)
    ds_ref = ogr.Open('data/mitab/all_geoms.mif.golden.csv')
    lyr_ref = ds_ref.GetLayer(0)
    while True:
        f = lyr.GetNextFeature()
        f_ref = lyr_ref.GetNextFeature()
        if f is None:
            # Both layers must be exhausted at the same time.
            assert f_ref is None
            break
        if ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef()) != 0 or \
           f.GetStyleString() != f_ref.GetStyleString():
            f.DumpReadable()
            f_ref.DumpReadable()
            pytest.fail()
###############################################################################
# Test creating tab with block size = 32256
def test_ogr_mitab_43():
    """Test creating tab with block size = 32256 (BLOCKSIZE creation option)."""
    src_ds = gdal.OpenEx('/vsizip/data/mitab/all_geoms_block_32256.zip')
    # Default block size vs explicit 32256; 32768 must be rejected.
    gdal.VectorTranslate('/vsimem/all_geoms_block_512.tab', src_ds, format='MapInfo File')
    gdal.VectorTranslate('/vsimem/all_geoms_block_32256.tab', src_ds, format='MapInfo File', datasetCreationOptions=['BLOCKSIZE=32256'])
    with gdaltest.error_handler():
        out_ds = gdal.VectorTranslate('/vsimem/all_geoms_block_invalid.tab', src_ds, format='MapInfo File', datasetCreationOptions=['BLOCKSIZE=32768'])
    assert out_ds is None
    gdal.Unlink('/vsimem/all_geoms_block_invalid.dat')
    src_ds = None
    # .map size is a multiple of the block size (6656 = 13*512, 161280 = 5*32256).
    size = gdal.VSIStatL('/vsimem/all_geoms_block_512.map').size
    assert size == 6656
    size = gdal.VSIStatL('/vsimem/all_geoms_block_32256.map').size
    assert size == 161280
    # Content must be identical regardless of block size.
    ds = ogr.Open('/vsimem/all_geoms_block_32256.tab')
    lyr = ds.GetLayer(0)
    ds_ref = ogr.Open('/vsimem/all_geoms_block_512.tab')
    lyr_ref = ds_ref.GetLayer(0)
    while True:
        f = lyr.GetNextFeature()
        f_ref = lyr_ref.GetNextFeature()
        if f is None:
            assert f_ref is None
            break
        if ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef()) != 0 or \
           f.GetStyleString() != f_ref.GetStyleString():
            f.DumpReadable()
            f_ref.DumpReadable()
            pytest.fail()
    gdaltest.mapinfo_drv.DeleteDataSource('/vsimem/all_geoms_block_512.tab')
    gdaltest.mapinfo_drv.DeleteDataSource('/vsimem/all_geoms_block_32256.tab')
    # NOTE(review): nothing above creates '..._block_32768.dat' (the oversized
    # attempt used '..._invalid'); this unlink looks like leftover cleanup — verify.
    gdal.Unlink('/vsimem/all_geoms_block_32768.dat')
###############################################################################
# Test limitation on width and precision of numeric fields in creation (#6392)
def test_ogr_mitab_44():
    """Width/precision of numeric fields are clamped at creation time (#6392)."""
    path = '/vsimem/ogr_mitab_44.mif'
    ds = gdaltest.mapinfo_drv.CreateDataSource(path)
    lyr = ds.CreateLayer('test')
    # Request a width/precision beyond what the format supports.
    fld_defn = ogr.FieldDefn('test', ogr.OFTReal)
    fld_defn.SetWidth(30)
    fld_defn.SetPrecision(29)
    lyr.CreateField(fld_defn)
    ds = None
    # Reopen: the driver must have clamped the values to 20/16.
    ds = ogr.Open(path)
    fld_defn = ds.GetLayer(0).GetLayerDefn().GetFieldDefn(0)
    assert fld_defn.GetWidth() == 20
    assert fld_defn.GetPrecision() == 16
    ds = None
    gdaltest.mapinfo_drv.DeleteDataSource(path)
###############################################################################
# Test read/write MapInfo layers with encoding specified
def test_ogr_mitab_45():
    """Test read/write MapInfo layers with an explicit ENCODING (CP1251).

    Cyrillic field names and values are written with ENCODING=CP1251 into
    MIF and TAB datasets (single- and multi-layer), then read back and
    compared, both by field index and by field name.
    """
    lyrNames = ['lyr1', 'lyr2']
    # Second field name is 31 characters long (ruler below kept for reference):
    # 0         1         2         3
    # 0123456789012345678901234567890123456789012
    fldNames = ['field1', 'абвгдежзийклмнопрстуфхцчшщьъэюя']
    featNames = ['аз',
                 'буки',
                 'веди']
    formats = ['MIF', 'TAB', 'MIF', 'TAB']
    lyrNums = [1, 1, 2, 2]
    dsExts = ['.mif', '.tab', '', '']
    for formatN, frmt in enumerate(formats):
        lyrCount = lyrNums[formatN]
        ext = dsExts[formatN]
        dsName = '/vsimem/45/ogr_mitab_45_%s_%s%s' % (frmt, lyrCount, ext)
        ds = gdaltest.mapinfo_drv.CreateDataSource(dsName, options=['FORMAT=' + frmt])
        assert ds is not None, ('Can\'t create dataset: ' + dsName)
        for i in range(lyrCount):
            lyr = ds.CreateLayer(lyrNames[i], options=['ENCODING=CP1251'])
            assert lyr is not None, ('Can\'t create layer ' + lyrNames[i] +
                                     ' for ' + dsName)
            # Recoding support is optional in the build; skip when absent.
            if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
                pytest.skip('skipping test: recode is not possible')
            for fldName in fldNames:
                fld_defn = ogr.FieldDefn(fldName, ogr.OFTString)
                fld_defn.SetWidth(254)
                lyr.CreateField(fld_defn)
            for featName in featNames:
                feat = ogr.Feature(lyr.GetLayerDefn())
                feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (25 72)"))
                for fldName in fldNames:
                    featValue = fldName + ' ' + featName
                    feat.SetField(fldName, featValue)
                lyr.CreateFeature(feat)
        ds = None
        # reopen and check
        ds = ogr.Open(dsName)
        assert ds is not None, ('Can\'t reopen dataset: ' + dsName)
        for i in range(lyrCount):
            lyr = ds.GetLayer(i)
            assert lyr is not None, ('Can\'t get layer ' + lyrNames[i] +
                                     ' from ' + dsName)
            for fldN, expectedName in enumerate(fldNames):
                fldName = lyr.GetLayerDefn().GetFieldDefn(fldN).GetName()
                assert fldName == expectedName, ('Can\'t get field name\n' +
                                                 ' result name: "' + fldName + '"\n'
                                                 ' expected name: "' + expectedName + '"\n'
                                                 ' from layer : ' + lyrNames[i] +
                                                 ' from dataset :' + dsName)
            for featName in featNames:
                feat = lyr.GetNextFeature()
                for fldN, fldName in enumerate(fldNames):
                    expectedValue = fldName + ' ' + featName
                    # column value by number
                    value = feat.GetField(fldN)
                    assert value == expectedValue, \
                        ('Can\'t get field value by number\n' +
                         ' result value: "' + value + '"\n'
                         ' expected value: "' + expectedValue + '"\n'
                         ' from layer : ' + lyrNames[i] +
                         ' from dataset :' + dsName)
                    # column value by name
                    value = feat.GetField(fldNames[fldN])
                    assert value == expectedValue, \
                        ('Can\'t get field value by name\n' +
                         ' result value: "' + value + '"\n'
                         ' expected value: "' + expectedValue + '"\n'
                         ' from layer : ' + lyrNames[i] +
                         ' from dataset :' + dsName)
        gdaltest.mapinfo_drv.DeleteDataSource(dsName)
###############################################################################
# Test read MapInfo layers with encoding specified
def test_ogr_mitab_46():
    """Test reading MapInfo layers (TAB and MIF) with CP1251-encoded content.

    Verifies Cyrillic field names and values are decoded to UTF-8, both by
    field index and by field name.
    """
    dsNames = ['data/mitab/tab-win1251.TAB',
               'data/mitab/win1251.mif']
    fldNames = ['Поле_А', 'Поле_Б', 'Поле_В', 'Поле_Г', 'Поле_Д']
    fldVal = [['Значение А', 'Значение Б', 'Значение В', 'Значение Г', 'Значение Д'],
              ['Значение 1', 'Значение 2', 'Значение 3', 'Значение 4', 'Значение 5'],
              ['Полигон', 'Синий', 'Заливка', 'А а Б б', 'ЪЫЁЩ']]
    for dsName in dsNames:
        ds = ogr.Open(dsName)
        assert ds is not None, ('Can\'t open dataset: ' + dsName)
        lyr = ds.GetLayer(0)
        assert lyr is not None, ('Can\'t get layer 0 from ' + dsName)
        # Recoding support is optional in the build; skip when absent.
        if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
            pytest.skip('skipping test: recode is not possible')
        for fldN, expectedName in enumerate(fldNames):
            fldName = lyr.GetLayerDefn().GetFieldDefn(fldN).GetName()
            assert fldName == expectedName, ('Can\'t get field\n' +
                                             ' result name: "' + fldName + '"\n'
                                             ' expected name: "' + expectedName + '"\n'
                                             ' from dataset :' + dsName)
        for featFldVal in fldVal:
            feat = lyr.GetNextFeature()
            for fldN, fldName in enumerate(fldNames):
                expectedValue = featFldVal[fldN]
                # column value by number
                value = feat.GetField(fldN)
                assert value == expectedValue, ('Can\'t get field value by number\n' +
                                                ' result value: "' + value + '"\n'
                                                ' expected value: "' + expectedValue + '"\n'
                                                ' from dataset :' + dsName)
                # column value by name
                value = feat.GetField(fldName)
                assert value == expectedValue, ('Can\'t get field value by name\n' +
                                                ' result value: "' + value + '"\n'
                                                ' expected value: "' + expectedValue + '"\n'
                                                ' from dataset :' + dsName)
###############################################################################
# Test opening a dataset with a .ind file
def test_ogr_mitab_47():
    """Test opening a dataset with a .ind file (attribute index)."""
    ds = ogr.Open('data/mitab/poly_indexed.tab')
    lyr = ds.GetLayer(0)
    lyr.SetAttributeFilter("PRFEDEA = '35043413'")
    assert lyr.GetFeatureCount() == 1
    # Same filter on a /vsimem copy; note the .ind file is deliberately
    # not copied, so the filter must still work without the index.
    extensions = ('tab', 'dat', 'map', 'id')
    for ext in extensions:
        gdal.FileFromMemBuffer('/vsimem/poly_indexed.' + ext,
                               open('data/mitab/poly_indexed.' + ext, 'rb').read())
    ds = ogr.Open('/vsimem/poly_indexed.tab')
    lyr = ds.GetLayer(0)
    lyr.SetAttributeFilter("PRFEDEA = '35043413'")
    assert lyr.GetFeatureCount() == 1
    ds = None
    for ext in extensions:
        gdal.Unlink('/vsimem/poly_indexed.' + ext)
###############################################################################
# Test writing and reading LCC_1SP
def test_ogr_mitab_48():
    """Test writing and reading LCC_1SP.

    A Lambert Conformal Conic 1SP CRS defined on NTF (Paris) with angular
    unit grad must survive a MIF round-trip; the read-back CRS is expressed
    in degrees (46.85 grad == 42.165 deg for the latitude of origin).
    """
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource('/vsimem/test.mif')
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["NTF (Paris) / France IV (deprecated)",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269,
                AUTHORITY["EPSG","7011"]],
            TOWGS84[-168,-60,320,0,0,0,0],
            AUTHORITY["EPSG","6807"]],
        PRIMEM["Paris",2.33722917,
            AUTHORITY["EPSG","8903"]],
        UNIT["grad",0.01570796326794897,
            AUTHORITY["EPSG","9105"]],
        AUTHORITY["EPSG","4807"]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.85],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["X",EAST],
    AXIS["Y",NORTH],
    AUTHORITY["EPSG","27584"]]""")
    lyr = ds.CreateLayer('foo', srs=sr)
    lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
    ds = None
    ds = ogr.Open('/vsimem/test.mif')
    lyr = ds.GetLayer(0)
    sr_got = lyr.GetSpatialRef()
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource('/vsimem/test.mif')
    sr_expected = osr.SpatialReference()
    sr_expected.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["unnamed",
        DATUM["NTF_Paris_Meridian",
            SPHEROID["Clarke 1880 (modified for IGN)",6378249.2,293.4660213],
            TOWGS84[-168,-60,320,0,0,0,0]],
        PRIMEM["Paris",2.33722917],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",42.165],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1]]""")
    assert sr_got.IsSame(sr_expected) != 0, sr_got.ExportToPrettyWkt()
###############################################################################
# Test reading an aspatial TAB file.
def test_ogr_mitab_49_aspatial():
    """Reading an aspatial TAB file: no geometry type, no SRS, no extent."""
    ds = ogr.GetDriverByName('MapInfo File').Open('data/mitab/aspatial.tab')
    layer = ds.GetLayer(0)
    assert layer.GetLayerDefn().GetGeomType() == ogr.wkbNone
    assert layer.GetSpatialRef() is None
    assert layer.GetExtent(can_return_null=True) is None
###############################################################################
# Test creating an indexed field
def test_ogr_mitab_tab_field_index_creation():
    """Create a field index via SQL on a TAB layer and verify that the .ind
    file is written and that an attribute filter using the index works."""
    layername = 'ogr_mitab_tab_field_index_creation'
    filename = '/vsimem/' + layername + '.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer(layername)
    lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('other_field', ogr.OFTInteger))
    # Error cases: unknown layer name, unknown field name.
    with gdaltest.error_handler():
        ds.ExecuteSQL('CREATE INDEX ON foo USING id')
        ds.ExecuteSQL('CREATE INDEX ON ' + layername + ' USING foo')
    # Valid index creation; the second call re-creates the same index.
    ds.ExecuteSQL('CREATE INDEX ON ' + layername + ' USING id')
    ds.ExecuteSQL('CREATE INDEX ON ' + layername + ' USING id')
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField(0, 100)
    lyr.CreateFeature(f)
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField(0, 200)
    lyr.CreateFeature(f)
    ds = None
    # The index must have been persisted as a .ind side-car file.
    assert gdal.VSIStatL('/vsimem/' + layername + '.ind') is not None, 'no ind file'
    ds = ogr.Open(filename)
    # Creating an index on a read-only dataset is expected to error out.
    with gdaltest.error_handler():
        ds.ExecuteSQL('CREATE INDEX ON ' + layername + ' USING other_field')
    lyr = ds.GetLayer(0)
    lyr.SetAttributeFilter('id = 200')
    assert lyr.GetFeatureCount() == 1, 'bad feature count'
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test reading a tab_view file
def test_ogr_mitab_tab_view():
    """Read TAB "view" files (virtual layers joining two tables) and check
    the joined field lists and first-feature values."""
    # View exposing a subset of fields from the joined tables.
    ds = ogr.Open('data/mitab/view_first_table_second_table.tab')
    lyr = ds.GetLayer(0)
    assert lyr.GetLayerDefn().GetFieldCount() == 2, 'bad field count'
    f = lyr.GetNextFeature()
    if f['ID'] != 100 or f['foo'] != 'foo':
        f.DumpReadable()
        pytest.fail('bad feature')
    ds = None
    # View selecting all fields, including the join field itself.
    ds = ogr.Open('data/mitab/view_select_all_first_table_second_table.tab')
    lyr = ds.GetLayer(0)
    assert lyr.GetLayerDefn().GetFieldCount() == 3, 'bad field count'
    f = lyr.GetNextFeature()
    if f['joint_field'] != 1 or f['ID'] != 100 or f['foo'] != 'foo':
        f.DumpReadable()
        pytest.fail('bad feature')
    ds = None
###############################################################################
def test_ogr_mitab_style():
    """Round-trip OGR style strings (BRUSH/PEN) through a TAB file and
    check the canonical style strings produced on read-back."""
    tmpfile = '/vsimem/ogr_mitab_style.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(tmpfile)
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
    # Explicit foreground/background colors for brush and pen.
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,0 0))'))
    f.SetStyleString("BRUSH(fc:#AABBCC,bc:#DDEEFF);PEN(c:#DDEEFF)")
    lyr.CreateFeature(f)
    # Brush referenced by MapInfo id (solid brush, no background).
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,0 0))'))
    f.SetStyleString('BRUSH(fc:#AABBCC,id:"mapinfo-brush-1")')
    lyr.CreateFeature(f)
    # Colors with alpha components (alpha is not preserved by TAB).
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,0 0))'))
    f.SetStyleString('BRUSH(fc:#AABBCC00,bc:#ddeeff00)')
    lyr.CreateFeature(f)
    ds = None
    ds = ogr.Open(tmpfile)
    lyr = ds.GetLayer(0)
    # Read-back normalizes the style strings (lower-case colors, ids added).
    f = lyr.GetNextFeature()
    if f.GetStyleString() != 'BRUSH(fc:#aabbcc,bc:#ddeeff,id:"mapinfo-brush-2,ogr-brush-0");PEN(w:1px,c:#ddeeff,id:"mapinfo-pen-2,ogr-pen-0",cap:r,j:r)':
        f.DumpReadable()
        pytest.fail()
    f = lyr.GetNextFeature()
    if f.GetStyleString() != 'BRUSH(fc:#aabbcc,id:"mapinfo-brush-1,ogr-brush-1");PEN(w:1px,c:#000000,id:"mapinfo-pen-2,ogr-pen-0",cap:r,j:r)':
        f.DumpReadable()
        pytest.fail()
    f = lyr.GetNextFeature()
    if f.GetStyleString() != 'BRUSH(fc:#aabbcc,id:"mapinfo-brush-1,ogr-brush-1");PEN(w:1px,c:#000000,id:"mapinfo-pen-2,ogr-pen-0",cap:r,j:r)':
        f.DumpReadable()
        pytest.fail()
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(tmpfile)
###############################################################################
def test_ogr_mitab_tab_write_field_name_with_dot():
    """Write a field whose name contains a dot: the driver must warn and
    launder the name to use an underscore instead."""
    tmpfile = '/vsimem/ogr_mitab_tab_write_field_name_with_dot.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(tmpfile)
    lyr = ds.CreateLayer('test')
    lyr.CreateField(ogr.FieldDefn('with.dot', ogr.OFTInteger))
    f = ogr.Feature(lyr.GetLayerDefn())
    f['with.dot'] = 1
    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT(2 3)'))
    lyr.CreateFeature(f)
    # Flushing to disk emits the laundering warning; swallow it.
    with gdaltest.error_handler():
        ds = None
    ds = ogr.Open(tmpfile)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    # On read-back the field name has been laundered to 'with_dot'.
    assert f['with_dot'] == 1
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(tmpfile)
###############################################################################
# Test read text labels with local encoding from mif/mid file
def test_ogr_mitab_local_encoding_label():
    """Read text labels stored with a local (win1251) encoding from MIF/MID
    and TAB files and check the recoded LABEL style strings."""
    dsNames = ['data/mitab/win1251_text.mif',
               'data/mitab/tab-win1251_text.tab']
    expectedStyles = ['LABEL(t:"Поле",a:0.000000,s:2.070000g,c:#ff0000,p:2,f:"DejaVu Serif")',
                      'LABEL(t:"Поле",a:0.000000,s:0.015375g,c:#000000,p:1,f:"Times New Roman")']
    for (dsName, expectedStyle) in zip(dsNames, expectedStyles):
        ds = ogr.Open(dsName)
        assert ds is not None, ('Can\'t open dataset: ' + dsName)
        lyr = ds.GetLayer(0)
        assert lyr is not None, ('Can\'t get layer 0 from ' + dsName)
        if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
            pytest.skip('skipping test: recode is not possible')
        feat = lyr.GetNextFeature()
        # Bug fix: the original asserted `lyr is not None` here, which is
        # always true at this point; the intent is to check the feature.
        assert feat is not None, ('Can\'t find text feature in' + dsName)
        assert feat.GetStyleString() == expectedStyle, (feat.GetStyleString(), expectedStyle)
###############################################################################
# Check fix for https://github.com/OSGeo/gdal/issues/1232
def test_ogr_mitab_delete_feature_no_geometry():
    """Delete a feature from a geometry-less TAB layer.

    Regression test for https://github.com/OSGeo/gdal/issues/1232.
    """
    filename = '/vsimem/test.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone)
    lyr.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
    f = ogr.Feature(lyr.GetLayerDefn())
    f['id'] = 1
    lyr.CreateFeature(f)
    f = ogr.Feature(lyr.GetLayerDefn())
    f['id'] = 2
    lyr.CreateFeature(f)
    ds = None
    # Re-open in update mode and delete the first feature (FID 1).
    ds = ogr.Open(filename, update=1)
    lyr = ds.GetLayer(0)
    assert lyr.DeleteFeature(1) == 0
    ds = None
    # Only the second feature must remain.
    ds = ogr.Open(filename)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert f['id'] == 2
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Check fix for https://github.com/OSGeo/gdal/issues/1636
def test_ogr_mitab_too_large_value_for_decimal_field():
    """Writing a value that does not fit a decimal field's width/precision
    must fail cleanly instead of corrupting the file.

    Regression test for https://github.com/OSGeo/gdal/issues/1636.
    """
    filename = '/vsimem/test.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer('test', geom_type = ogr.wkbNone)
    # Decimal(20, 12): 12 fractional digits leave only 7 integer digits.
    fld = ogr.FieldDefn('f', ogr.OFTReal)
    fld.SetWidth(20)
    fld.SetPrecision(12)
    lyr.CreateField(fld)
    # 7 integer digits: fits.
    f = ogr.Feature(lyr.GetLayerDefn())
    f['f'] = 1234567.012
    assert lyr.CreateFeature(f) == ogr.OGRERR_NONE
    f = None
    # 9 integer digits: must be rejected.
    f = ogr.Feature(lyr.GetLayerDefn())
    f['f'] = 123456789.012
    with gdaltest.error_handler():
        assert lyr.CreateFeature(f) != ogr.OGRERR_NONE
    f = None
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Check custom datum/spheroid parameters export
def test_ogr_mitab_custom_datum_export():
    """Export SRSs with custom datum/spheroid parameters to the MapInfo
    CoordSys format and check the generated strings (datum 9999 encodes
    explicit TOWGS84 parameters)."""
    # Custom sphere + 7-parameter TOWGS84, geographic CS.
    sr = osr.SpatialReference()
    sr.SetGeogCS('Custom', 'Custom', 'Sphere', 6370997.0, 0.0)
    sr.SetTOWGS84(1, 2, 3, 4, 5, 6, 7)
    proj = sr.ExportToMICoordSys()
    assert proj == 'Earth Projection 1, 9999, 12, 1, 2, 3, -4, -5, -6, -7, 0'
    # Custom ellipsoid + TOWGS84, projected (UTM zone 33) CS.
    sr = osr.SpatialReference()
    sr.SetGeogCS('Custom', 'Custom', 'NWL-9D or WGS-66', 6378145.0, 298.25)
    sr.SetTOWGS84(1, 2, 3, 4, 5, 6, 7)
    sr.SetUTM(33)
    proj = sr.ExportToMICoordSys()
    assert proj == 'Earth Projection 8, 9999, 42, 1, 2, 3, -4, -5, -6, -7, 0, "m", 15, 0, 0.9996, 500000, 0'
###############################################################################
# Check write/read description
def test_ogr_mitab_description():
    """Write and read back the layer DESCRIPTION metadata of a TAB file,
    including truncation to the maximum length, updating the stored value,
    and flattening of line breaks."""
    filename = '/vsimem/test_description.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    assert ds is not None, ('Can\'t create dataset: ' + filename)
    test_description = 'Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К, Топокарты (вектор) 1:100К, 1:250К, ЦМР 10м, Реестр географических названий 1:100000, АТД 1:10000, лидарная съемка, ортофото. Лицензия: на геоданные - ограничительная, не соответствующая определению "открытых данных", так как запрещено распространение данных.'
    lyr = ds.CreateLayer('test_description', options=['ENCODING=CP1251', 'DESCRIPTION={}'.format(test_description)])
    assert lyr is not None, ('Can\'t create layer "test_description"')
    if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:
        pytest.skip('skipping test: recode is not possible')
    lyr.CreateField(ogr.FieldDefn('feature_id', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('other_field', ogr.OFTInteger))
    # Check description truncate.
    check_text = 'Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К, Топокарты (вектор) 1:100К, 1:250К, ЦМР 10м, Реестр географических названий 1:100000, АТД 1:10000, лидарная съемка, ортофото. Лицензия: на геоданные - ограничительная, не соответствующая определению "открытых данных", так как запрещено распростр'
    assert check_text == lyr.GetMetadataItem('DESCRIPTION')
    ds = None
    # Check storing description in tab file.
    ds = ogr.Open(filename, update=1)
    assert ds is not None, ('Can\'t open dataset: ' + filename)
    lyr = ds.GetLayer(0)
    assert lyr is not None, ('Can\'t get layer 0 from ' + filename)
    assert check_text == lyr.GetMetadataItem('DESCRIPTION')
    # Check update description in tab file.
    check_short_text = 'Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К'
    lyr.SetMetadataItem('DESCRIPTION', check_short_text)
    ds = None
    ds = ogr.Open(filename)
    assert ds is not None, ('Can\'t open dataset: ' + filename)
    lyr = ds.GetLayer(0)
    assert lyr is not None, ('Can\'t get layer 0 from ' + filename)
    assert check_short_text == lyr.GetMetadataItem('DESCRIPTION')
    ds = None
    # Check line breaks and double quotes
    test_description = 'Состав данных: "Топокарты (растр)"\n1:50К,\n100К,\n250К,\n500К\r\n"new line"'
    check_description = 'Состав данных: "Топокарты (растр)" 1:50К, 100К, 250К, 500К "new line"'
    ds = ogr.Open(filename, update=1)
    assert ds is not None, ('Can\'t open dataset: ' + filename)
    lyr = ds.GetLayer(0)
    assert lyr is not None, ('Can\'t get layer 0 from ' + filename)
    lyr.SetMetadataItem('DESCRIPTION', test_description)
    ds = None
    ds = ogr.Open(filename)
    assert ds is not None, ('Can\'t open dataset: ' + filename)
    lyr = ds.GetLayer(0)
    assert lyr is not None, ('Can\'t get layer 0 from ' + filename)
    assert check_description == lyr.GetMetadataItem('DESCRIPTION')
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
###############################################################################
# Test writing and reading back unset/null date, time, datetime
def test_ogr_mitab_nulldatetime():
    """Write a feature with unset date/time/datetime fields and verify they
    read back as unset rather than as zero values."""
    filename = '/vsimem/nulldatetime.tab'
    ds = ogr.GetDriverByName('MapInfo File').CreateDataSource(filename)
    lyr = ds.CreateLayer("nulldatetime")
    lyr.CreateField(ogr.FieldDefn("time", ogr.OFTTime))
    lyr.CreateField(ogr.FieldDefn("date", ogr.OFTDate))
    lyr.CreateField(ogr.FieldDefn("datetime", ogr.OFTDateTime))
    # Feature created without setting any field value.
    f = ogr.Feature(lyr.GetLayerDefn())
    lyr.CreateFeature(f)
    ds = None
    ds = ogr.Open(filename)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    assert not f.IsFieldSet("time")
    assert not f.IsFieldSet("date")
    assert not f.IsFieldSet("datetime")
    ds = None
    ogr.GetDriverByName('MapInfo File').DeleteDataSource(filename)
| 37.707705 | 676 | 0.580181 |
795bd309f21e3fe706437e2c83af3a2ee6a10bba | 1,104 | py | Python | kubernetes_asyncio/test/test_v1beta1_rolling_update_daemon_set.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | 1 | 2020-03-25T01:24:27.000Z | 2020-03-25T01:24:27.000Z | kubernetes_asyncio/test/test_v1beta1_rolling_update_daemon_set.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1beta1_rolling_update_daemon_set.py | olitheolix/kubernetes_asyncio | 344426793e4e4b653bcd8e4a29c6fa4766e1fff7 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1beta1_rolling_update_daemon_set import V1beta1RollingUpdateDaemonSet # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1beta1RollingUpdateDaemonSet(unittest.TestCase):
    """V1beta1RollingUpdateDaemonSet unit test stubs"""
    # Auto-generated stub (Swagger Codegen); the test body is intentionally
    # empty until model construction is filled in.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1beta1RollingUpdateDaemonSet(self):
        """Test V1beta1RollingUpdateDaemonSet"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1beta1_rolling_update_daemon_set.V1beta1RollingUpdateDaemonSet()  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 26.926829 | 130 | 0.750906 |
795bd3adbf171b37bf6898011a13977f7c8c5425 | 1,979 | py | Python | own_data.py | dzionek/pl-pres-elections-2020 | 7acc4b755f9f490f74c1fd9b55e6a12324ab35c5 | [
"MIT"
] | 1 | 2020-07-08T13:53:46.000Z | 2020-07-08T13:53:46.000Z | own_data.py | dzionek/pl-pres-elections-2020 | 7acc4b755f9f490f74c1fd9b55e6a12324ab35c5 | [
"MIT"
] | null | null | null | own_data.py | dzionek/pl-pres-elections-2020 | 7acc4b755f9f490f74c1fd9b55e6a12324ab35c5 | [
"MIT"
] | null | null | null | """
Module with constants used in the analysis.
"""
from plotly.express import colors
# Presidential candidates (2020 Polish election), as named in the data set.
candidates = [
    'Robert BIEDROŃ',
    'Krzysztof BOSAK',
    'Andrzej Sebastian DUDA',
    'Szymon Franciszek HOŁOWNIA',
    'Marek JAKUBIAK',
    'Władysław Marcin KOSINIAK-KAMYSZ',
    'Mirosław Mariusz PIOTROWSKI',
    'Paweł Jan TANAJNO',
    'Rafał Kazimierz TRZASKOWSKI',
    'Waldemar Włodzimierz WITKOWSKI',
    'Stanisław Józef ŻÓŁTEK'
]
# Plotly sequential color scale assigned to each candidate's maps.
candidates_colors = {
    'Robert BIEDROŃ' : colors.sequential.Reds,
    'Krzysztof BOSAK' : colors.sequential.Greys,
    'Andrzej Sebastian DUDA' : colors.sequential.Blues,
    'Szymon Franciszek HOŁOWNIA' : colors.sequential.YlOrBr,
    'Marek JAKUBIAK' : colors.sequential.Purples,
    'Władysław Marcin KOSINIAK-KAMYSZ' : colors.sequential.Greens,
    'Mirosław Mariusz PIOTROWSKI' : colors.sequential.matter,
    'Paweł Jan TANAJNO' : colors.sequential.PuBu,
    'Rafał Kazimierz TRZASKOWSKI' : colors.sequential.Oranges,
    'Waldemar Włodzimierz WITKOWSKI' : colors.sequential.YlOrRd,
    'Stanisław Józef ŻÓŁTEK' : colors.sequential.tempo
}
# Map view defaults: center of Poland and initial zoom level.
poland_center = {
    "lat": 52,
    "lon": 19.1451
}
poland_zoom = 5.2
# Choropleth fill opacity.
opacity = 0.8
# Zero margins so the map fills the whole figure.
map_margin = {
    "r":0, "t":0, "l":0, "b":0
}
# Committee names of the two main parties in the 2019 parliamentary election.
parties_2019 = [
    'KOALICYJNY KOMITET WYBORCZY KOALICJA OBYWATELSKA PO .N IPL ZIELONI - ZPOW-601-6/19',
    'KOMITET WYBORCZY PRAWO I SPRAWIEDLIWOŚĆ - ZPOW-601-9/19'
]
# Short display names for the committee names above.
simplify_party_dict = {
    'KOALICYJNY KOMITET WYBORCZY KOALICJA OBYWATELSKA PO .N IPL ZIELONI - ZPOW-601-6/19': 'Koalicja Obywatelska',
    'KOMITET WYBORCZY PRAWO I SPRAWIEDLIWOŚĆ - ZPOW-601-9/19': 'Prawo i Sprawiedliwość'
}
# 2020 presidential candidate backed by each 2019 party.
parties_to_candidates = {
    'Koalicja Obywatelska': 'Rafał Kazimierz TRZASKOWSKI',
    'Prawo i Sprawiedliwość': 'Andrzej Sebastian DUDA'
}
# Reuse each backed candidate's color scale for the party's maps.
parties_2019_colors = {
    'KOALICYJNY KOMITET WYBORCZY KOALICJA OBYWATELSKA PO .N IPL ZIELONI - ZPOW-601-6/19': candidates_colors['Rafał Kazimierz TRZASKOWSKI'],
    'KOMITET WYBORCZY PRAWO I SPRAWIEDLIWOŚĆ - ZPOW-601-9/19': candidates_colors['Andrzej Sebastian DUDA']
}
| 29.537313 | 136 | 0.73623 |
795bd42d2631d817b2855db714dd6c0e5e22804e | 4,811 | py | Python | Data/Preprocess_Scripts/preprocess_ner_en_es.py | AshishMittal/GLUECoS | 3bade2cae46424cd859a5cffcf973c7791588825 | [
"MIT"
] | null | null | null | Data/Preprocess_Scripts/preprocess_ner_en_es.py | AshishMittal/GLUECoS | 3bade2cae46424cd859a5cffcf973c7791588825 | [
"MIT"
] | null | null | null | Data/Preprocess_Scripts/preprocess_ner_en_es.py | AshishMittal/GLUECoS | 3bade2cae46424cd859a5cffcf973c7791588825 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. Licensed under the MIT license.
import os
import subprocess
import shlex
import shutil
import argparse
# download tweets
def download_tweets(original_path):
    """Scrape the dev and train tweets using the dataset's release scripts.

    Side effects: changes the working directory to <original_path>/Release
    and leaves 'validation.tsv' and 'train.tsv' there.  Requires a
    'twitter_authentication.txt' credentials file in the current directory.
    """
    # copy twitter credentials in required file
    shutil.copy('twitter_authentication.txt',original_path+'/Release/twitter_auth.txt')
    os.chdir(original_path+'/Release')
    # run scripts for scraping validation tweets
    os.system('chmod +x runScripts.sh')
    subprocess.call(shlex.split('./runScripts.sh ./../dev_offset.tsv gold'))
    # the script always writes to ./Files/data.tsv; snapshot it per split
    original_validation_path = './Files/data.tsv'
    new_validation_path = 'validation.tsv'
    shutil.copyfile(original_validation_path,new_validation_path)
    # run scripts for scraping train tweets
    subprocess.call(shlex.split('./runScripts.sh ./../train_offset.tsv gold'))
    original_train_path = './Files/data.tsv'
    new_train_path = 'train.tsv'
    shutil.copyfile(original_train_path,new_train_path)
# final format from above obtained files
def make_temp_file(original_path):
    """Merge the scraped validation and train TSVs into 'temp.txt'.

    Output format: one "<tweet_id>\t<word>\t<tag>" line per token, with a
    blank line inserted whenever the tweet id changes.  Column 4 of the
    input is the token and column 5 its tag; the variation-selector
    character U+FE0F (chr(65039)) is stripped from tokens.
    """
    with open(original_path + 'Release/validation.tsv','r+', encoding='utf-8') as infile:
        con=infile.readlines()
    validation_content=[x.strip('\n') for x in con]
    with open(original_path + 'Release/train.tsv','r+', encoding='utf-8') as infile:
        con=infile.readlines()
    train_content=[x.strip('\n') for x in con]
    # Write validation tokens first, tracking the previous tweet id so a
    # blank separator line is emitted at each tweet boundary.
    prev_id = validation_content[0].split('\t')[0]
    with open('temp.txt','w+', encoding='utf-8') as outfile:
        for i in validation_content:
            if i!='':
                j=i.split('\t')
                curr_id=j[0]
                word=j[4].replace(chr(65039),'')
                tag=j[5]
                if curr_id==prev_id:
                    if word!='':
                        outfile.write(curr_id + '\t'+ word+'\t'+tag+'\n')
                else:
                    if word!='':
                        outfile.write('\n' + curr_id + '\t' + word+'\t'+tag+'\n')
                prev_id=curr_id
    # Append train tokens using the same boundary logic.
    prev_id = train_content[0].split('\t')[0]
    with open('temp.txt','a', encoding='utf-8') as outfile:
        for i in train_content:
            if i!='':
                j=i.split('\t')
                curr_id=j[0]
                word=j[4].replace(chr(65039),'')
                tag=j[5]
                if curr_id==prev_id:
                    if word!='':
                        outfile.write(curr_id + '\t'+ word+'\t'+tag+'\n')
                else:
                    if word!='':
                        outfile.write('\n' + curr_id + '\t' + word+'\t'+tag+'\n')
                prev_id=curr_id
# make processed file from ID and input files
def make_split_file(id_file, input_file, output_file, mode):
    """Extract the sentences listed in *id_file* from *input_file*.

    id_file: one sentence id per line; output order follows this file.
    input_file: "<id>\t<word>\t<tag>" lines (blank lines are ignored).
    output_file: "<word>\t<tag>" lines, one blank line after each sentence.
    mode: 'test' replaces every tag with 'O', skips empty words, and writes
    a "not found" placeholder sentence for ids absent from the input; other
    modes ('train'/'valid') keep the gold tags and silently skip missing ids.
    """
    with open(id_file, 'r', encoding='utf-8') as f:
        ids = [line.strip('\n') for line in f.readlines()]
    # Set for O(1) membership tests (the original scanned the id list for
    # every input line, which is quadratic on large splits).
    id_set = set(ids)
    with open(input_file, 'r', encoding='utf-8') as infile:
        all_sentences = [line.strip('\n') for line in infile.readlines()]
    # Accumulate each requested sentence's "<word>\t<tag>\n" lines by id.
    temp_dict = {}
    for line in all_sentences:
        if line == '':
            continue
        parts = line.split('\t')
        sent_id = parts[0]
        if sent_id not in id_set:
            continue
        if mode == 'test':
            # Hide gold tags; drop empty tokens.
            if parts[1] != '':
                temp_dict[sent_id] = temp_dict.get(sent_id, '') + parts[1] + '\t' + 'O' + '\n'
        else:
            temp_dict[sent_id] = temp_dict.get(sent_id, '') + parts[1] + '\t' + parts[2] + '\n'
    with open(output_file, 'w', encoding='utf-8') as outfile:
        for sent_id in ids:
            if sent_id in temp_dict:
                outfile.write(temp_dict[sent_id] + '\n')
            elif mode == 'test':
                # Keep test alignment with a placeholder sentence.
                outfile.write('not found' + '\t' + 'O' + '\n\n')
def main():
    """CLI entry point: scrape the raw NER_EN_ES data, normalize it, and
    write train/test/validation splits plus a combined all.txt."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="Original data directory")
    parser.add_argument("--output_dir", default="en", type=str, help="Processed data directory")
    args = parser.parse_args()
    original_path = args.data_dir+'/NER_EN_ES/temp/'
    new_path = args.output_dir +'/NER_EN_ES/'
    id_dir = args.data_dir + '/NER_EN_ES/ID_Files'
    # Remember the working directory: download_tweets chdirs away from it.
    owd = os.getcwd()
    if not os.path.exists(new_path):
        os.mkdir(new_path)
    # call required functions to process
    download_tweets(original_path)
    os.chdir(owd)
    make_temp_file(original_path)
    # make train, test and validation files
    make_split_file(id_dir+'/train_ids.txt','temp.txt',new_path+'/train.txt',mode='train')
    make_split_file(id_dir+'/test_ids.txt','temp.txt',new_path+'/test.txt',mode='test')
    make_split_file(id_dir+'/validation_ids.txt','temp.txt',new_path+'/validation.txt',mode='valid')
    # append all data in one file
    open(new_path+'/all.txt', 'w+', encoding='utf-8').writelines([l for l in open(new_path+'/train.txt', 'r', encoding='utf-8').readlines()])
    open(new_path+'/all.txt', 'a',encoding='utf-8').writelines([l for l in open(new_path+'/test.txt','r',encoding='utf-8').readlines()])
    open(new_path+'/all.txt', 'a',encoding='utf-8').writelines([l for l in open(new_path+'/validation.txt','r',encoding='utf-8').readlines()])
    # delete temp files
    os.unlink('temp.txt')
# Script entry point.
if __name__=='__main__':
    main()
| 31.24026 | 139 | 0.656412 |
795bd48bc7737504b56fff80d7d6dddc57147c85 | 5,413 | py | Python | Final Exam/Q2/mdp.py | billgoo/Rutgers-CS520-Intro-to-AI | e3c67af8a1d0efdec763b4415962150563c9431c | [
"MIT"
] | null | null | null | Final Exam/Q2/mdp.py | billgoo/Rutgers-CS520-Intro-to-AI | e3c67af8a1d0efdec763b4415962150563c9431c | [
"MIT"
] | null | null | null | Final Exam/Q2/mdp.py | billgoo/Rutgers-CS520-Intro-to-AI | e3c67af8a1d0efdec763b4415962150563c9431c | [
"MIT"
] | 1 | 2021-05-03T05:24:57.000Z | 2021-05-03T05:24:57.000Z | from utils import *
import numpy as np
class MDP:
    """A Markov Decision Process for a machine use/replace problem.

    States: 0 is NEW, 1..8 are USEDi, 9 is DEAD.
    Actions: 0 is USE, 1 is REPLACE.
    T(a)[s] is a list of (probability, next_state) pairs and R(a)[s] the
    immediate reward; beta is the discount factor used by the solvers.
    """

    def __init__(self, beta=.9):
        self.beta = beta
        # Model tables are per-instance.  (They used to be class-level
        # lists mutated by build_model(), so creating a second MDP()
        # appended a duplicate copy of every entry to the shared tables.)
        self.t_used = []
        self.t_replace = []
        self.reward_used = [100]
        self.reward_replace = [0]
        self.build_model()

    def build_model(self):
        """Populate the transition and reward tables for the 10 states."""
        # USE from NEW always moves to USED1.
        self.t_used.append([(1, 1)])
        # From USEDi the machine degrades one step with probability 0.1*i.
        for i in range(1, 9):
            p = 0.1 * i
            self.t_used.append([(p, i + 1), (1 - p, i)])
        # USE in DEAD is forbidden (-inf "probability" sentinel).
        self.t_used.append([(float('-inf'), 0)])
        # REPLACE in NEW is forbidden; otherwise replacement returns to NEW.
        self.t_replace.append([(float('-inf'), 0)])
        for i in range(1, 10):
            # replace will all go to New
            self.t_replace.append([(1, 0)])
        # Reward for using a machine in state i is 100 - 10*i (0 in DEAD).
        for i in range(1, 9):
            self.reward_used.append(100 - 10 * i)
        self.reward_used.append(0)
        # Replacement has a fixed cost of 250 from any non-NEW state.
        for i in range(1, 10):
            self.reward_replace.append(-250)

    def R(self):
        """Return [use_rewards, replace_rewards], each indexed by state."""
        return [self.reward_used, self.reward_replace]

    def T(self):
        """Transition model: [use_transitions, replace_transitions], where
        each entry is a per-state list of (probability, next_state) pairs."""
        return [self.t_used, self.t_replace]

    def actions(self, state):
        """Legal actions in *state*: NEW (0) can only be used, DEAD (9)
        can only be replaced, every other state allows both."""
        if state == 0:
            return [0]
        elif state == 9:
            return [1]
        else:
            return [0, 1]

    def __del__(self):
        # Kept for compatibility: the __main__ demo dels the instance and
        # expects this trace line.
        print("__del__")
def value_iteration(mdp, epsilon=0.001):
    """Solving an MDP by value iteration.

    Repeats Bellman backups over the 10 states until the largest utility
    change (delta) falls below the standard stopping bound
    epsilon * (1 - beta) / beta, then returns the utility table.
    """
    U1 = dict([(i, 0) for i in range(10)])
    R, T, beta = mdp.R(), mdp.T(), mdp.beta
    while True:
        U = U1.copy()
        delta = 0
        for s in range(10):
            '''
            print(s)
            print(mdp.actions(s))
            print([(R[a][s] + beta * sum([p * U[s1] for (p, s1) in T[a][s]]))
                for a in mdp.actions(s)])
            '''
            # Bellman update: best action value over the legal actions of s.
            U1[s] = max([(R[a][s] + beta * sum([p * U[s1] for (p, s1) in T[a][s]]))
                for a in mdp.actions(s)])
            delta = max(delta, abs(U1[s] - U[s]))
        if delta < epsilon * (1 - beta) / beta:
            return U
def best_policy(mdp, U):
    """Given an MDP and a utility function U, determine the best policy,
    as a mapping from state to action. (Equation 17.4)"""
    return {
        state: argmax(mdp.actions(state),
                      lambda act, state=state: expected_utility(act, state, U, mdp))
        for state in range(10)
    }
def expected_utility(a, s, U, mdp):
    """The expected utility of doing a in state s, according to the MDP and U."""
    rewards = mdp.R()
    transitions = mdp.T()
    # Probability-weighted utility of the possible successor states.
    future = 0
    for prob, nxt in transitions[a][s]:
        future += prob * U[nxt]
    return rewards[a][s] + mdp.beta * future
'''
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(mdp.actions(s), lambda a: expected_utility(a, s, U, mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi
def policy_evaluation(pi, U, mdp, k=20):
"""Return an updated utility mapping U from each state in the MDP to its
utility, using an approximation (modified policy iteration)."""
R, T, beta = mdp.R, mdp.T, mdp.beta
for i in range(k):
for s in mdp.states:
U[s] = R(s) + beta * sum([p * U[s] for (p, s1) in T(s, pi[s])])
return U
'''
def argmax(seq, fn):
    """Return the element of *seq* with the highest fn score.

    On a tie for the best score, return the literal 2 (this module's
    action code meaning "both actions are equally good").
    >>> argmax(['one', 'to', 'three'], len)
    'three'
    """
    if len(seq) == 1:
        return seq[0]
    winner, top = -1, float("-inf")
    for candidate in seq:
        score = fn(candidate)
        if score > top:
            winner, top = candidate, score
        elif score == top:
            winner = 2
    return winner
# Demo: build the model, solve it by value iteration, and print the
# utilities, the greedy policy, and the raw model tables.
if __name__ == '__main__':
    mdp = MDP()
    U = value_iteration(mdp)
    print("U:", U)
    print("policy:", best_policy(mdp, U))
    print(mdp.T())
    print(mdp.R())
    del mdp
| 32.220238 | 83 | 0.546277 |
795bd4b2cea1f0ef2084b9e9752835c6fc743af2 | 712 | py | Python | setup.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | setup.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | setup.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Long description and license text are read from the repository files.
with open("README.md") as f:
    long_description = f.read()
with open('LICENSE') as f:
    license = f.read()
# Package metadata for the gemini model-parallel runner.
setup(
    name="gemini", # Replace with your own username
    version="0.0.1",
    author="Enflame Tech",
    author_email="heng.shi@enflame-tech.com",
    description="model parallel runner",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    packages=find_packages(),
    classifiers=[
    ],
    python_requires='>=2.7',
    tests_require=[
        # 'unittest',
    ],
    # Expose the gemini_python command-line entry point.
    entry_points={
        "console_scripts": [
            "gemini_python=gemini.bin.gemini_python:main",
        ]
    }
)
| 22.967742 | 58 | 0.633427 |
795bd51b62cb6f3086183fa1461f2b5cf5debbe9 | 21,472 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_virtual_network_peerings_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_virtual_network_peerings_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_virtual_network_peerings_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations(object):
"""VirtualNetworkPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Generated operation-group constructor: stash the shared pipeline
        # client, service configuration, and (de)serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE request for a virtual network peering.

        Internal helper for :meth:`begin_delete`; performs one HTTP DELETE
        and raises on any status other than 200/202/204.  The LRO poller
        built by begin_delete tracks the operation to completion.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are the accepted responses for an async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified virtual network peering.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network peering.
        :type virtual_network_peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                virtual_network_peering_name=virtual_network_peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the custom cls hook if given.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    virtual_network_peering_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "models.VirtualNetworkPeering"
    """Gets the specified virtual network peering.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param virtual_network_peering_name: The name of the virtual network peering.
    :type virtual_network_peering_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetworkPeering, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.VirtualNetworkPeering
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VirtualNetworkPeering"]
    # Map well-known auth/404/409 statuses to typed exceptions; callers may extend.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    virtual_network_peering_name,  # type: str
    virtual_network_peering_parameters,  # type: "models.VirtualNetworkPeering"
    **kwargs  # type: Any
):
    # type: (...) -> "models.VirtualNetworkPeering"
    """Issue the initial PUT of the create-or-update long-running operation
    and deserialize its immediate response."""
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VirtualNetworkPeering"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the peering model into the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200 = updated, 201 = created; both carry the peering in the body.
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    virtual_network_peering_name,  # type: str
    virtual_network_peering_parameters,  # type: "models.VirtualNetworkPeering"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.VirtualNetworkPeering"]
    """Creates or updates a peering in the specified virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param virtual_network_peering_name: The name of the peering.
    :type virtual_network_peering_name: str
    :param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
     network peering operation.
    :type virtual_network_peering_parameters: ~azure.mgmt.network.v2018_07_01.models.VirtualNetworkPeering
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkPeering or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.VirtualNetworkPeering]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Polling selection: True -> ARMPolling, False -> NoPolling, else user-supplied.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VirtualNetworkPeering"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: send the initial PUT.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            virtual_network_peering_name=virtual_network_peering_name,
            virtual_network_peering_parameters=virtual_network_peering_parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; must not be passed to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Final GET response carries the resulting peering resource.
        deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.VirtualNetworkPeeringListResult"]
    """Gets all virtual network peerings in a virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.VirtualNetworkPeeringListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.VirtualNetworkPeeringListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # First page: build the URL from the operation metadata.
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service returns a fully-formed next link.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    def extract_data(pipeline_response):
        # Deserialize one page and hand (next_link, items) to the pager.
        deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)
    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'}  # type: ignore
| 50.403756 | 250 | 0.676556 |
795bd5b67523a5391e4f9825ac9eb11c42790086 | 2,498 | py | Python | MD/process_keywords.py | vitroid/vitroid.github.io | cb4f06a4a4925a0e06a4001d3680be7998552b83 | [
"MIT"
] | null | null | null | MD/process_keywords.py | vitroid/vitroid.github.io | cb4f06a4a4925a0e06a4001d3680be7998552b83 | [
"MIT"
] | 1 | 2020-02-12T02:46:21.000Z | 2020-02-12T02:46:21.000Z | MD/process_keywords.py | vitroid/vitroid.github.io | cb4f06a4a4925a0e06a4001d3680be7998552b83 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# scan all md files and make the link lists
import glob
import re
from logging import getLogger, basicConfig, INFO, DEBUG
import sys
import pickle
import os
import smartfind as sf
basicConfig(level=INFO)
logger = getLogger()
def hashtag_proc(x, words):
    """Remember hashtag *x* in the *words* set; the tag text itself is
    replaced by an empty string in the processed line."""
    words |= {x}
    return ""
# Build the keyword search tree from every markdown page in the current directory.
from ktree import *
keywords = [fn[:-3] for fn in glob.glob("*.md")]  # page names without the ".md" suffix
kwtree = keyword_tree(keywords)
# logger.info(kwtree)
if __name__ == "__main__":
    # Smoke test: show the longest keyword match found at the head of each sample string.
    for word in ("かもめだった", "LDAがいい", "ice T研究のため", "ice T3", "ice T"):
        print(word, keyword_find(word, kwtree))
def process_keywords(filename, lines, autolink=False):
    """Collect keyword/hashtag occurrences from *lines* and record *filename*
    as a referrer in the per-keyword pickle files under ../_ref/.

    :param filename: name of the page the lines come from; added to the
        referrer set of every keyword found.
    :param lines: iterable of (mode, line) pairs; only "normal" lines are scanned.
    :param autolink: when True, also match known page names via the module-level
        keyword tree ``kwtree``; when False only explicit #hashtags are collected.
    """
    words = set()
    parsed = ""  # NOTE(review): never updated; see the commented-out "return parsed" below
    for mode, line in lines:
        if mode == "normal":
            if autolink:
                head = 0
                processed = ""  # NOTE(review): unused, like `parsed`
                # logger.info(line)
                while head < len(line):
                    if line[head] == "#":
                        # might be a hashtag
                        # NOTE(review): line[head+1] raises IndexError when '#'
                        # is the last character of the line -- presumably the
                        # upstream text never ends that way; confirm.
                        if line[head+1] not in "# ":
                            # it is a hashtag
                            m = re.search(r"\s", line[head+1:])
                            if m:
                                # Tag runs up to the first whitespace after '#'.
                                words.add(line[head+1:head+1+m.span()[0]])
                                head += m.span()[1]
                                continue
                        head += 1
                        continue
                    found = keyword_find(line[head:], kwtree)
                    if found:
                        # Longest known keyword starting at `head`; skip past it.
                        words.add(line[head:head+found])
                        head += found
                    else:
                        head += 1
            else:
                # hashtag
                line = re.sub(r"#([^#\s]+)\s", lambda x:hashtag_proc(x.group(1), words), line)
    for word in words:
        # Skip words containing characters that would break the link file names.
        if len(tuple(sf.sfind(word, '/[]!"(),;'))):
            logger.info(" ELIM {0}".format(word))
            continue
        referfrom = "../_ref/{0}.pickle".format(word)
        if os.path.exists(referfrom):
            logger.info(" Update linklist {0}".format(word))
            with open(referfrom, mode='rb') as f:
                S = pickle.load(f)
        else:
            S = set()
            logger.info(" Create linklist {0}".format(word))
        L0 = len(S)
        S.add(filename)
        # Rewrite the pickle only when the referrer set actually grew.
        if L0 != len(S):
            with open(referfrom, mode='wb') as f:
                pickle.dump(S, f)
    # return parsed
| 30.096386 | 94 | 0.460769 |
795bd5f97ac3ee8abe70708ad7f745efca8336f4 | 7,009 | py | Python | aries_cloudagent/messaging/valid.py | DibbsZA/aries-cloudagent-python | a094dd7697023721ac2a2fd4e58b04d4b37d1f44 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/messaging/valid.py | DibbsZA/aries-cloudagent-python | a094dd7697023721ac2a2fd4e58b04d4b37d1f44 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/messaging/valid.py | DibbsZA/aries-cloudagent-python | a094dd7697023721ac2a2fd4e58b04d4b37d1f44 | [
"Apache-2.0"
] | null | null | null | """Validators for schema fields."""
from datetime import datetime
from base58 import alphabet
from marshmallow.exceptions import ValidationError
from marshmallow.validate import OneOf, Range, Regexp
from .util import epoch_to_str
B58 = alphabet if isinstance(alphabet, str) else alphabet.decode("ascii")
class IntEpoch(Range):
    """Validate value against (integer) epoch format."""

    EXAMPLE = int(datetime.now().timestamp())

    def __init__(self):
        """Initialize the range check over the signed 64-bit integer domain
        (required for Aries RFC compatibility)."""
        lower_bound = -9223372036854775808
        upper_bound = 9223372036854775807
        super().__init__(
            min=lower_bound,
            max=upper_bound,
            error="Value {input} is not a valid integer epoch time."
        )
class IndyDID(Regexp):
    """Validate value against indy DID."""

    EXAMPLE = "WgWxqztrNooG92RXvxSTWv"

    def __init__(self):
        """Initialize the pattern: optional ``did:sov:`` prefix followed by
        21-22 base58 characters."""
        pattern = rf"^(did:sov:)?[{B58}]{{21,22}}$"
        super().__init__(
            pattern,
            error="Value {input} is not an indy decentralized identifier (DID)."
        )
class IndyRawPublicKey(Regexp):
"""Validate value against indy (Ed25519VerificationKey2018) raw public key."""
EXAMPLE = "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"
def __init__(self):
"""Initializer."""
super().__init__(
rf"^[{B58}]{{43,44}}$",
error="Value {input} is not a raw Ed25519VerificationKey2018 key."
)
class IndyCredDefId(Regexp):
"""Validate value against indy credential definition identifier specification."""
EXAMPLE = "WgWxqztrNooG92RXvxSTWv:3:CL:20:tag"
def __init__(self):
"""Initializer."""
super().__init__(
(
rf"([{B58}]{{21,22}})" # issuer DID
f":3" # cred def id marker
f":CL" # sig alg
rf":(([1-9][0-9]*)|([{B58}]{{21,22}}:2:.+:[0-9.]+))" # schema txn / id
f"(.+)?$" # tag
),
error="Value {input} is not an indy credential definition identifier."
)
class IndyVersion(Regexp):
"""Validate value against indy version specification."""
EXAMPLE = "1.0"
def __init__(self):
"""Initializer."""
super().__init__(
rf"^[0-9.]+$",
error="Value {input} is not an indy version (use only digits and '.')."
)
class IndySchemaId(Regexp):
"""Validate value against indy schema identifier specification."""
EXAMPLE = "WgWxqztrNooG92RXvxSTWv:2:schema_name:1.0"
def __init__(self):
"""Initializer."""
super().__init__(
rf"^[{B58}]{{21,22}}:2:.+:[0-9.]+$",
error="Value {input} is not an indy schema identifier."
)
class IndyRevRegId(Regexp):
"""Validate value against indy revocation registry identifier specification."""
EXAMPLE = f"WgWxqztrNooG92RXvxSTWv:4:WgWxqztrNooG92RXvxSTWv:3:CL:20:tag:CL_ACCUM:0"
def __init__(self):
"""Initializer."""
super().__init__(
(
rf"^([{B58}]{{21,22}}):4:"
rf"([{B58}]{{21,22}}):3:"
rf"CL:(([1-9][0-9]*)|([{B58}]{{21,22}}:2:.+:[0-9.]+))(:.+)?:"
rf"CL_ACCUM:.+$"
),
error="Value {input} is not an indy revocation registry identifier."
)
class IndyPredicate(OneOf):
"""Validate value against indy predicate."""
EXAMPLE = ">="
def __init__(self):
"""Initializer."""
super().__init__(
choices=["<", "<=", ">=", ">"],
error="Value {input} must be one of {choices}."
)
class IndyISO8601DateTime(Regexp):
"""Validate value against ISO 8601 datetime format, indy profile."""
EXAMPLE = epoch_to_str(int(datetime.now().timestamp()))
def __init__(self):
"""Initializer."""
super().__init__(
r"^\d{4}-\d\d-\d\d[T ]\d\d:\d\d"
r"(?:\:(?:\d\d(?:\.\d{1,6})?))?(?:[+-]\d\d:?\d\d|Z)$",
error="Value {input} is not a date in valid format."
)
class Base64(Regexp):
"""Validate base64 value."""
EXAMPLE = "ey4uLn0="
def __init__(self):
"""Initializer."""
super().__init__(
r"^[a-zA-Z0-9+/]*={0,2}$",
error="Value {input} is not a valid base64 encoding"
)
def __call__(self, value):
"""Validate input value."""
if value is None or len(value) % 4:
raise ValidationError(self.error)
return super().__call__(value)
class Base64URL(Regexp):
"""Validate base64 value."""
EXAMPLE = "ey4uLn0="
def __init__(self):
"""Initializer."""
super().__init__(
r"^[-_a-zA-Z0-9]*={0,2}$",
error="Value {input} is not a valid base64url encoding"
)
def __call__(self, value):
"""Validate input value."""
if value is None or len(value) % 4:
raise ValidationError(self.error)
return super().__call__(value)
class SHA256Hash(Regexp):
"""Validate (binhex-encoded) SHA256 value."""
EXAMPLE = "617a48c7c8afe0521efdc03e5bb0ad9e655893e6b4b51f0e794d70fba132aacb"
def __init__(self):
"""Initializer."""
super().__init__(
r"^[a-fA-F0-9+/]{64}$",
error="Value {input} is not a valid (binhex-encoded) SHA-256 hash"
)
class UUIDFour(Regexp):
"""Validate UUID4: 8-4-4-4-12 hex digits, the 13th of which being 4."""
EXAMPLE = "3fa85f64-5717-4562-b3fc-2c963f66afa6"
def __init__(self):
"""Initializer."""
super().__init__(
r"[a-fA-F0-9]{8}-"
r"[a-fA-F0-9]{4}-"
r"4[a-fA-F0-9]{3}-"
r"[a-fA-F0-9]{4}-"
r"[a-fA-F0-9]{12}",
error="Value {input} is not a UUID4 (8-4-4-4-12 hex digits with digit#13=4)"
)
# Instances for marshmallow schema specification
INT_EPOCH = {
"validate": IntEpoch(),
"example": IntEpoch.EXAMPLE
}
INDY_DID = {
"validate": IndyDID(),
"example": IndyDID.EXAMPLE
}
INDY_RAW_PUBLIC_KEY = {
"validate": IndyRawPublicKey(),
"example": IndyRawPublicKey.EXAMPLE
}
INDY_SCHEMA_ID = {
"validate": IndySchemaId(),
"example": IndySchemaId.EXAMPLE
}
INDY_CRED_DEF_ID = {
"validate": IndyCredDefId(),
"example": IndyCredDefId.EXAMPLE
}
INDY_REV_REG_ID = {
"validate": IndyRevRegId(),
"example": IndyRevRegId.EXAMPLE
}
INDY_VERSION = {
"validate": IndyVersion(),
"example": IndyVersion.EXAMPLE
}
INDY_PREDICATE = {
"validate": IndyPredicate(),
"example": IndyPredicate.EXAMPLE
}
INDY_ISO8601_DATETIME = {
"validate": IndyISO8601DateTime(),
"example": IndyISO8601DateTime.EXAMPLE
}
BASE64 = {
"validate": Base64(),
"example": Base64.EXAMPLE
}
BASE64URL = {
"validate": Base64URL(),
"example": Base64.EXAMPLE
}
SHA256 = {
"validate": SHA256Hash(),
"example": SHA256Hash.EXAMPLE
}
UUID4 = {
"validate": UUIDFour(),
"example": UUIDFour.EXAMPLE
}
| 24.85461 | 88 | 0.570124 |
795bd63e02fee89dfefc76ba95b83fc3b2c72200 | 979 | py | Python | modelscript/metamodels/classes/checkers.py | ScribesZone/ModelScribes | a36be1047283f2e470dc2dd4353f2a714377bb7d | [
"MIT"
] | 1 | 2019-02-22T14:27:06.000Z | 2019-02-22T14:27:06.000Z | modelscript/metamodels/classes/checkers.py | ScribesZone/ModelScribes | a36be1047283f2e470dc2dd4353f2a714377bb7d | [
"MIT"
] | 4 | 2015-12-18T10:30:02.000Z | 2015-12-18T10:36:28.000Z | modelscript/metamodels/classes/checkers.py | ScribesZone/ModelScribes | a36be1047283f2e470dc2dd4353f2a714377bb7d | [
"MIT"
] | null | null | null | # coding=utf-8
from modelscript.megamodels.checkers import NamingChecker
from modelscript.base.issues import (
Levels
)
from modelscript.base .symbols import Symbol
from modelscript.metamodels.classes.classes import (
Class)
from modelscript.metamodels.classes.types import EnumerationLiteral
class ClassNomenclatureChecker(NamingChecker):
def __init__(self, **params):
super(ClassNomenclatureChecker, self).__init__(
metaclasses=[Class],
fun=Symbol.is_CamlCase,
namingName='CamlCase',
**params)
ClassNomenclatureChecker(
level=Levels.Warning
)
class EnumLiteralNomenclatureChecker(NamingChecker):
def __init__(self, **params):
super(EnumLiteralNomenclatureChecker, self).__init__(
metaclasses=[EnumerationLiteral],
fun=Symbol.is_camlCase,
namingName='camlCase',
**params
)
EnumLiteralNomenclatureChecker(
level=Levels.Warning
) | 26.459459 | 67 | 0.705822 |
795bd6971a8e0090099099c4130c854f1d975a44 | 346 | py | Python | jeu_educatif/python/c_change_notes.py | Charles-Svg/Projet_L3_jeu_educatif | 841f70f1368117288128342258f5832ca9028161 | [
"MIT"
] | null | null | null | jeu_educatif/python/c_change_notes.py | Charles-Svg/Projet_L3_jeu_educatif | 841f70f1368117288128342258f5832ca9028161 | [
"MIT"
] | null | null | null | jeu_educatif/python/c_change_notes.py | Charles-Svg/Projet_L3_jeu_educatif | 841f70f1368117288128342258f5832ca9028161 | [
"MIT"
] | null | null | null | # Afin de modifier votre note, vous devrez importer le module "notes" à votre programme et modifier votre note.
# Les notes sont comprises dans une variable "notes" dans le module "notes". Cette variable contient les notes de tous
# les élèves sous forme de tableau d'indice, on accède donc aux notes d'un élève comme ceci : notes.notes["eleve"]
| 86.5 | 118 | 0.771676 |
795bd7fd9d77a6e1a37303eaa16c80f3f551aa46 | 1,889 | py | Python | spydrnet/ir/innerpin.py | chgentso/spydrnet | 2cfc33ae367771cd2f3a15f240ebd0ec76841b5f | [
"BSD-3-Clause"
] | 34 | 2020-03-12T15:40:49.000Z | 2022-02-28T07:13:47.000Z | spydrnet/ir/innerpin.py | chgentso/spydrnet | 2cfc33ae367771cd2f3a15f240ebd0ec76841b5f | [
"BSD-3-Clause"
] | 104 | 2020-01-06T20:32:19.000Z | 2022-01-02T00:20:14.000Z | spydrnet/ir/innerpin.py | chgentso/spydrnet | 2cfc33ae367771cd2f3a15f240ebd0ec76841b5f | [
"BSD-3-Clause"
] | 10 | 2020-09-02T20:24:00.000Z | 2022-02-24T16:10:07.000Z | from spydrnet.ir.pin import Pin
from copy import deepcopy, copy, error
class InnerPin(Pin):
    """Pins that correspond to definitions.

    These pins can be thought of as being on the inside of a definition;
    many outer pins may map onto a single inner pin.
    """
    __slots__ = ['_port']

    def __init__(self):
        """Create an inner pin that is not yet attached to any port."""
        super().__init__()
        self._port = None

    @property
    def port(self):
        """The port this inner pin is part of.

        This object cannot be modified directly by the end user."""
        return self._port

    def _clone_rip_and_replace(self, memo):
        """Move into a cloned environment: re-point the wire reference at its
        clone recorded in the *memo* dictionary."""
        wire = self._wire
        if wire != None:
            assert wire in memo, "wire must have been cloned in order to rip and replace innerpin"
            self._wire = memo[wire]

    def _clone_rip(self):
        """Detach from the current environment, leaving a floating
        stand-alone instance with no wire pointer."""
        self._wire = None

    def _clone(self, memo):
        """Clone leaving all references intact (not api safe).

        The element can then either be ripped, or ripped and replaced."""
        assert self not in memo, "the object should not have been copied twice in this pass"
        duplicate = InnerPin()
        memo[self] = duplicate
        duplicate._wire = self._wire
        duplicate._port = None
        return duplicate

    def clone(self):
        """Clone the inner pin in an api safe way.

        The copy is orphaned from any port, connected to no wires, and
        referenced by no wires or outer pins.
        """
        duplicate = self._clone({})
        duplicate._clone_rip()
        return duplicate
| 32.016949 | 138 | 0.634198 |
795bd8aefe3c03bddf9d1981c71ad23a71913b59 | 6,106 | py | Python | test/nni_test/nnitest/utils.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 3 | 2021-01-05T07:41:58.000Z | 2021-01-11T02:08:01.000Z | test/nni_test/nnitest/utils.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 21 | 2020-11-13T19:01:01.000Z | 2022-02-27T09:12:51.000Z | test/nni_test/nnitest/utils.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 3 | 2020-10-23T02:53:47.000Z | 2020-11-15T22:05:09.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import contextlib
import collections
import os
import socket
import sys
import subprocess
import requests
import time
import ruamel.yaml as yaml
import shlex
EXPERIMENT_DONE_SIGNAL = 'Experiment done'  # sentinel written to nnimanager.log on completion
GREEN = '\33[32m'  # ANSI color escape codes for console output
RED = '\33[31m'
CLEAR = '\33[0m'
# REST API endpoints of the locally running NNI manager.
REST_ENDPOINT = 'http://localhost:8080'
API_ROOT_URL = REST_ENDPOINT + '/api/v1/nni'
EXPERIMENT_URL = API_ROOT_URL + '/experiment'
STATUS_URL = API_ROOT_URL + '/check-status'
TRIAL_JOBS_URL = API_ROOT_URL + '/trial-jobs'
METRICS_URL = API_ROOT_URL + '/metric-data'
def read_last_line(file_name):
    '''Read the last line of a file; return None if the file is missing or empty.'''
    try:
        # Use a context manager so the handle is closed deterministically
        # (the original passed a bare open() to unpacking and leaked it).
        with open(file_name) as file:
            *_, last_line = file
        return last_line.strip()
    except (FileNotFoundError, ValueError):
        # ValueError: star-unpacking fails when the file has no lines at all.
        return None
def remove_files(file_list):
    '''remove a list of files'''
    for path in file_list:
        try:
            os.remove(path)
        except FileNotFoundError:
            # Already gone -- nothing to do.
            pass
def get_yml_content(file_path):
    '''Load yaml file content'''
    with open(file_path, 'r') as yml_file:
        parsed = yaml.load(yml_file, Loader=yaml.Loader)
    return parsed
def dump_yml_content(file_path, content):
    '''Dump yaml file content'''
    serialized = yaml.dump(content, default_flow_style=False)
    with open(file_path, 'w') as yml_file:
        yml_file.write(serialized)
def setup_experiment(installed=True):
    '''setup the experiment if nni is not installed'''
    if not installed:
        # Run from the source tree: expose the local CLI on PATH and the
        # in-repo SDK/tools packages on PYTHONPATH.
        os.environ['PATH'] = os.environ['PATH'] + ':' + os.getcwd()
        sdk_path = os.path.abspath('../src/sdk/pynni')
        cmd_path = os.path.abspath('../tools')
        pypath = os.environ.get('PYTHONPATH')
        if pypath:
            # Preserve any pre-existing PYTHONPATH entries.
            pypath = ':'.join([pypath, sdk_path, cmd_path])
        else:
            pypath = ':'.join([sdk_path, cmd_path])
        os.environ['PYTHONPATH'] = pypath
def get_experiment_id(experiment_url):
    # Ask the NNI manager REST API for the current experiment's identifier.
    return requests.get(experiment_url).json()['id']
def get_experiment_dir(experiment_url=None, experiment_id=None):
    '''get experiment root directory'''
    assert any([experiment_url, experiment_id])
    exp_id = experiment_id if experiment_id is not None else get_experiment_id(experiment_url)
    return os.path.join(os.path.expanduser('~'), 'nni-experiments', exp_id)
def get_nni_log_dir(experiment_url=None, experiment_id=None):
    '''get nni's log directory from nni's experiment url'''
    experiment_root = get_experiment_dir(experiment_url, experiment_id)
    return os.path.join(experiment_root, 'log')
def get_nni_log_path(experiment_url):
    '''get nni's log path from nni's experiment url'''
    log_dir = get_nni_log_dir(experiment_url)
    return os.path.join(log_dir, 'nnimanager.log')
def is_experiment_done(nnimanager_log_path):
    '''check if the experiment is done successfully'''
    assert os.path.exists(nnimanager_log_path), 'Experiment starts failed'
    with open(nnimanager_log_path, 'r') as log_file:
        manager_log = log_file.read()
    return EXPERIMENT_DONE_SIGNAL in manager_log
def get_experiment_status(status_url):
    # The check-status endpoint returns a JSON object with a 'status' field.
    return requests.get(status_url).json()['status']
def get_trial_stats(trial_jobs_url):
    # Tally trial jobs by status, e.g. {'SUCCEEDED': 3, 'FAILED': 1}.
    stats = collections.defaultdict(int)
    for job in requests.get(trial_jobs_url).json():
        stats[job['status']] += 1
    return stats
def get_trial_jobs(trial_jobs_url, status=None):
    '''Return trial jobs, optionally filtered to a single status (e.g. 'FAILED').'''
    trial_jobs = requests.get(trial_jobs_url).json()
    res = []
    for trial_job in trial_jobs:
        # status=None means no filtering: every job is returned.
        if status is None or trial_job['status'] == status:
            res.append(trial_job)
    return res
def get_failed_trial_jobs(trial_jobs_url):
    '''Return failed trial jobs'''
    return get_trial_jobs(trial_jobs_url, status='FAILED')
def print_file_content(filepath):
    # Echo a file's path followed by its full content, flushing immediately
    # so output is not lost when the test run is interrupted.
    with open(filepath, 'r') as fh:
        body = fh.read()
    print(filepath, flush=True)
    print(body, flush=True)
def print_trial_job_log(training_service, trial_jobs_url):
    # Dump the per-trial log files for every trial job of the experiment.
    for trial_job in get_trial_jobs(trial_jobs_url):
        trial_dir = os.path.join(get_experiment_dir(EXPERIMENT_URL), 'trials', trial_job['id'])
        if training_service == 'local':
            names = ['stderr', 'trial.log']
        else:
            names = ['stdout_log_collection.log']
        for name in names:
            print_file_content(os.path.join(trial_dir, name))
def print_experiment_log(experiment_id):
    # Dump the dispatcher/manager logs plus whatever `nnictl log` captured,
    # to aid debugging of failed integration-test runs.
    log_dir = get_nni_log_dir(experiment_id=experiment_id)
    for log_file in ['dispatcher.log', 'nnimanager.log']:
        filepath = os.path.join(log_dir, log_file)
        print_file_content(filepath)
    print('nnictl log stderr:')
    subprocess.run(shlex.split('nnictl log stderr {}'.format(experiment_id)))
    print('nnictl log stdout:')
    subprocess.run(shlex.split('nnictl log stdout {}'.format(experiment_id)))
def parse_max_duration_time(max_exec_duration):
    # Convert a duration like '2h' / '30m' / '1d' to seconds.
    seconds_per_unit = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    amount, unit = max_exec_duration[:-1], max_exec_duration[-1]
    return int(amount) * seconds_per_unit[unit]
def deep_update(source, overrides):
    """Update a nested dictionary or similar mapping.

    Modify ``source`` in place and return it.
    """
    for key, value in overrides.items():
        # Recurse only for non-empty mappings; empty mappings (and plain
        # values) replace the existing entry wholesale.
        if isinstance(value, collections.abc.Mapping) and value:
            # collections.abc.Mapping: the bare collections.Mapping alias
            # was removed in Python 3.10, so the original raised AttributeError there.
            returned = deep_update(source.get(key, {}), value)
            source[key] = returned
        else:
            source[key] = overrides[key]
    return source
def detect_port(port):
    '''Detect if the port is used (something accepts connections on 127.0.0.1:port).'''
    try:
        # Context manager closes the probe socket on every path; the original
        # leaked it whenever connect() raised.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket_test:
            socket_test.connect(('127.0.0.1', int(port)))
        return True
    except (OSError, ValueError):
        # OSError: nothing listening / connection refused.
        # ValueError: unparsable port value (matches the original bare except,
        # narrowed so programming errors are no longer swallowed).
        return False
def wait_for_port_available(port, timeout):
    # Poll once per second until the port is free; give up after `timeout` seconds.
    deadline = time.time() + timeout
    while detect_port(port):
        if time.time() > deadline:
            raise RuntimeError('port {} is not available in {} seconds.'.format(port, timeout))
        time.sleep(1)
| 33.734807 | 109 | 0.685883 |
795bdb7fca6960bd8f5b9a23d483518cfcc29d15 | 363 | py | Python | sdk/python/pulumi_azure_native/healthbot/v20201208preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/healthbot/v20201208preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/healthbot/v20201208preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .bot import *
from .get_bot import *
from ._inputs import *
from . import outputs
| 27.923077 | 80 | 0.719008 |
795bdb8c91ea8c9e618d1a1b1ef0391fc2c74f95 | 1,697 | py | Python | devtools/gha/coverage_report_xml.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 25 | 2015-04-29T13:10:22.000Z | 2019-03-18T09:45:29.000Z | devtools/gha/coverage_report_xml.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 330 | 2015-03-04T09:06:38.000Z | 2019-06-11T10:31:54.000Z | devtools/gha/coverage_report_xml.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 16 | 2015-03-23T08:00:46.000Z | 2019-02-21T11:14:47.000Z | import sys
import re
import os.path
from typing import Sequence
from xml.etree import ElementTree
from pathlib import Path
from coverage import Coverage
# Collect every entry on sys.path as an absolute, lower-cased path prefix
# (lower-casing makes the later prefix matching case-insensitive, for
# case-insensitive file systems); entries that no longer exist are skipped.
paths = []
for path in sys.path:
    try:
        paths.append(str(Path(path).resolve()).lower()+os.path.sep)
    except FileNotFoundError:
        pass
# Longest prefixes first, so the most specific prefix wins in remove_prefix.
paths = list(sorted(paths, key=len, reverse=True))
# Same prefixes with forward slashes (as file names appear in the XML
# report) and in dotted package-name form.
unix_paths = tuple(p.replace('\\', '/') for p in paths)
packages = tuple(p.replace('/', '.') for p in unix_paths)
dst = Path('coverage.xml')
# Generate `coverage.xml` with absolute file and package names.
cov = Coverage()
cov.load()
cov.xml_report(outfile=str(dst))
# Load the report, remove the largest prefix in `packages` from attribute
# `name` of element `package`, if any, and similarly the largest prefix in
# `paths` from attribute `filename` of element `class` and from the content of
# element `source`. Matching prefixes is case insensitive for case insensitive
# file systems.
def remove_prefix(value: str, prefixes: Sequence[str]) -> str:
    """Strip the first matching prefix from *value* (case-insensitively) and
    return the remainder; return *value* unchanged when nothing matches.

    *prefixes* are expected to be lower-case already; the returned suffix
    keeps the original casing of *value*.
    """
    folded = value.lower()
    match = next((p for p in prefixes if folded.startswith(p)), None)
    return value if match is None else value[len(match):]
# Post-process the report: strip the sys.path prefixes collected above from
# package names, class file names and source directories, turning absolute
# names into names relative to their import root.
root = ElementTree.parse(str(dst))
for elem in root.iter('package'):
    # BUGFIX: the original wrapped this in ``for package in packages:``,
    # re-setting the same attribute once per prefix; the loop variable was
    # never used, so a single pass is exactly equivalent.
    name = elem.get('name')
    if name:
        elem.set('name', remove_prefix(name, packages))
for elem in root.iter('class'):
    filename = elem.get('filename')
    if filename:
        elem.set('filename', remove_prefix(filename, unix_paths))
for elem in root.iter('source'):
    text = elem.text
    if text:
        elem.text = remove_prefix(text, paths)
root.write('coverage.xml')
| 30.303571 | 78 | 0.67472 |
795bdbb89009a5848a9b4c9d71045eb1e9460b49 | 6,232 | py | Python | src/hog_svm/hog.py | duytq99/traffic-signs-detection | 99bae2fb76dee4f783d9bd2e8cfd9f11084a2c3c | [
"MIT"
] | null | null | null | src/hog_svm/hog.py | duytq99/traffic-signs-detection | 99bae2fb76dee4f783d9bd2e8cfd9f11084a2c3c | [
"MIT"
] | null | null | null | src/hog_svm/hog.py | duytq99/traffic-signs-detection | 99bae2fb76dee4f783d9bd2e8cfd9f11084a2c3c | [
"MIT"
] | null | null | null | import cv2
import time
import argparse
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
from skimage.feature import hog
from skimage.draw import draw
def _hog_normalize_block(block, method, eps=1e-5):
if method == 'L1':
out = block / (np.sum(np.abs(block)) + eps)
elif method == 'L1-sqrt':
out = np.sqrt(block / (np.sum(np.abs(block)) + eps))
elif method == 'L2':
out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
elif method == 'L2-Hys':
out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
out = np.minimum(out, 0.2)
out = out / np.sqrt(np.sum(out ** 2) + eps ** 2)
else:
raise ValueError('Selected block normalization method is invalid.')
return out
def hog_descriptor_scratch(im, cell_size=(8,8), orientations = 9, block_norm = None, cells_per_block=(4,4), visualize = True, visualize_grad=False):
    """Compute a HOG descriptor for a 2-D grayscale image from scratch.

    Return value depends on the flags:
      * ``block_norm`` set and ``visualize`` True  -> (feature_vector, hog_image)
      * ``block_norm`` set and ``visualize`` False -> feature_vector
      * ``block_norm`` is None -> the raw (unnormalized) per-cell histogram,
        flattened.  NOTE(review): in this case ``visualize`` is IGNORED and
        only a single array is returned — callers unpacking two values will
        fail; confirm whether this asymmetry is intended.

    NOTE(review): ``sx, sy = image.shape`` binds the ROW count to ``sx``, so
    the x/y naming is only consistent for square images.
    """
    # square root normalization and extract image shape
    image = np.sqrt(im).astype(np.float32)
    sx, sy = image.shape # image size
    orientations = orientations # number of gradient bins
    cx, cy = cell_size # pixels per cell
    b_row, b_col = cells_per_block # number of cells in each block
    n_cellsx = int(np.floor(sx / cx)) # number of cells in x
    n_cellsy = int(np.floor(sy / cy)) # number of cells in y
    # compute gradient on image
    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
    gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
    # visualize gradient image
    if visualize_grad:
        fig, a = plt.subplots(1,2)
        a[0].imshow(gx, cmap='gray')
        a[1].imshow(gy, cmap='gray')
        plt.show()
    # compute magnitute and orientation (phase) of gradient image
    grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
    # orientation folded into [0, 180); the 1e-15 avoids atan2(0, 0).
    grad_ori = np.rad2deg(np.arctan2(gy, gx + 1e-15)) % 180
    # compute histogram of orientations with magnitute-based weights
    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    for dx in range(n_cellsx):
        for dy in range(n_cellsy):
            ori = grad_ori[dy*cy:dy*cy+cy, dx*cx:dx*cx+cx]
            mag = grad_mag[dy*cy:dy*cy+cy, dx*cx:dx*cx+cx]
            hist, _ = np.histogram(ori, bins=orientations, range=(0, 180), weights=mag) # 1-D vector, 9 elements
            orientation_histogram[dy, dx, :] = hist
    # compute block normalization (L2)
    if block_norm is not None:
        n_blocks_row = (n_cellsy - b_row) + 1
        n_blocks_col = (n_cellsx - b_col) + 1
        normalized_blocks = np.zeros((n_blocks_row, n_blocks_col,b_row, b_col, orientations))
        for r in range(n_blocks_row):
            for c in range(n_blocks_col):
                block = orientation_histogram[r:r + b_row, c:c + b_col, :]
                normalized_blocks[r, c, :] = _hog_normalize_block(block, method=block_norm)
        # visualize HoG feature
        if visualize:
            hog_image = None
            radius = min(cy, cx) // 2 - 1
            orientations_arr = np.arange(orientations)
            # draw one line per orientation bin, centred in each cell, with
            # brightness proportional to the (unnormalized) bin weight
            orientation_bin_midpoints = (np.pi * (orientations_arr + .5) / orientations)
            dr_arr = radius * np.sin(orientation_bin_midpoints)
            dc_arr = radius * np.cos(orientation_bin_midpoints)
            hog_image = np.zeros((sy, sx), dtype=float)
            for r in range(n_cellsy):
                for c in range(n_cellsx):
                    for o, dr, dc in zip(orientations_arr, dr_arr, dc_arr):
                        centre = tuple([r * cy + cy // 2,
                                        c * cx + cx // 2])
                        rr, cc = draw.line(int(centre[0] - dc),
                                           int(centre[1] + dr),
                                           int(centre[0] + dc),
                                           int(centre[1] - dr))
                        hog_image[rr, cc] += orientation_histogram[r, c, o]
            return normalized_blocks.ravel(), hog_image
        else:
            return normalized_blocks.ravel()
    else:
        return orientation_histogram.ravel()
def parse_arguments():
    """Build and run the command-line parser for the HOG demo.

    Only ``-p/--path`` is required; every other option has a default.
    Returns the parsed ``argparse.Namespace``.
    """
    arg_parser = argparse.ArgumentParser(description='Arguments parser.')
    arg_parser.add_argument('-p', '--path', type=str, required=True, help='image path')
    arg_parser.add_argument('-o', '--orientations', default=9, type=int, required=False, help='number of orientations')
    # NOTE(review): the defaults below (cell_size=4, cell_per_block=8) look
    # swapped relative to a typical HOG setup, but are preserved as-is.
    arg_parser.add_argument('-cs', '--cell_size', default=4, type=int, required=False, help='size of cell')
    arg_parser.add_argument('-cb', '--cell_per_block', default=8, type=int, required=False, help='number of cells per block')
    arg_parser.add_argument('-n', '--block_norm', type=str, default='L2', required=False, help='block normalization method')
    arg_parser.add_argument('-v', '--visualize_results', type=bool, default=True, help='visualize processed images')
    arg_parser.add_argument('--save', action='store_true')
    return arg_parser.parse_args()
def main():
    """Demo driver: compare the scratch HOG implementation with skimage's.

    Reads the image given by ``--path``, resizes it to 96x96 grayscale,
    computes both descriptors, prints timings and feature shapes, and saves
    comparison figures under ``output/`` (the directory must already exist).
    NOTE(review): unpacking two values from hog_descriptor_scratch requires
    ``visualize_results`` truthy AND a non-None ``block_norm`` — confirm.
    """
    args = parse_arguments()
    im = cv2.imread(args.path)
    im = cv2.resize(im, (96,96))
    im_orig = im.copy()
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    start = time.time()
    hogFeatureScratch, hogImageScratch = hog_descriptor_scratch(
        im,
        orientations=args.orientations,
        cell_size=(args.cell_size, args.cell_size),
        cells_per_block=(args.cell_per_block,args.cell_per_block),
        block_norm=args.block_norm,
        visualize=args.visualize_results)
    print("Scratch HoG: ", time.time()- start)
    print("Feature vector: ", hogFeatureScratch)
    print("Feature vector shape: ", hogFeatureScratch.shape)
    # Same configuration through the reference skimage implementation.
    start = time.time()
    hogFeatureSkimage, hogImageSkimage = hog(
        im,
        orientations=args.orientations,
        pixels_per_cell=(args.cell_size, args.cell_size),
        cells_per_block=(args.cell_per_block,args.cell_per_block),
        transform_sqrt=True,
        visualize=args.visualize_results,
        block_norm=args.block_norm)
    print("Skimage HoG: ", time.time()- start)
    print("Feature vector: ", hogFeatureSkimage)
    print("Feature vector shape: ", hogFeatureSkimage.shape)
    plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plot
    # visualize experiment result
    fig, a = plt.subplots(1,3)
    a[0].imshow(cv2.cvtColor(im_orig, cv2.COLOR_BGR2RGB))
    a[0].set_title('Original traffic sign')
    a[1].imshow(hogImageSkimage, cmap='gray')
    a[1].set_title('HOG feature from Skimage')
    a[2].imshow(hogImageScratch, cmap='gray')
    a[2].set_title('HOG feature from scratch')
    plt.tight_layout()
    plt.savefig('output/fig.png')
    plt.show()
    # Replicate the single-channel HOG image to 3 channels for imwrite.
    hogImageScratch = np.stack([hogImageScratch,hogImageScratch,hogImageScratch],axis=-1)
    print(hogImageScratch.shape)
    cv2.imwrite('output/ori.png', im_orig)
    cv2.imwrite('output/hog_scratch.png', hogImageScratch)
if __name__ == '__main__':
main()
| 37.769697 | 148 | 0.701701 |
795bdc1f8af305f536e3cd3d83967e9f307c5cf5 | 6,306 | py | Python | rbacProject/apps/adm/models.py | xeroCBW/testmodel | 6000d79f91d11bcf6ba76befb3a94a007231ecdc | [
"MIT"
] | null | null | null | rbacProject/apps/adm/models.py | xeroCBW/testmodel | 6000d79f91d11bcf6ba76befb3a94a007231ecdc | [
"MIT"
] | 54 | 2020-06-24T07:12:19.000Z | 2022-03-12T00:43:57.000Z | rbacProject/apps/adm/models.py | xeroCBW/testmodel | 6000d79f91d11bcf6ba76befb3a94a007231ecdc | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Supplier(models.Model):
    """
    Distributor / reseller management (分销商管理).
    """
    company = models.CharField(max_length=30, verbose_name="公司名称")
    address = models.CharField(max_length=100, verbose_name="地址")
    linkname = models.CharField(max_length=20, verbose_name="联系人")
    phone = models.CharField(max_length=20, verbose_name="联系电话")
    # Active flag; new suppliers default to enabled.
    status = models.BooleanField(default=True, verbose_name="状态")
    # Staff member responsible for this supplier (optional).
    belongs_to = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, verbose_name="责任人")
    desc = models.TextField(blank=True, null=True, verbose_name="备注")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    class Meta:
        verbose_name = "分销商管理"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.company
class Customer(models.Model):
    """
    Customer information (客户信息).
    """
    unit = models.CharField(max_length=50, verbose_name="客户单位")
    address = models.CharField(max_length=100, verbose_name="地址")
    name = models.CharField(max_length=20, verbose_name="联系人")
    phone = models.CharField(max_length=30, verbose_name="联系电话")
    # Staff member responsible for this customer (optional).
    belongs_to = models.ForeignKey(User, blank=True, null=True , on_delete=models.CASCADE, verbose_name="责任人")
    status = models.BooleanField(default=True, verbose_name="状态")
    desc = models.TextField(blank=True, null=True, verbose_name="备注")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    class Meta:
        verbose_name = "客户管理"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.unit
class AssetType(models.Model):
    """
    Asset type / category (资产类型).
    """
    name = models.CharField(max_length=30, verbose_name="类型名称", help_text="类型名称")
    desc = models.TextField(blank=True, null=True, verbose_name="备注")
    class Meta:
        verbose_name = "资产类型"
        # Consistency fix: every sibling model pairs verbose_name with an
        # identical verbose_name_plural; this one was missing it, so the
        # admin displayed an auto-generated English plural instead.
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class Asset(models.Model):
    """Asset register entry (资产管理)."""
    # Status choices: "0" idle (闲置), "1" in use (在用), "2" under repair
    # (维修), "3" scrapped (报废), "4" sold (售出).
    asset_status = (
        ("0", "闲置"),
        ("1", "在用"),
        ("2", "维修"),
        ("3", "报废"),
        ("4", "售出")
    )
    # Warehouse choices: "0" Nanjing (南京), "1" Suzhou (苏州).
    warehouse_choices = (
        ("0", "南京"),
        ("1", "苏州"),
    )
    assetNum = models.CharField(max_length=128, default="", verbose_name="资产编号")
    assetType = models.ForeignKey(AssetType, blank=True, null=True, on_delete=models.CASCADE, verbose_name="资产类型")
    brand = models.CharField(max_length=20, blank=True, null=True, verbose_name="品牌")
    model = models.CharField(max_length=30, default="", verbose_name="型号")
    warehouse = models.CharField(choices=warehouse_choices, default="1", max_length=20, verbose_name="仓库")
    price = models.IntegerField(blank=True, null=True, verbose_name="价格")
    buyDate = models.DateField(verbose_name="购买日期")
    warrantyDate = models.DateField(verbose_name="到保日期")
    status = models.CharField(choices=asset_status, max_length=20, default="1", verbose_name="资产状态")
    customer = models.CharField(max_length=80, default="", blank=True, null=True, verbose_name="客户信息")
    owner = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, verbose_name="使用人")
    operator = models.CharField(max_length=20, default="", verbose_name="入库人")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    desc = models.TextField(default="", blank=True, null=True, verbose_name="备注信息")
    class Meta:
        verbose_name = "资产管理"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.assetNum
class AssetFile(models.Model):
    """File/image attachment uploaded for an asset (资产文件).

    NOTE(review): unlike the sibling models this one defines no ``Meta``
    (verbose names) and no ``__str__`` — presumably an oversight; confirm.
    """
    asset = models.ForeignKey(Asset, blank=True, null=True, on_delete=models.CASCADE, verbose_name="资产")
    upload_user = models.CharField(max_length=20, verbose_name="上传人")
    # Stored under MEDIA_ROOT/asset_file/<year>/<month>/.
    file_content = models.ImageField(upload_to="asset_file/%Y/%m", null=True, blank=True, verbose_name="资产文件")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="上传时间")
class AssetLog(models.Model):
    """Change-history record for an asset (变更记录)."""
    asset = models.ForeignKey(Asset, verbose_name="资产",on_delete=models.CASCADE)
    operator = models.CharField(max_length=20, verbose_name="操作人")
    desc = models.TextField(default="", verbose_name="备注")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    class Meta:
        # BUGFIX: was ``class Mate`` (typo), so Django never saw these
        # options and fell back to auto-generated verbose names.
        verbose_name = "变更记录"
        verbose_name_plural = verbose_name
    def __str__(self):
        # BUGFIX: returning the related model instance itself made
        # ``str(asset_log)`` raise TypeError; return its string form
        # (Asset.__str__ yields the asset number).
        return str(self.asset)
class ServiceInfo(models.Model):
    """A single service/maintenance record (服务记录)."""
    content = models.TextField(verbose_name="记录内容")
    writer = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, verbose_name="记录人")
    # Whether an e-mail reminder should be sent for this record.
    is_reminding = models.BooleanField(default=False, verbose_name="邮件消息提醒")
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    class Meta:
        # BUGFIX: was ``class Mate`` (typo), so Django ignored these options.
        verbose_name = "服务记录"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.content
class EquipmentType(models.Model):
    """
    Equipment type / category (设备类型).
    """
    name = models.CharField(max_length=30, verbose_name="类型名称", help_text="类型名称")
    desc = models.TextField(blank=True, null=True, verbose_name="备注")
    class Meta:
        verbose_name = "设备类型"
        verbose_name_plural = verbose_name
        # Default queryset ordering: by primary key, oldest first.
        ordering = ['id']
    def __str__(self):
        return self.name
class Equipment(models.Model):
    """Equipment register entry (设备管理), linked to customer, supplier
    and its service history."""
    number = models.CharField(max_length=30, default="", verbose_name="设备编号")
    equipment_type = models.ForeignKey(EquipmentType, blank=True, null=True, on_delete=models.CASCADE, verbose_name="设备类型")
    equipment_model = models.CharField(max_length=50, default="", verbose_name="设备型号")
    buy_date = models.DateField(verbose_name="购买日期")
    warranty_date = models.DateField(verbose_name="质保日期")
    # Whether cost accounting has been completed for this equipment.
    accounting = models.BooleanField(default=False, verbose_name="费用核算状态")
    config_desc = models.TextField(blank=True, null=True, verbose_name="配置说明")
    customer = models.ForeignKey(Customer, blank=True, null=True, on_delete=models.CASCADE, verbose_name="客户信息")
    supplier = models.ForeignKey(Supplier, blank=True, null=True, on_delete=models.CASCADE, verbose_name="分销商")
    service_info = models.ManyToManyField(ServiceInfo, blank=True, verbose_name="服务记录")
    class Meta:
        verbose_name = "设备管理"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.number
| 36.877193 | 123 | 0.697431 |
795bdc6946e8fff1dd70edaf232686c187e18504 | 2,036 | py | Python | kubernetes-puppet-creator/cluster_build.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | 2 | 2020-02-13T02:38:25.000Z | 2022-01-24T15:02:29.000Z | kubernetes-puppet-creator/cluster_build.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | null | null | null | kubernetes-puppet-creator/cluster_build.py | opentokix/yolo-dangerzone | 7ea5ab54e3369997c9715fb762cc5604023b3aee | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import os
import ConfigParser
from subprocess import call
def run_docker():
    """Read ``cluster_setup.ini`` and run the ``puppet/kubetool`` container
    with the cluster parameters passed as environment variables.

    Returns the parsed ConfigParser so the caller can reuse the settings.
    NOTE(review): ``fdqn_base`` is computed but never used here, and
    ``readfp`` on an unclosed ``open()`` is Python-2 style (this file
    imports the py2 ``ConfigParser`` module) — confirm before porting.
    """
    config = ConfigParser.ConfigParser()
    config.readfp(open('cluster_setup.ini'))
    fdqn_base = config.get('main', 'domain')
    # Build ETCD_INITIAL_CLUSTER=name1:addr1,name2:addr2,... from the
    # [controllers] section (trailing comma stripped).
    controller_string = ""
    for item in config.items('controllers'):
        controller_string += item[0] + ":" + item[1] + ","
    controller_string = "ETCD_INITIAL_CLUSTER=" + controller_string[:-1]
    # Mount the working directory at /mnt so kubetool writes its output here.
    volume_option = os.getcwd() + ":/mnt"
    os_option = "OS=" + config.get('main', 'os')
    version_option = "VERSION=" + config.get('main', 'version')
    container_runtime_option = "CONTAINER_RUNTIME=" + config.get('main', 'runtime')
    cni_provider_option = "CNI_PROVIDER=" + config.get('main', 'cniprovider')
    # The %{::ipaddress_eth0} placeholders are resolved later by Puppet/hiera.
    command_line = ['docker', 'run', '--rm',
                    '-v', volume_option,
                    '-e', os_option,
                    '-e', version_option,
                    '-e', container_runtime_option,
                    '-e', cni_provider_option,
                    '-e', controller_string,
                    '-e', 'ETCD_IP="%{::ipaddress_eth0}"',
                    '-e', 'KUBE_API_ADVERTISE_ADDRESS="%{::ipaddress_eth0}"',
                    '-e', 'INSTALL_DASHBOARD=true', 'puppet/kubetool:3.0.1']
    call(command_line)
    return config
def make_yaml_files(config):
    """Write one ``<host>.<domain>.yaml`` hiera file per controller/worker.

    Every output file starts with the shared ``classes`` block followed by
    the contents of ``Rhel.yaml``; controllers additionally get the contents
    of their own ``<name>.yaml`` (produced by the kubetool container).
    """
    class_block = """
classes:
  - kubernetes
"""
    domain = config.get('main', 'domain')
    for controller, _ in config.items('controllers'):
        # BUGFIX: the original opened the output with a bare ``open`` and
        # never closed it; ``with`` guarantees flush/close even on error.
        with open(controller + "." + domain + ".yaml", 'w') as outfile:
            outfile.write(class_block)
            with open('Rhel.yaml') as infile:
                outfile.write(infile.read())
            with open(controller + ".yaml") as infile:
                outfile.write(infile.read())
    for worker, _ in config.items('workers'):
        with open(worker + "." + domain + ".yaml", 'w') as outfile:
            outfile.write(class_block)
            with open('Rhel.yaml') as infile:
                outfile.write(infile.read())
def main():
    """Entry point: run the kubetool container, then write per-node YAML."""
    cluster_config = run_docker()
    make_yaml_files(cluster_config)
if __name__ == '__main__':
main()
| 34.508475 | 81 | 0.617387 |
795bdcad5febe5d2cad5470e9a1ca3f1f5badf01 | 950 | py | Python | django_show/placeholder/placeholder/urls.py | JiaLei123/PythonCamp | 3ff2cee00bc4d8e65f3cc5c7d687da7ecbf9a79e | [
"MIT"
] | null | null | null | django_show/placeholder/placeholder/urls.py | JiaLei123/PythonCamp | 3ff2cee00bc4d8e65f3cc5c7d687da7ecbf9a79e | [
"MIT"
] | null | null | null | django_show/placeholder/placeholder/urls.py | JiaLei123/PythonCamp | 3ff2cee00bc4d8e65f3cc5c7d687da7ecbf9a79e | [
"MIT"
] | null | null | null | """placeholder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import views
import placeholder
# URL routing table: the admin site, the landing page, and the dynamic
# placeholder-image endpoint (width/height captured from the URL path).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.index, name='homepage'),
    # e.g. /image/300x200/ -> placeholder.placeholder(request, width, height)
    url(r'^image/(?P<width>[0-9]+)x(?P<height>[0-9]+)/$', placeholder.placeholder, name='placeholder')
]
| 35.185185 | 102 | 0.694737 |
795bdcd2ba5bfdd4619be300255852b154b59c49 | 1,403 | py | Python | tests/settings.py | domibydzovsky/wagtail-rest-pack | 821d5d4111a4a7665e50272035e90f836a2c60c2 | [
"MIT"
] | null | null | null | tests/settings.py | domibydzovsky/wagtail-rest-pack | 821d5d4111a4a7665e50272035e90f836a2c60c2 | [
"MIT"
] | null | null | null | tests/settings.py | domibydzovsky/wagtail-rest-pack | 821d5d4111a4a7665e50272035e90f836a2c60c2 | [
"MIT"
] | null | null | null |
from django.conf.global_settings import *
import os
# Test/development settings: DEBUG on and a placeholder secret key.
DEBUG = True
SECRET_KEY='xx'
# PROJECT_DIR: the package containing this file; BASE_DIR: its parent.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
REST_FRAMEWORK = {
    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 100
}
# Local SQLite database next to the repository root.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# Empty LOGGING_CONFIG disables Django's logging configuration entirely.
LOGGING_CONFIG = {}
LOGGING = {}
# NOTE(review): the string 'false' is truthy, so this actually SETS a
# script name of "false" rather than disabling it — confirm intent.
FORCE_SCRIPT_NAME = 'false'
# Wagtail core apps, DRF, Django contrib apps, and the wagtail_rest_pack
# apps under test.
INSTALLED_APPS = [
    'wagtail.contrib.forms',
    'wagtail.contrib.redirects',
    'wagtail.embeds',
    'wagtail.sites',
    'wagtail.users',
    'wagtail.snippets',
    'wagtail.documents',
    'wagtail.images',
    'wagtail.search',
    'wagtail.admin',
    'wagtail.core',
    'wagtail.api.v2',
    'rest_framework',
    'rest_framework_extensions',
    'rest_framework.authtoken',
    'taggit',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.sitemaps',
    'wagtail.contrib.modeladmin',
    'wagtail_rest_pack.sitesettings',
    'wagtail_rest_pack.page_banner',
    'wagtail_rest_pack.breadcrumps',
    'wagtail_rest_pack.comments',
    'wagtail_rest_pack.custom_tag',
] | 25.509091 | 82 | 0.686386 |
795bdd0ac0437674668da9ed8fdafa3a0d4b79f8 | 3,696 | py | Python | qdarkstyle/__init__.py | sommerc/QDarkStyleSheet | a81b3f62ea0b867bd83950a21db932c12dbef232 | [
"MIT"
] | 5 | 2016-10-19T21:18:49.000Z | 2021-06-13T00:20:33.000Z | qdarkstyle/__init__.py | Willib/QDarkStyleSheet | a81b3f62ea0b867bd83950a21db932c12dbef232 | [
"MIT"
] | 1 | 2016-02-15T17:33:11.000Z | 2016-02-15T17:33:46.000Z | qdarkstyle/__init__.py | Willib/QDarkStyleSheet | a81b3f62ea0b867bd83950a21db932c12dbef232 | [
"MIT"
] | 2 | 2016-02-06T18:12:53.000Z | 2020-10-22T12:23:40.000Z | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) <2013-2014> <Colin Duquesnoy>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Initialise the QDarkStyleSheet module when used with python.
This modules provides a function to transparently load the stylesheets
with the correct rc file.
"""
import logging
import platform
__version__ = "2.1"
def _logger():
return logging.getLogger('qdarkstyle')
def load_stylesheet(pyside=True):
    """
    Loads the stylesheet. Takes care of importing the rc module.
    :param pyside: True to load the pyside rc file, False to load the PyQt rc file
    :return the stylesheet string (empty string if the resource is missing)
    """
    # Smart import of the rc file
    # Importing the rc module registers the Qt resources as a side effect.
    if pyside:
        import qdarkstyle.pyside_style_rc
    else:
        import qdarkstyle.pyqt_style_rc
    # Load the stylesheet content from resources
    if not pyside:
        from PyQt4.QtCore import QFile, QTextStream
    else:
        from PySide.QtCore import QFile, QTextStream
    f = QFile(":qdarkstyle/style.qss")
    if not f.exists():
        _logger().error("Unable to load stylesheet, file not found in "
                        "resources")
        return ""
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        # macOS needs an extra dock-widget title rule appended.
        if platform.system().lower() == 'darwin': # see issue #12 on github
            mac_fix = '''
            QDockWidget::title
            {
                background-color: #353434;
                text-align: center;
                height: 12px;
            }
            '''
            stylesheet += mac_fix
        return stylesheet
def load_stylesheet_pyqt5():
    """
    Loads the stylesheet for use in a pyqt5 application.
    :param pyside: True to load the pyside rc file, False to load the PyQt rc file
    :return the stylesheet string (empty string if the resource is missing)
    """
    # Smart import of the rc file
    # Importing the rc module registers the Qt resources as a side effect.
    import qdarkstyle.pyqt5_style_rc
    # Load the stylesheet content from resources
    from PyQt5.QtCore import QFile, QTextStream
    f = QFile(":qdarkstyle/style.qss")
    if not f.exists():
        _logger().error("Unable to load stylesheet, file not found in "
                        "resources")
        return ""
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        # macOS needs an extra dock-widget title rule appended.
        if platform.system().lower() == 'darwin': # see issue #12 on github
            mac_fix = '''
            QDockWidget::title
            {
                background-color: #353434;
                text-align: center;
                height: 12px;
            }
            '''
            stylesheet += mac_fix
        return stylesheet
| 31.322034 | 82 | 0.643939 |
795bde105c5964dec293f400b7bcd53c1277cbf0 | 275 | py | Python | examples/example.py | flavray/pyo3avro-rs | 00528858f04a21162a5b186a53ca95e8876db469 | [
"MIT"
] | 3 | 2020-07-09T14:39:39.000Z | 2021-03-19T18:11:06.000Z | examples/example.py | gaborbernat/pyo3avro-rs | 30824e8c703a43df79c3eb7908c29e5673a2844e | [
"MIT"
] | 20 | 2019-04-17T14:19:45.000Z | 2019-06-10T09:50:40.000Z | examples/example.py | gaborbernat/pyo3avro-rs | 30824e8c703a43df79c3eb7908c29e5673a2844e | [
"MIT"
] | 4 | 2019-04-19T13:49:46.000Z | 2021-03-19T17:05:08.000Z | from pyo3avro_rs import Schema
def run(): # type: () -> None
    """Round-trip a string value through an Avro schema and verify it."""
    avro_schema = Schema('{"type": "string"}')
    encoded = avro_schema.write("some-text")
    decoded = avro_schema.read(encoded)
    print(decoded)
    assert decoded == "some-text"
if __name__ == "__main__":
run()
| 17.1875 | 41 | 0.607273 |
795bde77b7688bedc520c1ffbac25b1a4d345295 | 10,723 | py | Python | platformio/commands/home/rpc/handlers/project.py | adlerweb/platformio-core | cd8dc24454176d05ab1360cb51a32b40f1fa7e7f | [
"Apache-2.0"
] | null | null | null | platformio/commands/home/rpc/handlers/project.py | adlerweb/platformio-core | cd8dc24454176d05ab1360cb51a32b40f1fa7e7f | [
"Apache-2.0"
] | null | null | null | platformio/commands/home/rpc/handlers/project.py | adlerweb/platformio-core | cd8dc24454176d05ab1360cb51a32b40f1fa7e7f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import shutil
import time
from os.path import (basename, expanduser, getmtime, isdir, isfile, join,
realpath, sep)
import jsonrpc # pylint: disable=import-error
from platformio import exception, fs
from platformio.commands.home.rpc.handlers.app import AppRPC
from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC
from platformio.compat import PY2, get_filesystem_encoding
from platformio.ide.projectgenerator import ProjectGenerator
from platformio.managers.platform import PlatformManager
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (get_project_libdeps_dir,
get_project_src_dir,
is_platformio_project)
class ProjectRPC(object):
@staticmethod
def _get_projects(project_dirs=None):
def _get_project_data(project_dir):
data = {"boards": [], "envLibdepsDirs": [], "libExtraDirs": []}
config = ProjectConfig(join(project_dir, "platformio.ini"))
libdeps_dir = get_project_libdeps_dir()
data['libExtraDirs'].extend(
config.get("platformio", "lib_extra_dirs", []))
for section in config.sections():
if not section.startswith("env:"):
continue
data['envLibdepsDirs'].append(join(libdeps_dir, section[4:]))
if config.has_option(section, "board"):
data['boards'].append(config.get(section, "board"))
data['libExtraDirs'].extend(
config.get(section, "lib_extra_dirs", []))
# skip non existing folders and resolve full path
for key in ("envLibdepsDirs", "libExtraDirs"):
data[key] = [
expanduser(d) if d.startswith("~") else realpath(d)
for d in data[key] if isdir(d)
]
return data
def _path_to_name(path):
return (sep).join(path.split(sep)[-2:])
if not project_dirs:
project_dirs = AppRPC.load_state()['storage']['recentProjects']
result = []
pm = PlatformManager()
for project_dir in project_dirs:
data = {}
boards = []
try:
with fs.cd(project_dir):
data = _get_project_data(project_dir)
except exception.PlatformIOProjectException:
continue
for board_id in data.get("boards", []):
name = board_id
try:
name = pm.board_config(board_id)['name']
except exception.PlatformioException:
pass
boards.append({"id": board_id, "name": name})
result.append({
"path":
project_dir,
"name":
_path_to_name(project_dir),
"modified":
int(getmtime(project_dir)),
"boards":
boards,
"envLibStorages": [{
"name": basename(d),
"path": d
} for d in data.get("envLibdepsDirs", [])],
"extraLibStorages": [{
"name": _path_to_name(d),
"path": d
} for d in data.get("libExtraDirs", [])]
})
return result
def get_projects(self, project_dirs=None):
return self._get_projects(project_dirs)
@staticmethod
def get_project_examples():
result = []
for manifest in PlatformManager().get_installed():
examples_dir = join(manifest['__pkg_dir'], "examples")
if not isdir(examples_dir):
continue
items = []
for project_dir, _, __ in os.walk(examples_dir):
project_description = None
try:
config = ProjectConfig(join(project_dir, "platformio.ini"))
config.validate(silent=True)
project_description = config.get("platformio",
"description")
except exception.PlatformIOProjectException:
continue
path_tokens = project_dir.split(sep)
items.append({
"name":
"/".join(path_tokens[path_tokens.index("examples") + 1:]),
"path":
project_dir,
"description":
project_description
})
result.append({
"platform": {
"title": manifest['title'],
"version": manifest['version']
},
"items": sorted(items, key=lambda item: item['name'])
})
return sorted(result, key=lambda data: data['platform']['title'])
def init(self, board, framework, project_dir):
assert project_dir
state = AppRPC.load_state()
if not isdir(project_dir):
os.makedirs(project_dir)
args = ["init", "--board", board]
if framework:
args.extend(["--project-option", "framework = %s" % framework])
if (state['storage']['coreCaller'] and state['storage']['coreCaller']
in ProjectGenerator.get_supported_ides()):
args.extend(["--ide", state['storage']['coreCaller']])
d = PIOCoreRPC.call(args, options={"cwd": project_dir})
d.addCallback(self._generate_project_main, project_dir, framework)
return d
@staticmethod
def _generate_project_main(_, project_dir, framework):
main_content = None
if framework == "arduino":
main_content = "\n".join([
"#include <Arduino.h>",
"",
"void setup() {",
" // put your setup code here, to run once:",
"}",
"",
"void loop() {",
" // put your main code here, to run repeatedly:",
"}"
""
]) # yapf: disable
elif framework == "mbed":
main_content = "\n".join([
"#include <mbed.h>",
"",
"int main() {",
"",
" // put your setup code here, to run once:",
"",
" while(1) {",
" // put your main code here, to run repeatedly:",
" }",
"}",
""
]) # yapf: disable
if not main_content:
return project_dir
with fs.cd(project_dir):
src_dir = get_project_src_dir()
main_path = join(src_dir, "main.cpp")
if isfile(main_path):
return project_dir
if not isdir(src_dir):
os.makedirs(src_dir)
with open(main_path, "w") as f:
f.write(main_content.strip())
return project_dir
def import_arduino(self, board, use_arduino_libs, arduino_project_dir):
board = str(board)
if arduino_project_dir and PY2:
arduino_project_dir = arduino_project_dir.encode(
get_filesystem_encoding())
# don't import PIO Project
if is_platformio_project(arduino_project_dir):
return arduino_project_dir
is_arduino_project = any([
isfile(
join(arduino_project_dir,
"%s.%s" % (basename(arduino_project_dir), ext)))
for ext in ("ino", "pde")
])
if not is_arduino_project:
raise jsonrpc.exceptions.JSONRPCDispatchException(
code=4000,
message="Not an Arduino project: %s" % arduino_project_dir)
state = AppRPC.load_state()
project_dir = join(state['storage']['projectsDir'],
time.strftime("%y%m%d-%H%M%S-") + board)
if not isdir(project_dir):
os.makedirs(project_dir)
args = ["init", "--board", board]
args.extend(["--project-option", "framework = arduino"])
if use_arduino_libs:
args.extend([
"--project-option",
"lib_extra_dirs = ~/Documents/Arduino/libraries"
])
if (state['storage']['coreCaller'] and state['storage']['coreCaller']
in ProjectGenerator.get_supported_ides()):
args.extend(["--ide", state['storage']['coreCaller']])
d = PIOCoreRPC.call(args, options={"cwd": project_dir})
d.addCallback(self._finalize_arduino_import, project_dir,
arduino_project_dir)
return d
@staticmethod
def _finalize_arduino_import(_, project_dir, arduino_project_dir):
with fs.cd(project_dir):
src_dir = get_project_src_dir()
if isdir(src_dir):
fs.rmtree(src_dir)
shutil.copytree(arduino_project_dir, src_dir)
return project_dir
    @staticmethod
    def import_pio(project_dir):
        """Copy an existing PlatformIO project into the projects storage.

        Re-runs ``pio init`` in the copy (optionally generating IDE files)
        and returns a Deferred firing with the new project directory.

        :raises jsonrpc.exceptions.JSONRPCDispatchException: code 4001 when
            *project_dir* is missing or not a PlatformIO project
        """
        if not project_dir or not is_platformio_project(project_dir):
            raise jsonrpc.exceptions.JSONRPCDispatchException(
                code=4001,
                message="Not an PlatformIO project: %s" % project_dir)
        # Timestamp prefix keeps multiple imports of the same project apart.
        new_project_dir = join(
            AppRPC.load_state()['storage']['projectsDir'],
            time.strftime("%y%m%d-%H%M%S-") + basename(project_dir))
        shutil.copytree(project_dir, new_project_dir)
        state = AppRPC.load_state()
        args = ["init"]
        # Generate IDE integration files when a known IDE invoked the app.
        if (state['storage']['coreCaller'] and state['storage']['coreCaller']
                in ProjectGenerator.get_supported_ides()):
            args.extend(["--ide", state['storage']['coreCaller']])
        d = PIOCoreRPC.call(args, options={"cwd": new_project_dir})
        d.addCallback(lambda _: new_project_dir)
        return d
| 38.571942 | 79 | 0.542852 |
795bdf5ad5035e8187aa2245eb08d4b8357af8a2 | 10,387 | py | Python | genver/pybin/logs.py | vhnatyk/vlsistuff | 0981097bd19a0c482728dcc5048a3615ac9a9a90 | [
"MIT"
] | 26 | 2018-03-17T18:14:22.000Z | 2022-03-14T07:23:13.000Z | genver/pybin/logs.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2019-10-16T10:31:11.000Z | 2019-10-17T04:14:53.000Z | genver/pybin/logs.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 7 | 2018-07-16T07:51:25.000Z | 2022-02-15T14:22:54.000Z |
import sys,types,string
Errors = 0
Corrects = 0
Wrongs = 0
Warnings = 0
print_debug_messages=0
MAXWRONGS = 2000
MAXERRORS = 2000
PYMONLOG = 'pymon.log'
import time
printed_already={}
Flog = False
Flog2 = False
Flog3 = False
Flog4 = False
try:
import veri
except:
import fakeVeri as veri
finishCycles = 0
def get_cycles():
Now = peek('tb.cycles')
if (finishCycles>0)and(finishCycles<=Now):
veri.finish()
return Now
def please_print_debugs():
global print_debug_messages
print_debug_messages=1
def log_time(Why):
log_info('info: %s time=%s'%(Why,time.ctime()))
def log_fatal(Text):
print 'FATAL error!! %s'%(Text)
log_ending('from fatal')
sys.exit()
def log_error(Text,Tb=True):
log_err(Text,Tb)
def log_err(Text,Tb=True):
    """Record an error in pymon.log and stdout; abort past MAXERRORS.

    Tb=False skips mirroring the error count into the testbench signal.
    """
    global Errors,printed_already,Flog
    # Lazily open the log file on first use.
    if (not Flog):
        Flog=open(PYMONLOG,'w')
    Errors +=1
    Flog.write('@%d: %d ERROR: %s\n'%(get_cycles(),Errors,Text))
    if Tb:
        veri.force('tb.errors',str(Errors))
    if (Errors>MAXERRORS):
        log_info('max errors reached (%d). bailing out. (MAXERRORS==%d)'%(Errors,MAXERRORS))
        veri.finish()
        sys.exit() # in icarus, sometimes finish doesnt catch
    # Each distinct message is echoed to stdout only once; the log file
    # above still records every occurrence.
    if (Text in printed_already):
        return
    printed_already[Text]=1
    print '@%d: %d: ERROR: %s'%(get_cycles(),Errors,Text)
def log_correct(Text,Print=True):
global Corrects,Flog
if (not Flog):
Flog=open(PYMONLOG,'w')
Corrects += 1
veri.force('tb.corrects',str(Corrects))
if Print:
print '@%d: %d vs %d (err=%d) CORRECT: %s'%(get_cycles(),Corrects,Wrongs,Errors,Text)
Flog.write('@%d: %d vs %d (err=%d) CORRECT: %s\n'%(get_cycles(),Corrects,Wrongs,Errors,Text))
def log_ensure(Cond,Text):
if Cond:
log_correct(Text)
else:
log_wrong(Text)
def log_wrong(Text):
    """Record a WRONG comparison result; abort past MAXWRONGS."""
    global Wrongs,Flog
    Wrongs += 1
    # Mirror the running count into the testbench signal.
    veri.force('tb.wrongs',str(Wrongs))
    # Lazily open the log file on first use.
    if (not Flog):
        Flog=open(PYMONLOG,'w')
    print '@%d: %d vs %d (err=%d): WRONG: %s'%(get_cycles(),Wrongs,Corrects,Errors,Text)
    Flog.write('@%d: %d vs %d (err=%d): WRONG: %s\n'%(get_cycles(),Wrongs,Corrects,Errors,Text))
    if Wrongs >= MAXWRONGS:
        log_info('max wrongs reached (%d). bailing out. (MAXWRONGS==%d)'%(Wrongs,MAXWRONGS))
        veri.finish()
        sys.exit() # in icarus, sometimes finish doesnt catch
def finish(Text='.'):
print '@%d: wrongs=%d vs corrects=%d errors=%d warnings=%d: FINISHING on %s'%(get_cycles(),Wrongs,Corrects,Errors,Warnings,Text)
Flog.write('@%d: wrongs=%d vs corrects=%d errors=%d warnings=%d: FINISHING on %s'%(get_cycles(),Wrongs,Corrects,Errors,Warnings,Text))
veri.finish()
def log_warning(Text):
global Warnings,printed_already,Flog
if (Text in printed_already):
return
if (not Flog):
Flog=open(PYMONLOG,'w')
print '%d: warning: %s'%(Warnings,Text)
Flog.write('%d: warning: %s\n'%(Warnings,Text))
printed_already[Text]=1
Warnings +=1
def log_info(Text):
global Flog
if (not Flog):
Flog=open(PYMONLOG,'w')
print '@%d: info: %s'%(get_cycles(),Text)
Flog.write('@%d: info: %s\n'%(get_cycles(),Text))
def log_info2(Text):
global Flog2
if (not Flog2):
Flog2=open(PYMONLOG+'2','w')
Flog2.write('@%d: %s\n'%(get_cycles(),Text))
def log_info3(Text):
global Flog3
if (not Flog3):
Flog3=open(PYMONLOG+'3','w')
Flog3.write('@%d: %s\n'%(get_cycles(),Text))
def log_info4(Text):
global Flog4
if (not Flog4):
Flog4=open(PYMONLOG+'4','w')
Flog4.write('@%d: %s\n'%(get_cycles(),Text))
def log_dbg(Text):
if (print_debug_messages):
print 'dbg: ',Text
def log_ending(Who):
log_time('%s.py has %d errors, %d wrongs, %d corrects and %d warnings logged\n\n'%(Who,Errors,Wrongs,Corrects, Warnings))
if (Flog):
Flog.close()
return Errors
params={}
def get_param(Param,Default):
if Param in params:
return params[Param][0]
return Default
def parse_args():
global params
fnames=[]
state='idle'
for X in sys.argv[1:]:
if (state=='idle'):
if (X[0]=='-'):
state='param'
Param = X
else:
fnames += [X]
elif (state=='param'):
if (X[0]=='-'):
params[Param]='yes'
state='param'
Param = X
else:
state='idle'
if (Param in params):
params[Param] += [X]
else:
params[Param]=[X]
if (state=='param'):
params[Param]='yes'
params['fnames']=fnames
return params
def startsWith(Long,Short):
if type(Long)!=types.StringType: return False
if Short not in Long: return False
return Long.index(Short)==0
def intx(Val):
if type(Val)==types.IntType: return Val
if type(Val)==types.LongType: return Val
if type(Val)==types.FloatType: return Val
if Val=='-999': return -1
if 'x' in Val: return -1
if 'z' in Val: return -1
if 'q' in Val: return -1
return int(Val,2)
def peek(Sig):
V = intx(veri.peek(Sig))
return V
def peeksigned(Sig):
Str = veri.peek(Sig)
X = intx(Str)
if 'x' in Str: return 9999
if 'z' in Str: return 9999
if Str[0]=='0': return X
if len(Str)==1: return X
Base = 1<<len(Str)
Res = X-Base
return Res
def make_str(Int):
Y = bin(Int)
return Y
def bin8(Int):
    """Return Int as a binary string left-padded with zeros to 8 digits."""
    bits = bin(Int)[2:]
    if len(bits) < 8:
        bits = '0' * (8 - len(bits)) + bits
    return bits
def asciiForce(Sig,Str):
res = '0b'
for Chr in Str:
Ord = ord(Chr)
Bin = bin8(Ord)
res += Bin
veri.force(Sig,res)
def float2int(Float):
    """Convert a 32-bit IEEE-754 bit pattern (given as an int) to the
    integer value it encodes, truncating the fraction.

    Assumes a normalized value (implicit leading 1 mantissa bit);
    denormals are not handled specially — TODO confirm callers never
    pass them.
    """
    # -1 is this module's "unknown/x" marker (see intx()).
    if Float== -1: return -1
    # Anything with all exponent/mantissa bits clear is +/-0.0.
    if ((Float & 0x7fffffff)==0): return 0
    Bin = bin(Float)[2:]
    # Left-pad to the full 32-bit width so the field slices line up.
    if len(Bin)<32:
        Bin = (32-len(Bin))*'0'+Bin
    Sign = int(Bin[0],2)          # bit 31: sign
    Exp0 = int(Bin[1:9],2)        # bits 30..23: biased exponent
    Exp = Exp0-127                # remove IEEE-754 bias
    Mant = int('1'+Bin[9:],2)     # mantissa with implicit leading 1
    # Mantissa carries 23 fractional bits; shift to integer scale.
    Shift = Exp-23
    if Shift>0:
        Int = Mant<<Shift
    else:
        Int = Mant>>(-Shift)
    if Sign:
        Int = -Int
    return Int
def binary2float(Float):
if Float<0: return 0
if Float==0: return 0
Bin = bin(Float)[2:]
if len(Bin)<32:
Bin = (32-len(Bin))*'0'+Bin
Sign = int(Bin[0],2)
Exp0 = int(Bin[1:9],2)
Exp = Exp0-127
Mant = 1.0*int('1'+Bin[9:],2)
Shift = Exp-23
if Shift>0:
Int = Mant* (1<<Shift)
else:
Int = Mant/(1<< -Shift)
if Sign:
Int = -Int
return Int
def float2binary(X):
XX = int(X * (1<<24))
XXX = int2float2(XX)
Exp0 = (XXX & 0x7f800000)>>23
if Exp0<24:
return 0
Exp1 = Exp0 - 24
YYY = (XXX & 0x807fffff)| (Exp1<<23)
return YYY
def int2float2(Int):
if Int==0:
return 0
sign0 = int(Int<0)
Abs = int(abs(Int))
Bin = bin(Abs)[2:]
Len = len(Bin)
Mant = Bin[1:]
if len(Mant)>23:
Mant = Mant[:23]
elif len(Mant)<23:
Mant = Mant+'0'*(23-len(Mant))
Exp0 = Len-1
Exp = 0x7f+Exp0
bexp = '00000000'+bin(Exp)[2:]
Bexp = bexp[-8:]
Big = '0b'+str(sign0)+Bexp+Mant
return int(Big,2)
def panicFinish(Reason,ContinueFor=20):
global finishCycles
finishCycles = get_cycles()+ContinueFor
log_error('finishCycle activated because of "%s"'%(Reason))
def finishing(Txt='"not given"'):
log_info('finishing on %s'%Txt)
veri.finish()
Vars = {}
def incr(Who,By=1):
if Who not in Vars:
Vars[Who]=0
Vars[Who] += By
return Vars[Who]
def vars(varName,Value,Clear=False):
if Clear and (varName in Vars):
Vars.pop(varName)
if startsWith(varName,'list'):
if varName not in Vars:
Vars[varName]=[Value]
else:
Vars[varName].append(Value)
else:
Vars[varName]=Value
def gatherPanicNets(deepListFname):
File = open(deepListFname)
List=[]
while True:
line = File.readline()
if line=='': return List
wrds = string.split(line)
if len(wrds)>0:
if wrds[0]=='module:':
Path = wrds[2]
elif wrds[0]=='net:':
Net = wrds[1]
if '.' in Net:
www = string.split(Net,'.')
Last = www[-1]
if startsWith(Last,'panic'):
List.append(Net)
else:
if startsWith(Net,'panic'):
List.append('%s.%s'%(Path,Net))
def getValues(Path,Str,Hex=True):
wrds = string.split(string.replace(Str,',',' '))
res = ''
resi = []
resd = {}
for wrd in wrds:
Val = peek('%s.%s'%(Path,wrd))
resi.append(Val)
resd[wrd]=Val
if Hex=='float':
res += ' %s=%f'%(wrd,binary2float(Val))
resd[wrd]=binary2float(Val)
elif Hex:
res += ' %s=%x'%(wrd,Val)
else:
res += ' %s=%d'%(wrd,Val)
return res,resi,resd
def parseVars(Str):
wrds = string.split(Str)
Res={}
for wrd in wrds:
if '=' in wrd:
ww = string.split(wrd,'=')
if len(ww)==2:
Res[ww[0]]=ww[1]
return Res
def countOnes(Val):
    """Return the number of set bits in Val (popcount)."""
    # str.count runs in C and replaces the manual per-character loop.
    # Counting over the full bin() string (not [2:]) is safe because the
    # '0b'/'-0b' prefix contains no '1' characters, so the result matches
    # the original implementation for every int, including negatives.
    return bin(Val).count('1')
def closeEnough(AA,BB,Epsilon=0.0001):
if AA==BB: return True
AA0 = abs(AA)*Epsilon
Diff = abs(AA-BB)
if Diff<Epsilon: return True
if (abs(BB)<Epsilon)and(abs(AA)<Epsilon): return True
if (abs(BB)<Epsilon)and(abs(AA)>Epsilon): return False
if (abs(BB)>Epsilon)and(abs(AA)<Epsilon): return False
Diff = abs(abs(AA/BB)-1.0)
if Diff<Epsilon: return True
return False
def fixedp(Int,Shift):
    """Interpret Int as a fixed-point value with Shift fractional bits."""
    return float(Int) / (1 << Shift)
def bin2str(Bin):
    """Decode a bit string into ASCII text, one byte at a time.

    Bytes are consumed from the least-significant (rightmost) end and
    prepended to the result.  Decoding stops early on an undriven byte
    (one containing 'x') or on a NUL byte; a trailing partial byte is
    ignored.
    """
    text = ''
    while len(Bin) >= 8:
        byte, Bin = Bin[-8:], Bin[:-8]
        if 'x' in byte:
            break
        value = int(byte, 2)
        if value == 0:
            break
        text = chr(value) + text
    return text
print '>>>verification_logs loaded'
| 22.980088 | 138 | 0.541831 |
795bdfb63f14512bd16ea32151978d6c45270125 | 589 | py | Python | apps/assets/models_new/host.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/assets/models_new/host.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/assets/models_new/host.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | from django.db import models
from apps.assets.models.base import BaseAsset
class Host(BaseAsset):
    # Address of the host (IPv4 or IPv6); unique per project (see Meta).
    ip = models.GenericIPAddressField()
    # Optional free-form operating system label.
    operating_system = models.CharField(max_length=256, blank=True)
    # Reachability flag; defaults to online.
    is_online = models.BooleanField(default=True)
    class Meta:
        ordering = ['ip']
        # 'project' is presumably declared on BaseAsset — one row per
        # (project, ip) pair. TODO confirm against base model.
        unique_together = [
            ('project', 'ip')
        ]
class Hostname(BaseAsset):
    # DNS name attached to a host; unique per project (see Meta).
    name = models.CharField(max_length=256)
    # PROTECT blocks deleting a Host that still has hostnames attached.
    host = models.ForeignKey(Host, on_delete=models.PROTECT)
    class Meta:
        unique_together = [
            ('project', 'name')
        ]
795bdfec92b8b23dc92b553c43c537bb18a373e1 | 951 | py | Python | 14B-088/new_vs_archival_mom0.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | 1 | 2021-03-08T23:19:12.000Z | 2021-03-08T23:19:12.000Z | 14B-088/new_vs_archival_mom0.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null | 14B-088/new_vs_archival_mom0.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null |
from aplpy import FITSFigure
import os
import matplotlib.pyplot as mpl
'''
Show the moment 0 of the archival against the moment 0 from 14B-088
'''
fig = mpl.figure(figsize=(15, 7))
mom0_file = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits"
f1 = FITSFigure(mom0_file, figure=fig, subplot=[0.1,0.1,0.35,0.8])
# f1.set_tick_labels_font(size='x-small')
# f1.set_axis_labels_font(size='small')
f1.show_grayscale()
f1.hide_xaxis_label()
f1.hide_xtick_labels()
f1.hide_yaxis_label()
f1.hide_ytick_labels()
mom0_archival_file = "/media/eric/Data_3/M33/VLA_Data/AT0206/imaging/M33_206_b_c_HI.mom0.fits"
f2 = FITSFigure(mom0_archival_file, figure=fig, subplot=[0.5,0.1,0.35,0.8])
# f2.set_tick_labels_font(size='x-small')
# f2.set_axis_labels_font(size='small')
f2.show_grayscale()
f2.hide_xaxis_label()
f2.hide_xtick_labels()
f2.hide_yaxis_label()
f2.hide_ytick_labels()
fig.canvas.draw()
| 24.384615 | 117 | 0.768665 |
795be018752e2302745c4643e7dbe222ffb9310a | 893 | py | Python | pytdx/parser/get_security_count.py | AtlantixJJ/vnpy | 28992c7d5391f6dd42a14b481d01ceafde048b5f | [
"MIT"
] | 13 | 2019-06-07T04:34:09.000Z | 2022-03-21T07:46:01.000Z | pytdx/parser/get_security_count.py | AtlantixJJ/vnpy | 28992c7d5391f6dd42a14b481d01ceafde048b5f | [
"MIT"
] | 1 | 2020-04-21T02:42:32.000Z | 2020-04-21T02:42:32.000Z | venv/lib/python3.7/site-packages/pytdx/parser/get_security_count.py | CatTiger/vnpy | 7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b | [
"MIT"
] | 2 | 2021-07-08T03:44:41.000Z | 2021-09-15T00:41:19.000Z | # coding=utf-8
from pytdx.parser.base import BaseParser
from pytdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
"""
获取股票数量 深市
发送
0c 0c 18 6c 00 01 08 00 08 00 4e 04 00 00 75 c7 33 01
接收
Bc cb 74 00 0c 0c 18 6c 00 00 4e 04 02 00 02 00 e7 19
In [61]: 0x19e7
Out[61]: 6631
沪市
发送
0c 0c 18 6c 00 01 08 00 08 00 4e 04 01 00 75 c7 33 01
接收
Bc cb 74 00 0c 0c 18 6c 00 00 4e 04 02 00 02 00 b3 33
In [63]: 0x333b
Out[63]: 13115
"""
class GetSecurityCountCmd(BaseParser):
    """Build and parse the TDX "get security count" request.

    Per the protocol dump in the module docstring above, market 0 is
    Shenzhen and market 1 is Shanghai.
    """
    def setParams(self, market):
        # Fixed request header bytes, then the market id as uint16 LE,
        # then a constant trailer (matches the captured packets above).
        pkg = bytearray.fromhex(u"0c 0c 18 6c 00 01 08 00 08 00 4e 04")
        market_pkg = struct.pack("<H", market)
        pkg.extend(market_pkg)
        pkg.extend(b'\x75\xc7\x33\x01')
        self.send_pkg = pkg
    def parseResponse(self, body_buf):
        # Response body starts with the security count as uint16 LE.
        (num, ) = struct.unpack("<H", body_buf[:2])
        return num
| 18.604167 | 71 | 0.657335 |
795be05e74856294f020e215408b104d1871f2d7 | 525 | py | Python | atest/testdata/keywords/named_args/KwargsLibrary.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | atest/testdata/keywords/named_args/KwargsLibrary.py | imust6226/robotframework | 08c56fef2ebc64d682c7f99acd77c480d8d0e028 | [
"ECL-2.0",
"Apache-2.0"
] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | atest/testdata/keywords/named_args/KwargsLibrary.py | 3mdeb/robotframework | 6006ce0b3d5fc6b45c5eb040dc859acd64bfa846 | [
"ECL-2.0",
"Apache-2.0"
] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z | class KwargsLibrary:
def one_named(self, named=None):
return named
def two_named(self, fst=None, snd=None):
return '%s, %s' % (fst, snd)
def four_named(self, a=None, b=None, c=None, d=None):
return '%s, %s, %s, %s' % (a, b, c, d)
def mandatory_and_named(self, a, b, c=None):
return '%s, %s, %s' % (a, b, c)
def mandatory_named_and_varargs(self, mandatory, d1=None, d2=None, *varargs):
return '%s, %s, %s, %s' % (mandatory, d1, d2, '[%s]' % ', '.join(varargs))
| 30.882353 | 82 | 0.554286 |
795be1d7071517d0097bcefe091a5c131e9b7d04 | 11,619 | py | Python | pyzoo/zoo/examples/ray/rl_pong/rl_pong.py | SteNicholas/analytics-zoo | 2967e74427855cb0f3d60b5c298343976bb0d23e | [
"Apache-2.0"
] | 1 | 2020-08-10T03:08:41.000Z | 2020-08-10T03:08:41.000Z | pyzoo/zoo/examples/ray/rl_pong/rl_pong.py | SteNicholas/analytics-zoo | 2967e74427855cb0f3d60b5c298343976bb0d23e | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/examples/ray/rl_pong/rl_pong.py | SteNicholas/analytics-zoo | 2967e74427855cb0f3d60b5c298343976bb0d23e | [
"Apache-2.0"
] | 2 | 2020-05-14T09:56:35.000Z | 2020-05-17T13:38:33.000Z | # This file is adapted from https://github.com/ray-project/ray/blob/master
# /examples/rl_pong/driver.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import numpy as np
import os
import ray
import time
import gym
from zoo import init_spark_on_yarn, init_spark_on_local
from zoo.ray.util.raycontext import RayContext
os.environ["LANG"] = "C.UTF-8"
# Define some hyperparameters.
# The number of hidden layer neurons.
H = 200
learning_rate = 1e-4
# Discount factor for reward.
gamma = 0.99
# The decay factor for RMSProp leaky sum of grad^2.
decay_rate = 0.99
# The input dimensionality: 80x80 grid.
D = 80 * 80
def sigmoid(x):
    """Logistic squashing function mapping x into the interval (0, 1)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
def preprocess(img):
    """Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) 1D float vector.

    Crops the score area, downsamples by 2, erases the two background
    palette values, and binarizes everything else (paddles, ball) to 1.
    """
    # Crop the playing field.
    img = img[35:195]
    # Downsample by factor of 2, keeping only the first color channel.
    img = img[::2, ::2, 0]
    # Erase background (background type 1).
    img[img == 144] = 0
    # Erase background (background type 2).
    img[img == 109] = 0
    # Set everything else (paddles, ball) to 1.
    img[img != 0] = 1
    # `np.float` was a deprecated alias removed in NumPy 1.24; the builtin
    # float produces the same float64 array.
    return img.astype(float).ravel()
def discount_rewards(r):
    """take 1D float array of rewards and compute discounted reward"""
    # Uses the module-level discount factor `gamma` (0.99).
    discounted_r = np.zeros_like(r)
    running_add = 0
    # Walk the episode backwards, accumulating gamma-discounted returns.
    for t in reversed(range(0, r.size)):
        # Reset the sum, since this was a game boundary (pong specific!).
        if r[t] != 0:
            running_add = 0
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r
# defines the policy network
# x is a vector that holds the preprocessed pixel information
def policy_forward(x, model):
    """Forward pass: probability of moving UP plus the hidden activations.

    :param x: preprocessed difference frame (length-D vector)
    :param model: dict with "W1" (H x D) and "W2" (H,) weight arrays
    :return: (p, h) — probability of action 2 (UP) and hidden state
    """
    # neurons in the hidden layer (W1) can detect various game senarios
    h = np.dot(model["W1"], x)  # compute hidden layer neuron activations
    h[h < 0] = 0  # ReLU nonlinearity. threhold at zero
    # weights in W2 can then decide if each case we should go UP or DOWN
    logp = np.dot(model["W2"], h)  # compuate the log probability of going up
    p = sigmoid(logp)
    # Return probability of taking action 2, and hidden state.
    return p, h
def policy_backward(eph, epx, epdlogp, model):
    """Backward pass: gradients of W1 and W2 over one episode.

    :param eph: per-step hidden activations (T x H)
    :param epx: per-step inputs (T x D)
    :param epdlogp: per-step advantage-modulated log-prob gradients (T x 1)
    :param model: only model["W2"] is read
    :return: dict with "W1" and "W2" gradient arrays
    """
    grad_w2 = eph.T.dot(epdlogp).ravel()
    hidden_grad = np.outer(epdlogp, model["W2"])
    # Backprop through the ReLU: units that were inactive pass no gradient.
    hidden_grad[eph <= 0] = 0
    grad_w1 = hidden_grad.T.dot(epx)
    return {"W1": grad_w1, "W2": grad_w2}
@ray.remote
class PongEnv(object):
def __init__(self):
# Tell numpy to only use one core. If we don't do this, each actor may
# try to use all of the cores and the resulting contention may result
# in no speedup over the serial version. Note that if numpy is using
# OpenBLAS, then you need to set OPENBLAS_NUM_THREADS=1, and you
# probably need to do it from the command line (so it happens before
# numpy is imported).
os.environ["MKL_NUM_THREADS"] = "1"
self.env = gym.make("Pong-v0")
def compute_gradient(self, model):
# model = {'W1':W1, 'W2':W2}
# given a model, run for one episode and return the parameter
# to be updated and sum(reward)
# Reset the game.
observation = self.env.reset()
# Note that prev_x is used in computing the difference frame.
prev_x = None
xs, hs, dlogps, drs = [], [], [], []
reward_sum = 0
done = False
while not done:
cur_x = preprocess(observation)
x = cur_x - prev_x if prev_x is not None else np.zeros(D)
prev_x = cur_x
# feed difference frames into the network
# so that it can detect motion
aprob, h = policy_forward(x, model)
# Sample an action.
action = 2 if np.random.uniform() < aprob else 3
# The observation.
xs.append(x)
# The hidden state.
hs.append(h)
y = 1 if action == 2 else 0 # A "fake label".
# The gradient that encourages the action that was taken to be
# taken (see http://cs231n.github.io/neural-networks-2/#losses if
# confused).
dlogps.append(y - aprob)
observation, reward, done, info = self.env.step(action)
reward_sum += reward
# Record reward (has to be done after we call step() to get reward
# for previous action).
drs.append(reward)
epx = np.vstack(xs)
eph = np.vstack(hs)
epdlogp = np.vstack(dlogps)
epr = np.vstack(drs)
# Reset the array memory.
xs, hs, dlogps, drs = [], [], [], []
# Compute the discounted reward backward through time.
discounted_epr = discount_rewards(epr)
# Standardize the rewards to be unit normal (helps control the gradient
# estimator variance).
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
# Modulate the gradient with advantage (the policy gradient magic
# happens right here).
epdlogp *= discounted_epr
return policy_backward(eph, epx, epdlogp, model), reward_sum
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train an RL agent")
parser.add_argument("--hadoop_conf", type=str,
help="turn on yarn mode by passing the hadoop path"
"configuration folder. Otherwise, turn on local mode.")
parser.add_argument("--batch_size", default=10, type=int,
help="The number of roll-outs to do per batch.")
parser.add_argument("--iterations", default=-1, type=int,
help="The number of model updates to perform. By "
"default, training will not terminate.")
parser.add_argument("--conda_name", type=str,
help="The name of conda environment.")
parser.add_argument("--slave_num", type=int, default=2,
help="The number of slave nodes")
parser.add_argument("--executor_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--executor_memory", type=str, default="10g",
help="The size of slave(executor)'s memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_memory", type=str, default="2g",
help="The size of driver's memory you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--driver_cores", type=int, default=8,
help="The number of driver's cpu cores you want to use."
"You can change it depending on your own cluster setting.")
parser.add_argument("--extra_executor_memory_for_ray", type=str, default="20g",
help="The extra executor memory to store some data."
"You can change it depending on your own cluster setting.")
parser.add_argument("--object_store_memory", type=str, default="4g",
help="The memory to store data on local."
"You can change it depending on your own cluster setting.")
args = parser.parse_args()
if args.hadoop_conf:
sc = init_spark_on_yarn(
hadoop_conf=args.hadoop_conf,
conda_name=args.conda_name,
num_executor=args.slave_num,
executor_cores=args.executor_cores,
executor_memory=args.executor_memory,
driver_memory=args.driver_memory,
driver_cores=args.driver_cores,
extra_executor_memory_for_ray=args.extra_executor_memory_for_ray)
ray_ctx = RayContext(
sc=sc,
object_store_memory=args.object_store_memory)
else:
sc = init_spark_on_local(cores=args.driver_cores)
ray_ctx = RayContext(sc=sc, object_store_memory=args.object_store_memory)
ray_ctx.init()
batch_size = args.batch_size
# Run the reinforcement learning.
running_reward = None
batch_num = 1
model = {}
# "Xavier" initialization.
model["W1"] = np.random.randn(H, D) / np.sqrt(D)
model["W2"] = np.random.randn(H) / np.sqrt(H)
# Update buffers that add up gradients over a batch.
grad_buffer = {k: np.zeros_like(v) for k, v in model.items()}
# Update the rmsprop memory.
rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()}
actors = [PongEnv.remote() for _ in range(batch_size)]
iteration = 0
while iteration != args.iterations:
iteration += 1
model_id = ray.put(model)
actions = []
# Launch tasks to compute gradients from multiple rollouts in parallel.
start_time = time.time()
# run rall_out for batch_size times
for i in range(batch_size):
# compute_gradient returns two variables, so action_id is a list
action_id = actors[i].compute_gradient.remote(model_id)
actions.append(action_id)
for i in range(batch_size):
# wait for one actor to finish its operation
# action_id is the ready object id
action_id, actions = ray.wait(actions)
grad, reward_sum = ray.get(action_id[0])
# Accumulate the gradient of each weight parameter over batch.
for k in model:
grad_buffer[k] += grad[k]
running_reward = (reward_sum if running_reward is None else
running_reward * 0.99 + reward_sum * 0.01)
end_time = time.time()
print("Batch {} computed {} rollouts in {} seconds, "
"running mean is {}".format(batch_num, batch_size,
end_time - start_time,
running_reward))
# update gradient after one iteration
for k, v in model.items():
g = grad_buffer[k]
rmsprop_cache[k] = (
decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2)
model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
# Reset the batch gradient buffer.
grad_buffer[k] = np.zeros_like(v)
batch_num += 1
ray_ctx.stop()
sc.stop()
| 41.056537 | 88 | 0.615113 |
795be1fcf93f91eb5ba056c4e7dcb2fce581dd5e | 7,297 | py | Python | microcosm_logging/factories.py | globality-corp/microcosm-logging | 516a28b6aa1a5662c44357c1295ef6cf42693401 | [
"Apache-2.0"
] | 2 | 2018-09-20T01:38:55.000Z | 2020-06-24T16:57:36.000Z | microcosm_logging/factories.py | globality-corp/microcosm-logging | 516a28b6aa1a5662c44357c1295ef6cf42693401 | [
"Apache-2.0"
] | 26 | 2016-03-16T20:27:24.000Z | 2018-11-08T22:59:58.000Z | microcosm_logging/factories.py | globality-corp/microcosm-logging | 516a28b6aa1a5662c44357c1295ef6cf42693401 | [
"Apache-2.0"
] | 3 | 2016-12-19T22:40:29.000Z | 2019-03-17T03:46:23.000Z | """
Factory that configures logging.
"""
from logging import (
CRITICAL,
getLogger,
getLogRecordFactory,
setLogRecordFactory,
)
from logging.config import dictConfig
from os import environ
from typing import Dict
from microcosm.api import defaults, typed
@defaults(
default_format="{asctime} - {name} - [{levelname}] - {message}",
json_required_keys="%(asctime)s - %(name)s - %(filename)s - %(levelname)s - %(levelno) - %(message)s",
https_handler=dict(
class_="loggly.handlers.HTTPSHandler",
),
json_formatter=dict(
formatter="pythonjsonlogger.jsonlogger.JsonFormatter",
),
# default log level is INFO
level="INFO",
# tune down some common libraries
levels=dict(
default=dict(
debug=[],
info=[
"boto",
"newrelic",
],
warn=[
"aws_encryption_sdk",
"botocore.vendored.requests",
"bravado_core",
"requests",
"swagger_spec_validator",
],
error=[
"bravado.requests_client",
"FuturesSession",
],
),
override=dict(
debug=[],
info=[],
warn=[],
error=[],
),
bump=dict(),
),
# loggly
loggly=dict(
base_url="https://logs-01.loggly.com",
),
# logstash
logstash=dict(
enabled=typed(bool, default_value=False),
host="localhost",
port=5959,
),
# configure stream handler
stream_handler=dict(
class_="logging.StreamHandler",
formatter="ExtraFormatter",
stream="ext://sys.stdout",
),
)
def configure_logging(graph):
"""
Configure logging using a constructed dictionary config.
- Tunes logging levels.
- Sets up console and, if not in debug, loggly output.
:returns: a logger instance of the configured name
"""
dict_config = make_dict_config(graph)
dictConfig(dict_config)
return True
def configure_logger(graph):
"""
Configure application logger.
"""
graph.use("logging")
return getLogger(graph.metadata.name)
def enable_loggly(graph):
    """Report whether loggly output should be enabled.

    Loggly is used only outside debug/testing, and only when both a
    token and an environment are configured.
    """
    metadata = graph.metadata
    if metadata.debug or metadata.testing:
        return False
    try:
        loggly = graph.config.logging.loggly
        return bool(loggly.token and loggly.environment)
    except AttributeError:
        # Loggly configuration is absent entirely.
        return False
def make_dict_config(graph):
"""
Build a dictionary configuration from conventions and configuration.
"""
formatters = {}
handlers = {}
loggers = {}
# create formatters
formatters["ExtraFormatter"] = make_extra_console_formatter(graph)
formatters["JSONFormatter"] = make_json_formatter(graph)
# create the console handler with the configured formatter
handlers["console"] = make_stream_handler(graph, formatter=graph.config.logging.stream_handler.formatter)
# maybe create the loggly handler
if enable_loggly(graph):
handlers["LogglyHTTPSHandler"] = make_loggly_handler(graph, formatter="JSONFormatter")
# create the logstash handler only if explicitly configured
if graph.config.logging.logstash.enabled:
handlers["LogstashHandler"] = make_logstash_handler(graph)
# configure the root logger to output to all handlers
loggers[""] = {
"handlers": handlers.keys(),
"level": graph.config.logging.level,
}
# set log levels for libraries
loggers.update(make_library_levels(graph))
bump_library_levels(graph)
return dict(
version=1,
disable_existing_loggers=False,
formatters=formatters,
handlers=handlers,
loggers=loggers,
)
def make_json_formatter(graph):
    """Build the dictConfig entry for the JSON formatter."""
    logging_config = graph.config.logging
    return {
        "()": logging_config.json_formatter.formatter,
        "fmt": logging_config.json_required_keys,
    }
def make_extra_console_formatter(graph):
"""
Create the default console formatter.
"""
return {
"()": "microcosm_logging.formatters.ExtraConsoleFormatter",
"format_string": graph.config.logging.default_format,
}
def make_stream_handler(graph, formatter):
"""
Create the stream handler. Used for console/debug output.
"""
return {
"class": graph.config.logging.stream_handler.class_,
"formatter": formatter,
"level": graph.config.logging.level,
"stream": graph.config.logging.stream_handler.stream,
}
def make_loggly_handler(graph, formatter):
    """
    Create the loggly handler.

    Used for searchable aggregation.

    The tag list is de-duplicated while preserving order (service name,
    environment, metrics name).  The previous implementation used a set,
    which made the tag order in the URL non-deterministic across runs.
    """
    loggly = graph.config.logging.loggly
    # The metrics service name may be overridden via the environment.
    metric_service_name = environ.get("METRICS_NAME", graph.metadata.name)
    # Order-preserving de-duplication (dicts keep insertion order, 3.7+).
    tags = list(dict.fromkeys([
        graph.metadata.name,
        loggly.environment,
        metric_service_name,
    ]))
    loggly_url = "{}/inputs/{}/tag/{}".format(
        loggly.base_url,
        loggly.token,
        ",".join(tags),
    )
    return {
        "class": graph.config.logging.https_handler.class_,
        "formatter": formatter,
        "level": graph.config.logging.level,
        "url": loggly_url,
    }
def make_logstash_handler(graph):
"""
Create the logstash handler
Relies on a temporary directory for the locally-cached database path
that is then streamed to a local logstash daemon. This is created in
the OS temp storage and will be garbage collected at an appropriate time.
"""
return {
"class": "logstash_async.handler.SynchronousLogstashHandler",
"host": graph.config.logging.logstash.host,
"port": graph.config.logging.logstash.port,
}
def make_library_levels(graph):
    """
    Build per-library logging level entries for dictConfig.

    Defaults are applied first, then per-application overrides; a later
    entry for the same component wins.
    """
    levels_config = graph.config.logging.levels
    levels = {}
    # Defaults first, then overrides, so overrides clobber defaults.
    for source in (levels_config.default, levels_config.override):
        for level in ("DEBUG", "INFO", "WARN", "ERROR"):
            for component in source[level.lower()]:
                levels[component] = {
                    "level": level,
                }
    return levels
def bump_level_factory(mapping: Dict[str, int]):
    """Wrap the current log record factory so selected loggers are bumped.

    Records whose logger name appears in *mapping* get their level raised
    by the mapped amount, capped at CRITICAL; other records pass through.
    """
    inner_factory = getLogRecordFactory()

    def bumped(name, level, *args, **kwargs):
        bump = mapping.get(name, 0)
        return inner_factory(name, min(level + bump, CRITICAL), *args, **kwargs)

    return bumped
def bump_library_levels(graph):
    """Install a level-bumping log record factory when configured.

    No-op when `config.logging.levels.bump` is empty/falsy.
    """
    bump_mapping = graph.config.logging.levels.bump
    if not bump_mapping:
        return
    setLogRecordFactory(bump_level_factory(bump_mapping))
| 24.904437 | 109 | 0.617788 |
795be23aaf9394699ad3c4b2ce5f2ae9f1e8de26 | 116 | py | Python | databaseInfo.py | GodXuxilie/DatabaseSystem | 81a9aaaef3c1d9fc66c8189d6fda08879b2142ae | [
"Apache-2.0"
] | 3 | 2020-06-14T13:35:00.000Z | 2021-12-14T07:28:18.000Z | databaseInfo.py | GodXuxilie/DatabaseSystem | 81a9aaaef3c1d9fc66c8189d6fda08879b2142ae | [
"Apache-2.0"
] | null | null | null | databaseInfo.py | GodXuxilie/DatabaseSystem | 81a9aaaef3c1d9fc66c8189d6fda08879b2142ae | [
"Apache-2.0"
] | 2 | 2020-06-10T02:25:56.000Z | 2020-06-14T13:35:08.000Z | databaseAddress = '127.0.0.1'
# MySQL login credentials for the movie_recommend database.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or an untracked config file.
databaseLoginName = 'root'
databasePasswd = 'root'
databaseName = 'movie_recommend' | 29 | 32 | 0.75 |
795be255b3b4f8d74b0c03428f973ad91b3cf3fa | 4,632 | py | Python | edges.py | yobeatz/mosaic | 21488d78c2239f0a1ee51c2b4f660066ece213f6 | [
"MIT"
] | 35 | 2021-04-12T16:53:28.000Z | 2022-02-06T17:27:54.000Z | edges.py | yobeatz/mosaic | 21488d78c2239f0a1ee51c2b4f660066ece213f6 | [
"MIT"
] | 1 | 2022-02-06T15:05:55.000Z | 2022-02-08T07:00:23.000Z | edges.py | yobeatz/mosaic | 21488d78c2239f0a1ee51c2b4f660066ece213f6 | [
"MIT"
] | 14 | 2021-04-13T06:52:01.000Z | 2022-02-06T15:10:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import skimage as sk
from skimage.io import imread
from skimage import filters
from skimage import transform
import plotting
from pathlib import Path
def load_image(fname, width=900, plot=None):
    """
    Load an image (or the skimage 'coffee' demo image) as an RGB array.

    Parameters:
        fname: path to the image file; falsy to use the demo image.
        width: target width in px to resize to (keeping aspect ratio) so
            tile sizes behave consistently; None keeps the original size.
        plot: optional collection of plot names; include 'original' to
            display the loaded image. Defaults to no plotting (fixes the
            shared mutable-default-argument pitfall of the old `plot=[]`).

    Returns:
        The image as an int array of shape (height, width, 3).
    """
    if plot is None:
        plot = ()
    if fname:
        img0 = imread(fname)
    else:
        img0 = sk.data.coffee()  # example image shipped with skimage
    # ensure image is rgb (for consistency)
    if len(img0.shape) < 3:
        img0 = sk.color.gray2rgb(img0)
    # resize to same image width => tile size has always similar effect
    if width is not None:
        factor = width / img0.shape[1]
        img0 = transform.resize(img0, (int(img0.shape[0]*factor), int(img0.shape[1]*factor)), anti_aliasing=True)
        img0 = (img0*255).astype(int)
    if 'original' in plot:
        plotting.plot_image(img0)
    print(f'Size of input image: {img0.shape[0]}px * {img0.shape[1]}px')
    return img0
def edges_diblasi(img, gauss=5, details=1, plot=None):
    """
    Detect edges using the Di Blasi segmentation approach.

    The image is reduced to its luminance channel, histogram-equalised and
    blurred, then thresholded around the mean into bright/dark blobs; the
    blob outlines (via a Laplace filter) are the returned edges.

    Parameters:
        img: RGB image array.
        gauss: width (in sigmas) of the Gaussian blur's truncation window.
        details: scale factor on the segmentation threshold; higher values
            keep more detail.
        plot: optional collection of plot names; include 'edges' to display
            the result. Defaults to no plotting (fixes the shared
            mutable-default-argument pitfall of the old `plot=[]`).

    Returns:
        Binary edge image (1 on edges, 0 elsewhere), same height/width
        as the input.
    """
    if plot is None:
        plot = ()
    # RGB to gray ("Luminance channel" in Di Blasi)
    img_gray = sk.color.rgb2gray(img)
    # equalize histogram
    img_eq = sk.exposure.equalize_hist(img_gray)
    # soften image
    img_gauss = filters.gaussian(img_eq, sigma=16, truncate=gauss/16)
    # segment bright areas to blobs
    variance = img_gauss.std()**2  # NOTE: could use the std directly
    img_seg = np.ones((img.shape[0], img.shape[1]))
    threshold = variance/4*2*details
    img_seg[abs(img_gauss - img_gauss.mean()) > threshold] = 0
    # find edges as the outlines of the segmented blobs
    img_edge = filters.laplace(img_seg, ksize=3)
    img_edge[img_edge != 0] = 1
    if 'edges' in plot:
        plotting.plot_image(img_edge, inverted=True, title='Di Blasi')
    return img_edge
def hed_edges(image):
    """
    Run Holistically-Nested Edge Detection (HED) on an RGB image.

    Loads the pretrained Caffe model from the local `HED` directory next to
    this script via OpenCV's DNN module and returns the network's raw
    edge-probability map (floats, same height/width as the input).
    """
    import cv2 as cv
    # based on https://github.com/opencv/opencv/blob/master/samples/dnn/edge_detection.py
    class CropLayer(object):
        # Custom layer required by the HED prototxt: centre-crops the first
        # input blob to the spatial size of the second one.
        def __init__(self, params, blobs):
            self.xstart = 0
            self.xend = 0
            self.ystart = 0
            self.yend = 0
        # Our layer receives two inputs. We need to crop the first input blob
        # to match a shape of the second one (keeping batch size and number of channels)
        def getMemoryShapes(self, inputs):
            inputShape, targetShape = inputs[0], inputs[1]
            batchSize, numChannels = inputShape[0], inputShape[1]
            height, width = targetShape[2], targetShape[3]
            self.ystart = int((inputShape[2] - targetShape[2]) / 2)
            self.xstart = int((inputShape[3] - targetShape[3]) / 2)
            self.yend = self.ystart + height
            self.xend = self.xstart + width
            return [[batchSize, numChannels, height, width]]
        def forward(self, inputs):
            return [inputs[0][:,:,self.ystart:self.yend,self.xstart:self.xend]]
    # Load the pretrained model (source: https://github.com/s9xie/hed)
    script_path = Path(__file__).parent.absolute()
    hed_path = Path.joinpath(script_path, 'HED')
    net = cv.dnn.readNetFromCaffe(str(hed_path / 'deploy.prototxt'),
                                  str(hed_path / 'hed_pretrained_bsds.caffemodel') )
    cv.dnn_registerLayer('Crop', CropLayer)
    image=cv.resize(image,(image.shape[1],image.shape[0]))
    # prepare image as input dataset (mean values from full image dataset)
    inp = cv.dnn.blobFromImage(image, scalefactor=1.0, size=(image.shape[1],image.shape[0]), #w,h
                               mean=(104.00698793, 116.66876762, 122.67891434),
                               swapRB=False, crop=False)
    net.setInput(inp)
    out = net.forward()
    cv.dnn_unregisterLayer('Crop') # get rid of issues when run in a loop
    # Drop the batch and channel axes, leaving the 2D probability map.
    out = out[0,0]
    return out
def edges_hed(img, gauss=None, plot=None):
    """
    Detect edges with the pretrained HED network, thinned to 1px lines.

    Parameters:
        img: RGB image array.
        gauss: optional blur strength applied before detection; falsy to
            skip the pre-smoothing step.
        plot: optional collection of plot names; include 'edges' to display
            the result. Defaults to no plotting (fixes the shared
            mutable-default-argument pitfall of the old `plot=[]`).

    Returns:
        Binary, skeletonised edge image (int 0/1).
    """
    if plot is None:
        plot = ()
    if gauss:
        # Optional pre-smoothing; rescale back to 0-255 afterwards.
        # NOTE(review): `multichannel=True` is deprecated in newer skimage
        # in favour of `channel_axis=-1` — confirm the installed version.
        img = filters.gaussian(img, sigma=16, truncate=gauss/16, multichannel=True)
        img = img/np.amax(img)*255
    img = img.astype(np.uint8)
    hed_matrix = hed_edges(img)
    # gray to binary
    hed_seg = np.ones((hed_matrix.shape[0], hed_matrix.shape[1]))
    hed_seg[hed_matrix < 0.5] = 0
    # skeletonize to get inner lines
    img_edges = sk.morphology.skeletonize(hed_seg).astype(int)
    # option to make plot lines thicker:
    #from skimage.morphology import square,dilation
    #img_edges = dilation(img_edges, square(3))
    if 'edges' in plot:
        plotting.plot_image(img_edges, inverted=True, title='HED')
    return img_edges
if __name__ == '__main__':
    # Smoke test: run HED edge detection on the bundled demo image.
    img = sk.data.coffee()
    #img_edges = edges_diblasi(img)
    img_edges = edges_hed(img, gauss=0)
| 31.944828 | 114 | 0.631908 |
795be3a15c4dc9be75f60592bbb6dd1d010ada3b | 1,367 | py | Python | examples/linear_model/plot_sgd_penalties.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | [
"BSD-3-Clause"
] | 2 | 2020-11-07T02:13:22.000Z | 2022-01-18T14:42:59.000Z | examples/linear_model/plot_sgd_penalties.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | [
"BSD-3-Clause"
] | 1 | 2022-01-12T13:11:21.000Z | 2022-01-12T13:11:21.000Z | examples/linear_model/plot_sgd_penalties.py | MarcinKonowalczyk/scikit-learn | 8b18d4cbfc3a10ce85decec292d30470c69f40d7 | [
"BSD-3-Clause"
] | 1 | 2021-11-03T09:49:02.000Z | 2021-11-03T09:49:02.000Z | """
==============
SGD: Penalties
==============
Contours of where the penalty is equal to 1
for the three penalties L1, L2 and elastic-net.
All of the above are supported by :class:`~sklearn.linear_model.SGDClassifier`
and :class:`~sklearn.linear_model.SGDRegressor`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

# Colours for the three penalty contours.
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"

# Evaluate each penalty on a dense grid over [-1.5, 1.5]^2.
line = np.linspace(-1.5, 1.5, 1001)
xx, yy = np.meshgrid(line, line)
l2 = xx ** 2 + yy ** 2
l1 = np.abs(xx) + np.abs(yy)
rho = 0.5  # elastic-net mixing parameter (L1 ratio)
elastic_net = rho * l1 + (1 - rho) * l2

plt.figure(figsize=(10, 10), dpi=100)
ax = plt.gca()

# Draw the unit level-set of each penalty.
elastic_net_contour = plt.contour(
    xx, yy, elastic_net, levels=[1], colors=elastic_net_color
)
l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color)
l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color)

# Centre the axes on the origin and hide the top/right spines.
ax.set_aspect("equal")
ax.spines["left"].set_position("center")
ax.spines["right"].set_color("none")
ax.spines["bottom"].set_position("center")
ax.spines["top"].set_color("none")

# Label each contour directly on the plot.
plt.clabel(
    elastic_net_contour,
    inline=1,
    fontsize=18,
    fmt={1.0: "elastic-net"},
    manual=[(-1, -1)],
)
plt.clabel(l2_contour, inline=1, fontsize=18, fmt={1.0: "L2"}, manual=[(-1, -1)])
plt.clabel(l1_contour, inline=1, fontsize=18, fmt={1.0: "L1"}, manual=[(-1, -1)])
plt.tight_layout()
plt.show()
| 24.410714 | 81 | 0.663497 |
795be4f6bf6670a897ed8136490506b141df0920 | 4,848 | py | Python | nova/tests/unit/objects/test_cell_mapping.py | cloud-zuiwanyuan/nova | 0b59a2d9dc22e4fb172810019dba5ece09bb4526 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/objects/test_cell_mapping.py | cloud-zuiwanyuan/nova | 0b59a2d9dc22e4fb172810019dba5ece09bb4526 | [
"Apache-2.0"
] | 1 | 2016-04-04T18:41:59.000Z | 2016-04-04T18:41:59.000Z | nova/tests/unit/objects/test_cell_mapping.py | cloud-zuiwanyuan/nova | 0b59a2d9dc22e4fb172810019dba5ece09bb4526 | [
"Apache-2.0"
] | 2 | 2015-12-04T23:51:46.000Z | 2016-06-07T20:01:59.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from nova import exception
from nova import objects
from nova.objects import cell_mapping
from nova.tests.unit.objects import test_objects
def get_db_mapping(**updates):
    """Build a fake CellMapping DB row, with optional field overrides."""
    mapping = dict(
        id=1,
        uuid=uuidutils.generate_uuid(),
        name='cell1',
        transport_url='rabbit://',
        database_connection='sqlite:///',
        created_at=None,
        updated_at=None,
    )
    mapping.update(updates)
    return mapping
class _TestCellMappingObject(object):
    """Behavioural tests for CellMapping, mixed into local/remote cases.

    The DB accessor methods are mocked out so only the object-layer logic
    (field population, argument forwarding) is exercised.
    """

    @mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db')
    def test_get_by_uuid(self, uuid_from_db):
        # A successful lookup returns an object matching the DB row.
        db_mapping = get_db_mapping()
        uuid_from_db.return_value = db_mapping
        mapping_obj = objects.CellMapping().get_by_uuid(self.context,
                                                        db_mapping['uuid'])
        uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid'])
        self.compare_obj(mapping_obj, db_mapping)

    @mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db',
                       side_effect=exception.CellMappingNotFound(uuid='fake'))
    def test_get_by_uuid_invalid(self, uuid_from_db):
        # A missing row propagates CellMappingNotFound to the caller.
        db_mapping = get_db_mapping()
        self.assertRaises(exception.CellMappingNotFound,
                          objects.CellMapping().get_by_uuid,
                          self.context,
                          db_mapping['uuid'])
        uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid'])

    @mock.patch.object(cell_mapping.CellMapping, '_create_in_db')
    def test_create(self, create_in_db):
        # create() passes only the fields that were set on the object.
        uuid = uuidutils.generate_uuid()
        db_mapping = get_db_mapping(uuid=uuid, name='test',
                                    database_connection='mysql+pymysql:///')
        create_in_db.return_value = db_mapping
        mapping_obj = objects.CellMapping(self.context)
        mapping_obj.uuid = uuid
        mapping_obj.name = 'test'
        mapping_obj.database_connection = 'mysql+pymysql:///'
        mapping_obj.create()
        create_in_db.assert_called_once_with(self.context,
                                             {'uuid': uuid,
                                              'name': 'test',
                                              'database_connection': 'mysql+pymysql:///'})
        self.compare_obj(mapping_obj, db_mapping)

    @mock.patch.object(cell_mapping.CellMapping, '_save_in_db')
    def test_save(self, save_in_db):
        # save() identifies the row by uuid and only sends changed fields.
        uuid = uuidutils.generate_uuid()
        db_mapping = get_db_mapping(database_connection='mysql+pymysql:///')
        save_in_db.return_value = db_mapping
        mapping_obj = objects.CellMapping(self.context)
        mapping_obj.uuid = uuid
        mapping_obj.database_connection = 'mysql+pymysql:///'
        mapping_obj.save()
        save_in_db.assert_called_once_with(self.context, uuid,
                                           {'uuid': uuid,
                                            'database_connection': 'mysql+pymysql:///'})
        self.compare_obj(mapping_obj, db_mapping)

    @mock.patch.object(cell_mapping.CellMapping, '_destroy_in_db')
    def test_destroy(self, destroy_in_db):
        # destroy() removes the row keyed by the object's uuid.
        uuid = uuidutils.generate_uuid()
        mapping_obj = objects.CellMapping(self.context)
        mapping_obj.uuid = uuid
        mapping_obj.destroy()
        destroy_in_db.assert_called_once_with(self.context, uuid)
class TestCellMappingObject(test_objects._LocalTest,
                            _TestCellMappingObject):
    # Runs the shared CellMapping tests against the local (in-process)
    # object backend.
    pass
class TestRemoteCellMappingObject(test_objects._RemoteTest,
                                  _TestCellMappingObject):
    # Runs the shared CellMapping tests through the remote (RPC) object
    # backend.
    pass
class _TestCellMappingListObject(object):
    """Behavioural tests for CellMappingList with the DB layer mocked out."""

    @mock.patch.object(cell_mapping.CellMappingList, '_get_all_from_db')
    def test_get_all(self, get_all_from_db):
        # get_all() wraps every DB row in a CellMapping object.
        db_mapping = get_db_mapping()
        get_all_from_db.return_value = [db_mapping]
        mapping_obj = objects.CellMappingList.get_all(self.context)
        get_all_from_db.assert_called_once_with(self.context)
        self.compare_obj(mapping_obj.objects[0], db_mapping)
class TestCellMappingListObject(test_objects._LocalTest,
                                _TestCellMappingListObject):
    # Local-backend variant of the list tests.
    pass
class TestRemoteCellMappingListObject(test_objects._RemoteTest,
                                      _TestCellMappingListObject):
    # Remote (RPC) backend variant of the list tests.
    pass
| 37.292308 | 78 | 0.667285 |
795be56bd00ee86d9f2547f9b31c915a009a317b | 777 | py | Python | happy_dogs/dogs/migrations/0002_auto_20211004_1316.py | shuryhin-oleksandr/happy_dogs | daaa45a716ae3580482058d2c3ea9615e62f35e9 | [
"MIT"
] | null | null | null | happy_dogs/dogs/migrations/0002_auto_20211004_1316.py | shuryhin-oleksandr/happy_dogs | daaa45a716ae3580482058d2c3ea9615e62f35e9 | [
"MIT"
] | null | null | null | happy_dogs/dogs/migrations/0002_auto_20211004_1316.py | shuryhin-oleksandr/happy_dogs | daaa45a716ae3580482058d2c3ea9615e62f35e9 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2021-10-04 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (see header comment). Renames the
    # Visit model and the Dog.name field, adds Dog.last_name, and enforces
    # uniqueness over the full (first_name, last_name) pair.

    dependencies = [
        ('dogs', '0001_initial'),
    ]

    operations = [
        # Visit is now specifically a boarding visit.
        migrations.RenameModel(
            old_name='Visit',
            new_name='BoardingVisit',
        ),
        # Dog.name becomes Dog.first_name ...
        migrations.RenameField(
            model_name='dog',
            old_name='name',
            new_name='first_name',
        ),
        # ... complemented by a new, optional last_name.
        migrations.AddField(
            model_name='dog',
            name='last_name',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
        # No two dogs may share the same full name.
        migrations.AlterUniqueTogether(
            name='dog',
            unique_together={('first_name', 'last_name')},
        ),
    ]
| 24.28125 | 74 | 0.544402 |
795be67732b657c9607103b752e21d8a6a3ef349 | 663 | py | Python | lang/shell.py | marc-dantas/lyon | abbd5a78acf020601e06bee5fc19c6301391275c | [
"MIT"
] | 2 | 2022-01-30T22:10:41.000Z | 2022-01-30T22:32:34.000Z | lang/shell.py | marc-dantas/lyon | abbd5a78acf020601e06bee5fc19c6301391275c | [
"MIT"
] | null | null | null | lang/shell.py | marc-dantas/lyon | abbd5a78acf020601e06bee5fc19c6301391275c | [
"MIT"
] | 1 | 2022-01-10T16:55:26.000Z | 2022-01-10T16:55:26.000Z | from cmath import exp
from rich.console import Console
# Single shared Rich console used by all of the printing helpers below.
CONSOLE = Console()
def print_header(title, subtitle=None) -> None:
    """Print a styled title line, plus an optional subtitle line."""
    CONSOLE.print(f"[blue italic]{title}[/]", style='green')
    if not subtitle:
        return
    CONSOLE.print(f"[blue bold]{subtitle}[/]", style='green')
def print_info(message):
    """Print *message* highlighted in bold yellow (informational output)."""
    rendered = f'[yellow bold]{message}[/] '
    CONSOLE.print(rendered)
def print_success(message):
    """Render *message* in the success colour (green)."""
    success_style = 'green'
    CONSOLE.print(message, style=success_style)
def print_hint(hint: str) -> None:
    """Show *hint* as an arrow-prefixed, italicised suggestion line."""
    rendered = f" [blue]→[/] [italic]{hint}[/]"
    CONSOLE.print(rendered)
def get() -> str:
    """Prompt with '>>>' and return the user's input line.

    Exits the process with status 1 when the input stream is closed (EOF)
    or the user interrupts with Ctrl-C.
    """
    CONSOLE.print('[yellow]>>>[/] ', end='')
    try:
        return input()
    except (EOFError, KeyboardInterrupt):
        # Narrowed from the old `except (Exception, KeyboardInterrupt)`:
        # the blanket handler silently turned any programming error into a
        # clean exit, hiding bugs. input() only raises these two here.
        # `raise SystemExit` replaces `exit()`, which is a site-module
        # convenience not guaranteed to exist.
        raise SystemExit(1)
795be7d36530641eddbe5d516dbeb7debb9026c9 | 13,406 | py | Python | python/GafferSceneUI/HierarchyView.py | fbhradec/gaffer | eda6eaf44a5506af781488e914e9c6f6318c081c | [
"BSD-3-Clause"
] | 49 | 2018-08-27T07:52:25.000Z | 2022-02-08T13:54:05.000Z | python/GafferSceneUI/HierarchyView.py | fbhradec/gaffer | eda6eaf44a5506af781488e914e9c6f6318c081c | [
"BSD-3-Clause"
] | 21 | 2018-11-27T16:00:32.000Z | 2022-03-23T20:01:55.000Z | python/GafferSceneUI/HierarchyView.py | fbhradec/gaffer | eda6eaf44a5506af781488e914e9c6f6318c081c | [
"BSD-3-Clause"
] | 4 | 2018-12-23T16:16:41.000Z | 2021-06-16T09:04:01.000Z | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
import GafferScene
from . import _GafferSceneUI
from . import ContextAlgo
from . import SetUI
##########################################################################
# HierarchyView
##########################################################################
class HierarchyView( GafferUI.NodeSetEditor ) :

	"""Tree view of the scene hierarchy produced by the last added node's
	first output ScenePlug. Selection and expansion are mirrored to and
	from the script context via ContextAlgo, so other editors stay in sync."""

	def __init__( self, scriptNode, **kw ) :

		column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, borderWidth = 4, spacing = 4 )

		GafferUI.NodeSetEditor.__init__( self, column, scriptNode, **kw )

		# Two filters drive what the listing shows : a text search and a
		# set-membership filter (disabled until the user turns it on).
		searchFilter = _GafferSceneUI._HierarchyViewSearchFilter()
		setFilter = _GafferSceneUI._HierarchyViewSetFilter()
		setFilter.setEnabled( False )
		self.__filter = Gaffer.CompoundPathFilter( [ searchFilter, setFilter ] )

		with column :

			with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :

				_SearchFilterWidget( searchFilter )
				_SetFilterWidget( setFilter )

			self.__pathListing = GafferUI.PathListingWidget(
				Gaffer.DictPath( {}, "/" ), # temp till we make a ScenePath
				columns = [ GafferUI.PathListingWidget.defaultNameColumn ],
				allowMultipleSelection = True,
				displayMode = GafferUI.PathListingWidget.DisplayMode.Tree,
			)
			self.__pathListing.setDragPointer( "objects" )
			self.__pathListing.setSortable( False )

			# Work around insanely slow selection of a range containing many
			# objects (using a shift-click). The default selection behaviour
			# is SelectRows and this triggers some terrible performance problems
			# in Qt. Since we only have a single column, there is no difference
			# between SelectItems and SelectRows other than the speed.
			#
			# This workaround isn't going to be sufficient when we come to add
			# additional columns to the HierarchyView. What _might_ work instead
			# is to override `QTreeView.setSelection()` in PathListingWidget.py,
			# so that we manually expand the selected region to include full rows,
			# and then don't have to pass the `QItemSelectionModel::Rows` flag to
			# the subsequent `QItemSelectionModel::select()` call. This would be
			# essentially the same method we used to speed up
			# `PathListingWidget.setSelection()`.
			#
			# Alternatively we could avoid QItemSelectionModel entirely by managing
			# the selection ourself as a persistent PathMatcher.
			self.__pathListing._qtWidget().setSelectionBehavior(
				self.__pathListing._qtWidget().SelectionBehavior.SelectItems
			)

			# Scoped connections are kept so they can be blocked while we
			# push state the other way (see __setPathListingPath() etc).
			self.__selectionChangedConnection = self.__pathListing.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__selectionChanged ) )
			self.__expansionChangedConnection = self.__pathListing.expansionChangedSignal().connect( Gaffer.WeakMethod( self.__expansionChanged ) )
			self.__pathListing.contextMenuSignal().connect( Gaffer.WeakMethod( self.__contextMenuSignal ), scoped = False )
			self.__pathListing.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPressSignal ), scoped = False )

		self.__plug = None
		self._updateFromSet()

	def scene( self ) :

		# The ScenePlug currently being browsed (None when nothing viewed).
		return self.__plug

	def __repr__( self ) :

		return "GafferSceneUI.HierarchyView( scriptNode )"

	def _updateFromSet( self ) :

		# first of all decide what plug we're viewing.
		self.__plug = None
		self.__plugParentChangedConnection = None
		node = self._lastAddedNode()
		if node is not None :
			self.__plug = next( GafferScene.ScenePlug.RecursiveOutputRange( node ), None )
			if self.__plug is not None :
				self.__plugParentChangedConnection = self.__plug.parentChangedSignal().connect( Gaffer.WeakMethod( self.__plugParentChanged ) )

		# call base class update - this will trigger a call to _titleFormat(),
		# hence the need for already figuring out the plug.
		GafferUI.NodeSetEditor._updateFromSet( self )

		# update our view of the hierarchy
		self.__setPathListingPath()

	def _updateFromContext( self, modifiedItems ) :

		# Mirror selection/expansion changes made elsewhere into the listing.
		if any( ContextAlgo.affectsSelectedPaths( x ) for x in modifiedItems ) :
			self.__transferSelectionFromContext()
		elif any( ContextAlgo.affectsExpandedPaths( x ) for x in modifiedItems ) :
			self.__transferExpansionFromContext()

		for item in modifiedItems :
			if not item.startswith( "ui:" ) :
				# When the context has changed, the hierarchy of the scene may
				# have too so we should update our PathListingWidget.
				self.__setPathListingPath()
				break

	def _titleFormat( self ) :

		return GafferUI.NodeSetEditor._titleFormat(
			self,
			_maxNodes = 1 if self.__plug is not None else 0,
			_reverseNodes = True,
			_ellipsis = False
		)

	def __plugParentChanged( self, plug, oldParent ) :

		# the plug we were viewing has been deleted or moved - find
		# another one to view.
		self._updateFromSet()

	@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
	def __setPathListingPath( self ) :

		for f in self.__filter.getFilters() :
			f.setScene( self.__plug )

		if self.__plug is not None :
			# We take a static copy of our current context for use in the ScenePath - this prevents the
			# PathListing from updating automatically when the original context changes, and allows us to take
			# control of updates ourselves in _updateFromContext(), using LazyMethod to defer the calls to this
			# function until we are visible and playback has stopped.
			contextCopy = Gaffer.Context( self.getContext() )
			for f in self.__filter.getFilters() :
				f.setContext( contextCopy )
			with Gaffer.BlockedConnection( self.__selectionChangedConnection ) :
				self.__pathListing.setPath( GafferScene.ScenePath( self.__plug, contextCopy, "/", filter = self.__filter ) )
			self.__transferExpansionFromContext()
			self.__transferSelectionFromContext()
		else :
			with Gaffer.BlockedConnection( self.__selectionChangedConnection ) :
				self.__pathListing.setPath( Gaffer.DictPath( {}, "/" ) )

	def __expansionChanged( self, pathListing ) :

		assert( pathListing is self.__pathListing )

		# Push the new expansion into the context, blocking our own context
		# handler so we don't loop back into __transferExpansionFromContext().
		with Gaffer.BlockedConnection( self._contextChangedConnection() ) :
			ContextAlgo.setExpandedPaths( self.getContext(), pathListing.getExpansion() )

	def __selectionChanged( self, pathListing ) :

		assert( pathListing is self.__pathListing )

		with Gaffer.BlockedConnection( self._contextChangedConnection() ) :
			ContextAlgo.setSelectedPaths( self.getContext(), pathListing.getSelection() )

	def __keyPressSignal( self, widget, event ) :

		# Ctrl+C copies the selected paths to the application clipboard.
		if event.key == "C" and event.modifiers == event.Modifiers.Control :
			self.__copySelectedPaths()
			return True

		return False

	def __contextMenuSignal( self, widget ) :

		menuDefinition = IECore.MenuDefinition()
		selection = self.__pathListing.getSelection()
		menuDefinition.append(
			"Copy Path%s" % ( "" if selection.size() == 1 else "s" ),
			{
				"command" : Gaffer.WeakMethod( self.__copySelectedPaths ),
				"active" : not selection.isEmpty(),
				"shortCut" : "Ctrl+C"
			}
		)

		# Keep a reference so the menu isn't garbage collected while open.
		self.__contextMenu = GafferUI.Menu( menuDefinition )
		self.__contextMenu.popup( widget )

		return True

	def __copySelectedPaths( self, *unused ) :

		if self.__plug is None :
			return

		selection = self.__pathListing.getSelection()
		if not selection.isEmpty() :
			data = IECore.StringVectorData( selection.paths() )
			self.__plug.ancestor( Gaffer.ApplicationRoot ).setClipboardContents( data )

	@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
	def __transferExpansionFromContext( self ) :

		expandedPaths = ContextAlgo.getExpandedPaths( self.getContext() )
		if expandedPaths is None :
			return

		with Gaffer.BlockedConnection( self.__expansionChangedConnection ) :
			self.__pathListing.setExpansion( expandedPaths )

	@GafferUI.LazyMethod( deferUntilPlaybackStops = True )
	def __transferSelectionFromContext( self ) :

		selection = ContextAlgo.getSelectedPaths( self.getContext() )
		with Gaffer.BlockedConnection( self.__selectionChangedConnection ) :
			self.__pathListing.setSelection( selection, scrollToFirst=True, expandNonLeaf=False )
# Register so "HierarchyView" editors can be created from saved UI layouts.
GafferUI.Editor.registerType( "HierarchyView", HierarchyView )
##########################################################################
# _SetFilterWidget
##########################################################################
class _SetFilterWidget( GafferUI.PathFilterWidget ) :

	"""Menu button letting the user restrict the hierarchy to members of
	chosen scene sets."""

	def __init__( self, pathFilter ) :

		button = GafferUI.MenuButton(
			"Sets",
			menu = GafferUI.Menu(
				Gaffer.WeakMethod( self.__setsMenuDefinition ),
				title = "Set Filter"
			)
		)

		GafferUI.PathFilterWidget.__init__( self, button, pathFilter )

	def _updateFromPathFilter( self ) :

		# Nothing to reflect on the button itself; all state lives in the
		# menu, which is rebuilt on demand by __setsMenuDefinition().
		pass

	def __setsMenuDefinition( self ) :

		m = IECore.MenuDefinition()

		# Query the set names available from the scene being viewed.
		availableSets = set()
		if self.pathFilter().getScene() is not None :
			with self.pathFilter().getContext() :
				availableSets.update( str( s ) for s in self.pathFilter().getScene()["setNames"].getValue() )

		builtInSets = { "__lights", "__cameras", "__coordinateSystems" }
		selectedSets = set( self.pathFilter().getSetNames() )

		m.append( "/Enabled", { "checkBox" : self.pathFilter().getEnabled(), "command" : Gaffer.WeakMethod( self.__toggleEnabled ) } )
		m.append( "/EnabledDivider", { "divider" : True } )

		# Convenience items to select every set, or none at all.
		m.append(
			"/All", {
				"active" : self.pathFilter().getEnabled() and selectedSets.issuperset( availableSets ),
				"checkBox" : selectedSets.issuperset( availableSets ),
				"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), builtInSets | availableSets | selectedSets )
			}
		)
		m.append(
			"/None", {
				"active" : self.pathFilter().getEnabled() and len( selectedSets ),
				"checkBox" : not len( selectedSets ),
				"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), set() )
			}
		)
		m.append( "/AllDivider", { "divider" : True } )

		# Builds the menu item for one set; choosing it toggles that set
		# in or out of the current selection.
		# NOTE(review): the returned dict reads the loop variable `s`, not
		# the `setName` parameter — correct only because item() is always
		# called as item( s ); worth tidying.
		def item( setName ) :

			updatedSets = set( selectedSets )
			if setName in updatedSets :
				updatedSets.remove( setName )
			else :
				updatedSets.add( setName )

			return {
				"active" : self.pathFilter().getEnabled() and s in availableSets,
				"checkBox" : s in selectedSets,
				"command" : functools.partial( Gaffer.WeakMethod( self.__setSets ), updatedSets )
			}

		# Built-in sets first, shown with prettified names ("__lights" -> "Lights").
		for s in sorted( builtInSets ) :
			m.append(
				"/%s" % IECore.CamelCase.toSpaced( s[2:] ),
				item( s )
			)

		if len( availableSets - builtInSets ) :
			m.append( "/BuiltInDivider", { "divider" : True } )

		pathFn = SetUI.getMenuPathFunction()

		# Then the user-defined sets (including selected-but-no-longer-available ones).
		for s in sorted( availableSets | selectedSets ) :
			if s in builtInSets :
				continue
			m.append( "/" + pathFn( s ), item( s ) )

		return m

	def __toggleEnabled( self, *unused ) :

		self.pathFilter().setEnabled( not self.pathFilter().getEnabled() )

	def __setSets( self, sets, *unused ) :

		self.pathFilter().setSetNames( sets )
##########################################################################
# _SearchFilterWidget
##########################################################################
class _SearchFilterWidget( GafferUI.PathFilterWidget ) :

	"""Text field driving the name-match filter for the hierarchy."""

	def __init__( self, pathFilter ) :

		self.__patternWidget = GafferUI.TextWidget()

		GafferUI.PathFilterWidget.__init__( self, self.__patternWidget, pathFilter )

		self.__patternWidget._qtWidget().setPlaceholderText( "Filter..." )
		self.__patternWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__patternEditingFinished ), scoped = False )

		self._updateFromPathFilter()

	def _updateFromPathFilter( self ) :

		# Reflect the filter's current pattern in the text field.
		self.__patternWidget.setText( self.pathFilter().getMatchPattern() )

	def __patternEditingFinished( self, widget ) :

		# Apply the edited pattern once the user commits the text.
		self.pathFilter().setMatchPattern( self.__patternWidget.getText() )
| 35.559682 | 138 | 0.701925 |
795be8b462ec28099556f56bd44b7fe100857d1e | 8,333 | py | Python | venv/lib/python2.7/site-packages/pylint/pyreverse/diadefslib.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/pylint/pyreverse/diadefslib.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | venv/lib/python2.7/site-packages/pylint/pyreverse/diadefslib.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | # Copyright (c) 2000-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""handle diagram generation options for class diagram or default diagrams
"""
from logilab.common.compat import builtins
import astroid
from astroid.utils import LocalsVisitor
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
# Name of the builtins module ("__builtin__" on py2, "builtins" on py3);
# used by show_node() to recognise and optionally hide builtin classes.
BUILTINS_NAME = builtins.__name__
# diagram generators ##########################################################
class DiaDefGenerator(object):
    """Handle diagram generation options shared by all diadef generators."""

    def __init__(self, linker, handler):
        """common Diagram Handler initialization"""
        self.config = handler.config
        self._set_default_options()
        self.linker = linker
        self.classdiagram = None # defined by subclasses

    def get_title(self, node):
        """get title for objects"""
        title = node.name
        if self.module_names:
            # qualify with the module path when requested
            title = '%s.%s' % (node.root().name, title)
        return title

    def _set_option(self, option):
        """activate some options if not explicitly deactivated"""
        # if we have a class diagram, we want more information by default;
        # so if the option is None, we return True
        if option is None:
            if self.config.classes:
                return True
            else:
                return False
        return option

    def _set_default_options(self):
        """set different default options with _default dictionary"""
        self.module_names = self._set_option(self.config.module_names)
        all_ancestors = self._set_option(self.config.all_ancestors)
        all_associated = self._set_option(self.config.all_associated)
        anc_level, ass_level = (0, 0)
        # -1 means "follow the relation with no depth limit"
        if all_ancestors:
            anc_level = -1
        if all_associated:
            ass_level = -1
        # explicit depth settings take precedence over the -1/0 defaults
        if self.config.show_ancestors is not None:
            anc_level = self.config.show_ancestors
        if self.config.show_associated is not None:
            ass_level = self.config.show_associated
        self.anc_level, self.ass_level = anc_level, ass_level

    def _get_levels(self):
        """help function for search levels"""
        return self.anc_level, self.ass_level

    def show_node(self, node):
        """Return True unless *node* is a builtin hidden by configuration."""
        if self.config.show_builtin:
            return True
        return node.root().name != BUILTINS_NAME

    def add_class(self, node):
        """visit one class and add it to diagram"""
        self.linker.visit(node)
        self.classdiagram.add_object(self.get_title(node), node)

    def get_ancestors(self, node, level):
        """return ancestor nodes of a class node"""
        # level 0 exhausts the ancestor budget; -1 never reaches 0, so it
        # effectively means "unlimited".
        if level == 0:
            return
        for ancestor in node.ancestors(recurs=False):
            if not self.show_node(ancestor):
                continue
            yield ancestor

    def get_associated(self, klass_node, level):
        """return associated nodes of a class node"""
        if level == 0:
            return
        # NOTE: list concatenation of dict.values() — Python 2 semantics.
        for ass_nodes in klass_node.instance_attrs_type.values() + \
                klass_node.locals_type.values():
            for ass_node in ass_nodes:
                # instances are resolved to the class they were built from
                if isinstance(ass_node, astroid.Instance):
                    ass_node = ass_node._proxied
                if not (isinstance(ass_node, astroid.Class)
                        and self.show_node(ass_node)):
                    continue
                yield ass_node

    def extract_classes(self, klass_node, anc_level, ass_level):
        """extract recursively classes related to klass_node"""
        # already-seen or hidden nodes stop the recursion
        if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
            return
        self.add_class(klass_node)
        # walk up the inheritance tree, spending the ancestor budget
        for ancestor in self.get_ancestors(klass_node, anc_level):
            self.extract_classes(ancestor, anc_level-1, ass_level)
        # then follow associations, spending the association budget
        for ass_node in self.get_associated(klass_node, ass_level):
            self.extract_classes(ass_node, anc_level, ass_level-1)
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
    """generate minimum diagram definition for the project :

    * a package diagram including project's modules
    * a class diagram including project's classes
    """

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)
        LocalsVisitor.__init__(self)

    def visit_project(self, node):
        """visit an astroid.Project node

        create a diagram definition for packages
        """
        mode = self.config.mode
        # A package diagram only makes sense for multi-module projects.
        if len(node.modules) > 1:
            self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
        else:
            self.pkgdiagram = None
        self.classdiagram = ClassDiagram('classes %s' % node.name, mode)

    def leave_project(self, node):
        """leave the astroid.Project node

        return the generated diagram definition
        """
        if self.pkgdiagram:
            return self.pkgdiagram, self.classdiagram
        # Always return a tuple, even when there is only the class diagram.
        return self.classdiagram,

    def visit_module(self, node):
        """visit an astroid.Module node

        add this class to the package diagram definition
        """
        if self.pkgdiagram:
            self.linker.visit(node)
            self.pkgdiagram.add_object(node.name, node)

    def visit_class(self, node):
        """visit an astroid.Class node

        add this class to the class diagram definition
        """
        anc_level, ass_level = self._get_levels()
        self.extract_classes(node, anc_level, ass_level)

    def visit_from(self, node):
        """visit astroid.From and catch modules for package diagram
        """
        if self.pkgdiagram:
            self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
    """generate a class diagram definition including all classes related to a
    given class
    """

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)

    def class_diagram(self, project, klass):
        """return a class diagram definition for the given klass and its
        related klasses

        :param project: astroid project containing the class
        :param klass: possibly dotted name of the class to diagram
        """
        self.classdiagram = ClassDiagram(klass, self.config.mode)
        if len(project.modules) > 1:
            module, klass = klass.rsplit('.', 1)
            module = project.get_module(module)
        else:
            module = project.modules[0]
            klass = klass.split('.')[-1]
        # Use the next() builtin instead of the iterator's .next() method so
        # this also works on Python 3 (and Python >= 2.6); behavior is
        # unchanged: take the first lookup result or raise StopIteration.
        klass = next(module.ilookup(klass))
        anc_level, ass_level = self._get_levels()
        self.extract_classes(klass, anc_level, ass_level)
        return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler(object):
    """handle diagram definitions :

    get it from user (i.e. xml files) or generate them
    """

    def __init__(self, config):
        self.config = config

    def get_diadefs(self, project, linker):
        """get the diagrams configuration data

        :param linker: astroid.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
        :param project: astroid.manager.Project
        """
        # Build one class diagram per class explicitly requested by the user.
        generator = ClassDiadefGenerator(linker, self)
        diagrams = [generator.class_diagram(project, klass)
                    for klass in self.config.classes]
        # Otherwise fall back to the default package + class diagrams.
        if not diagrams:
            diagrams = DefaultDiadefGenerator(linker, self).visit(project)
        for diagram in diagrams:
            diagram.extract_relationships()
        return diagrams
| 35.611111 | 84 | 0.638186 |
795be8ca7888b119a408ba9b679cf24b7fdbb559 | 9,753 | py | Python | src/dynamodb_encryption_sdk/internal/formatting/deserialize/attribute.py | johnwalker/aws-dynamodb-encryption-python | 6081d42bda2eb25da81f63c58666143fe6711189 | [
"Apache-2.0"
] | null | null | null | src/dynamodb_encryption_sdk/internal/formatting/deserialize/attribute.py | johnwalker/aws-dynamodb-encryption-python | 6081d42bda2eb25da81f63c58666143fe6711189 | [
"Apache-2.0"
] | null | null | null | src/dynamodb_encryption_sdk/internal/formatting/deserialize/attribute.py | johnwalker/aws-dynamodb-encryption-python | 6081d42bda2eb25da81f63c58666143fe6711189 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Tooling for deserializing attributes.
.. warning::
No guarantee is provided on the modules and APIs within this
namespace staying consistent. Directly reference at your own risk.
"""
import codecs
from decimal import Decimal
import io
import logging
import struct
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Callable, Dict, List, Text, Union # noqa pylint: disable=unused-import
from dynamodb_encryption_sdk.internal import dynamodb_types # noqa pylint: disable=unused-import,ungrouped-imports
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
from boto3.dynamodb.types import Binary
from dynamodb_encryption_sdk.exceptions import DeserializationError
from dynamodb_encryption_sdk.identifiers import LOGGER_NAME
from dynamodb_encryption_sdk.internal.formatting.deserialize import decode_byte, decode_length, decode_tag, decode_value
from dynamodb_encryption_sdk.internal.identifiers import Tag, TagValues, TEXT_ENCODING
from dynamodb_encryption_sdk.internal.str_ops import to_str
__all__ = ('deserialize_attribute',)
_LOGGER = logging.getLogger(LOGGER_NAME)
def deserialize_attribute(serialized_attribute):  # noqa: C901 pylint: disable=too-many-locals
    # type: (bytes) -> dynamodb_types.RAW_ATTRIBUTE
    """Deserializes serialized attributes for decryption.

    :param bytes serialized_attribute: Serialized attribute bytes
    :returns: Deserialized attribute
    :rtype: dict
    """
    # All helpers are nested closures so the recursive ``_deserialize`` can
    # reach them (and each other) without exposing them at module scope.

    def _transform_binary_value(value):
        # (bytes) -> bytes
        """Transforms a serialized binary value.

        :param bytes value: Raw deserialized value
        :rtype: bytes
        """
        # boto3 may hand back a Binary wrapper; unwrap to the raw bytes.
        if isinstance(value, Binary):
            return value.value
        return value

    def _deserialize_binary(stream):
        # type: (io.BytesIO) -> Dict[Text, bytes]
        """Deserializes a binary object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        value = decode_value(stream)
        return {Tag.BINARY.dynamodb_tag: _transform_binary_value(value)}

    def _transform_string_value(value):
        # (bytes) -> dynamodb_types.STRING
        """Transforms a serialized string value.

        :param bytes value: Raw deserialized value
        :rtype: dynamodb_encryption_sdk.internal.dynamodb_types.STRING
        """
        return codecs.decode(value, TEXT_ENCODING)

    def _deserialize_string(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.STRING]
        """Deserializes a string object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        value = decode_value(stream)
        return {Tag.STRING.dynamodb_tag: _transform_string_value(value)}

    def _transform_number_value(value):
        # (bytes) -> dynamodb_types.STRING
        """Transforms a serialized number value.

        :param bytes value: Raw deserialized value
        :rtype: dynamodb_encryption_sdk.internal.dynamodb_types.STRING
        """
        # DynamoDB transports numbers as strings; normalize() canonicalizes
        # the Decimal and '{:f}' renders it without scientific notation.
        raw_value = codecs.decode(value, TEXT_ENCODING)
        decimal_value = Decimal(to_str(raw_value)).normalize()
        return '{0:f}'.format(decimal_value)

    def _deserialize_number(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.STRING]
        """Deserializes a number object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        value = decode_value(stream)
        return {Tag.NUMBER.dynamodb_tag: _transform_number_value(value)}

    # Maps the serialized boolean byte values to Python booleans.
    _boolean_map = {
        TagValues.FALSE.value: False,
        TagValues.TRUE.value: True
    }

    def _deserialize_boolean(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.BOOLEAN]
        """Deserializes a boolean object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        value = decode_byte(stream)
        return {Tag.BOOLEAN.dynamodb_tag: _boolean_map[value]}

    def _deserialize_null(stream):  # we want a consistent API but don't use stream, so pylint: disable=unused-argument
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.BOOLEAN]
        """Deserializes a null object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        return {Tag.NULL.dynamodb_tag: True}

    def _deserialize_set(stream, member_transform):
        # type: (io.BytesIO, Callable) -> List[Union[dynamodb_types.BINARY, dynamodb_types.STRING]]
        """Deserializes contents of serialized set.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: list
        """
        # Sets are length-prefixed; sorting gives a deterministic member order.
        member_count = decode_length(stream)
        return sorted([
            member_transform(decode_value(stream))
            for _ in range(member_count)
        ])

    def _deserialize_binary_set(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.BINARY]]
        """Deserializes a binary set object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        return {Tag.BINARY_SET.dynamodb_tag: _deserialize_set(stream, _transform_binary_value)}

    def _deserialize_string_set(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.STRING]]
        """Deserializes a string set object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        return {Tag.STRING_SET.dynamodb_tag: _deserialize_set(stream, _transform_string_value)}

    def _deserialize_number_set(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.SET[dynamodb_types.STRING]]
        """Deserializes a number set object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        return {Tag.NUMBER_SET.dynamodb_tag: _deserialize_set(stream, _transform_number_value)}

    def _deserialize_list(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.LIST]
        """Deserializes a list object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        # Length-prefixed sequence of arbitrary nested values.
        member_count = decode_length(stream)
        return {Tag.LIST.dynamodb_tag: [
            _deserialize(stream)
            for _ in range(member_count)
        ]}

    def _deserialize_map(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.MAP]
        """Deserializes a map object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        member_count = decode_length(stream)
        members = {}  # type: dynamodb_types.MAP
        for _ in range(member_count):
            # Map keys must deserialize to DynamoDB strings.
            key = _deserialize(stream)
            if Tag.STRING.dynamodb_tag not in key:
                raise DeserializationError(
                    'Malformed serialized map: found "{}" as map key.'.format(list(key.keys())[0])
                )

            value = _deserialize(stream)
            members[key[Tag.STRING.dynamodb_tag]] = value

        return {Tag.MAP.dynamodb_tag: members}

    def _deserialize_function(tag):
        # type: (bytes) -> Callable
        """Identifies the correct deserialization function based on the provided tag.

        :param tag: Identifying tag, read from start of serialized object
        :type tag: dynamodb_encryption_sdk.internal.identifiers.Tag
        :rtype: callable
        """
        deserialize_functions = {
            Tag.BINARY.tag: _deserialize_binary,
            Tag.BINARY_SET.tag: _deserialize_binary_set,
            Tag.NUMBER.tag: _deserialize_number,
            Tag.NUMBER_SET.tag: _deserialize_number_set,
            Tag.STRING.tag: _deserialize_string,
            Tag.STRING_SET.tag: _deserialize_string_set,
            Tag.BOOLEAN.tag: _deserialize_boolean,
            Tag.NULL.tag: _deserialize_null,
            Tag.LIST.tag: _deserialize_list,
            Tag.MAP.tag: _deserialize_map
        }
        try:
            return deserialize_functions[tag]
        except KeyError:
            raise DeserializationError('Unsupported tag: "{}"'.format(tag))

    def _deserialize(stream):
        # type: (io.BytesIO) -> Dict[Text, dynamodb_types.RAW_ATTRIBUTE]
        """Deserializes a serialized object.

        :param stream: Stream containing serialized object
        :type stream: io.BytesIO
        :rtype: dict
        """
        # struct.error from any decode helper indicates a truncated/corrupt
        # payload; surface it as a DeserializationError.
        try:
            tag = decode_tag(stream)
            return _deserialize_function(tag)(stream)
        except struct.error:
            raise DeserializationError('Malformed serialized data')

    if not serialized_attribute:
        raise DeserializationError('Empty serialized attribute data')

    stream = io.BytesIO(serialized_attribute)
    return _deserialize(stream)
| 36.52809 | 120 | 0.666667 |
795be969424d710779281c10939493919ffa94fb | 1,405 | py | Python | composer/models/gpt2/gpt2_hparams.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | composer/models/gpt2/gpt2_hparams.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | composer/models/gpt2/gpt2_hparams.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
import dataclasses
from typing import TYPE_CHECKING
from composer.models.transformer_hparams import TransformerHparams
if TYPE_CHECKING:
from composer.models.transformer_shared import MosaicTransformer
@dataclasses.dataclass
class GPT2Hparams(TransformerHparams):
    """
    Overrides TransformerHparams to create GPT-2 specific models and configs.
    """

    def initialize_object(self) -> "MosaicTransformer":
        """Build a GPT2Model from these hyperparameters."""
        # Imported lazily so the hparams module stays importable without the
        # heavyweight transformers dependency.
        import transformers

        from composer.models.gpt2.model import GPT2Model

        self.validate()

        # Resolve the HuggingFace config: an explicit config dict wins over a
        # pretrained checkpoint name.
        if self.model_config:
            config = transformers.GPT2Config.from_dict(self.model_config)
        elif self.pretrained_model_name is not None:
            config = transformers.GPT2Config.from_pretrained(self.pretrained_model_name)
        else:
            raise ValueError('One of pretrained_model_name or model_config needed.')

        if self.use_pretrained:
            model = transformers.AutoModelForCausalLM.from_pretrained(self.pretrained_model_name)
        else:
            model = transformers.AutoModelForCausalLM.from_config(config)  #type: ignore (thirdparty)

        return GPT2Model(
            module=model,
            config=config,  #type: ignore (thirdparty)
            tokenizer_name=self.tokenizer_name,
            gradient_checkpointing=self.gradient_checkpointing,
        )
| 33.452381 | 101 | 0.714591 |
795be9aa7db0dc0eca9d1a19df8347ff47e36c4a | 2,433 | py | Python | log_casp_act/run_model_712.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_casp_act/run_model_712.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_casp_act/run_model_712.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null |
import numpy as np
from math import *
import pymultinest
import sys
sys.path.insert(0, '/home/kochenma/pysb')
from pysb.integrate import Solver
import csv
import datetime
import time as tm
from model_712 import model
from pysb.pathfinder import set_path
set_path('bng', '/home/kochenma/BioNetGen')
# Load the experimental EARM time-course data (header row plus numeric rows).
data_object = []
with open('earm_data.csv') as data_file:
    reader = csv.reader(data_file)
    line = list(reader)
    for each in line:
        data_object.append(each)
# Convert every cell after the header row to float.
for i, each in enumerate(data_object):
    if i > 0:
        for j, item in enumerate(each):
            data_object[i][j] = float(data_object[i][j])
# Drop the header row.
data_object = data_object[1:]
# First column holds the time points used for the ODE integration grid.
time = []
for each in data_object:
    time.append(float(each[0]))
# Tight absolute/relative tolerances for the stiff 'vode' integrator.
model_solver = Solver(model, time, integrator='vode', integrator_options={'atol': 1e-12, 'rtol': 1e-12})
def prior(cube, ndim, nparams):
    """MultiNest prior: map unit-cube samples onto log10 parameter ranges.

    Each sampled parameter kind (by its 3-character name suffix) gets a
    linear transform cube*scale + offset, in place.
    """
    transforms = {
        '1kf': (4, -4),
        '2kf': (4, -8),
        '1kr': (4, -4),
        '1kc': (4, -1),
    }
    for k, param in enumerate(model.parameters):
        transform = transforms.get(param.name[-3:])
        if transform is not None:
            scale, offset = transform
            cube[k] = cube[k] * scale + offset
# Name suffixes identifying the sampled (free) rate parameters.
postfixes = ['1kf', '2kf', '1kr', '1kc']


def loglike(cube, ndim, nparams):
    # Likelihood for MultiNest: simulate the model at the sampled rate
    # constants and score the final cleaved-PARP fraction.
    point = []
    cube_index = 0
    for k, every in enumerate(model.parameters):
        if every.name[-3:] in postfixes:
            # Sampled values live in log10 space; convert back to linear.
            point.append(10**cube[cube_index])
            cube_index += 1
        else:
            # Non-sampled parameters keep their nominal model values.
            point.append(model.parameters[k].value)
    model_solver.run(point)
    failed = False
    # Reject trajectories with (numerically) negative or NaN observables.
    for every in model_solver.yobs:
        for thing in every:
            if thing <= -0.00000001 or np.isnan(thing):
                failed = True
    if failed:
        return ['fail', -10000.0]
    else:
        # Fraction at the final time point; presumably yobs columns 6 and 1
        # are cleaved vs. uncleaved PARP — confirm against model observables.
        parpc = model_solver.yobs[-1][6]/(model_solver.yobs[-1][1] + model_solver.yobs[-1][6])
        if (parpc > 0.0) and (parpc < 1.00000001):
            print log(parpc), point
            return ['sim', log(parpc)]
        else:
            return ['fail', -10000.0]
# Count the free parameters (one per sampled-rate name suffix) so MultiNest
# knows the dimensionality of the search space.
n_params = 0
for m, lotsa in enumerate(model.parameters):
    if lotsa.name[-3:] == '1kf':
        n_params += 1
    if lotsa.name[-3:] == '2kf':
        n_params += 1
    if lotsa.name[-3:] == '1kr':
        n_params += 1
    if lotsa.name[-3:] == '1kc':
        n_params += 1
start_time = tm.clock()
# counts is mutated in place by the (patched) pymultinest.run call.
counts = [0, 0]
pymultinest.run(loglike, prior, n_params, evidence_tolerance=0.0001, n_live_points=16000, log_zero=-1e3, sampling_efficiency=0.3, outputfiles_basename='/scratch/kochenma/log_casp_act/712/', resume = False, verbose = False, counts=counts)
print counts
print 'start time', start_time
print 'end time', tm.clock()
795bea63608fcefd62962de94f09b893ce43d745 | 2,273 | py | Python | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/cluster_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource_py3 import TrackedResource
class Cluster(TrackedResource):
    """The HDInsight cluster.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource.
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param location: The Azure Region where the resource lives
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param etag: The ETag for the resource
    :type etag: str
    :param properties: The properties of the cluster.
    :type properties: ~azure.mgmt.hdinsight.models.ClusterGetProperties
    :param identity: The identity of the cluster, if configured.
    :type identity: ~azure.mgmt.hdinsight.models.ClusterIdentity
    """

    # Server-populated fields: marked read-only for the msrest serializer.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ClusterGetProperties'},
        'identity': {'key': 'identity', 'type': 'ClusterIdentity'},
    }

    def __init__(self, *, location: str=None, tags=None, etag: str=None, properties=None, identity=None, **kwargs) -> None:
        # location/tags are handled by the TrackedResource base class.
        super(Cluster, self).__init__(location=location, tags=tags, **kwargs)
        self.etag = etag
        self.properties = properties
        self.identity = identity
| 37.262295 | 123 | 0.594369 |
795bea813cb4af4d522b74c4b0dd825c4eff2f8e | 5,407 | py | Python | setup.py | ekhtiar/airflow | 9410715c81a2a16dcd04e7cce56d75747bb19ff6 | [
"Apache-2.0"
] | null | null | null | setup.py | ekhtiar/airflow | 9410715c81a2a16dcd04e7cce56d75747bb19ff6 | [
"Apache-2.0"
] | null | null | null | setup.py | ekhtiar/airflow | 9410715c81a2a16dcd04e7cce56d75747bb19ff6 | [
"Apache-2.0"
] | 1 | 2019-12-12T06:44:14.000Z | 2019-12-12T06:44:14.000Z | from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import os
import sys
# Kept manually in sync with airflow.__version__
version = '1.7.0'
class Tox(TestCommand):
    """setuptools 'test' command that delegates the test run to tox."""

    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra command-line arguments forwarded verbatim to tox.
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        #import here, cause outside the eggs aren't loaded
        import tox
        # Exit with tox's return code so CI sees failures.
        errno = tox.cmdline(args=self.tox_args.split())
        sys.exit(errno)
class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""

    user_options = []

    def initialize_options(self):
        """Nothing to initialize for this command."""
        pass

    def finalize_options(self):
        """Nothing to finalize for this command."""
        pass

    def run(self):
        """Delete build artifacts from the project root."""
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
# Extras: optional dependency groups selectable as airflow[<name>].
# NOTE: this variable was previously named `async`, which became a reserved
# keyword in Python 3.7 (PEP 492 finalization) and made this file a
# SyntaxError there. The extras key 'async' below is unchanged, so
# `pip install airflow[async]` still works.
async_packages = [
    'greenlet>=0.4.9',
    'eventlet>= 0.9.7',
    'gevent>=0.13'
]
celery = [
    'celery>=3.1.17',
    'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
gcloud = [
    'gcloud>=0.11.0',
]
gcp_api = [
    'httplib2',
    'google-api-python-client<=1.4.2',
    'oauth2client>=1.5.2, <2.0.0',
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
hive = [
    'hive-thrift-py>=0.0.1',
    'pyhive>=0.1.3',
    'impyla>=0.13.3',
    'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
s3 = [
    'boto>=2.36.0',
    'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.8',
            'thrift_sasl>=0.2.0',
            'snakebite[kerberos]>=2.7.8']
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']

# Aggregate extras built from the groups above.
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica
devel = ['lxml>=3.3.4', 'nose', 'nose-parameterized', 'mock']
devel_minreq = devel + mysql + doc + password + s3
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker

setup(
    name='airflow',
    description='Programmatically author, schedule and monitor data pipelines',
    license='Apache License 2.0',
    version=version,
    packages=find_packages(),
    package_data={'': ['airflow/alembic.ini']},
    include_package_data=True,
    zip_safe=False,
    scripts=['airflow/bin/airflow'],
    install_requires=[
        'alembic>=0.8.3, <0.9',
        'babel>=1.3, <2.0',
        'chartkick>=0.4.2, < 0.5',
        'croniter>=0.3.8, <0.4',
        'dill>=0.2.2, <0.3',
        'python-daemon>=2.1.1, <2.2',
        'flask>=0.10.1, <0.11',
        'flask-admin>=1.4.0, <2.0.0',
        'flask-cache>=0.13.1, <0.14',
        'flask-login==0.2.11',
        'future>=0.15.0, <0.16',
        'funcsigs>=0.4, <1',
        'gunicorn>=19.3.0, <19.4.0',  # 19.4.? seemed to have issues
        'jinja2>=2.7.3, <3.0',
        'markdown>=2.5.2, <3.0',
        'pandas>=0.15.2, <1.0.0',
        'pygments>=2.0.1, <3.0',
        'python-dateutil>=2.3, <3',
        'requests>=2.5.1, <3',
        'setproctitle>=1.1.8, <2',
        'sqlalchemy>=0.9.8',
        'thrift>=0.9.2, <0.10',
        'Flask-WTF==0.12'
    ],
    extras_require={
        'all': devel_all,
        'all_dbs': all_dbs,
        'async': async_packages,
        'celery': celery,
        'crypto': crypto,
        'devel': devel_minreq,
        'devel_hadoop': devel_hadoop,
        'doc': doc,
        'docker': docker,
        'druid': druid,
        'gcloud': gcloud,
        'gcp_api': gcp_api,
        'hdfs': hdfs,
        'hive': hive,
        'jdbc': jdbc,
        'mssql': mssql,
        'mysql': mysql,
        'oracle': oracle,
        'postgres': postgres,
        'rabbitmq': rabbitmq,
        's3': s3,
        'samba': samba,
        'slack': slack,
        'statsd': statsd,
        'vertica': vertica,
        'ldap': ldap,
        'webhdfs': webhdfs,
        'kerberos': kerberos,
        'password': password,
        'github_enterprise': github_enterprise,
        'qds': qds
    },
    classifiers={
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Monitoring',
    },
    author='Maxime Beauchemin',
    author_email='maximebeauchemin@gmail.com',
    url='https://github.com/airbnb/airflow',
    download_url=(
        'https://github.com/airbnb/airflow/tarball/' + version),
    cmdclass={'test': Tox,
              'extra_clean': CleanCommand,
              },
)
795beb1c61b028efd3051bcd2d00b768e8d30ee0 | 5,069 | py | Python | mediagoblin/plugins/piwigo/tools.py | gnu-mirror-unofficial/mediagoblin | 522a61b24a1b7767682eaf7b29c59e40a0a9b73f | [
"CC0-1.0"
] | null | null | null | mediagoblin/plugins/piwigo/tools.py | gnu-mirror-unofficial/mediagoblin | 522a61b24a1b7767682eaf7b29c59e40a0a9b73f | [
"CC0-1.0"
] | null | null | null | mediagoblin/plugins/piwigo/tools.py | gnu-mirror-unofficial/mediagoblin | 522a61b24a1b7767682eaf7b29c59e40a0a9b73f | [
"CC0-1.0"
] | null | null | null | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2013 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
import logging
import lxml.etree as ET
from werkzeug.exceptions import MethodNotAllowed, BadRequest
from mediagoblin.tools.request import setup_user_in_request
from mediagoblin.tools.response import Response
_log = logging.getLogger(__name__)
PwgError = namedtuple("PwgError", ["code", "msg"])
class PwgNamedArray(list):
    """A list that serializes each entry as a repeated child element.

    ``item_name`` is the tag used for every child; keys listed in
    ``as_attrib`` are rendered as XML attributes for dict entries.
    """

    def __init__(self, l, item_name, as_attrib=()):
        list.__init__(self, l)
        self.item_name = item_name
        self.as_attrib = as_attrib

    def fill_element_xml(self, el):
        """Append one <item_name> child to *el* per list entry."""
        for entry in self:
            child = ET.SubElement(el, self.item_name)
            if isinstance(entry, dict):
                _fill_element_dict(child, entry, self.as_attrib)
            else:
                _fill_element(child, entry)
def _fill_element_dict(el, data, as_attr=()):
    """Serialize dict *data* under element *el*.

    Keys listed in *as_attr* become XML attributes (stringified); all other
    keys become child elements filled recursively.
    """
    for key, value in data.items():
        if key in as_attr:
            el.set(key, value if isinstance(value, str) else str(value))
        else:
            _fill_element(ET.SubElement(el, key), value)
def _fill_element(el, data):
    """Serialize *data* into XML element *el* according to its Python type."""
    # bool must be tested before int: bool is an int subclass, and piwigo
    # encodes booleans as "1"/"0".
    if isinstance(data, bool):
        if data:
            el.text = "1"
        else:
            el.text = "0"
    elif isinstance(data, str):
        el.text = data
    elif isinstance(data, int):
        el.text = str(data)
    elif isinstance(data, dict):
        _fill_element_dict(el, data)
    elif isinstance(data, PwgNamedArray):
        data.fill_element_xml(el)
    else:
        # Unknown types are dropped with a warning rather than raising.
        _log.warn("Can't convert to xml: %r", data)
def response_xml(result):
    """Render *result* as a piwigo ``<rsp>`` XML HTTP response."""
    r = ET.Element("rsp")
    r.set("stat", "ok")
    status = None
    if isinstance(result, PwgError):
        r.set("stat", "fail")
        err = ET.SubElement(r, "err")
        err.set("code", str(result.code))
        err.set("msg", result.msg)
        # Reuse the pwg error code as the HTTP status when it lies in the
        # valid HTTP status range.
        if result.code >= 100 and result.code < 600:
            status = result.code
    else:
        _fill_element(r, result)
    return Response(ET.tostring(r, encoding="utf-8", xml_declaration=True),
                    mimetype='text/xml', status=status)
class CmdTable:
    """Decorator-based registry mapping piwigo method names to view callables.

    Usage: ``@CmdTable("pwg.some.method")`` registers the decorated function;
    ``find_func(request)`` looks it up by the request's ``method`` parameter.
    """

    _cmd_table = {}

    def __init__(self, cmd_name, only_post=False):
        assert cmd_name not in self._cmd_table
        self.cmd_name = cmd_name
        self.only_post = only_post

    def __call__(self, to_be_wrapped):
        assert self.cmd_name not in self._cmd_table
        self._cmd_table[self.cmd_name] = (to_be_wrapped, self.only_post)
        return to_be_wrapped

    @classmethod
    def find_func(cls, request):
        """Return the registered handler for *request*, or None."""
        source = request.args if request.method == "GET" else request.form
        cmd_name = source.get("method")
        entry = cls._cmd_table.get(cmd_name)
        if not entry:
            return entry
        _log.debug("Found method %s", cmd_name)
        func, only_post = entry
        if only_post and request.method != "POST":
            _log.warn("Method %s only allowed for POST", cmd_name)
            raise MethodNotAllowed()
        return func
def check_form(form):
    """Validate *form*; log per-field errors and raise BadRequest on failure."""
    if not form.validate():
        _log.error("form validation failed for form %r", form)
        for field in form:
            if field.errors:
                _log.error("Errors for %s: %r", field.name, field.errors)
        raise BadRequest()
    # Debug dump of all submitted field values.
    _log.debug("form: %s", " ".join(f"{field.name}={field.data!r}" for field in form))
class PWGSession:
    """Context manager swapping the piwigo session into the request.

    While active, the request's session/user are replaced with ones loaded
    from the piwigo cookie; the originals are restored on exit.
    """

    # Class-level hook: must be assigned a session manager before use.
    session_manager = None

    def __init__(self, request):
        self.request = request
        self.in_pwg_session = False

    def __enter__(self):
        # Backup old state
        self.old_session = self.request.session
        self.old_user = self.request.user
        # Load piwigo session into state
        self.request.session = self.session_manager.load_session_from_cookie(
            self.request)
        setup_user_in_request(self.request)
        self.in_pwg_session = True
        return self

    def __exit__(self, *args):
        # Restore state
        self.request.session = self.old_session
        self.request.user = self.old_user
        self.in_pwg_session = False

    def save_to_cookie(self, response):
        # Only valid while the piwigo session is the active one.
        assert self.in_pwg_session
        self.session_manager.save_session_to_cookie(self.request.session,
                                                    self.request, response)
| 30.721212 | 77 | 0.630894 |
795beb461792db289d6d9f8970198f7469d82026 | 3,059 | py | Python | data/p2DJ/New/R2/benchmark/startQiskit_Class80.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_Class80.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_Class80.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=8
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f for boolean function *f* on *n* qubits.

    For each input bitstring where f(rep) == "1", flips the target qubit via
    a multi-controlled X sandwiched between X gates that select that input.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X on the 0-bits so the MCT fires exactly on input `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the selection X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Deutsch-Jozsa-style circuit for *f* with extra mutation gates.

    The ``# number=N`` comments look auto-generated by circuit-mutation
    tooling — presumably they index inserted/modified gates; confirm with the
    generator.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1])  # number=1
    prog.x(input_qubit[1])  # number=5
    prog.cx(input_qubit[0],input_qubit[1])  # number=4
    prog.h(target)
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    prog.x(input_qubit[1])  # number=2
    prog.x(input_qubit[1])  # number=3
    # Two identical swaps in sequence cancel each other out.
    prog.swap(input_qubit[1],input_qubit[0])  # number=6
    prog.swap(input_qubit[1],input_qubit[0])  # number=7
    # circuit end
    return prog
if __name__ == '__main__':
    n = 2
    # Balanced oracle: f returns the last bit of the input string.
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    backend = BasicAer.get_backend('statevector_simulator')

    circuit1 = transpile(prog,FakeVigo())
    # Two consecutive X gates cancel — presumably padding inserted by the
    # benchmark-mutation tooling.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    prog = circuit1

    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to measurement probabilities per basis state.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }

    writefile = open("../data/startQiskit_Class80.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 28.06422 | 80 | 0.619157 |
795beb6cc6da66afc04ed399bbf3876d9e895c1d | 299 | py | Python | mlrun/frameworks/lgbm/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/lgbm/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/lgbm/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | from abc import ABC
from .._ml_common import MLMLRunInterface
class LGBMModelMLRunInterface(MLMLRunInterface, ABC):
"""
Interface for adding MLRun features for LightGBM models (SciKit-Learn API models).
"""
# MLRun's context default name:
DEFAULT_CONTEXT_NAME = "mlrun-lgbm"
| 23 | 86 | 0.735786 |
795beb713d42d56b5a534eb51466b0d0c563c48b | 30,033 | py | Python | Tests/test_SeqIO_index.py | gurumystictest/BillingTest2 | 6db0937db993cb31626d1863fa511e4e4f424324 | [
"BSD-3-Clause"
] | 1 | 2020-12-21T05:03:12.000Z | 2020-12-21T05:03:12.000Z | Tests/test_SeqIO_index.py | SONNYROBOT/biopython | 629e27d30436d74d345b89cb9c338ff3759f034d | [
"BSD-3-Clause"
] | null | null | null | Tests/test_SeqIO_index.py | SONNYROBOT/biopython | 629e27d30436d74d345b89cb9c338ff3759f034d | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2009-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for Bio.SeqIO.index(...) and index_db() functions."""
try:
import sqlite3
except ImportError:
# Try to run what tests we can in case sqlite3 was not installed
sqlite3 = None
import os
import unittest
import tempfile
import threading
import gzip
import warnings
from io import BytesIO
from io import StringIO
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.SeqIO._index import _FormatToRandomAccess
from Bio import BiopythonParserWarning
from Bio import MissingPythonDependencyError
from seq_tests_common import SeqRecordTestBaseClass
from test_SeqIO import SeqIOTestBaseClass
CUR_DIR = os.getcwd()
if sqlite3:

    def raw_filenames(index_filename):
        """Open SQLite index and extract filenames (as is).

        Args:
            index_filename: Path to an SQLite index created by
                ``Bio.SeqIO.index_db``.

        Returns a 2-tuple, holding a list of strings, and the value
        of the meta_data.filenames_relative_to_index (or None when
        that key is absent from the meta_data table).
        """
        con = sqlite3.dbapi2.connect(index_filename)
        try:
            filenames = [
                row[0]
                for row in con.execute(
                    "SELECT name FROM file_data ORDER BY file_number;"
                ).fetchall()
            ]
            try:
                # fetchone() returns None when the key is missing; unpacking
                # None raises TypeError, which we map to a None flag.
                (filenames_relative_to_index,) = con.execute(
                    "SELECT value FROM meta_data WHERE key=?;",
                    ("filenames_relative_to_index",),
                ).fetchone()
                filenames_relative_to_index = (
                    filenames_relative_to_index.upper() == "TRUE"
                )
            except TypeError:
                filenames_relative_to_index = None
        finally:
            # Always release the connection, even if a query raises (the
            # original code leaked the handle on error).
            con.close()
        return filenames, filenames_relative_to_index
    class OldIndexTest(unittest.TestCase):
        """Testing a pre-built index (make sure cross platform etc).

        >>> from Bio import SeqIO
        >>> d = SeqIO.index_db("triple_sff.idx", ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"], "sff")
        >>> len(d)
        54
        """

        def setUp(self):
            # Some tests chdir into Tests/Roche; always start from the suite root.
            os.chdir(CUR_DIR)

        def tearDown(self):
            os.chdir(CUR_DIR)

        def test_old(self):
            """Load existing index with no options (from parent directory)."""
            d = SeqIO.index_db("Roche/triple_sff.idx")
            self.assertEqual(54, len(d))
            # The index records filenames relative to where it was built, so
            # fetching raw data from the parent directory cannot find them.
            self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

        def test_old_check_same_thread(self):
            """Setting check_same_thread to False doesn't raise an exception."""
            d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")

            # Access the index from a second thread; with check_same_thread=False
            # sqlite3 must not complain about cross-thread use.
            def reader_thread():
                try:
                    d["alpha"]
                except sqlite3.ProgrammingError:
                    self.fail(
                        "Raised sqlite3.ProgrammingError in violation of check_same_thread=False"
                    )

            reader = threading.Thread(target=reader_thread)
            reader.start()
            reader.join()

        def test_old_rel(self):
            """Load existing index (with relative paths) with no options (from parent directory)."""
            d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
            self.assertEqual(54, len(d))
            self.assertEqual(395, len(d["alpha"]))

        def test_old_contents(self):
            """Check actual filenames in existing indexes."""
            filenames, flag = raw_filenames("Roche/triple_sff.idx")
            self.assertIsNone(flag)
            self.assertEqual(
                filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
            )

            filenames, flag = raw_filenames("Roche/triple_sff_rel_paths.idx")
            self.assertTrue(flag)
            self.assertEqual(
                filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
            )

        def test_old_same_dir(self):
            """Load existing index with no options (from same directory)."""
            os.chdir("Roche")
            d = SeqIO.index_db("triple_sff.idx")
            self.assertEqual(54, len(d))
            self.assertEqual(395, len(d["alpha"]))

        def test_old_same_dir_rel(self):
            """Load existing index (with relative paths) with no options (from same directory)."""
            os.chdir("Roche")
            d = SeqIO.index_db("triple_sff_rel_paths.idx")
            self.assertEqual(54, len(d))
            self.assertEqual(395, len(d["alpha"]))

        def test_old_format(self):
            """Load existing index with correct format."""
            d = SeqIO.index_db("Roche/triple_sff.idx", format="sff")
            self.assertEqual(54, len(d))

        def test_old_format_wrong(self):
            """Load existing index with wrong format."""
            self.assertRaises(
                ValueError, SeqIO.index_db, "Roche/triple_sff.idx", format="fasta"
            )

        def test_old_files(self):
            """Load existing index with correct files (from parent directory)."""
            d = SeqIO.index_db(
                "Roche/triple_sff.idx",
                ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
            )
            self.assertEqual(54, len(d))
            self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

        def test_old_files_same_dir(self):
            """Load existing index with correct files (from same directory)."""
            os.chdir("Roche")
            d = SeqIO.index_db(
                "triple_sff.idx",
                ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"],
            )
            self.assertEqual(54, len(d))
            self.assertEqual(395, len(d["alpha"]))

        def test_old_files_wrong(self):
            """Load existing index with wrong files."""
            self.assertRaises(
                ValueError,
                SeqIO.index_db,
                "Roche/triple_sff.idx",
                ["a.sff", "b.sff", "c.sff"],
            )

        def test_old_files_wrong2(self):
            """Load existing index with wrong number of files."""
            self.assertRaises(
                ValueError,
                SeqIO.index_db,
                "Roche/triple_sff.idx",
                ["E3MFGYR02_no_manifest.sff", "greek.sff"],
            )
    class NewIndexTest(unittest.TestCase):
        """Check paths etc in newly built index."""

        def setUp(self):
            os.chdir(CUR_DIR)

        def tearDown(self):
            # Restore cwd and remove any leftover index files from both the
            # locations used by the tests below.
            os.chdir(CUR_DIR)
            for i in ["temp.idx", "Roche/temp.idx"]:
                if os.path.isfile(i):
                    os.remove(i)

        def check(self, index_file, sff_files, expt_sff_files):
            # Shared helper: build an index over sff_files, verify lookups,
            # verify the raw filenames stored in the SQLite tables match
            # expt_sff_files, then reload the index and verify again.
            if os.path.isfile(index_file):
                os.remove(index_file)
            # Build index...
            d = SeqIO.index_db(index_file, sff_files, "sff")
            self.assertEqual(395, len(d["alpha"]))
            d._con.close()  # hack for PyPy
            d.close()
            self.assertEqual(
                [os.path.abspath(f) for f in sff_files],
                [os.path.abspath(f) for f in d._filenames],
            )

            # Now directly check the filenames inside the SQLite index:
            filenames, flag = raw_filenames(index_file)
            self.assertTrue(flag)
            self.assertEqual(filenames, expt_sff_files)

            # Load index...
            d = SeqIO.index_db(index_file, sff_files)
            self.assertEqual(395, len(d["alpha"]))
            d._con.close()  # hack for PyPy
            d.close()
            self.assertEqual([os.path.abspath(f) for f in sff_files], d._filenames)

            os.remove(index_file)

        def test_child_folder_rel(self):
            """Check relative links to child folder."""
            # Note we expect relative paths recorded with Unix slashs!
            expt_sff_files = [
                "Roche/E3MFGYR02_no_manifest.sff",
                "Roche/greek.sff",
                "Roche/paired.sff",
            ]

            self.check("temp.idx", expt_sff_files, expt_sff_files)
            # Here index is given as abs
            self.check(
                os.path.abspath("temp.idx"),
                [
                    "Roche/E3MFGYR02_no_manifest.sff",
                    os.path.abspath("Roche/greek.sff"),
                    "Roche/paired.sff",
                ],
                expt_sff_files,
            )
            # Here index is given as relative path
            self.check(
                "temp.idx",
                [
                    "Roche/E3MFGYR02_no_manifest.sff",
                    os.path.abspath("Roche/greek.sff"),
                    "Roche/paired.sff",
                ],
                expt_sff_files,
            )

        def test_same_folder(self):
            """Check relative links in same folder."""
            os.chdir("Roche")
            expt_sff_files = ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]

            # Here everything is relative,
            self.check("temp.idx", expt_sff_files, expt_sff_files)
            self.check(
                os.path.abspath("temp.idx"),
                [
                    "E3MFGYR02_no_manifest.sff",
                    os.path.abspath("greek.sff"),
                    "../Roche/paired.sff",
                ],
                expt_sff_files,
            )
            self.check(
                "temp.idx",
                [
                    "E3MFGYR02_no_manifest.sff",
                    os.path.abspath("greek.sff"),
                    "../Roche/paired.sff",
                ],
                expt_sff_files,
            )
            self.check(
                "../Roche/temp.idx",
                [
                    "E3MFGYR02_no_manifest.sff",
                    os.path.abspath("greek.sff"),
                    "../Roche/paired.sff",
                ],
                expt_sff_files,
            )

        def test_some_abs(self):
            """Check absolute filenames in index.

            Unless the repository and tests themselves are under the temp
            directory (as detected by ``tempfile``), we expect the index to
            use absolute filenames.
            """
            h, t = tempfile.mkstemp(prefix="index_test_", suffix=".idx")
            os.close(h)
            os.remove(t)

            abs_sff_files = [
                os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
                os.path.abspath("Roche/greek.sff"),
                os.path.abspath(os.path.join("Roche", "paired.sff")),
            ]

            if os.getcwd().startswith(os.path.dirname(t)):
                # The tests are being run from within the temp directory,
                # e.g. index filename /tmp/index_test_XYZ.idx
                # and working directory of /tmp/biopython/Tests/
                # This means the indexing will use a RELATIVE path
                # e.g. biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
                # not /tmp/biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
                expt_sff_files = [
                    os.path.relpath(f, os.path.dirname(t)) for f in abs_sff_files
                ]
            else:
                expt_sff_files = abs_sff_files

            # Providing absolute paths...
            self.check(t, abs_sff_files, expt_sff_files)
            # Now try with mix of abs and relative paths...
            self.check(
                t,
                [
                    os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
                    os.path.join("Roche", "greek.sff"),
                    os.path.abspath("Roche/paired.sff"),
                ],
                expt_sff_files,
            )
class IndexDictTests(SeqRecordTestBaseClass, SeqIOTestBaseClass):
    """Exercise SeqIO.index / index_db / to_dict over many file formats.

    ``tests`` pairs a data file path with the SeqIO format name used to
    parse it; each test method iterates over all pairs (and a .bgz
    compressed variant of each file when one exists on disk).
    """

    tests = [
        ("Ace/contig1.ace", "ace"),
        ("Ace/consed_sample.ace", "ace"),
        ("Ace/seq.cap.ace", "ace"),
        ("Quality/wrapping_original_sanger.fastq", "fastq"),
        ("Quality/example.fastq", "fastq"),  # Unix newlines
        ("Quality/example.fastq", "fastq-sanger"),
        ("Quality/example_dos.fastq", "fastq"),  # DOS/Windows newlines
        ("Quality/tricky.fastq", "fastq"),
        ("Quality/sanger_faked.fastq", "fastq-sanger"),
        ("Quality/solexa_faked.fastq", "fastq-solexa"),
        ("Quality/illumina_faked.fastq", "fastq-illumina"),
        ("Quality/zero_length.fastq", "fastq"),
        ("EMBL/epo_prt_selection.embl", "embl"),
        ("EMBL/U87107.embl", "embl"),
        ("EMBL/TRBG361.embl", "embl"),
        ("EMBL/kipo_prt_sample.embl", "embl"),
        ("EMBL/A04195.imgt", "embl"),  # Not a proper EMBL file, an IMGT file
        ("EMBL/A04195.imgt", "imgt"),
        ("EMBL/hla_3260_sample.imgt", "imgt"),
        ("EMBL/patents.embl", "embl"),
        ("EMBL/AAA03323.embl", "embl"),
        ("GenBank/NC_000932.faa", "fasta"),
        ("GenBank/NC_005816.faa", "fasta"),
        ("GenBank/NC_005816.tsv", "tab"),
        ("GenBank/NC_005816.ffn", "fasta"),
        ("GenBank/NC_005816.fna", "fasta"),
        ("GenBank/NC_005816.gb", "gb"),
        ("GenBank/cor6_6.gb", "genbank"),
        ("GenBank/empty_accession.gbk", "gb"),
        ("GenBank/empty_version.gbk", "gb"),
        ("IntelliGenetics/vpu_nucaligned.txt", "ig"),
        ("IntelliGenetics/TAT_mase_nuc.txt", "ig"),
        ("IntelliGenetics/VIF_mase-pro.txt", "ig"),
        ("Phd/phd1", "phd"),
        ("Phd/phd2", "phd"),
        ("Phd/phd_solexa", "phd"),
        ("Phd/phd_454", "phd"),
        ("NBRF/B_nuc.pir", "pir"),
        ("NBRF/Cw_prot.pir", "pir"),
        ("NBRF/clustalw.pir", "pir"),
        ("SwissProt/sp001", "swiss"),
        ("SwissProt/sp010", "swiss"),
        ("SwissProt/sp016", "swiss"),
        ("SwissProt/multi_ex.txt", "swiss"),
        ("SwissProt/multi_ex.xml", "uniprot-xml"),
        ("SwissProt/multi_ex.fasta", "fasta"),
        ("Roche/E3MFGYR02_random_10_reads.sff", "sff"),
        ("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"),
        ("Roche/E3MFGYR02_index_at_start.sff", "sff"),
        ("Roche/E3MFGYR02_index_in_middle.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_at_start.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_in_middle.sff", "sff"),
        ("Roche/E3MFGYR02_alt_index_at_end.sff", "sff"),
        ("Roche/E3MFGYR02_no_manifest.sff", "sff"),
        ("Roche/greek.sff", "sff"),
        ("Roche/greek.sff", "sff-trim"),
        ("Roche/paired.sff", "sff"),
        ("Roche/paired.sff", "sff-trim"),
    ]

    def setUp(self):
        # A fresh scratch file for on-disk index tests; removed in tearDown.
        os.chdir(CUR_DIR)
        h, self.index_tmp = tempfile.mkstemp("_idx.tmp")
        os.close(h)

    def tearDown(self):
        os.chdir(CUR_DIR)
        if os.path.isfile(self.index_tmp):
            os.remove(self.index_tmp)

    def check_dict_methods(self, rec_dict, keys, ids, msg):
        # Verify the dict-like API of an index: keys/len/bool/contains,
        # __getitem__, get with and without default, items and values.
        self.assertCountEqual(keys, rec_dict.keys(), msg=msg)
        # This is redundant, I just want to make sure len works:
        self.assertEqual(len(keys), len(rec_dict), msg=msg)
        # Make sure boolean evaluation works
        self.assertEqual(bool(keys), bool(rec_dict), msg=msg)
        for key, id in zip(keys, ids):
            self.assertIn(key, rec_dict, msg=msg)
            self.assertEqual(id, rec_dict[key].id, msg=msg)
            self.assertEqual(id, rec_dict.get(key).id, msg=msg)
        # Check non-existant keys,
        assert chr(0) not in keys, "Bad example in test"
        with self.assertRaises(KeyError, msg=msg):
            rec = rec_dict[chr(0)]
        self.assertIsNone(rec_dict.get(chr(0)), msg=msg)
        self.assertEqual(rec_dict.get(chr(0), chr(1)), chr(1), msg=msg)
        with self.assertRaises(AttributeError, msg=msg):
            rec_dict.iteritems
        for key, rec in rec_dict.items():
            self.assertIn(key, keys, msg=msg)
            self.assertIsInstance(rec, SeqRecord, msg=msg)
            self.assertIn(rec.id, ids, msg=msg)
        # NOTE(review): this loop re-checks the stale ``key`` left over from
        # the previous loop on every iteration — looks unintentional; confirm.
        for rec in rec_dict.values():
            self.assertIn(key, keys, msg=msg)
            self.assertIsInstance(rec, SeqRecord, msg=msg)
            self.assertIn(rec.id, ids, msg=msg)

    def simple_check(self, filename, fmt, comp):
        """Check indexing (without a key function)."""
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        # ``comp`` truthy means the file is gzip/BGZF compressed.
        if comp:
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
        else:
            id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]

        with warnings.catch_warnings():
            if "_alt_index_" in filename:
                # BiopythonParserWarning: Could not parse the SFF index:
                # Unknown magic number b'.diy' in SFF index header:
                # b'.diy1.00'
                warnings.simplefilter("ignore", BiopythonParserWarning)
            rec_dict = SeqIO.index(filename, fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()

        if not sqlite3:
            return

        # In memory,
        # note here give filenames as list of strings
        rec_dict = SeqIO.index_db(":memory:", [filename], fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()

        # check error conditions
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", format="dummy")
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", filenames=["dummy"])

        # Saving to file...
        index_tmp = self.index_tmp
        if os.path.isfile(index_tmp):
            os.remove(index_tmp)

        # To disk,
        # note here we give the filename as a single string
        # to confirm that works too.
        rec_dict = SeqIO.index_db(index_tmp, filename, fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload it...
        rec_dict = SeqIO.index_db(index_tmp, [filename], fmt)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload without passing filenames and format
        # and switch directory to check paths still work
        index_tmp = os.path.abspath(index_tmp)
        os.chdir(os.path.dirname(filename))
        try:
            rec_dict = SeqIO.index_db(index_tmp)
        finally:
            os.chdir(CUR_DIR)
        self.check_dict_methods(rec_dict, id_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy
        os.remove(index_tmp)

    def add_prefix(self, key):
        """Sample key_function for testing index code."""
        return "id_" + key

    def key_check(self, filename, fmt, comp):
        """Check indexing with a key function."""
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        if comp:
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id for rec in SeqIO.parse(handle, fmt)]
        else:
            id_list = [rec.id for rec in SeqIO.parse(filename, fmt)]

        key_list = [self.add_prefix(id) for id in id_list]

        with warnings.catch_warnings():
            if "_alt_index_" in filename:
                # BiopythonParserWarning: Could not parse the SFF index:
                # Unknown magic number b'.diy' in SFF index header:
                # b'.diy1.00'
                warnings.simplefilter("ignore", BiopythonParserWarning)
            rec_dict = SeqIO.index(filename, fmt, key_function=self.add_prefix)
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()

        if not sqlite3:
            return

        # In memory,
        rec_dict = SeqIO.index_db(
            ":memory:", [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        # check error conditions
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(":memory:", format="dummy", key_function=self.add_prefix)
        with self.assertRaises(ValueError, msg=msg):
            SeqIO.index_db(
                ":memory:", filenames=["dummy"], key_function=self.add_prefix
            )
        rec_dict.close()

        # Saving to file...
        index_tmp = filename + ".key.idx"
        if os.path.isfile(index_tmp):
            os.remove(index_tmp)
        rec_dict = SeqIO.index_db(
            index_tmp, [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload it...
        rec_dict = SeqIO.index_db(
            index_tmp, [filename], fmt, key_function=self.add_prefix
        )
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy

        # Now reload without passing filenames and format
        rec_dict = SeqIO.index_db(index_tmp, key_function=self.add_prefix)
        self.check_dict_methods(rec_dict, key_list, id_list, msg=msg)
        rec_dict.close()
        rec_dict._con.close()  # hack for PyPy
        os.remove(index_tmp)
        # Done

    def get_raw_check(self, filename, fmt, comp):
        # Also checking the key_function here
        msg = "Test failure parsing file %s with format %s" % (filename, fmt)
        if comp:
            with gzip.open(filename, "rb") as handle:
                raw_file = handle.read()
            mode = "r" + self.get_mode(fmt)
            with gzip.open(filename, mode) as handle:
                id_list = [rec.id.lower() for rec in SeqIO.parse(handle, fmt)]
        else:
            with open(filename, "rb") as handle:
                raw_file = handle.read()
            id_list = [rec.id.lower() for rec in SeqIO.parse(filename, fmt)]

        if fmt in ["sff"]:
            # SFF indexing may emit a parser warning; suppress it here.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", BiopythonParserWarning)
                rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
                if sqlite3:
                    rec_dict_db = SeqIO.index_db(
                        ":memory:", filename, fmt, key_function=str.lower,
                    )
        else:
            rec_dict = SeqIO.index(filename, fmt, key_function=str.lower)
            if sqlite3:
                rec_dict_db = SeqIO.index_db(
                    ":memory:", filename, fmt, key_function=str.lower,
                )

        self.assertCountEqual(id_list, rec_dict.keys(), msg=msg)
        if sqlite3:
            self.assertCountEqual(id_list, rec_dict_db.keys(), msg=msg)
        for key in id_list:
            self.assertIn(key, rec_dict, msg=msg)
            self.assertEqual(key, rec_dict[key].id.lower(), msg=msg)
            self.assertEqual(key, rec_dict.get(key).id.lower(), msg=msg)
            raw = rec_dict.get_raw(key)
            self.assertIsInstance(raw, bytes, msg=msg)
            self.assertTrue(raw.strip(), msg=msg)
            self.assertIn(raw, raw_file, msg=msg)
            if sqlite3:
                raw_db = rec_dict_db.get_raw(key)
                # Via index using format-specific get_raw which scans the file,
                # Via index_db in general using raw length found when indexing.
                self.assertEqual(raw, raw_db, msg=msg)
            rec1 = rec_dict[key]
            # Following isn't very elegant, but it lets me test the
            # __getitem__ SFF code is working.
            mode = self.get_mode(fmt)
            if mode == "b":
                handle = BytesIO(raw)
            elif mode == "t":
                handle = StringIO(raw.decode())
            else:
                raise RuntimeError("Unexpected mode %s" % mode)
            if fmt == "sff":
                rec2 = SeqIO.SffIO._sff_read_seq_record(
                    handle,
                    rec_dict._proxy._flows_per_read,
                    rec_dict._proxy._flow_chars,
                    rec_dict._proxy._key_sequence,
                    trim=False,
                )
            elif fmt == "sff-trim":
                rec2 = SeqIO.SffIO._sff_read_seq_record(
                    handle,
                    rec_dict._proxy._flows_per_read,
                    rec_dict._proxy._flow_chars,
                    rec_dict._proxy._key_sequence,
                    trim=True,
                )
            elif fmt == "uniprot-xml":
                self.assertTrue(raw.startswith(b"<entry "), msg=msg)
                self.assertTrue(raw.endswith(b"</entry>"), msg=msg)
                # Currently the __getitem__ method uses this
                # trick too, but we hope to fix that later
                raw = (
                    """<?xml version='1.0' encoding='UTF-8'?>
<uniprot xmlns="http://uniprot.org/uniprot"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://uniprot.org/uniprot
http://www.uniprot.org/support/docs/uniprot.xsd">
%s
</uniprot>
"""
                    % raw.decode()
                )
                handle = StringIO(raw)
                rec2 = SeqIO.read(handle, fmt)
            else:
                rec2 = SeqIO.read(handle, fmt)
            self.compare_record(rec1, rec2)
        rec_dict.close()
        del rec_dict

    if sqlite3:

        def test_alpha_fails_db(self):
            """Reject alphabet argument in Bio.SeqIO.index_db()."""
            # In historic usage, alphabet=... would be a Bio.Alphabet object.
            self.assertRaises(
                ValueError,
                SeqIO.index_db,
                ":memory:",
                ["Fasta/dups.fasta"],
                "fasta",
                alphabet="XXX",
            )

    def test_alpha_fails(self):
        """Reject alphabet argument in Bio.SeqIO.index()."""
        # In historic usage, alphabet=... would be a Bio.Alphabet object.
        self.assertRaises(
            ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta", alphabet="XXX"
        )

    if sqlite3:

        def test_duplicates_index_db(self):
            """Index file with duplicate identifiers with Bio.SeqIO.index_db()."""
            self.assertRaises(
                ValueError, SeqIO.index_db, ":memory:", ["Fasta/dups.fasta"], "fasta"
            )

    def test_duplicates_index(self):
        """Index file with duplicate identifiers with Bio.SeqIO.index()."""
        self.assertRaises(ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta")

    def test_duplicates_to_dict(self):
        """Index file with duplicate identifiers with Bio.SeqIO.to_dict()."""
        with open("Fasta/dups.fasta") as handle:
            iterator = SeqIO.parse(handle, "fasta")
            self.assertRaises(ValueError, SeqIO.to_dict, iterator)

    def test_simple_checks(self):
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.simple_check(filename2, fmt, comp)

    def test_key_checks(self):
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.key_check(filename2, fmt, comp)

    def test_raw_checks(self):
        for filename1, fmt in self.tests:
            assert fmt in _FormatToRandomAccess
            tasks = [(filename1, None)]
            if os.path.isfile(filename1 + ".bgz"):
                tasks.append((filename1 + ".bgz", "bgzf"))
            for filename2, comp in tasks:
                self.get_raw_check(filename2, fmt, comp)
class IndexOrderingSingleFile(unittest.TestCase):
    # NOTE: these class attributes execute at import time, so the fasta file
    # must be present relative to the working directory when this module loads.
    f = "GenBank/NC_000932.faa"
    ids = [r.id for r in SeqIO.parse(f, "fasta")]

    def test_order_to_dict(self):
        """Check to_dict preserves order in indexed file."""
        d = SeqIO.to_dict(SeqIO.parse(self.f, "fasta"))
        self.assertEqual(self.ids, list(d))

    def test_order_index(self):
        """Check index preserves order in indexed file."""
        d = SeqIO.index(self.f, "fasta")
        self.assertEqual(self.ids, list(d))

    if sqlite3:

        def test_order_index_db(self):
            """Check index_db preserves ordering indexed file."""
            d = SeqIO.index_db(":memory:", [self.f], "fasta")
            self.assertEqual(self.ids, list(d))
if sqlite3:

    class IndexOrderingManyFiles(unittest.TestCase):
        def test_order_index_db(self):
            """Check index_db preserves order in multiple indexed files."""
            files = ["GenBank/NC_000932.faa", "GenBank/NC_005816.faa"]
            # Expected key order: records of the first file, then the second.
            ids = []
            for f in files:
                ids.extend(r.id for r in SeqIO.parse(f, "fasta"))
            d = SeqIO.index_db(":memory:", files, "fasta")
            self.assertEqual(ids, list(d))
if __name__ == "__main__":
    # Run the suite with verbose per-test output.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 38.70232 | 113 | 0.551493 |
795beb72352e21ff12112a1940426ee6bad4a6be | 46,094 | py | Python | mdgo/core.py | HT-MD/mdgo | bd06b226c6015fb083c099508bc81f521a4329c4 | [
"MIT"
] | 1 | 2021-12-15T01:18:13.000Z | 2021-12-15T01:18:13.000Z | mdgo/core.py | HT-MD/mdgo | bd06b226c6015fb083c099508bc81f521a4329c4 | [
"MIT"
] | 4 | 2021-11-12T20:48:34.000Z | 2021-12-14T23:41:47.000Z | mdgo/core.py | HT-MD/mdgo | bd06b226c6015fb083c099508bc81f521a4329c4 | [
"MIT"
] | 4 | 2021-11-12T21:10:53.000Z | 2022-02-07T19:30:57.000Z | # coding: utf-8
# Copyright (c) Tingzheng Hou.
# Distributed under the terms of the MIT License.
"""
This module implements two core class MdRun and MdJob
for molecular dynamics simulation analysis and job setup.
"""
from __future__ import annotations
from typing import Union, Dict, Tuple, List, Optional
import MDAnalysis
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from MDAnalysis import Universe
from MDAnalysis.analysis.distances import distance_array
from MDAnalysis.lib.distances import capped_distance
from tqdm.notebook import tqdm
from mdgo.util import (
mass_to_name,
assign_name,
assign_resname,
res_dict_from_select_dict,
res_dict_from_datafile,
select_dict_from_resname,
)
from mdgo.conductivity import calc_cond_msd, conductivity_calculator, choose_msd_fitting_region, get_beta
from mdgo.coordination import (
concat_coord_array,
num_of_neighbor,
num_of_neighbor_simple,
num_of_neighbor_specific,
angular_dist_of_neighbor,
neighbor_distance,
find_nearest,
find_nearest_free_only,
process_evol,
heat_map,
get_full_coords,
)
from mdgo.msd import total_msd, partial_msd
from mdgo.residence_time import calc_neigh_corr, fit_residence_time
__author__ = "Tingzheng Hou"
__version__ = "1.0"
__maintainer__ = "Tingzheng Hou"
__email__ = "tingzheng_hou@berkeley.edu"
__date__ = "Feb 9, 2021"
class MdRun:
"""
A core class for MD results analysis. TODO: add support for 2d and dimension selection.
Args:
wrapped_run: The Universe object of wrapped trajectory.
unwrapped_run: The Universe object of unwrapped trajectory.
nvt_start: NVT start time step.
time_step: Timestep between each frame, in ps.
name: Name of the MD run.
select_dict: A dictionary of atom species selection, where each atom species name is a key
and the corresponding values are the selection language. This dict is intended for
analyzing interested atoms.
res_dict: A dictionary of resnames, where each resname is a key
and the corresponding values are the selection language. This dict is intended for
analyzing interested residues (ions/molecules).
cation_name: Name of cation. Default to "cation".
anion_name: Name of anion. Default to "anion".
cation_charge: Charge of cation. Default to 1.
anion_charge: Charge of anion. Default to 1.
temperature: Temperature of the MD run. Default to 298.15.
cond: Whether to calculate conductivity MSD. Default to True.
units: unit system (currently 'real' and 'lj' are supported)
"""
    def __init__(
        self,
        wrapped_run: Universe,
        unwrapped_run: Universe,
        nvt_start: int,
        time_step: float,
        name: str,
        select_dict: Optional[Dict[str, str]] = None,
        res_dict: Optional[Dict[str, str]] = None,
        cation_name: str = "cation",
        anion_name: str = "anion",
        cation_charge: float = 1,
        anion_charge: float = -1,
        temperature: float = 298.15,
        cond: bool = True,
        units="real",
    ):
        """
        Base constructor. This is a low level constructor designed to work with
        parsed data (``Universe``) or other bridging objects (``CombinedData``). Not
        recommended to use directly.
        """
        self.wrapped_run = wrapped_run
        self.unwrapped_run = unwrapped_run
        self.nvt_start = nvt_start
        self.time_step = time_step
        self.temp = temperature
        self.name = name
        # Derive element names from atom masses for topologies without names.
        self.atom_names = mass_to_name(self.wrapped_run.atoms.masses)
        if not hasattr(self.wrapped_run.atoms, "names") or not hasattr(self.unwrapped_run.atoms, "names"):
            assign_name(self.wrapped_run, self.atom_names)
            assign_name(self.unwrapped_run, self.atom_names)
        # Residue names are required for selection; build them from either
        # the provided res_dict or, failing that, from select_dict.
        if not hasattr(self.wrapped_run.atoms, "resnames") or not hasattr(self.unwrapped_run.atoms, "resnames"):
            if res_dict is None:
                assert select_dict is not None, "Either one of select_dict or res_dict should be given."
                res_dict = res_dict_from_select_dict(self.wrapped_run, select_dict)
            assign_resname(self.wrapped_run, res_dict)
            assign_resname(self.unwrapped_run, res_dict)
        if select_dict is None:
            self.select_dict = select_dict_from_resname(self.wrapped_run)
        else:
            self.select_dict = select_dict
        self.nvt_steps = self.wrapped_run.trajectory.n_frames
        # Time axis (ps) for the post-equilibration (NVT) portion of the run.
        self.time_array = np.array([i * self.time_step for i in range(self.nvt_steps - self.nvt_start)])
        self.cation_name = cation_name
        self.anion_name = anion_name
        self.cation_charge = cation_charge
        self.anion_charge = anion_charge
        self.cations = self.wrapped_run.select_atoms(self.select_dict.get("cation"))
        self.anion_center = self.wrapped_run.select_atoms(self.select_dict.get("anion"))
        self.anions = self.anion_center.residues.atoms
        self.num_cation = len(self.cations)
        # Conductivity MSD is expensive; skip when cond=False.
        if cond:
            self.cond_array = self.get_cond_array()
        else:
            self.cond_array = None
        self.init_x = self.get_init_dimension()[0]
        self.init_y = self.get_init_dimension()[1]
        self.init_z = self.get_init_dimension()[2]
        self.init_v = self.init_x * self.init_y * self.init_z
        self.nvt_x = self.get_nvt_dimension()[0]
        self.nvt_y = self.get_nvt_dimension()[1]
        self.nvt_z = self.get_nvt_dimension()[2]
        self.nvt_v = self.nvt_x * self.nvt_y * self.nvt_z
        gas_constant = 8.314
        # NOTE(review): hard-coded 298.15 K is used for d_to_sigma even though
        # a ``temperature`` argument was stored in self.temp — confirm intended.
        temp = 298.15
        faraday_constant_2 = 96485 * 96485
        # Salt concentration in mol/L from cation count and NVT box volume.
        self.c = (self.num_cation / (self.nvt_v * 1e-30)) / (6.022 * 1e23)
        # Nernst-Einstein prefactor converting diffusivity to conductivity.
        self.d_to_sigma = self.c * faraday_constant_2 / (gas_constant * temp)
        self.units = units
    @classmethod
    def from_lammps(
        cls,
        data_dir: str,
        wrapped_dir: str,
        unwrapped_dir: str,
        nvt_start: int,
        time_step: float,
        name: str,
        select_dict: Optional[Dict[str, str]] = None,
        res_dict: Optional[Dict[str, str]] = None,
        cation_name: str = "cation",
        anion_name: str = "anion",
        cation_charge: float = 1,
        anion_charge: float = -1,
        temperature: float = 298.15,
        cond: bool = True,
        units: str = "real",
    ):
        """
        Constructor from lammps data file and wrapped and unwrapped trajectory dcd file.

        Args:
            data_dir: Path to the data file.
            wrapped_dir: Path to the wrapped dcd file.
            unwrapped_dir: Path to the unwrapped dcd file.
            nvt_start: NVT start time step.
            time_step: LAMMPS timestep in ps.
            name: Name of the MD run.
            select_dict: A dictionary of species selection.
            res_dict: A dictionary of resnames.
            cation_name: Name of cation. Default to "cation".
            anion_name: Name of anion. Default to "anion".
            cation_charge: Charge of cation. Default to 1.
            anion_charge: Charge of anion. Default to 1.
            temperature: Temperature of the MD run. Default to 298.15.
            cond: Whether to calculate conductivity MSD. Default to True.
            units: unit system (currently 'real' and 'lj' are supported)
        """
        # Fall back to parsing residue names straight out of the data file.
        if res_dict is None:
            res_dict = res_dict_from_datafile(data_dir)
        wrapped_run = MDAnalysis.Universe(data_dir, wrapped_dir, format="LAMMPS")
        unwrapped_run = MDAnalysis.Universe(data_dir, unwrapped_dir, format="LAMMPS")
        return cls(
            wrapped_run,
            unwrapped_run,
            nvt_start,
            time_step,
            name,
            select_dict=select_dict,
            res_dict=res_dict,
            cation_name=cation_name,
            anion_name=anion_name,
            cation_charge=cation_charge,
            anion_charge=anion_charge,
            temperature=temperature,
            cond=cond,
            units=units,
        )
def get_init_dimension(self) -> np.ndarray:
"""
Returns the initial box dimension.
"""
return self.wrapped_run.trajectory[0].dimensions
def get_equilibrium_dimension(self, npt_range: int, period: int = 200) -> np.ndarray:
"""
Returns the equilibrium box dimension.
Args:
npt_range: The maximum time step of the npt run.
period: The interval of checking points for volume convergence.
"""
ave_dx = [np.inf, np.inf - 1]
count = 0
ave_dxi = 0
convergence = -1
for i in range(npt_range):
ave_dxi += self.wrapped_run.trajectory[i].dimensions[0]
count += 1
if count * self.time_step == period:
print(ave_dxi / count)
ave_dx.append(ave_dxi / count)
count = 0
if ave_dx[-1] >= ave_dx[-2]:
convergence = i
break
d = []
for j in range(convergence, npt_range):
d.append(self.wrapped_run.trajectory[j].dimensions)
return np.mean(np.array(d), axis=0)
def get_nvt_dimension(self) -> np.ndarray:
"""
Returns the box dimension at the last frame.
"""
return self.wrapped_run.trajectory[-1].dimensions
    def get_cond_array(self) -> np.ndarray:
        """Calculates the conductivity "mean square displacement".

        Uses the unwrapped trajectory (displacements must not be folded back
        into the box) and delegates the actual computation to calc_cond_msd.

        Return:
            An array of MSD values for each time in the trajectory.
        """
        nvt_run = self.unwrapped_run
        cations = nvt_run.select_atoms(self.select_dict.get("cation"))
        anions = nvt_run.select_atoms(self.select_dict.get("anion"))
        cond_array = calc_cond_msd(
            nvt_run,
            anions,
            cations,
            self.nvt_start,
            self.cation_charge,
            self.anion_charge,
        )
        return cond_array
    def choose_cond_fit_region(self) -> Tuple[int, int, float]:
        """Computes the optimal fitting region (linear regime) of conductivity MSD.

        The conductivity MSD is computed (and cached on self.cond_array) on
        first use.

        Returns a tuple with the start of the fitting regime (int), end of the
        fitting regime (int), and the beta value of the fitting regime (float).
        """
        if self.cond_array is None:
            self.cond_array = self.get_cond_array()
        start, end, beta = choose_msd_fitting_region(self.cond_array, self.time_array)
        return start, end, beta
def plot_cond_array(
    self,
    start: int = -1,
    end: int = -1,
    *runs: MdRun,
    reference: bool = True,
):
    """Plots the conductivity MSD as a function of time.

    If no fitting region (start, end) is provided, computes the optimal
    fitting region based on the portion of the MSD with greatest
    linearity.

    Args:
        start (int): Start time step for fitting.
        end (int): End time step for fitting.
        runs (MdRun): Other runs to be compared in the same plot.
        reference (bool): Whether to plot reference line.
            Default to True.

    Raises:
        ValueError: If ``self.units`` is neither "real" nor "lj".
    """
    if self.cond_array is None:
        self.cond_array = self.get_cond_array()
    if start == -1 and end == -1:
        start, end, _ = choose_msd_fitting_region(self.cond_array, self.time_array)
    colors = ["g", "r", "c", "m", "y", "k"]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.loglog(
        self.time_array,
        self.cond_array,
        color="b",
        lw=2,
        label=self.name,
    )
    for i, run in enumerate(runs):
        ax.loglog(
            run.time_array,
            run.cond_array,
            # Cycle through the palette so more than six comparison runs
            # no longer raises an IndexError.
            color=colors[i % len(colors)],
            lw=2,
            label=run.name,
        )
    if reference:
        # Rough slope estimate from two early points, used to draw a dashed
        # guide line over the fitting window.
        slope_guess = (self.cond_array[int(np.log(len(self.time_array)) / 2)] - self.cond_array[5]) / (
            self.time_array[int(np.log(len(self.time_array)) / 2)] - self.time_array[5]
        )
        ax.loglog(self.time_array[start:end], np.array(self.time_array[start:end]) * slope_guess * 2, "k--")
    if self.units == "real":
        ax.set_ylabel("MSD (A$^2$)")
        ax.set_xlabel("Time (ps)")
    elif self.units == "lj":
        ax.set_ylabel("MSD ($\\sigma^2$)")
        ax.set_xlabel("Time ($\\tau$)")
    else:
        raise ValueError("units selection not supported")
    ax.set_ylim(min(np.abs(self.cond_array[1:])) * 0.9, max(np.abs(self.cond_array)) * 1.2)
    ax.legend()
    fig.show()
def get_conductivity(self, start: int = -1, end: int = -1) -> float:
    """Calculate the Green-Kubo (GK) conductivity over a fitting region.

    When no fitting region (start, end) is given, the optimal region is
    chosen automatically from the most linear portion of the MSD.

    Args:
        start (int): Start time step for fitting MSD.
        end (int): End time step for fitting MSD.

    Prints fitting info and returns the conductivity.
    """
    if start == -1 and end == -1:
        start, end, beta = choose_msd_fitting_region(self.cond_array, self.time_array)
    else:
        beta, _ = get_beta(self.cond_array, self.time_array, start, end)
    # Report the fitting window in the run's native time units.
    time_units = {"real": "ps", "lj": "tau"}.get(self.units, "")
    print(f"Start of linear fitting regime: {start} ({self.time_array[start]} {time_units})")
    print(f"End of linear fitting regime: {end} ({self.time_array[end]} {time_units})")
    print(f"Beta value (fit to MSD = t^\u03B2): {beta} (\u03B2 = 1 in the diffusive regime)")
    return conductivity_calculator(
        self.time_array, self.cond_array, self.nvt_v, self.name, start, end, self.temp, self.units
    )
def coord_num_array_single_species(
    self,
    species: str,
    distance: float,
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
) -> np.ndarray:
    """Calculate the per-frame coordination number of one ``species``
    around the interested ``center_atom``.

    Args:
        species: The neighbor species.
        distance: The coordination cutoff distance.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".

    Return:
        An array of coordination numbers over the frame range.
    """
    run = self.wrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    return concat_coord_array(
        run,
        num_of_neighbor,
        centers,
        {species: distance},
        self.select_dict,
        run_start,
        run_end,
    )["total"]
def coord_num_array_multi_species(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
) -> Dict[str, np.ndarray]:
    """Calculate the per-frame coordination numbers of several species
    around the interested ``center_atom``.

    Args:
        distance_dict: Coordination cutoff distance of each neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".

    Return:
        A dict mapping each neighbor species (plus "total") to its
        coordination number sequence over the frame range.
    """
    run = self.wrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    return concat_coord_array(
        run,
        num_of_neighbor,
        centers,
        distance_dict,
        self.select_dict,
        run_start,
        run_end,
    )
def coord_num_array_specific(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
    counter_atom: str = "anion",
) -> Dict[str, np.ndarray]:
    """Calculate per-frame coordination numbers of multiple species, split
    by coordination type (SSIP, CIP, AGG).

    Args:
        distance_dict: Coordination cutoff distance of each neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".
        counter_atom: The neighbor counter ion species. Default to "anion".

    Return:
        A dict mapping each specified neighbor species (plus "total") to its
        coordination number sequence over the frame range.
    """
    run = self.wrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    return concat_coord_array(
        run,
        num_of_neighbor_specific,
        centers,
        distance_dict,
        self.select_dict,
        run_start,
        run_end,
        counter_atom=counter_atom,
    )
def write_solvation_structure(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    structure_code: int,
    write_freq: float,
    write_path: str,
    center_atom: str = "cation",
):
    """Write out a series of desired solvation structures as ``*.xyz`` files.

    Args:
        distance_dict: Coordination cutoff distance of each neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        structure_code: An integer code representing the solvation
            structure, for example, 221 is two species A, two species B
            and one species C with the same order as in the ``distance_dict``.
        write_freq: Probability to write out files.
        write_path: Path to write out files.
        center_atom: The solvation shell atom. Default to "cation".
    """
    run = self.wrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    for center in tqdm(centers):
        num_of_neighbor(
            run,
            center,
            distance_dict,
            self.select_dict,
            run_start,
            run_end,
            write=True,
            structure_code=structure_code,
            write_freq=write_freq,
            write_path=write_path,
        )
def coord_type_array(
    self,
    distance: float,
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
    counter_atom: str = "anion",
) -> np.ndarray:
    """Classify the solvation structure of ``center_atom`` (typically the
    cation) in each frame: 1 for SSIP, 2 for CIP, 3 for AGG.

    Args:
        distance: The coordination cutoff distance.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".
        counter_atom: The neighbor counter ion species. Default to "anion".

    Return:
        An array of solvation structure type codes over the frame range.
    """
    run = self.wrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    return concat_coord_array(
        run,
        num_of_neighbor_simple,
        centers,
        {counter_atom: distance},
        self.select_dict,
        run_start,
        run_end,
    )["total"]
def angle_array(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
    cip=True,
):
    """
    Calculates the angle of a-c-b in the specified frame range.

    Args:
        distance_dict: A dict of coordination cutoff distance of the neighbor species.
            The dictionary key must be in the order of a, b, where a is the neighbor species
            used for determining coordination type, b is the other neighbor species, and the
            corresponding values are cutoff distance of a->c and b->c, where c is the center species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The center atom species.
        cip: Only includes contact ion pair structures with only one `a` and one `c` atoms.
            Default to True.

    Returns:
        An array of angles of a-c-b in the specified frames.
    """
    nvt_run = self.wrapped_run
    center_atoms = nvt_run.select_atoms(self.select_dict.get(center_atom))
    assert len(distance_dict) == 2, "Only distance a->c, b->c should be specified in the distance_dict."
    # Work on a copy so the caller's dict is not mutated, then use the
    # a->c cutoff for the center species as well.
    distance_dict = dict(distance_dict)
    distance_dict[center_atom] = next(iter(distance_dict.values()))
    ang_array = concat_coord_array(
        nvt_run,
        angular_dist_of_neighbor,
        center_atoms,
        distance_dict,
        self.select_dict,
        run_start,
        run_end,
        cip=cip,
    )["total"]
    return ang_array
def coordination(
    self,
    species: str,
    distance: float,
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
) -> pd.DataFrame:
    """Tabulate the coordination number distribution of one species
    around the solvation structure ``center_atom``.

    Args:
        species: The neighbor species.
        distance: The coordination cutoff distance.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".

    Return:
        A dataframe of the species coordination number and corresponding percentage.
    """
    num_array = self.coord_num_array_single_species(species, distance, run_start, run_end, center_atom=center_atom)
    values, counts = np.unique(num_array.flatten(), return_counts=True)
    total = counts.sum()
    item_name = "Num of " + species + " within " + str(distance) + " " + "\u212B"
    item_list = [str(int(v)) for v in values]
    percent_list = [f"{(c / total * 100):.4f}%" for c in counts]
    return pd.DataFrame({item_name: item_list, "Percentage": percent_list})
def rdf_integral(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
) -> pd.DataFrame:
    """Integrate the radial distribution function of selected species
    around the ``center_atom``.

    Args:
        distance_dict: Coordination cutoff distance of each neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom to calculate the radial
            distribution for. Default to "cation".

    Return:
        A dataframe of the species and the coordination number.
    """
    cn_values = self.coord_num_array_multi_species(distance_dict, run_start, run_end, center_atom=center_atom)
    item_name = "Species in first solvation shell"
    item_list = []
    cn_list = []
    for species, arr in cn_values.items():
        if species == "total":
            continue
        # Weighted average of the observed coordination numbers.
        values, counts = np.unique(arr.flatten(), return_counts=True)
        item_list.append(species)
        cn_list.append((values * counts / counts.sum()).sum())
    return pd.DataFrame({item_name: item_list, "CN": cn_list})
def coordination_type(
    self,
    distance: float,
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
    counter_atom: str = "anion",
) -> pd.DataFrame:
    """Tabulate the percentage of each solvation structure (CIP/SSIP/AGG).

    Args:
        distance: The coordination cutoff distance.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".
        counter_atom: The neighbor counter ion species. Default to "anion".

    Return:
        A dataframe of the solvation structure and percentage.
    """
    num_array = self.coord_type_array(
        distance, run_start, run_end, center_atom=center_atom, counter_atom=counter_atom
    )
    values, counts = np.unique(num_array.flatten(), return_counts=True)
    total = counts.sum()
    # Map the numeric type codes back to their structure labels.
    label_map = {"1": "ssip", "2": "cip", "3": "agg"}
    item_list = [label_map.get(str(int(v))) for v in values]
    percent_list = [f"{(c / total * 100):.4f}%" for c in counts]
    return pd.DataFrame({"Solvation structure": item_list, "Percentage": percent_list})
def coordination_specific(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
    counter_atom: str = "anion",
) -> pd.DataFrame:
    """Calculates the integral of the coordination number of selected species
    in each type of solvation structures (CIP/SSIP/AGG).

    Args:
        distance_dict: A dict of coordination cutoff distance of the neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom. Default to "cation".
        counter_atom: The neighbor counter ion species. Default to "anion".

    Return:
        A dataframe with one row per species and its average coordination
        number in each solvation structure type (SSIP/CIP/AGG).
    """
    cn_values = self.coord_num_array_specific(
        distance_dict, run_start, run_end, center_atom=center_atom, counter_atom=counter_atom
    )
    item_name = "Species in first solvation shell"
    item_list = []
    ssip_list = []
    cip_list = []
    agg_list = []
    for kw, val in cn_values.items():
        if kw != "total":
            # Weighted average of the observed coordination numbers.
            shell_component, shell_count = np.unique(val.flatten(), return_counts=True)
            cn = (shell_component * shell_count / shell_count.sum()).sum()
            # Species names are only recorded from the "ssip_" keys; the
            # columns line up correctly only if cn_values contains a matching
            # ssip_/cip_/agg_ entry for every species, in a consistent key
            # order — NOTE(review): confirm coord_num_array_specific
            # guarantees this.
            if kw.startswith("ssip_"):
                item_list.append(kw[5:])
                ssip_list.append(cn)
            elif kw.startswith("cip_"):
                cip_list.append(cn)
            else:
                agg_list.append(cn)
    df_dict = {item_name: item_list, "CN in SSIP": ssip_list, "CN in CIP": cip_list, "CN in AGG": agg_list}
    df = pd.DataFrame(df_dict)
    return df
def get_msd_all(
    self,
    start: int = 0,
    stop: int = -1,
    fft: bool = True,
    species: str = "cation",
) -> np.ndarray:
    """Calculate the mean square displacement (MSD) of the chosen species.

    Args:
        start: Start time step.
        stop: End time step.
        fft: Whether to use fft to calculate msd. Default to True.
        species: The species for analysis. Default to "cation".

    Return:
        An array of MSD values in the trajectory.
    """
    selection = self.select_dict.get(species)
    assert selection is not None
    return total_msd(
        self.unwrapped_run,
        start=start,
        stop=stop,
        select=selection,
        fft=fft,
    )
def get_msd_partial(
    self,
    distance: float,
    run_start: int,
    run_end: int,
    largest: int = 1000,
    center_atom: str = "cation",
    binding_site: str = "anion",
) -> Tuple[Optional[List[np.ndarray]], Optional[List[np.ndarray]]]:
    """
    Calculate the mean square displacement (MSD) of ``center_atom`` split by
    coordination state: ``free_array`` covers times when ``center_atom`` is
    not coordinated to ``binding_site``; ``attach_array`` covers times when
    it is.

    Args:
        distance: The coordination cutoff distance.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        largest: The largest time sequence to trace.
        center_atom: The solvation shell center atom. Default to "cation".
        binding_site: The species the ``center_atom`` coordinates to. Default to "anion".

    Returns:
        Two arrays of MSD in the trajectory (free, attached).
    """
    run = self.unwrapped_run
    centers = run.select_atoms(self.select_dict.get(center_atom))
    return partial_msd(
        run, centers, largest, self.select_dict, distance, run_start, run_end, binding_site=binding_site
    )
def get_d(self, msd_array: np.ndarray, start: int, stop: int, percentage: float = 1, species: str = "cation"):
    """Prints the self-diffusion coefficient (in m^2/s) of the species.

    Prints the Nernst-Einstein conductivity (in mS/cm) if it's the cation.

    Args:
        msd_array: msd array.
        start: Start time step.
        stop: End time step.
        percentage: The percentage of the cation. Default to 1.
        species: The species for analysis. Default to "cation".
    """
    a2 = 1e-20  # Angstrom^2 -> m^2
    ps = 1e-12  # ps -> s
    s_m_to_ms_cm = 10
    # Einstein relation: D = slope(MSD vs t) / 6, converted to m^2/s.
    # Previously this expression was duplicated in both branches.
    d = (msd_array[start] - msd_array[stop]) / (start - stop) / self.time_step / 6 * a2 / ps
    sigma = d * self.d_to_sigma * s_m_to_ms_cm
    if percentage != 1:
        sigma *= percentage
        print(f"Diffusivity of {(percentage * 100):.2f}% {species}: {d} m^2/s")
        if species.lower() == "cation" or species.lower() == "li":
            # Space before the unit restored for consistency with the
            # all-species branch below.
            print(f"NE Conductivity of {(percentage * 100):.2f}% {species}: {sigma} mS/cm")
    else:
        print("Diffusivity of all " + species + ":", d, "m^2/s")
        if species.lower() == "cation" or species.lower() == "li":
            print("NE Conductivity of all " + species + ":", sigma, "mS/cm")
def get_neighbor_corr(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    center_atom: str = "cation",
) -> Tuple[np.ndarray, Dict[str, np.ndarray]]:
    """Calculate the neighbor auto-correlation function (ACF) of selected
    species around ``center_atom``.

    Args:
        distance_dict: Coordination cutoff distance of each neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        center_atom: The solvation shell center atom to calculate the ACF
            for. Default to "cation".

    Return:
        An array of the time series and a dict of ACFs of each species.
    """
    times_and_acfs = calc_neigh_corr(
        self.wrapped_run,
        distance_dict,
        self.select_dict,
        self.time_step,
        run_start,
        run_end,
        center_atom=center_atom,
    )
    return times_and_acfs
def get_residence_time(
    self, times: np.ndarray, acf_avg_dict: Dict[str, np.ndarray], cutoff_time: int
) -> Dict[str, np.floating]:
    """Calculate the residence time of selected species around the cation.

    Args:
        times: The time series.
        acf_avg_dict: A dict of ACFs of each species.
        cutoff_time: Cutoff time for fitting the exponential decay.

    Return:
        The residence time of each species in a dict.
    """
    residence_times = fit_residence_time(times, acf_avg_dict, cutoff_time, self.time_step)
    return residence_times
def get_neighbor_trj(
    self,
    run_start: int,
    run_end: int,
    species: str,
    neighbor_cutoff: float,
    center_atom: str = "cation",
    index: int = 0,
) -> Dict[str, np.ndarray]:
    """Returns the distance between one center atom and neighbors as a function of time.

    Args:
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        species: The neighbor species.
        neighbor_cutoff: The neighbor cutoff distance.
        center_atom: The solvation shell center atom. Default to "cation".
        index: The index of the atom in the interested atom group. Default to 0.

    Return:
        A dict of distance arrays of the center atom-neighbor as a function of time with neighbor id as keys.
    """
    # Only the ``index``-th atom of the center-atom selection is tracked.
    center_atoms = self.wrapped_run.select_atoms(self.select_dict.get(center_atom))
    return neighbor_distance(
        self.wrapped_run,
        center_atoms[index],
        run_start,
        run_end,
        species,
        self.select_dict,
        neighbor_cutoff,
    )
def get_hopping_freq_dist(
    self,
    run_start: int,
    run_end: int,
    binding_site: str,
    binding_cutoff: float,
    hopping_cutoff: float,
    floating_atom: str = "cation",
    smooth: int = 51,
    mode: str = "full",
) -> Tuple[np.floating, np.floating]:
    """Calculates the cation hopping rate and hopping distance.

    Args:
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        binding_site: Floating ion binding site species.
        binding_cutoff: Binding cutoff distance.
        hopping_cutoff: Hopping out cutoff distance.
        floating_atom: Floating atom species. Default to "cation".
        smooth: The length of the smooth filter window. Default to 51.
        mode: The mode of treating hopping event, "full" or "free"; any
            other value raises ValueError. Default to "full".

    Return:
        The floating_atom average hopping rate and average hopping distance.
    """
    nvt_run = self.wrapped_run
    floating_atoms = nvt_run.select_atoms(self.select_dict.get(floating_atom))
    freqs = []
    hopping_distance = []
    for ion in tqdm(floating_atoms[:]):
        # Distance trajectory of this ion to its nearby binding sites.
        neighbor_trj = neighbor_distance(
            nvt_run, ion, run_start, run_end, binding_site, self.select_dict, binding_cutoff
        )
        if mode == "full":
            sites, freq, steps = find_nearest(
                neighbor_trj, self.time_step, binding_cutoff, hopping_cutoff, smooth=smooth
            )
        elif mode == "free":
            sites, freq, steps = find_nearest_free_only(
                neighbor_trj, self.time_step, binding_cutoff, hopping_cutoff, smooth=smooth
            )
        else:
            raise ValueError("invalid mode")
        coords = []
        for step in steps:
            # Ion position at each hopping step (frame index offset by run_start).
            coord_ion = nvt_run.trajectory[step + run_start][ion.index]
            coords.append(coord_ion)
        if len(coords) > 1:
            # Mean periodic-boundary-aware distance between consecutive
            # hopping positions of this ion.
            dists = []
            for i in range(len(coords) - 1):
                dist = distance_array(coords[i + 1], coords[i], box=self.get_nvt_dimension())[0][0]
                dists.append(dist)
            ion_mean_dists = np.mean(dists)
            hopping_distance.append(ion_mean_dists)
        freqs.append(freq)
    # NOTE(review): if no ion hops more than once, hopping_distance is empty
    # and np.mean returns nan (with a RuntimeWarning) — confirm callers
    # tolerate this.
    return np.mean(freqs), np.mean(hopping_distance)
def shell_evolution(
    self,
    distance_dict: Dict[str, float],
    run_start: int,
    run_end: int,
    lag_step: int,
    binding_cutoff: float,
    hopping_cutoff: float,
    smooth: int = 51,
    cool: int = 0,
    binding_site: str = "anion",
    center_atom: str = "cation",
    duplicate_run: Optional[List[MdRun]] = None,
) -> Dict[str, Dict[str, Union[int, np.ndarray]]]:
    """Calculates the coordination number evolution of species around ``center_atom`` as a function of time,
    the coordination numbers are averaged over all time steps around events when the center_atom
    hopping to and hopping out from the ``binding_site``. If ``duplicate_run`` is given, it is also averaged over
    all duplicate runs.

    Args:
        distance_dict: A dict of coordination cutoff distance of the neighbor species.
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        lag_step: time steps to track before and after the hopping event
        binding_cutoff: Binding cutoff distance.
        hopping_cutoff: Detaching cutoff distance.
        smooth: The length of the smooth filter window. Default to 51.
        cool: The cool down frames between hopping in and hopping out.
        binding_site: The select_dict key of the binding site. Default to "anion".
        center_atom: The solvation shell center atom. Default to "cation".
        duplicate_run: Default to None.

    Return:
        A dictionary containing the number of trj logged, the averaged coordination number and standard deviation
        for each species, and the corresponding time sequence.
    """
    # Per-species accumulators: one coordination-number trace per hopping-in
    # ("in") and hopping-out ("out") event, filled in place by process_evol.
    in_list: Dict[str, List[np.ndarray]] = {}
    out_list: Dict[str, List[np.ndarray]] = {}
    for k in list(distance_dict):
        in_list[k] = []
        out_list[k] = []
    process_evol(
        self.wrapped_run,
        self.select_dict,
        in_list,
        out_list,
        distance_dict,
        run_start,
        run_end,
        lag_step,
        binding_cutoff,
        hopping_cutoff,
        smooth,
        cool,
        binding_site,
        center_atom,
    )
    if duplicate_run is not None:
        # Pool events from the duplicate runs into the same accumulators.
        for run in duplicate_run:
            process_evol(
                run.wrapped_run,
                run.select_dict,
                in_list,
                out_list,
                distance_dict,
                run_start,
                run_end,
                lag_step,
                binding_cutoff,
                hopping_cutoff,
                smooth,
                cool,
                binding_site,
                center_atom,
            )
    cn_dict = {}
    # Time axis centered on the hopping event: [-lag, +lag] in time units.
    cn_dict["time"] = np.array([i * self.time_step - lag_step * self.time_step for i in range(lag_step * 2 + 1)])
    for k in list(distance_dict):
        if "in_count" not in cn_dict:
            # Event counts are the same for every species; record them once.
            cn_dict["in_count"] = np.array(in_list[k]).shape[0]
            cn_dict["out_count"] = np.array(out_list[k]).shape[0]
        k_dict = {}
        # nanmean/nanstd ignore missing points within individual traces.
        k_dict["in_ave"] = np.nanmean(np.array(in_list[k]), axis=0)
        k_dict["in_err"] = np.nanstd(np.array(in_list[k]), axis=0)
        k_dict["out_ave"] = np.nanmean(np.array(out_list[k]), axis=0)
        k_dict["out_err"] = np.nanstd(np.array(out_list[k]), axis=0)
        cn_dict[k] = k_dict
    return cn_dict
def get_heat_map(
    self,
    run_start: int,
    run_end: int,
    cluster_center: str,
    cluster_terminal: Union[str, List[str]],
    binding_cutoff: float,
    hopping_cutoff: float,
    floating_atom: str = "cation",
    cartesian_by_ref: Optional[np.ndarray] = None,
    sym_dict: Optional[Dict[str, List[np.ndarray]]] = None,
    sample: Optional[int] = None,
    smooth: int = 51,
    dim: str = "xyz",
) -> np.ndarray:
    """Calculates the heatmap matrix of floating ion around a cluster.

    Args:
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        cluster_center: The center atom species of the cluster.
        cluster_terminal: The selection string for terminal atom species of the cluster
            (typically the binding site for the floating ion). The argument can be a str if
            all the terminal atoms have the same selection string and are equivalent, or a list
            if the terminal atoms are distinct and have different selection strings.
        binding_cutoff: Binding cutoff distance.
        hopping_cutoff: Detaching cutoff distance.
        floating_atom: The floating atom species. Default to "cation".
        cartesian_by_ref: Transformation matrix between cartesian
            and reference coordinate systems. Default to None (identity).
        sym_dict: Dictionary of symmetry operation dictionary. Default to None.
        sample: Number of samples desired. Default to None, which is no sampling.
        smooth: The length of the smooth filter window. Default to 51.
        dim: Desired dimensions to calculate heat map.

    Return:
        An array of coordinates.
    """
    nvt_run = self.wrapped_run
    floating_atoms = nvt_run.select_atoms(self.select_dict.get(floating_atom))
    if isinstance(cluster_terminal, str):
        terminal_atom_type: Union[str, List[str]] = self.select_dict.get(cluster_terminal, "Not defined")
        assert terminal_atom_type != "Not defined", f"{cluster_terminal} not defined in select_dict"
    else:
        terminal_atom_type = []
        for species in cluster_terminal:
            atom_type = self.select_dict.get(species, "Not defined")
            assert atom_type != "Not defined", f"{species} not defined in select_dict"
            terminal_atom_type.append(atom_type)
    if cartesian_by_ref is None:
        # Identity transform; hoisted out of the per-atom loop since it is
        # loop-invariant (it was previously re-checked every iteration).
        cartesian_by_ref = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    # Placeholder origin row so np.concatenate has a seed; stripped below.
    coord_list = np.array([[0, 0, 0]])
    for atom in tqdm(floating_atoms[:]):
        neighbor_trj = neighbor_distance(
            nvt_run, atom, run_start, run_end, cluster_center, self.select_dict, binding_cutoff
        )
        # Skip atoms that never come within binding_cutoff of a cluster.
        if not bool(neighbor_trj):
            continue
        sites, freq, steps = find_nearest(
            neighbor_trj, self.time_step, binding_cutoff, hopping_cutoff, smooth=smooth
        )
        coords = heat_map(
            nvt_run,
            atom,
            sites,
            terminal_atom_type,
            cartesian_by_ref,
            run_start,
            run_end,
        )
        if coords.size != 0:
            coord_list = np.concatenate((coord_list, coords), axis=0)
    coord_list = coord_list[1:]
    if sym_dict:
        return get_full_coords(coord_list, sample=sample, dim=dim, **sym_dict)
    return get_full_coords(coord_list, sample=sample, dim=dim)
def get_cluster_distance(
    self, run_start: int, run_end: int, neighbor_cutoff: float, cluster_center: str = "center"
) -> np.floating:
    """Calculate the average distance between the centers of clusters/molecules.

    Args:
        run_start: Start frame of analysis.
        run_end: End frame of analysis.
        neighbor_cutoff: Upper limit of first nearest neighbor.
        cluster_center: species name of cluster center. Default to "center".

    Return:
        The averaged distance.
    """
    centers = self.wrapped_run.select_atoms(self.select_dict.get(cluster_center))
    frame_means = []
    for ts in self.wrapped_run.trajectory[run_start:run_end:]:
        pair_distances = capped_distance(
            centers.positions,
            centers.positions,
            max_cutoff=neighbor_cutoff,
            box=ts.dimensions,
            return_distances=True,
        )[1]
        # Mask out self-distances so they do not drag the mean to zero.
        pair_distances[pair_distances == 0] = np.nan
        frame_means.append(np.nanmean(pair_distances))
    return np.mean(frame_means)
class MdJob:
    """A core class for MD results analysis."""

    def __init__(self, name):
        """Base constructor.

        Args:
            name: Name of the job.
        """
        self.name = name

    @classmethod
    def from_dict(cls):
        """Alternate constructor from a dict representation.

        Returns:
            An ``MdJob`` named "name".
        """
        return cls("name")

    @classmethod
    def from_recipe(cls):
        """Alternate constructor from a recipe.

        Returns:
            An ``MdJob`` named "name".
        """
        return cls("name")
| 38.865093 | 119 | 0.592658 |
795bebdcbb0539b9f2d49e35377e57a608ee9561 | 25,651 | py | Python | pylxd/tests/models/test_container.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | null | null | null | pylxd/tests/models/test_container.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | null | null | null | pylxd/tests/models/test_container.py | AdamIsrael/pylxd | d5d47a4d1185b4956e997d70e09d649ea73ba26b | [
"Apache-2.0"
] | null | null | null | import contextlib
import json
import os
import shutil
import tempfile
import mock
from six.moves.urllib.parse import quote as url_quote
from pylxd import exceptions, models
from pylxd.tests import testing
class TestContainer(testing.PyLXDTestCase):
"""Tests for pylxd.models.Container."""
def test_all(self):
"""A list of all containers are returned."""
containers = models.Container.all(self.client)
self.assertEqual(1, len(containers))
def test_get(self):
"""Return a container."""
name = 'an-container'
an_container = models.Container.get(self.client, name)
self.assertEqual(name, an_container.name)
def test_get_not_found(self):
"""LXDAPIException is raised when the container doesn't exist."""
def not_found(request, context):
context.status_code = 404
return json.dumps({
'type': 'error',
'error': 'Not found',
'error_code': 404})
self.add_rule({
'text': not_found,
'method': 'GET',
'url': r'^http://pylxd.test/1.0/containers/an-missing-container$',
})
name = 'an-missing-container'
self.assertRaises(
exceptions.LXDAPIException,
models.Container.get, self.client, name)
def test_get_error(self):
"""LXDAPIException is raised when the LXD API errors."""
def not_found(request, context):
context.status_code = 500
return json.dumps({
'type': 'error',
'error': 'Not found',
'error_code': 500})
self.add_rule({
'text': not_found,
'method': 'GET',
'url': r'^http://pylxd.test/1.0/containers/an-missing-container$',
})
name = 'an-missing-container'
self.assertRaises(
exceptions.LXDAPIException,
models.Container.get, self.client, name)
def test_create(self):
"""A new container is created."""
config = {'name': 'an-new-container'}
an_new_container = models.Container.create(
self.client, config, wait=True)
self.assertEqual(config['name'], an_new_container.name)
def test_create_remote(self):
"""A new container is created at target."""
config = {'name': 'an-new-remote-container'}
an_new_remote_container = models.Container.create(
self.client, config, wait=True, target="an-remote")
self.assertEqual(config['name'], an_new_remote_container.name)
self.assertEqual("an-remote", an_new_remote_container.location)
def test_exists(self):
"""A container exists."""
name = 'an-container'
self.assertTrue(models.Container.exists(self.client, name))
def test_not_exists(self):
"""A container exists."""
def not_found(request, context):
context.status_code = 404
return json.dumps({
'type': 'error',
'error': 'Not found',
'error_code': 404})
self.add_rule({
'text': not_found,
'method': 'GET',
'url': r'^http://pylxd.test/1.0/containers/an-missing-container$',
})
name = 'an-missing-container'
self.assertFalse(models.Container.exists(self.client, name))
def test_fetch(self):
"""A sync updates the properties of a container."""
an_container = models.Container(
self.client, name='an-container')
an_container.sync()
self.assertTrue(an_container.ephemeral)
def test_fetch_not_found(self):
"""LXDAPIException is raised on a 404 for updating container."""
def not_found(request, context):
context.status_code = 404
return json.dumps({
'type': 'error',
'error': 'Not found',
'error_code': 404})
self.add_rule({
'text': not_found,
'method': 'GET',
'url': r'^http://pylxd.test/1.0/containers/an-missing-container$',
})
an_container = models.Container(
self.client, name='an-missing-container')
self.assertRaises(exceptions.LXDAPIException, an_container.sync)
def test_fetch_error(self):
"""LXDAPIException is raised on error."""
def not_found(request, context):
context.status_code = 500
return json.dumps({
'type': 'error',
'error': 'An bad error',
'error_code': 500})
self.add_rule({
'text': not_found,
'method': 'GET',
'url': r'^http://pylxd.test/1.0/containers/an-missing-container$',
})
an_container = models.Container(
self.client, name='an-missing-container')
self.assertRaises(exceptions.LXDAPIException, an_container.sync)
def test_update(self):
"""A container is updated."""
an_container = models.Container(
self.client, name='an-container')
an_container.architecture = 1
an_container.config = {}
an_container.created_at = 1
an_container.devices = {}
an_container.ephemeral = 1
an_container.expanded_config = {}
an_container.expanded_devices = {}
an_container.profiles = 1
an_container.status = 1
an_container.save(wait=True)
self.assertTrue(an_container.ephemeral)
def test_rename(self):
an_container = models.Container(
self.client, name='an-container')
an_container.rename('an-renamed-container', wait=True)
self.assertEqual('an-renamed-container', an_container.name)
def test_delete(self):
"""A container is deleted."""
# XXX: rockstar (21 May 2016) - This just executes
# a code path. There should be an assertion here, but
# it's not clear how to assert that, just yet.
an_container = models.Container(
self.client, name='an-container')
an_container.delete(wait=True)
@testing.requires_ws4py
@mock.patch('pylxd.models.container._StdinWebsocket')
@mock.patch('pylxd.models.container._CommandWebsocketClient')
def test_execute(self, _CommandWebsocketClient, _StdinWebsocket):
"""A command is executed on a container."""
fake_websocket = mock.Mock()
fake_websocket.data = 'test\n'
_StdinWebsocket.return_value = fake_websocket
_CommandWebsocketClient.return_value = fake_websocket
an_container = models.Container(
self.client, name='an-container')
result = an_container.execute(['echo', 'test'])
self.assertEqual(0, result.exit_code)
self.assertEqual('test\n', result.stdout)
def test_execute_no_ws4py(self):
"""If ws4py is not installed, ValueError is raised."""
from pylxd.models import container
old_installed = container._ws4py_installed
container._ws4py_installed = False
def cleanup():
container._ws4py_installed = old_installed
self.addCleanup(cleanup)
an_container = models.Container(
self.client, name='an-container')
self.assertRaises(ValueError, an_container.execute, ['echo', 'test'])
@testing.requires_ws4py
def test_execute_string(self):
"""A command passed as string raises a TypeError."""
an_container = models.Container(
self.client, name='an-container')
self.assertRaises(TypeError, an_container.execute, 'apt-get update')
def test_raw_interactive_execute(self):
an_container = models.Container(self.client, name='an-container')
result = an_container.raw_interactive_execute(['/bin/bash'])
self.assertEqual(result['ws'],
'/1.0/operations/operation-abc/websocket?secret=abc')
self.assertEqual(result['control'],
'/1.0/operations/operation-abc/websocket?secret=jkl')
def test_raw_interactive_execute_env(self):
an_container = models.Container(self.client, name='an-container')
result = an_container.raw_interactive_execute(['/bin/bash'],
{"PATH": "/"})
self.assertEqual(result['ws'],
'/1.0/operations/operation-abc/websocket?secret=abc')
self.assertEqual(result['control'],
'/1.0/operations/operation-abc/websocket?secret=jkl')
def test_raw_interactive_execute_string(self):
"""A command passed as string raises a TypeError."""
an_container = models.Container(
self.client, name='an-container')
self.assertRaises(TypeError,
an_container.raw_interactive_execute,
'apt-get update')
def test_migrate(self):
"""A container is migrated."""
from pylxd.client import Client
client2 = Client(endpoint='http://pylxd2.test')
an_container = models.Container(
self.client, name='an-container')
an_migrated_container = an_container.migrate(client2)
self.assertEqual('an-container', an_migrated_container.name)
self.assertEqual(client2, an_migrated_container.client)
@mock.patch('pylxd.models.container.Container.generate_migration_data')
def test_migrate_exception_error(self, generate_migration_data):
"""LXDAPIException is raised in case of migration failure"""
from pylxd.client import Client
from pylxd.exceptions import LXDAPIException
def generate_exception():
response = mock.Mock()
response.status_code = 400
raise LXDAPIException(response)
generate_migration_data.side_effect = generate_exception
an_container = models.Container(
self.client, name='an-container')
client2 = Client(endpoint='http://pylxd2.test')
self.assertRaises(LXDAPIException, an_container.migrate, client2)
@mock.patch('pylxd.models.container.Container.generate_migration_data')
def test_migrate_exception_running(self, generate_migration_data):
"""Migrated container already running on destination"""
from pylxd.client import Client
from pylxd.exceptions import LXDAPIException
client2 = Client(endpoint='http://pylxd2.test')
an_container = models.Container(
self.client, name='an-container')
an_container.status_code = 103
def generate_exception():
response = mock.Mock()
response.status_code = 103
raise LXDAPIException(response)
generate_migration_data.side_effect = generate_exception
an_migrated_container = an_container.migrate(client2)
self.assertEqual('an-container', an_migrated_container.name)
self.assertEqual(client2, an_migrated_container.client)
def test_migrate_started(self):
"""A container is migrated."""
from pylxd.client import Client
client2 = Client(endpoint='http://pylxd2.test')
an_container = models.Container.get(self.client, name='an-container')
an_container.status_code = 103
an_migrated_container = an_container.migrate(client2)
self.assertEqual('an-container', an_migrated_container.name)
self.assertEqual(client2, an_migrated_container.client)
def test_migrate_stopped(self):
"""A container is migrated."""
from pylxd.client import Client
client2 = Client(endpoint='http://pylxd2.test')
an_container = models.Container.get(self.client, name='an-container')
an_container.status_code = 102
an_migrated_container = an_container.migrate(client2)
self.assertEqual('an-container', an_migrated_container.name)
self.assertEqual(client2, an_migrated_container.client)
@mock.patch('pylxd.client._APINode.get')
def test_migrate_local_client(self, get):
"""Migration from local clients is not supported."""
# Mock out the _APINode for the local instance.
response = mock.Mock()
response.json.return_value = {'metadata': {'fake': 'response'}}
response.status_code = 200
get.return_value = response
from pylxd.client import Client
client2 = Client(endpoint='http+unix://pylxd2.test')
an_container = models.Container(
client2, name='an-container')
self.assertRaises(ValueError, an_container.migrate, self.client)
def test_publish(self):
"""Containers can be published."""
self.add_rule({
'text': json.dumps({
'type': 'sync',
'metadata': {
'id': 'operation-abc',
'metadata': {
'fingerprint': ('e3b0c44298fc1c149afbf4c8996fb92427'
'ae41e4649b934ca495991b7852b855')
}
}
}),
'method': 'GET',
'url': r'^http://pylxd.test/1.0/operations/operation-abc$',
})
an_container = models.Container(
self.client, name='an-container')
image = an_container.publish(wait=True)
self.assertEqual(
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
image.fingerprint)
class TestContainerState(testing.PyLXDTestCase):
"""Tests for pylxd.models.ContainerState."""
def test_get(self):
"""Return a container."""
name = 'an-container'
an_container = models.Container.get(self.client, name)
state = an_container.state()
self.assertEqual('Running', state.status)
self.assertEqual(103, state.status_code)
def test_start(self):
"""A container is started."""
an_container = models.Container.get(self.client, 'an-container')
an_container.start(wait=True)
def test_stop(self):
"""A container is stopped."""
an_container = models.Container.get(self.client, 'an-container')
an_container.stop()
def test_restart(self):
"""A container is restarted."""
an_container = models.Container.get(self.client, 'an-container')
an_container.restart()
def test_freeze(self):
"""A container is suspended."""
an_container = models.Container.get(self.client, 'an-container')
an_container.freeze()
def test_unfreeze(self):
"""A container is resumed."""
an_container = models.Container.get(self.client, 'an-container')
an_container.unfreeze()
class TestContainerSnapshots(testing.PyLXDTestCase):
"""Tests for pylxd.models.Container.snapshots."""
def setUp(self):
super(TestContainerSnapshots, self).setUp()
self.container = models.Container.get(self.client, 'an-container')
def test_get(self):
"""Return a specific snapshot."""
snapshot = self.container.snapshots.get('an-snapshot')
self.assertEqual('an-snapshot', snapshot.name)
def test_all(self):
"""Return all snapshots."""
snapshots = self.container.snapshots.all()
self.assertEqual(1, len(snapshots))
self.assertEqual('an-snapshot', snapshots[0].name)
self.assertEqual(self.client, snapshots[0].client)
self.assertEqual(self.container, snapshots[0].container)
def test_create(self):
"""Create a snapshot."""
snapshot = self.container.snapshots.create(
'an-snapshot', stateful=True, wait=True)
self.assertEqual('an-snapshot', snapshot.name)
class TestSnapshot(testing.PyLXDTestCase):
"""Tests for pylxd.models.Snapshot."""
def setUp(self):
super(TestSnapshot, self).setUp()
self.container = models.Container.get(self.client, 'an-container')
def test_rename(self):
"""A snapshot is renamed."""
snapshot = models.Snapshot(
self.client, container=self.container,
name='an-snapshot')
snapshot.rename('an-renamed-snapshot', wait=True)
self.assertEqual('an-renamed-snapshot', snapshot.name)
def test_delete(self):
"""A snapshot is deleted."""
snapshot = models.Snapshot(
self.client, container=self.container,
name='an-snapshot')
snapshot.delete(wait=True)
# TODO: add an assertion here
def test_delete_failure(self):
"""If the response indicates delete failure, raise an exception."""
def not_found(request, context):
context.status_code = 404
return json.dumps({
'type': 'error',
'error': 'Not found',
'error_code': 404})
self.add_rule({
'text': not_found,
'method': 'DELETE',
'url': (r'^http://pylxd.test/1.0/containers/'
'an-container/snapshots/an-snapshot$')
})
snapshot = models.Snapshot(
self.client, container=self.container,
name='an-snapshot')
self.assertRaises(exceptions.LXDAPIException, snapshot.delete)
def test_publish(self):
"""Snapshots can be published."""
self.add_rule({
'text': json.dumps({
'type': 'sync',
'metadata': {
'id': 'operation-abc',
'metadata': {
'fingerprint': ('e3b0c44298fc1c149afbf4c8996fb92427'
'ae41e4649b934ca495991b7852b855')
}
}
}),
'method': 'GET',
'url': r'^http://pylxd.test/1.0/operations/operation-abc$',
})
snapshot = models.Snapshot(
self.client, container=self.container,
name='an-snapshot')
image = snapshot.publish(wait=True)
self.assertEqual(
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
image.fingerprint)
class TestFiles(testing.PyLXDTestCase):
"""Tests for pylxd.models.Container.files."""
def setUp(self):
super(TestFiles, self).setUp()
self.container = models.Container.get(self.client, 'an-container')
def test_put_delete(self):
"""A file is put on the container and then deleted"""
# we are mocked, so delete should initially not be available
self.assertEqual(False, self.container.files.delete_available())
self.assertRaises(exceptions.LXDAPIExtensionNotAvailable,
self.container.files.delete, '/some/file')
# Now insert delete
self.add_rule({
'text': json.dumps({
'type': 'sync',
'metadata': {'auth': 'trusted',
'environment': {
'certificate': 'an-pem-cert',
},
'api_extensions': ['file_delete']
}}),
'method': 'GET',
'url': r'^http://pylxd.test/1.0$',
})
# Update hostinfo
self.client.host_info = self.client.api.get().json()['metadata']
self.assertEqual(True, self.container.files.delete_available())
# mock out the delete rule:
self.add_rule({
'method': 'DELETE',
'url': (r'^http://pylxd.test/1.0/containers/an-container/files'
r'\?path=%2Fsome%2Ffile$')
})
self.container.files.delete('/some/file')
# now check that an error (non 200) causes an exception
def responder(request, context):
context.status_code = 404
self.add_rule({
'text': responder,
'method': 'DELETE',
'url': (r'^http://pylxd.test/1.0/containers/an-container/files'
r'\?path=%2Fsome%2Ffile%2Fnot%2Ffound$')
})
with self.assertRaises(exceptions.LXDAPIException):
self.container.files.delete('/some/file/not/found')
def test_put_mode_uid_gid(self):
"""Should be able to set the mode, uid and gid of a file"""
# fix up the default POST rule to allow us to see the posted vars
_capture = {}
def capture(request, context):
_capture['headers'] = getattr(request._request, 'headers')
context.status_code = 200
self.add_rule({
'text': capture,
'method': 'POST',
'url': (r'^http://pylxd.test/1.0/containers/an-container/files'
r'\?path=%2Ftmp%2Fputted$'),
})
data = 'The quick brown fox'
# start with an octal mode
self.container.files.put('/tmp/putted', data, mode=0o123, uid=1, gid=2)
headers = _capture['headers']
self.assertEqual(headers['X-LXD-mode'], '0123')
self.assertEqual(headers['X-LXD-uid'], '1')
self.assertEqual(headers['X-LXD-gid'], '2')
# use a str mode this type
self.container.files.put('/tmp/putted', data, mode='456')
headers = _capture['headers']
self.assertEqual(headers['X-LXD-mode'], '0456')
# check that mode='0644' also works (i.e. already has 0 prefix)
self.container.files.put('/tmp/putted', data, mode='0644')
headers = _capture['headers']
self.assertEqual(headers['X-LXD-mode'], '0644')
# check that assertion is raised
with self.assertRaises(ValueError):
self.container.files.put('/tmp/putted', data, mode=object)
def test_recursive_put(self):
@contextlib.contextmanager
def tempdir(prefix='tmp'):
tmpdir = tempfile.mkdtemp(prefix=prefix)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def create_file(_dir, name, content):
path = os.path.join(_dir, name)
actual_dir = os.path.dirname(path)
if not os.path.exists(actual_dir):
os.makedirs(actual_dir)
with open(path, 'w') as f:
f.write(content)
_captures = []
def capture(request, context):
_captures.append({
'headers': getattr(request._request, 'headers'),
'body': request._request.body,
})
context.status_code = 200
with tempdir() as _dir:
base = (r'^http://pylxd.test/1.0/containers/'
r'an-container/files\?path=')
rules = [
{
'text': capture,
'method': 'POST',
'url': base + url_quote('target', safe='') + '$'
},
{
'text': capture,
'method': 'POST',
'url': base + url_quote('target/dir', safe='') + '$'
},
{
'text': capture,
'method': 'POST',
'url': base + url_quote('target/file1', safe='') + '$'
},
{
'text': capture,
'method': 'POST',
'url': base + url_quote('target/dir/file2',
safe='') + '$'
}
]
self.add_rules(rules)
create_file(_dir, 'file1', "This is file1")
create_file(_dir, 'dir/file2', "This is file2")
self.container.files.recursive_put(_dir, './target/')
self.assertEqual(_captures[0]['headers']['X-LXD-type'],
'directory')
self.assertEqual(_captures[1]['body'], b"This is file1")
self.assertEqual(_captures[2]['headers']['X-LXD-type'],
'directory')
self.assertEqual(_captures[3]['body'], b"This is file2")
def test_get(self):
"""A file is retrieved from the container."""
data = self.container.files.get('/tmp/getted')
self.assertEqual(b'This is a getted file', data)
def test_get_not_found(self):
"""LXDAPIException is raised on bogus filenames."""
def not_found(request, context):
context.status_code = 500
rule = {
'text': not_found,
'method': 'GET',
'url': (r'^http://pylxd.test/1.0/containers/an-container/files'
r'\?path=%2Ftmp%2Fgetted$'),
}
self.add_rule(rule)
self.assertRaises(
exceptions.LXDAPIException,
self.container.files.get, '/tmp/getted')
def test_get_error(self):
"""LXDAPIException is raised on error."""
def not_found(request, context):
context.status_code = 503
rule = {
'text': not_found,
'method': 'GET',
'url': (r'^http://pylxd.test/1.0/containers/an-container/files'
r'\?path=%2Ftmp%2Fgetted$'),
}
self.add_rule(rule)
self.assertRaises(
exceptions.LXDAPIException,
self.container.files.get, '/tmp/getted')
# for bug/281 -- getting an empty json file is interpreted as an API
# get rather than a raw get.
def test_get_json_file(self):
data = self.container.files.get('/tmp/json-get')
self.assertEqual(b'{"some": "value"}', data)
| 34.570081 | 79 | 0.580484 |
795bec10b4d5a5724670e31496ed492bfb2054cd | 3,799 | py | Python | skyfield/tests/test_vectors.py | brunobord/python-skyfield | bd8cfdc151e05d6bd47f9808c497f0a4318d7444 | [
"MIT"
] | null | null | null | skyfield/tests/test_vectors.py | brunobord/python-skyfield | bd8cfdc151e05d6bd47f9808c497f0a4318d7444 | [
"MIT"
] | null | null | null | skyfield/tests/test_vectors.py | brunobord/python-skyfield | bd8cfdc151e05d6bd47f9808c497f0a4318d7444 | [
"MIT"
] | null | null | null | # Test the behavior of all combinations of vector.
from assay import assert_raises
from skyfield.api import Topos, load
from skyfield.positionlib import Geocentric
def test_bad_addition():
planets = load('de421.bsp')
earth = planets['earth']
mars = planets['mars']
with assert_raises(ValueError, 'the center where the other vector starts'):
earth + mars
def test_bad_subtraction():
planets = load('de421.bsp')
earth = planets['earth']
usno = Topos('38.9215 N', '77.0669 W', elevation_m=92.0)
with assert_raises(ValueError, 'if they both start at the same center'):
earth - usno
def test_chebyshev_subtraction():
planets = load('de421.bsp')
v = planets['earth barycenter'] - planets['sun']
assert str(v) == """\
Sum of 2 vectors:
Reversed 'de421.bsp' segment 10 SUN -> 0 SOLAR SYSTEM BARYCENTER
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER"""
assert repr(v) == """\
<VectorSum of 2 vectors:
Reversed 'de421.bsp' segment 10 SUN -> 0 SOLAR SYSTEM BARYCENTER
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER>"""
def test_negation():
ts = load.timescale()
t = ts.utc(2020, 8, 30, 16, 5)
usno = Topos('38.9215 N', '77.0669 W', elevation_m=92.0)
neg = -usno
p1 = usno.at(t)
p2 = neg.at(t)
assert (p1.position.au == - p2.position.au).all()
assert (p1.velocity.au_per_d == - p2.velocity.au_per_d).all()
# A second negation should return the unwrapped original.
neg = -neg
assert neg is usno
def test_vectors():
ts = load.timescale()
t = ts.tt(2017, 1, 23, 10, 44)
planets = load('de421.bsp')
earth = planets['earth']
mars = planets['mars']
v = earth
assert str(v) == """\
Sum of 2 vectors:
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH"""
assert repr(v) == """\
<VectorSum of 2 vectors:
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH>"""
assert str(v.at(t)) == "\
<Barycentric BCRS position and velocity at date t center=0 target=399>"
v = earth + Topos('38.9215 N', '77.0669 W', elevation_m=92.0)
assert str(v) == """\
Sum of 3 vectors:
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH
Geodetic 399 EARTH -> IERS2010 latitude 38deg 55' 17.4" N longitude -77deg 04' 00.8" E elevation 92 m"""
assert repr(v) == """\
<VectorSum of 3 vectors:
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH
Geodetic 399 EARTH -> IERS2010 latitude 38deg 55' 17.4" N longitude -77deg 04' 00.8" E elevation 92 m>"""
assert str(v.at(t)) == """\
<Barycentric BCRS position and velocity at date t center=0 target=IERS2010 latitude 38deg 55' 17.4" N longitude -77deg 04' 00.8" E elevation 92 m>"""
v = earth - mars
assert str(v) == """\
Sum of 4 vectors:
Reversed 'de421.bsp' segment 499 MARS -> 4 MARS BARYCENTER
Reversed 'de421.bsp' segment 4 MARS BARYCENTER -> 0 SOLAR SYSTEM BARYCENTER
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH"""
assert repr(v) == """\
<VectorSum of 4 vectors:
Reversed 'de421.bsp' segment 499 MARS -> 4 MARS BARYCENTER
Reversed 'de421.bsp' segment 4 MARS BARYCENTER -> 0 SOLAR SYSTEM BARYCENTER
'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH>"""
assert str(v.at(t)) == "\
<ICRF position and velocity at date t center=499 target=399>"
geocentric = Geocentric([0,0,0])
assert geocentric.center == 399
| 34.536364 | 149 | 0.674388 |
795bec59162e7a0e86335a5f36249c87c16d6e1a | 734 | py | Python | backend/admin.py | Esmidth/blog | 59b432fe779770dd44dcac717536e914690f0944 | [
"MIT"
] | null | null | null | backend/admin.py | Esmidth/blog | 59b432fe779770dd44dcac717536e914690f0944 | [
"MIT"
] | null | null | null | backend/admin.py | Esmidth/blog | 59b432fe779770dd44dcac717536e914690f0944 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Author, Image, Article, Tag
# admin.site.register(Author)
# admin.site.register(Image)
# admin.site.register(Article)
# admin.site.register(Tag)
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ['tag_name']
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
list_display = ['nickname', 'avatar', 'comments']
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ['image']
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'author', 'tags', 'views', 'is_top',
'is_show', 'post_type']
| 22.9375 | 98 | 0.708447 |
795bec9dab858cf6a975825d777a40f41329b5b6 | 2,634 | py | Python | setup.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 1 | 2018-10-31T03:49:39.000Z | 2018-10-31T03:49:39.000Z | setup.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 5 | 2017-11-18T01:45:50.000Z | 2020-05-30T12:26:50.000Z | setup.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 1 | 2019-10-14T08:44:40.000Z | 2019-10-14T08:44:40.000Z | import setuptools, re, glob, os
from setuptools import setup, find_packages
from subprocess import Popen, PIPE
githash = 'unknown'
if os.path.isdir(os.path.dirname(os.path.abspath(__file__))+'/.git'):
try:
gitproc = Popen(['git', 'rev-parse', 'HEAD'], stdout = PIPE)
githash = gitproc.communicate()[0]
if gitproc.returncode != 0:
print "unable to run git, assuming githash to be unknown"
githash = 'unknown'
except EnvironmentError:
print "unable to run git, assuming githash to be unknown"
githash = githash.replace('\n', '')
with open(os.path.dirname(os.path.abspath(__file__))+'/astroutils/githash.txt', 'w+') as githash_file:
githash_file.write(githash)
metafile = open('./astroutils/__init__.py').read()
metadata = dict(re.findall("__([a-z]+)__\s*=\s*'([^']+)'", metafile))
setup(name='AstroUtils',
version=metadata['version'],
description=metadata['description'],
long_description=open("README.rst").read(),
url=metadata['url'],
author=metadata['author'],
author_email=metadata['authoremail'],
maintainer=metadata['maintainer'],
maintainer_email=metadata['maintaineremail'],
license='MIT',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Utilities'],
packages=find_packages(),
package_data={'astroutils': ['*.txt', 'examples/cosmotile/*.yaml',
'examples/image_cutout/*.yaml',
'examples/catalogops/*.yaml',
'examples/codes/lightcone_operations/*.py',
'examples/codes/lightcone_operations/*.yaml']},
include_package_data=True,
scripts=glob.glob('scripts/*.py'),
install_requires=['astropy>=1.0, <3.0', 'blessings>=1.6', 'healpy>=1.5.3',
'ipdb>=0.6.1', 'mpi4py>=1.2.2', 'numpy>=1.8.1',
'scipy>=0.15.1', 'astroquery>=0.3.8',
'beautifulsoup4>=4.6', 'scikit-image'],
setup_requires=['astropy>=1.0, <3.0', 'blessings>=1.6', 'ipdb>=0.6.1',
'healpy>=1.5.3', 'mpi4py>=1.2.2', 'numpy>=1.8.1',
'scipy>=0.15.1', 'astroquery>=0.3.8', 'beautifulsoup4>=4.6',
'scikit-image<0.15'],
tests_require=['pytest'],
zip_safe=False)
| 44.644068 | 102 | 0.572893 |
795beccb15fea4452770f711d6475d163054fa69 | 9,696 | py | Python | ryu/app/my_arp_v2_r1.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | ryu/app/my_arp_v2_r1.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | ryu/app/my_arp_v2_r1.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | # Simple Arp Handler v2
# Jack Zhao
# s.zhao.j@gmail.com
from operator import attrgetter
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.arp import arp
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.ofproto import inet
import time
import os
# config logging
# LOG = logging.getLogger('SimpleArp')
# LOG.setLevel(logging.DEBUG)
# logging.basicConfig()
OFP_SWITCHES_LIST_PREVIOUS = \
'./network-data/ofp_switches_list_prev.db'
OFP_SWITCHES_LIST_SCRIPT = \
'./scripts/remote_ovs_operation/get_switch_ofpbr_datapath_id.sh'
class MySimpleArp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(MySimpleArp, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.arp_learning = {} # self.arp_learning = {srcMAC:[dst_ip,in_port,time]}
self.packetToport = {}
self.hostname_list = {}
self.dpset = kwargs['dpset']
def _get_hwaddr(self, dpid, port_no):
return self.dpset.get_port(dpid, port_no).hw_addr
def _hostname_Check(self, datapath):
# Given decimal datapath ID, return hostname
if os.path.exists(os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)):
f = os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)
else:
f = os.path.abspath(OFP_SWITCHES_LIST)
with open(f, 'r') as iff:
for line in iff:
hostname, dpid = line.split()
self.hostname_list[int(dpid, 16)] = hostname
# print self.hostname_list
# NEED add some datapath check later
if datapath not in self.hostname_list.keys():
return datapath
else:
return self.hostname_list[datapath]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
""" install table-miss flow entry """
self.logger.debug("my_arp: switch_features_handler:")
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# self.logger.info("################### datapath in decimal %s", datapath.id)
# self.logger.info("################### datapath in hex %s", hex(int(datapath.id)))
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
self.logger.debug("my_arp:add_flow")
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
self.logger.debug("my_arp: _packet_in_handler:")
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
# ofproto = datapath.ofproto
inPort = msg.match['in_port']
packets = Packet(msg.data)
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
eth = packets.get_protocols(ethernet)[0]
src = eth.src
dst = eth.dst
self.mac_to_port[dpid][src] = inPort
data = msg.data
self.arp_learning.setdefault(dpid, [])
self.packetToport.setdefault(dpid, {})
etherFrame = packets.get_protocol(ethernet)
# if dst == LLDP_MAC_NEAREST_BRIDGE:
# return
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
# print "etherFrame######", etherFrame
# etherFrame = packets.get_protocol(ethernet)
etherFrame = packets.get_protocol(ethernet)
# print etherFrame
# print ether
# print hex(etherFrame.ethertype)
# print hex(ether.ETH_TYPE_ARP)
if etherFrame.ethertype == ether.ETH_TYPE_ARP:
arpPacket = packets.get_protocol(arp)
arpArriveTime = time.time()
srcMac = etherFrame.src
arp_dstIP = arpPacket.dst_ip
dst = eth.dst
if dst == "ff:ff:ff:ff:ff:ff":
self.packetToport[datapath.id][(srcMac, arp_dstIP, inPort)] = arpArriveTime
# print "arp"
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
# print "ARP: %s" % arpPacket.opcode
# self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# if arpPacket.opcode == 1:
# print "ARP Requst"
# self.logger.info("packet in %s %s %s %s", datapath.id, srcMac, dst, inPort)
# elif arpPacket.opcode == 2:
# print "ARP Reply"
# self.logger.info("packet in %s %s %s %s", datapath.id, srcMac, dst, inPort)
self.receive_arp(datapath, packets, etherFrame, inPort, data)
return 0
else:
self.logger.debug("Drop packet")
return 1
def receive_arp(self, datapath, packets, etherFrame, inPort, data):
arpPacket = packets.get_protocol(arp)
arp_dstIP = arpPacket.dst_ip
arp_srcIP = arpPacket.src_ip
if arpPacket.opcode == 1:
# self.logger.info("%s: receive ARP request %s => %s (port%d)"
# % (self._hostname_Check(datapath.id), etherFrame.src, etherFrame.dst, inPort))
if self.anti_arp_brodcast(datapath, etherFrame, inPort, arp_dstIP):
self.logger.info("%s: receive ARP request %s => %s (port%d) src_ip=%s dst_ip=%s"
% (self._hostname_Check(datapath.id), etherFrame.src, etherFrame.dst, inPort, arp_srcIP, arp_dstIP))
# print "-----packetToport: ", self.packetToport
# print "-----arp_learning: ", self.arp_learning
self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
elif arpPacket.opcode == 2:
self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
def anti_arp_brodcast(self, datapath, etherFrame, inPort, arp_dstIP):
if etherFrame.dst == "ff:ff:ff:ff:ff:ff":
if self.packetToport[datapath.id]:
if ((etherFrame.src, arp_dstIP, inPort) in self.packetToport[datapath.id].keys()):
print "1"
return False
else:
print("Another muticast packet form %s at %i port in %s " % (
etherFrame.src, inPort, self._hostname_Check(datapath.id)))
print "packetToport: ", self.packetToport
print "arp_learning: ", self.arp_learning
self.packetToport[datapath.id][(etherFrame.src, arp_dstIP, inPort)] = time.time()
print "2"
return True
# else:
# # add to dictionary self.packetToport
# arpArriveTime = time.time()
# srcMac = etherFrame.src
# self.packetToport[datapath.id] = [srcMac, arp_dstIP, inPort, arpArriveTime]
# self.arp_learning[datapath.id] = [srcMac, inPort, arpArriveTime]
# print "packetToport: ", self.packetToport
# print "arp_learning: ", self.arp_learning
# print "3"
# return True
else:
print "4"
return True
def reply_arp(self, datapath, etherFrame, arpPacket, arp_dstIp, inPort, data):
"""flood the arp """
# print "flood"
dst = etherFrame.dst
dpid = datapath.id
if dst in self.mac_to_port[datapath.id]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = datapath.ofproto.OFPP_FLOOD
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
out = datapath.ofproto_parser.OFPPacketOut(datapath=datapath, buffer_id=0xffffffff,
in_port=inPort, actions=actions, data=data)
datapath.send_msg(out)
| 43.285714 | 133 | 0.60231 |
795becdecf4a0ae159771026317b4b5bd619ff5f | 21,876 | py | Python | object-detection/train_yolov3.py | auto-bwcx-me/aws-autonomous-driving-data-lake-image-extraction-pipeline-from-ros-bagfiles | 3abcade5e111c06a232560bcc6fa02cefe64a0bf | [
"MIT"
] | 5 | 2021-07-30T17:46:33.000Z | 2022-03-01T12:50:54.000Z | object-detection/train_yolov3.py | auto-bwcx-me/aws-autonomous-driving-data-lake-image-extraction-pipeline-from-ros-bagfiles | 3abcade5e111c06a232560bcc6fa02cefe64a0bf | [
"MIT"
] | 2 | 2022-03-01T07:09:15.000Z | 2022-03-10T03:03:52.000Z | object-detection/train_yolov3.py | aws-samples/aws-autonomous-driving-data-lake-image-extraction-pipeline-from-ros-bagfiles | 053e684c6a0b30cf095861c102dfc427f964d9b4 | [
"Apache-2.0",
"MIT"
] | 3 | 2022-03-10T09:01:09.000Z | 2022-03-10T14:50:35.000Z | """Train YOLOv3 with random shapes."""
import argparse
import os
import logging
import time
import warnings
import numpy as np
import mxnet as mx
print("mxnet_version: ",mx.__version__)
from mxnet import nd, gluon, autograd
import gluoncv as gcv
gcv.utils.check_version('0.8.0')
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultTrainTransform, YOLO3DefaultValTransform
from gluoncv.data.dataloader import RandomTransformDataLoader
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils import LRScheduler, LRSequential
import json
from matplotlib import pyplot as plt
logging.basicConfig(level=logging.DEBUG)
def _str2bool(value):
    """Parse a boolean CLI value: accepts true/false, yes/no, 1/0 (any case)."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 'yes', '1'):
        return True
    if lowered in ('false', 'no', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


def parse_args():
    """Parse training hyper-parameters and SageMaker environment settings.

    Fixes over the previous version:
    - ``--gpus``: argparse applies ``type=`` only to command-line strings,
      never to defaults, so the raw ``str`` env-var default made
      ``args.gpus > 0`` raise TypeError; convert the default with ``int()``.
    - ``--mixup``: ``type=bool`` treats any non-empty string (including
      'False') as True; use an explicit boolean parser instead.
    - ``--hosts``: ``type=list`` would split a CLI value into characters;
      parse it as JSON, matching the env-var default.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser(description='Train YOLO networks with random input shape.')
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    parser.add_argument('--test', type=str, default=os.environ['SM_CHANNEL_TEST'])
    parser.add_argument('--val', type=str, default=os.environ['SM_CHANNEL_VAL'])
    parser.add_argument("--checkpoint-dir", type=str, default="/opt/ml/checkpoints",
                        help="Path where checkpoints will be saved.")
    parser.add_argument('--dataset', type=str, choices=['custom', 'coco', 'voc'], default='custom',
                        help='Training dataset. Now support voc.')
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    # Parsed as JSON so a CLI override behaves like the env-var default.
    parser.add_argument('--hosts', type=json.loads, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--network', type=str, choices=['darknet53', 'mobilenet1.0'], default='darknet53',
                        help="Base network name which serves as feature extraction base.")
    parser.add_argument('--data-shape', type=int, default=512,
                        help="Input data shape for evaluation, use 320, 416, 608... " +
                        "Training is with random shapes from (320 to 608).")
    parser.add_argument('--batch-size', type=int, default=24, help='Training mini-batch size')
    parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
                        default=8, help='Number of data workers, you can use larger '
                        'number to accelerate data loading, if you CPU and GPUs are powerful.')
    parser.add_argument('--gpus', type=int, default=int(os.environ['SM_NUM_GPUS']),
                        help='Training with GPUs, you can specify 1,3 for example.')
    parser.add_argument('--epochs', type=int, default=1,
                        help='Training epochs.')
    parser.add_argument('--resume', type=str, default='',
                        help='Resume from previously saved parameters if not None. '
                        'For example, you can resume from ./yolo3_xxx_0123.params')
    parser.add_argument('--start-epoch', type=int, default=0,
                        help='Starting epoch for resuming, default is 0 for new training.'
                        'You can specify it to 100 for example to start from 100 epoch.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate, default is 0.001')
    parser.add_argument('--lr-mode', type=str, default='step',
                        help='learning rate scheduler mode. options are step, poly and cosine.')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-period', type=int, default=0,
                        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument('--lr-decay-epoch', type=str, default='160,180',
                        help='epochs at which learning rate decays. default is 160,180.')
    parser.add_argument('--warmup-lr', type=float, default=0.0,
                        help='starting warmup learning rate. default is 0.0.')
    parser.add_argument('--warmup-epochs', type=int, default=2,
                        help='number of warmup epochs.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum, default is 0.9')
    parser.add_argument('--wd', type=float, default=0.0005,
                        help='Weight decay, default is 5e-4')
    parser.add_argument('--log-interval', type=int, default=100,
                        help='Logging mini-batch interval. Default is 100.')
    parser.add_argument('--save-prefix', type=str, default='',
                        help='Saving parameter prefix')
    parser.add_argument('--save-interval', type=int, default=10,
                        help='Saving parameters epoch interval, best model will always be saved.')
    parser.add_argument('--val-interval', type=int, default=5,
                        help='Epoch interval for validation, increase the number will reduce the '
                        'training time if validation is slow.')
    parser.add_argument('--seed', type=int, default=233,
                        help='Random seed to be fixed.')
    parser.add_argument('--num-samples', type=int, default=-1,
                        help='Training images. Use -1 to automatically get the number.')
    parser.add_argument('--syncbn', action='store_true',
                        help='Use synchronize BN across devices.')
    parser.add_argument('--no-random-shape', action='store_true',
                        help='Use fixed size(data-shape) throughout the training, which will be faster '
                        'and require less memory. However, final model will be slightly worse.')
    parser.add_argument('--no-wd', action='store_true',
                        help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')
    # _str2bool instead of type=bool: bool('False') is True, which made it
    # impossible to disable mixup from the command line.
    parser.add_argument('--mixup', type=_str2bool, default=True,
                        help='whether to enable mixup.')
    parser.add_argument('--no-mixup-epochs', type=int, default=20,
                        help='Disable mixup training if enabled in the last N epochs.')
    parser.add_argument('--pretrained-model', type=str, choices=['Coco', 'None'], default='Coco',
                        help='Use a pre-trained model on Coco')
    parser.add_argument('--label-smooth', action='store_true', help='Use label smoothing.')
    args = parser.parse_args()
    return args
def get_dataset(args):
    """Load the train/val/test RecordFile datasets and the VOC07 mAP metric.

    Side effects: fills in ``args.num_samples`` when it is negative and wraps
    the train set in MixupDetection when ``args.mixup`` is set.
    """
    train_dataset = gcv.data.RecordFileDetection(args.train + '/train.rec', coord_normalized=True)
    val_dataset = gcv.data.RecordFileDetection(args.val + '/val.rec', coord_normalized=True)
    test_dataset = gcv.data.RecordFileDetection(args.test + '/test.rec', coord_normalized=True)
    # Single-class detection problem.
    class_names = ['car']
    val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=class_names)
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, test_dataset, val_metric
def get_dataloader(net, train_dataset, val_dataset, test_dataset, data_shape, batch_size, num_workers, args):
    """Build the train/val/test DataLoaders for YOLOv3.

    Returns (train_loader, val_loader, test_loader), or None when
    ``train_dataset`` is None (mirroring the original implicit behaviour).
    """
    if train_dataset is None:
        return None
    width = height = data_shape
    # Train batches: stack the image plus five fixed targets, pad the
    # variable-length ground-truth box list with -1.
    train_batchify = Tuple(*([Stack() for _ in range(6)] + [Pad(axis=0, pad_val=-1) for _ in range(1)]))
    train_loader = mx.gluon.data.DataLoader(
        train_dataset.transform(YOLO3DefaultTrainTransform(width, height, net, mixup=False)),
        batch_size, True, batchify_fn=train_batchify, last_batch='rollover', num_workers=num_workers)
    # Eval batches: image plus padded label tensor.
    val_batchify = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = mx.gluon.data.DataLoader(
        val_dataset.transform(YOLO3DefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify, last_batch='keep', num_workers=num_workers)
    test_batchify = Tuple(Stack(), Pad(pad_val=-1))
    test_loader = mx.gluon.data.DataLoader(
        test_dataset.transform(YOLO3DefaultValTransform(width, height)),
        batch_size, False, batchify_fn=test_batchify, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader, test_loader
def save_params(net, best_map, current_map, epoch, save_interval, prefix, checkpoint_dir):
    """Save model parameters when the mAP improves, plus periodic snapshots.

    Args:
        net: network whose parameters are saved via ``net.save_parameters()``.
        best_map: single-element list holding the best mAP so far; updated
            in place when ``current_map`` beats it.
        current_map: mAP of the current epoch.
        epoch: current epoch number.
        save_interval: save a periodic snapshot every N epochs (0 disables).
        prefix: file-name prefix for the saved parameter files.
        checkpoint_dir: directory the parameter files are written into.
    """
    current_map = float(current_map)
    if current_map > best_map[0]:
        logging.info('current_map {} > best_map {}'.format(current_map, best_map[0]))
        best_map[0] = current_map
        # Bug fix: the original format strings contained a single placeholder
        # but were given four arguments, so prefix/epoch/mAP were silently
        # dropped and the file ended up next to (not inside) checkpoint_dir.
        best_path = os.path.join(checkpoint_dir, '{:s}_best.params'.format(prefix))
        print(best_path)
        net.save_parameters(best_path)
        with open(prefix + '_best_map.log', 'a') as f:
            f.write('{:04d}:\t{:.4f}\n'.format(epoch, current_map))
    if save_interval and epoch % save_interval == 0:
        net.save_parameters(os.path.join(
            checkpoint_dir, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
def validate(net, val_data, ctx, eval_metric):
    """Run inference over the validation set and return (metric names, values)."""
    eval_metric.reset()
    # Constrain non-maximum suppression so evaluation is deterministic.
    net.set_nms(nms_thresh=0.45, nms_topk=400)
    mx.nd.waitall()
    net.hybridize()
    for batch in val_data:
        images = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        det_bboxes, det_ids, det_scores = [], [], []
        gt_bboxes, gt_ids, gt_difficults = [], [], []
        for img, target in zip(images, targets):
            # Forward pass: predictions per device slice.
            ids, scores, bboxes = net(img)
            det_ids.append(ids)
            det_scores.append(scores)
            # Clip predicted boxes to the image size.
            det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
            # Split the label tensor into ids / boxes / optional difficult flag.
            gt_ids.append(target.slice_axis(axis=-1, begin=4, end=5))
            gt_bboxes.append(target.slice_axis(axis=-1, begin=0, end=4))
            gt_difficults.append(target.slice_axis(axis=-1, begin=5, end=6) if target.shape[-1] > 5 else None)
        eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
    return eval_metric.get()
def train(net, train_data, val_data, eval_metric, ctx, args):
    """Training pipeline: SGD with warmup + decay schedule, optional mixup,
    periodic validation via validate() and checkpointing via save_params().

    Args:
        net: YOLOv3 network to train (parameters already on ``ctx``).
        train_data: training DataLoader yielding (image, 5 fixed targets, gt boxes).
        val_data: validation DataLoader.
        eval_metric: mAP metric passed through to validate().
        ctx: list of mxnet contexts to split each batch across.
        args: parsed CLI namespace (see parse_args).
    """
    if args.no_wd:
        # Remove weight decay from biases and batch-norm beta/gamma.
        for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0
    if args.label_smooth:
        net._target_generator._label_smooth = True
    if args.lr_decay_period > 0:
        # Periodic decay every lr_decay_period epochs.
        lr_decay_epoch = list(range(args.lr_decay_period, args.epochs, args.lr_decay_period))
    else:
        # Explicit decay epochs, e.g. '160,180'.
        lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
    # Linear warmup followed by the configured decay mode.
    # NOTE(review): iters_per_epoch is set to args.batch_size here; LRScheduler
    # normally expects the number of batches per epoch — confirm intent.
    lr_scheduler = LRSequential([
        LRScheduler('linear', base_lr=0, target_lr=args.lr,
                    nepochs=args.warmup_epochs, iters_per_epoch=args.batch_size),
        LRScheduler(args.lr_mode, base_lr=args.lr,
                    nepochs=args.epochs - args.warmup_epochs,
                    iters_per_epoch=args.batch_size,
                    step_epoch=lr_decay_epoch,
                    step_factor=args.lr_decay, power=2),
    ])
    trainer = gluon.Trainer(
        net.collect_params(), 'sgd',
        {'wd': args.wd, 'momentum': args.momentum, 'lr_scheduler': lr_scheduler},
        kvstore='local')
    # targets
    # NOTE(review): sigmoid_ce and l1_loss are created but never used below —
    # the network itself returns the individual losses.
    sigmoid_ce = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    l1_loss = gluon.loss.L1Loss()
    # metrics
    obj_metrics = mx.metric.Loss('ObjLoss')
    center_metrics = mx.metric.Loss('BoxCenterLoss')
    scale_metrics = mx.metric.Loss('BoxScaleLoss')
    cls_metrics = mx.metric.Loss('ClassLoss')
    logging.info('Start training from [Epoch {}]'.format(args.start_epoch))
    best_map = [0]
    for epoch in range(args.start_epoch, args.epochs):
        if args.mixup:
            # TODO(zhreshold): more elegant way to control mixup during runtime
            try:
                train_data._dataset.set_mixup(np.random.beta, 1.5, 1.5)
            except AttributeError:
                train_data._dataset._data.set_mixup(np.random.beta, 1.5, 1.5)
            # Disable mixup for the final no_mixup_epochs epochs.
            if epoch >= args.epochs - args.no_mixup_epochs:
                try:
                    train_data._dataset.set_mixup(None)
                except AttributeError:
                    train_data._dataset._data.set_mixup(None)
        tic = time.time()
        btic = time.time()
        mx.nd.waitall()
        net.hybridize()
        for i, batch in enumerate(train_data):
            batch_size = batch[0].shape[0]
            # Split each tensor of the batch across the devices in ctx.
            data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            # objectness, center_targets, scale_targets, weights, class_targets
            fixed_targets = [gluon.utils.split_and_load(batch[it], ctx_list=ctx, batch_axis=0) for it in range(1, 6)]
            gt_boxes = gluon.utils.split_and_load(batch[6], ctx_list=ctx, batch_axis=0)
            sum_losses = []
            obj_losses = []
            center_losses = []
            scale_losses = []
            cls_losses = []
            with autograd.record():
                # Forward per device; the net returns its component losses.
                for ix, x in enumerate(data):
                    obj_loss, center_loss, scale_loss, cls_loss = net(x, gt_boxes[ix], *[ft[ix] for ft in fixed_targets])
                    sum_losses.append(obj_loss + center_loss + scale_loss + cls_loss)
                    obj_losses.append(obj_loss)
                    center_losses.append(center_loss)
                    scale_losses.append(scale_loss)
                    cls_losses.append(cls_loss)
                autograd.backward(sum_losses)
            trainer.step(args.batch_size)
            obj_metrics.update(0, obj_losses)
            center_metrics.update(0, center_losses)
            scale_metrics.update(0, scale_losses)
            cls_metrics.update(0, cls_losses)
            #if args.log_interval and not (i + 1) % args.log_interval:
            #    name1, loss1 = obj_metrics.get()
            #    name2, loss2 = center_metrics.get()
            #    name3, loss3 = scale_metrics.get()
            #    name4, loss4 = cls_metrics.get()
            #    logging.info('[Epoch {}][Batch {}], LR: {:.2E}, Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f},'.format(
            #        epoch, i, trainer.learning_rate, batch_size/(time.time()-btic), name1, loss1, name2, loss2, name3, loss3, name4, loss4))
            btic = time.time()
        # End-of-epoch loss summary.
        name1, loss1 = obj_metrics.get()
        name2, loss2 = center_metrics.get()
        name3, loss3 = scale_metrics.get()
        name4, loss4 = cls_metrics.get()
        logging.info('[Epoch {}] Training time: {:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f},'.format(
            epoch, (time.time()-tic), name1, loss1, name2, loss2, name3, loss3, name4, loss4))
        if not (epoch + 1) % args.val_interval:
            # consider reduce the frequency of validation to save time
            map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
            val_msg = '\n'.join(['{}={},'.format(k, v) for k, v in zip(["val:" + metric for metric in map_name], mean_ap)])
            logging.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
            current_map = float(mean_ap[-1])
        else:
            # No validation this epoch: 0 guarantees no "best" checkpoint.
            current_map = 0.
        save_params(net, best_map, current_map, epoch, args.save_interval, args.save_prefix, args.checkpoint_dir)
    print("saved to: ")
    print('{:s}/model'.format(args.model_dir))
    # Export symbol + params for deployment (SageMaker model dir).
    net.export(path='{:s}/model'.format(args.model_dir))
# ------------------------------------------------------------ #
# Hosting methods #
# ------------------------------------------------------------ #
def get_ctx():
    """Return the best available MXNet context: GPU, then EIA, then CPU.

    A tiny array allocation is attempted on each candidate context; the
    first one that succeeds wins.
    """
    # `except Exception` instead of a bare `except:` so KeyboardInterrupt /
    # SystemExit are not swallowed while probing for hardware.
    try:
        _ = mx.nd.array([0], ctx=mx.gpu())
        ctx = mx.gpu()
    except Exception:
        try:
            _ = mx.nd.array([0], ctx=mx.eia())
            ctx = mx.eia()
        except Exception:
            ctx = mx.cpu()
    return ctx
def model_fn(model_dir):
    """
    Load the gluon model. Called once when hosting service starts.
    :param: model_dir The directory where model files are stored.
    :return: a model (in this case a Gluon network)
    """
    logging.info('Invoking user-defined model_fn')
    import neomx
    logging.info('MXNet version used for model loading {}'.format(mx.__version__))
    # Select CPU/GPU context.
    ctx = get_ctx()
    symbol_file = '%s/compiled-symbol.json' % model_dir
    params_file = '%s/compiled-0000.params' % model_dir
    net = gluon.SymbolBlock.imports(symbol_file, ['data'], params_file, ctx=ctx)
    net.hybridize(static_alloc=True, static_shape=True)
    # Run one warm-up forward pass on dummy data so the first real request
    # does not pay the graph-initialization cost.
    warmup_data = mx.nd.empty((1, 3, 512, 512), ctx=ctx)
    net(warmup_data)
    return net
def transform_fn(net, data, input_content_type, output_content_type):
    """
    Transform a request using the Gluon model. Called once per request.
    :param net: The Gluon model.
    :param data: The request payload.
    :param input_content_type: The request content type.
    :param output_content_type: The (desired) response content type.
    :return: response payload and content type.
    """
    logging.info("Invoking user defined transform_fn")
    import gluoncv as gcv
    # Change context to mx.cpu() when optimizing and deploying with Neo for
    # CPU endpoints.
    ctx = get_ctx()
    # Content types could vary the handling; JSON is assumed for both sides.
    image_array = mx.nd.array(json.loads(data))
    # Preprocess exactly as at training time (short side 512).
    batch, _ = gcv.data.transforms.presets.yolo.transform_test(image_array, 512)
    class_ids, scores, boxes = net(batch.as_in_context(ctx))
    # Serialize the three prediction tensors as nested JSON lists.
    payload = [t.asnumpy().tolist() for t in (class_ids, scores, boxes)]
    return json.dumps(payload), output_content_type
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
if __name__ == '__main__':
    args = parse_args()
    # fix seed for mxnet, numpy and python builtin random generator.
    gutils.random.seed(args.seed)
    # training contexts
    if args.gpus > 0:
        ctx = [mx.gpu(int(i)) for i in list(range(0, args.gpus))]
    else:
        # Bug fix: the original `ctx = ctx if ctx else [mx.cpu()]` raised
        # NameError because ctx was not defined yet in this branch.
        ctx = [mx.cpu()]
    print("ctx: ", ctx)
    # network
    net_name = '_'.join(('yolo3', args.network, args.dataset))
    logging.info('net_name: {}'.format(net_name))
    args.save_prefix += net_name
    # use sync bn if specified
    num_sync_bn_devices = len(ctx) if args.syncbn else -1
    classes = ['car']
    if args.syncbn and len(ctx) > 1:
        # Bug fix: num_sync_bn_devices is already an int (len(ctx)); the
        # original wrapped it in len() again, which raised TypeError.
        net = get_model(net_name, pretrained_base=False, transfer='coco', norm_layer=gluon.contrib.nn.SyncBatchNorm,
                        norm_kwargs={'num_devices': num_sync_bn_devices}, classes=classes)
        async_net = get_model(net_name, pretrained_base=False, transfer='coco', classes=classes)  # used by cpu worker
    else:
        if args.pretrained_model == 'Coco':
            logging.info('using Coco pre-trained model')
            net = get_model(net_name, norm_layer=gluon.nn.BatchNorm,
                            classes=classes, pretrained_base=False, transfer='coco')
        else:
            logging.info('training model from scratch - no pre-trained model is used.')
            net = get_model(net_name, norm_layer=gluon.nn.BatchNorm, pretrained_base=False)
        async_net = net
    if args.resume.strip():
        # Resume both copies from the given checkpoint.
        net.load_parameters(args.resume.strip())
        async_net.load_parameters(args.resume.strip())
    else:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            net.initialize()
            async_net.initialize()
    # training data
    train_dataset, val_dataset, test_dataset, eval_metric = get_dataset(args)
    train_data, val_data, test_data = get_dataloader(
        async_net, train_dataset, val_dataset, test_dataset, args.data_shape, args.batch_size, args.num_workers, args)
    net.collect_params().reset_ctx(ctx)
    # Baseline evaluation before any fine-tuning.
    map_name, mean_ap = validate(net, test_data, ctx, eval_metric)
    val_msg = '\n'.join(['{}={},'.format(k, v) for k, v in zip(["test:" + metric for metric in map_name], mean_ap)])
    logging.info('Performance on test set before finetuning: \n{}'.format(val_msg))
    start_time_train = time.time()
    # training
    train(net, train_data, val_data, eval_metric, ctx, args)
    logging.info("--- %s training seconds ---" % (time.time() - start_time_train))
    # Evaluation after fine-tuning, with throughput measurement.
    start_time_test = time.time()
    map_name, mean_ap = validate(net, test_data, ctx, eval_metric)
    speed = len(test_data) / (time.time() - start_time_test)
    print('Throughput is %f img/sec.' % speed)
    val_msg = '\n'.join(['{}={},'.format(k, v) for k, v in zip(["test:" + metric for metric in map_name], mean_ap)])
    logging.info('Performance on test set after finetuning: \n{}'.format(val_msg))
795becdf6ee42da0866ca3f00bfd660b5d655d79 | 6,453 | py | Python | dataset.py | RobinSmits/Dutch-NLP-Experiments | e7d48cc77650a7600341095ee236ac3760074cfc | [
"MIT"
] | null | null | null | dataset.py | RobinSmits/Dutch-NLP-Experiments | e7d48cc77650a7600341095ee236ac3760074cfc | [
"MIT"
] | null | null | null | dataset.py | RobinSmits/Dutch-NLP-Experiments | e7d48cc77650a7600341095ee236ac3760074cfc | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
from transformers import AutoTokenizer
from typing import Tuple
from urllib.request import urlopen
def _download_if_missing(url, file_path, description):
    """Download url to file_path unless the file already exists (helper)."""
    if os.path.isfile(file_path):
        print('%s File exist' % description)
        return
    print('Downloading: %s File....' % description)
    # Download File and save
    with urlopen(url) as file_stream:
        file_data = file_stream.read()
    with open(file_path, 'wb') as f:
        f.write(file_data)


def download_articles_by_publisher(cache_dir: str)->None:
    """Fetch the DPG Media 2019 articles and labels jsonl dumps into cache_dir.

    Files already present are left untouched. The duplicated per-file logic of
    the original version is factored into _download_if_missing.
    """
    # URLs taken from: https://github.com/dpgmedia/partisan-news2019
    articles_by_publisher_url = 'https://partisan-news2019.s3-eu-west-1.amazonaws.com/dpgMedia2019-articles-bypublisher.jsonl'
    labels_by_publisher_url = 'https://github.com/dpgmedia/partisan-news2019/raw/master/dpgMedia2019-labels-bypublisher.jsonl'
    _download_if_missing(articles_by_publisher_url,
                         os.path.join(cache_dir, 'dpgMedia2019-articles-bypublisher.jsonl'),
                         'Articles By Publisher')
    _download_if_missing(labels_by_publisher_url,
                         os.path.join(cache_dir, 'dpgMedia2019-labels-bypublisher.jsonl'),
                         'Labels By Publisher')
def get_dpgnews_df(cache_dir: str)->pd.DataFrame:
    """Inner-join the articles and labels jsonl dumps on article id and
    return the result shuffled with a fixed seed for reproducibility."""
    # Set 1: Articles
    articles = pd.read_json(os.path.join(cache_dir, 'dpgMedia2019-articles-bypublisher.jsonl'),
                            lines=True).set_index('id')
    print(articles.shape)
    # Set 2: Labels
    labels = pd.read_json(os.path.join(cache_dir, 'dpgMedia2019-labels-bypublisher.jsonl'),
                          lines=True).set_index('id')
    print(labels.shape)
    # Finalize Full Data
    joined = articles.join(labels, on=['id'], how='inner')
    print(joined.shape)
    # Shuffle all rows deterministically.
    return joined.sample(frac=1.0, random_state=42)
def tokenize_dpgnews_df(df: pd.DataFrame, max_len: int, tokenizer: AutoTokenizer)->Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Tokenize every article in df for a classifier head.

    Returns (input_ids, input_masks, labels) where labels is 1 for
    partisan == 'true' and 0 otherwise.
    """
    n_rows = df.shape[0]
    # Pre-allocated encoder inputs and the binary target vector.
    input_ids = np.zeros((n_rows, max_len), dtype='int32')
    input_masks = np.zeros((n_rows, max_len), dtype='int32')
    labels = np.zeros((n_rows,), dtype='int32')
    for pos, (_, record) in tqdm(enumerate(df.iterrows()), total=n_rows):
        encoded = tokenizer.encode_plus(record['text'], add_special_tokens=True,
                                        max_length=max_len, truncation=True,
                                        padding='max_length')
        input_ids[pos, :] = encoded['input_ids']
        input_masks[pos, :] = encoded['attention_mask']
        labels[pos] = 1 if record['partisan'] == 'true' else 0
    return (input_ids, input_masks, labels)
def tokenize_t5_dpgnews_df(df: pd.DataFrame, max_len: int, max_label_len: int, tokenizer: AutoTokenizer)->Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Encode articles for T5: task-prefixed input plus a short text label.

    Returns (input_ids, input_masks, output_ids, output_masks, labels) where
    labels is the numeric form (1 for partisan == 'true', else 0) and the
    decoder target text is 'politiek' or 'neutraal'.
    """
    n_rows = df.shape[0]
    # Encoder-side buffers.
    input_ids = np.zeros((n_rows, max_len), dtype='int32')
    input_masks = np.zeros((n_rows, max_len), dtype='int32')
    # Decoder-side buffers plus the numeric label vector.
    output_ids = np.zeros((n_rows, max_label_len), dtype='int32')
    output_masks = np.zeros((n_rows, max_label_len), dtype='int32')
    labels = np.zeros((n_rows,), dtype='int32')
    for pos, (_, record) in tqdm(enumerate(df.iterrows()), total=n_rows):
        partisan = record['partisan']
        # Input: T5 task prefix followed by the article text.
        encoded_in = tokenizer.encode_plus('classificeer: ' + record['text'],
                                           add_special_tokens=True, max_length=max_len,
                                           truncation=True, padding='max_length')
        input_ids[pos, :] = encoded_in['input_ids']
        input_masks[pos, :] = encoded_in['attention_mask']
        # Output: the textual class name expected from the decoder.
        labels[pos] = 1 if partisan == 'true' else 0
        encoded_out = tokenizer.encode_plus('politiek' if partisan == 'true' else 'neutraal',
                                            add_special_tokens=True, max_length=max_label_len,
                                            truncation=True, padding='max_length')
        output_ids[pos, :] = encoded_out['input_ids']
        output_masks[pos, :] = encoded_out['attention_mask']
    return (input_ids, input_masks, output_ids, output_masks, labels)
def create_dataset(input_ids: np.ndarray, input_masks: np.ndarray, labels: np.ndarray)->tf.data.Dataset:
    """Wrap encoded inputs and labels in a tf.data.Dataset of (features, label) pairs."""
    features = {'input_ids': input_ids, 'attention_mask': input_masks}
    return tf.data.Dataset.from_tensor_slices((features, labels))
def create_train_dataset(input_ids: np.ndarray, input_masks: np.ndarray, labels: np.ndarray, batch_size: int)->tf.data.Dataset:
    """Shuffled, batched, endlessly repeating, prefetching training dataset."""
    return (create_dataset(input_ids, input_masks, labels)
            .shuffle(1024, reshuffle_each_iteration=True)
            .batch(batch_size)
            .repeat(-1)
            .prefetch(1024))
def create_validation_dataset(input_ids: np.ndarray, input_masks: np.ndarray, labels: np.ndarray, batch_size: int)->tf.data.Dataset:
    """Batched, endlessly repeating validation dataset (no shuffling)."""
    return (create_dataset(input_ids, input_masks, labels)
            .batch(batch_size)
            .repeat(-1)
            .prefetch(1024))
def create_svc_dataset(input_ids: np.ndarray, input_masks: np.ndarray, labels: np.ndarray, batch_size: int)->tf.data.Dataset:
    """Single-pass batched dataset (no shuffle/repeat) for feature extraction."""
    return (create_dataset(input_ids, input_masks, labels)
            .batch(batch_size)
            .prefetch(1024))
795bedae1fe45cee289ae996c2536730b4107ef4 | 556 | py | Python | src/icotools/icosoc/mod_gpio/mod_gpio.py | dm7h/fpga-event-recorder | 4e53babbbb514ee375f4b5585b1d24e5b40f8df7 | [
"0BSD"
] | null | null | null | src/icotools/icosoc/mod_gpio/mod_gpio.py | dm7h/fpga-event-recorder | 4e53babbbb514ee375f4b5585b1d24e5b40f8df7 | [
"0BSD"
] | null | null | null | src/icotools/icosoc/mod_gpio/mod_gpio.py | dm7h/fpga-event-recorder | 4e53babbbb514ee375f4b5585b1d24e5b40f8df7 | [
"0BSD"
] | null | null | null |
def generate_c_code(icosoc_h, icosoc_c, mod):
    """Append the C accessor helpers for one GPIO module instance to icosoc_h.

    The @name@/@addr@ placeholders are filled from the module description.
    icosoc_c is unused here; kept for interface parity with other mod types.
    """
    template = """
static inline void icosoc_@name@_set(uint32_t bitmask) {
    *(volatile uint32_t*)(0x20000000 + @addr@ * 0x10000) = bitmask;
}
static inline uint32_t icosoc_@name@_get() {
    return *(volatile uint32_t*)(0x20000000 + @addr@ * 0x10000);
}
static inline void icosoc_@name@_dir(uint32_t bitmask) {
    *(volatile uint32_t*)(0x20000004 + @addr@ * 0x10000) = bitmask;
}
"""
    rendered = template
    for placeholder, value in (("@name@", mod["name"]), ("@addr@", mod["addr"])):
        rendered = rendered.replace(placeholder, value)
    icosoc_h.append(rendered)
| 26.47619 | 67 | 0.663669 |
795bedb448a265216e0d8babeee40046b167767a | 386 | py | Python | node/no_op.py | muddyfish/PYKE | f1bb0f5d7af5663129bd37ca58a0246e5a3699c7 | [
"MIT"
] | 24 | 2016-02-21T07:41:45.000Z | 2021-08-12T04:34:00.000Z | node/no_op.py | muddyfish/PYKE | f1bb0f5d7af5663129bd37ca58a0246e5a3699c7 | [
"MIT"
] | 1 | 2017-08-18T08:14:57.000Z | 2017-08-19T14:59:08.000Z | node/no_op.py | muddyfish/PYKE | f1bb0f5d7af5663129bd37ca58a0246e5a3699c7 | [
"MIT"
] | 4 | 2016-08-06T18:07:13.000Z | 2017-08-12T13:51:52.000Z | #!/usr/bin/env python
import lang_ast
from nodes import Node
class NoOp(Node):
char = " "
contents = " "
args = 1
results = 1
def func(self, arg):
"""Does nothing"""
return [arg]
@classmethod
def accepts(cls, code):
if code[:1] in cls.char+lang_ast.AST.END_CHARS:
return code[1:], cls()
return None, None
| 17.545455 | 55 | 0.549223 |
795bf012e571e695aa3fa5f17ab69e7a20d12242 | 429 | py | Python | 0/2/2839/2839.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | 3 | 2017-07-08T16:29:06.000Z | 2020-07-20T00:17:45.000Z | 0/2/2839/2839.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | null | null | null | 0/2/2839/2839.py | chr0m3/boj-codes | d71d0a22d0a3ae62c225f382442461275f56fe8f | [
"MIT"
] | 2 | 2017-11-20T14:06:06.000Z | 2020-07-20T00:17:47.000Z | target = int(input())
def min_bags(target):
    """Return the minimum number of 3kg/5kg bags summing to target, or -1.

    Maximizing the number of 5kg bags minimizes the total count, because
    count = target/3 - 2*fives/3 decreases as `fives` grows; so the first
    feasible split found while scanning fives downward is optimal.
    """
    for fives in range(target // 5, -1, -1):
        remainder = target - fives * 5
        if remainder % 3 == 0:
            return fives + remainder // 3
    return -1


if __name__ == '__main__':
    # Same stdin/stdout behaviour as the original flat script; the guard
    # lets the function be imported without consuming input. The unused
    # `now = 0` from the original was dropped.
    target = int(input())
    print(min_bags(target))
795bf0b584a30b985d595629f3ac491ae344235c | 22,340 | py | Python | tests/mobly/controllers/android_device_lib/services/logcat_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 532 | 2016-11-07T22:01:00.000Z | 2022-03-30T17:11:40.000Z | tests/mobly/controllers/android_device_lib/services/logcat_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 528 | 2016-11-22T01:42:19.000Z | 2022-03-24T02:27:15.000Z | tests/mobly/controllers/android_device_lib/services/logcat_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 169 | 2016-11-18T15:12:26.000Z | 2022-03-24T01:22:08.000Z | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import mock
import os
import shutil
import tempfile
import unittest
from mobly import records
from mobly import utils
from mobly import runtime_test_info
from mobly.controllers import android_device
from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib.services import logcat
from tests.lib import mock_android_device
# The expected result of the cat adb operation.
MOCK_ADB_LOGCAT_CAT_RESULT = [
    '02-29 14:02:21.456 4454 Something\n',
    '02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output: the cat result lines wrapped in one
# earlier and one later line, so time-windowed extraction can be tested.
MOCK_ADB_LOGCAT = (u'02-29 14:02:19.123 4454 Nothing\n'
                   u'%s'
                   u'02-29 14:02:22.123 4454 Something again and again\n'
                   ) % u''.join(MOCK_ADB_LOGCAT_CAT_RESULT)
# The expected result of the cat adb operation (with a non-ASCII character
# to exercise unicode handling).
MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT = [
    '02-29 14:02:21.456 4454 Something \u901a\n',
    '02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output.
MOCK_ADB_UNICODE_LOGCAT = (
    u'02-29 14:02:19.123 4454 Nothing\n'
    u'%s'
    u'02-29 14:02:22.123 4454 Something again and again\n'
) % u''.join(MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT)
# Mock start and end time of the adb cat.
MOCK_ADB_LOGCAT_BEGIN_TIME = '02-29 14:02:20.123'
MOCK_ADB_LOGCAT_END_TIME = '02-29 14:02:22.000'
# Mock AdbError for missing logpersist scripts
# NOTE(review): the stderr argument is a str in the first error and bytes in
# the second, and the START error reuses the "logpersist.stop" message text —
# confirm whether either inconsistency is intentional.
MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR = adb.AdbError(
    'logpersist.stop --clear', b'',
    '/system/bin/sh: logpersist.stop: not found', 0)
MOCK_LOGPERSIST_START_MISSING_ADB_ERROR = adb.AdbError(
    'logpersist.start --clear', b'',
    b'/system/bin/sh: logpersist.stop: not found', 0)
class LogcatTest(unittest.TestCase):
"""Tests for Logcat service and its integration with AndroidDevice."""
def setUp(self):
# Set log_path to logging since mobly logger setup is not called.
if not hasattr(logging, 'log_path'):
setattr(logging, 'log_path', '/tmp/logs')
# Creates a temp dir to be used by tests in this test class.
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
"""Removes the temp dir.
"""
shutil.rmtree(self.tmp_dir)
def AssertFileContains(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertIn(content, output)
def AssertFileDoesNotContain(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertNotIn(content, output)
  # Decorators are applied bottom-up, so mock parameters arrive in reverse
  # order: get_log_file_timestamp -> get_timestamp_mock, ... AdbProxy ->
  # MockAdbProxy.
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.create_dir')
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch.object(logcat.Logcat, '_open_logcat_file')
  @mock.patch('mobly.logger.get_log_file_timestamp')
  def test_start_and_stop(self, get_timestamp_mock, open_logcat_mock,
                          stop_proc_mock, start_proc_mock, create_dir_mock,
                          FastbootProxy, MockAdbProxy):
    """Verifies the steps of collecting adb logcat on an AndroidDevice
    object, including various function calls and the expected behaviors of
    the calls.

    Checks: the spawned adb shell command and its output path, the guard
    against double-start, and that stop tears the subprocess down.
    """
    mock_serial = '1'
    # Fixed timestamp so the expected log file name is deterministic.
    get_timestamp_mock.return_value = '123'
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service.start()
    # Verify start did the correct operations.
    self.assertTrue(logcat_service._adb_logcat_process)
    expected_log_path = os.path.join(logging.log_path,
                                     'AndroidDevice%s' % ad.serial,
                                     'logcat,%s,fakemodel,123.txt' % ad.serial)
    create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
    adb_cmd = ' "adb" -s %s logcat -v threadtime -T 1 >> %s'
    start_proc_mock.assert_called_with(adb_cmd %
                                       (ad.serial, '"%s" ' % expected_log_path),
                                       shell=True)
    self.assertEqual(logcat_service.adb_logcat_file_path, expected_log_path)
    expected_msg = ('Logcat thread is already running, cannot start another'
                    ' one.')
    # Expect error if start is called back to back.
    with self.assertRaisesRegex(logcat.Error, expected_msg):
      logcat_service.start()
    # Verify stop did the correct operations.
    logcat_service.stop()
    stop_proc_mock.assert_called_with('process')
    self.assertIsNone(logcat_service._adb_logcat_process)
    self.assertEqual(logcat_service.adb_logcat_file_path, expected_log_path)
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.create_dir')
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch.object(logcat.Logcat, '_open_logcat_file')
  def test_update_config(self, open_logcat_mock, stop_proc_mock,
                         start_proc_mock, create_dir_mock, FastbootProxy,
                         MockAdbProxy):
    """Verifies that update_config applied to a stopped service takes effect
    on the next start: new logcat params and output file path are used.
    """
    mock_serial = '1'
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    # Service must be stopped before its config may be updated.
    logcat_service.start()
    logcat_service.stop()
    new_log_params = '-a -b -c'
    new_file_path = 'some/path/log.txt'
    new_config = logcat.Config(logcat_params=new_log_params,
                               output_file_path=new_file_path)
    logcat_service.update_config(new_config)
    logcat_service.start()
    self.assertTrue(logcat_service._adb_logcat_process)
    # The new output directory is created and the new params appear in the
    # spawned adb command line.
    create_dir_mock.assert_has_calls([mock.call('some/path')])
    expected_adb_cmd = (' "adb" -s 1 logcat -v threadtime -T 1 -a -b -c >> '
                        '"some/path/log.txt" ')
    start_proc_mock.assert_called_with(expected_adb_cmd, shell=True)
    self.assertEqual(logcat_service.adb_logcat_file_path, 'some/path/log.txt')
    logcat_service.stop()
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.create_dir')
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch.object(logcat.Logcat, '_open_logcat_file')
  def test_update_config_while_running(self, open_logcat_mock, stop_proc_mock,
                                       start_proc_mock, create_dir_mock,
                                       FastbootProxy, MockAdbProxy):
    """update_config on a *running* service must raise logcat.Error and
    leave the service running untouched.
    """
    mock_serial = '1'
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service.start()
    new_config = logcat.Config(logcat_params='-blah',
                               output_file_path='some/path/file.txt')
    with self.assertRaisesRegex(
        logcat.Error,
        'Logcat thread is already running, cannot start another one'):
      logcat_service.update_config(new_config)
    # The failed update must not have stopped the service.
    self.assertTrue(logcat_service.is_alive)
    logcat_service.stop()
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.create_dir')
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch.object(logcat.Logcat, '_open_logcat_file')
  @mock.patch(
      'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
      return_value=mock_android_device.MockAdbProxy('1'))
  def test_pause_and_resume(self, clear_adb_mock, open_logcat_mock,
                            stop_proc_mock, start_proc_mock, create_dir_mock,
                            FastbootProxy, MockAdbProxy):
    """pause() kills the logcat subprocess; resume() restarts it WITHOUT
    clearing the device log again (clear only happens on the initial start).
    """
    mock_serial = '1'
    # clear_log=True makes start() clear the device-side log first.
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad, logcat.Config(clear_log=True))
    logcat_service.start()
    clear_adb_mock.assert_called_once_with()
    self.assertTrue(logcat_service.is_alive)
    logcat_service.pause()
    self.assertFalse(logcat_service.is_alive)
    stop_proc_mock.assert_called_with('process')
    self.assertIsNone(logcat_service._adb_logcat_process)
    clear_adb_mock.reset_mock()
    logcat_service.resume()
    self.assertTrue(logcat_service.is_alive)
    # Resuming must not wipe the log that accumulated while paused.
    clear_adb_mock.assert_not_called()
    logcat_service.stop()
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch(
      'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
      return_value=mock_android_device.MockAdbProxy('1'))
  def test_logcat_service_create_output_excerpts(self, clear_adb_mock,
                                                 stop_proc_mock,
                                                 start_proc_mock, FastbootProxy,
                                                 MockAdbProxy):
    """create_output_excerpts copies only the log written since the last
    excerpt (or since the file pointer was opened) into a per-test file.
    """
    mock_serial = '1'
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._start()

    def _write_logcat_file_and_assert_excerpts_exists(logcat_file_content,
                                                      test_begin_time,
                                                      test_name):
      # Helper: append content to the live logcat file, cut an excerpt for a
      # fake test record, and assert the excerpt lands at the expected path.
      with open(logcat_service.adb_logcat_file_path, 'a') as f:
        f.write(logcat_file_content)
      test_output_dir = os.path.join(self.tmp_dir, test_name)
      mock_record = records.TestResultRecord(test_name)
      mock_record.begin_time = test_begin_time
      mock_record.signature = f'{test_name}-{test_begin_time}'
      test_run_info = runtime_test_info.RuntimeTestInfo(test_name,
                                                        test_output_dir,
                                                        mock_record)
      actual_path = logcat_service.create_output_excerpts(test_run_info)[0]
      # Expected layout: <output_dir>/<signature>/logcat,<serial>,<model>,<signature>.txt
      expected_path = os.path.join(
          test_output_dir, '{test_name}-{test_begin_time}'.format(
              test_name=test_name, test_begin_time=test_begin_time),
          'logcat,{mock_serial},fakemodel,{test_name}-{test_begin_time}.txt'.
          format(mock_serial=mock_serial,
                 test_name=test_name,
                 test_begin_time=test_begin_time))
      self.assertEqual(actual_path, expected_path)
      self.assertTrue(os.path.exists(expected_path))
      return expected_path
    # Generate logs before the file pointer is created.
    # This message will not be captured in the excerpt.
    NOT_IN_EXCERPT = 'Not in excerpt.\n'
    with open(logcat_service.adb_logcat_file_path, 'a') as f:
      f.write(NOT_IN_EXCERPT)
    # With the file pointer created, generate logs and make an excerpt.
    logcat_service._open_logcat_file()
    FILE_CONTENT = 'Some log.\n'
    expected_path1 = _write_logcat_file_and_assert_excerpts_exists(
        logcat_file_content=FILE_CONTENT,
        test_begin_time=123,
        test_name='test_foo',
    )
    self.AssertFileContains(FILE_CONTENT, expected_path1)
    self.AssertFileDoesNotContain(NOT_IN_EXCERPT, expected_path1)
    # Generate some new logs and do another excerpt.
    FILE_CONTENT = 'Some more logs!!!\n'
    expected_path2 = _write_logcat_file_and_assert_excerpts_exists(
        logcat_file_content=FILE_CONTENT,
        test_begin_time=456,
        test_name='test_bar',
    )
    self.AssertFileContains(FILE_CONTENT, expected_path2)
    # Each excerpt starts after the previous one ended.
    self.AssertFileDoesNotContain(FILE_CONTENT, expected_path1)
    # Simulate devices accidentally go offline, logcat service stopped.
    logcat_service.stop()
    FILE_CONTENT = 'Whatever logs\n'
    expected_path3 = _write_logcat_file_and_assert_excerpts_exists(
        logcat_file_content=FILE_CONTENT,
        test_begin_time=789,
        test_name='test_offline',
    )
    # With the service stopped, the excerpt exists but is empty.
    self.assertEqual(os.stat(expected_path3).st_size, 0)
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock_android_device.MockAdbProxy('1'))
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  @mock.patch('mobly.utils.create_dir')
  @mock.patch('mobly.utils.start_standing_subprocess', return_value='process')
  @mock.patch('mobly.utils.stop_standing_subprocess')
  @mock.patch.object(logcat.Logcat, '_open_logcat_file')
  @mock.patch('mobly.logger.get_log_file_timestamp')
  def test_take_logcat_with_extra_params(self, get_timestamp_mock,
                                         open_logcat_mock, stop_proc_mock,
                                         start_proc_mock, create_dir_mock,
                                         FastbootProxy, MockAdbProxy):
    """Verifies the steps of collecting adb logcat on an AndroidDevice
    object, including various function calls and the expected behaviors of
    the calls.
    """
    mock_serial = '1'
    get_timestamp_mock.return_value = '123'
    ad = android_device.AndroidDevice(serial=mock_serial)
    # Extra logcat_params ('-b radio') must be appended to the adb command.
    configs = logcat.Config()
    configs.logcat_params = '-b radio'
    logcat_service = logcat.Logcat(ad, configs)
    logcat_service.start()
    # Verify start did the correct operations.
    self.assertTrue(logcat_service._adb_logcat_process)
    expected_log_path = os.path.join(logging.log_path,
                                     'AndroidDevice%s' % ad.serial,
                                     'logcat,%s,fakemodel,123.txt' % ad.serial)
    create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
    adb_cmd = ' "adb" -s %s logcat -v threadtime -T 1 -b radio >> %s'
    start_proc_mock.assert_called_with(adb_cmd %
                                       (ad.serial, '"%s" ' % expected_log_path),
                                       shell=True)
    self.assertEqual(logcat_service.adb_logcat_file_path, expected_log_path)
    logcat_service.stop()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_instantiation(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
mock_serial = 1
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
self.assertIsNone(logcat_service._adb_logcat_process)
self.assertIsNone(logcat_service.adb_logcat_file_path)
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock.MagicMock())
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  def test__enable_logpersist_with_logpersist(self, MockFastboot, MockAdbProxy):
    """On a debuggable build with both logpersist binaries available,
    _enable_logpersist restarts persistent logging: stop --clear, then start.
    """
    mock_serial = '1'
    mock_adb_proxy = MockAdbProxy.return_value
    # userdebug + debuggable: device qualifies for logpersist.
    mock_adb_proxy.getprops.return_value = {
        'ro.build.id': 'AB42',
        'ro.build.type': 'userdebug',
        'ro.debuggable': '1',
    }
    mock_adb_proxy.has_shell_command.side_effect = lambda command: {
        'logpersist.start': True,
        'logpersist.stop': True,
    }[command]
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._enable_logpersist()
    # Stop (with clear) must precede start.
    mock_adb_proxy.shell.assert_has_calls([
        mock.call('logpersist.stop --clear'),
        mock.call('logpersist.start'),
    ])
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock.MagicMock())
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  def test__enable_logpersist_with_user_build_device(self, MockFastboot,
                                                     MockAdbProxy):
    """On a non-debuggable 'user' build, _enable_logpersist must be a no-op
    even though the logpersist binaries exist.
    """
    mock_serial = '1'
    mock_adb_proxy = MockAdbProxy.return_value
    # 'user' build with ro.debuggable=0: logpersist must not be touched.
    mock_adb_proxy.getprops.return_value = {
        'ro.build.id': 'AB42',
        'ro.build.type': 'user',
        'ro.debuggable': '0',
    }
    mock_adb_proxy.has_shell_command.side_effect = lambda command: {
        'logpersist.start': True,
        'logpersist.stop': True,
    }[command]
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._enable_logpersist()
    mock_adb_proxy.shell.assert_not_called()
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock.MagicMock())
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  def test__enable_logpersist_with_missing_all_logpersist(
      self, MockFastboot, MockAdbProxy):
    # When neither logpersist.start nor logpersist.stop exists on the device,
    # _enable_logpersist must not issue any shell commands at all.
    def adb_shell_helper(command):
      # Simulates the adb errors the real shell would raise for the missing
      # binaries; anything else succeeds with empty output.
      if command == 'logpersist.start':
        raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
      elif command == 'logpersist.stop --clear':
        raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
      else:
        return b''
    mock_serial = '1'
    mock_adb_proxy = MockAdbProxy.return_value
    mock_adb_proxy.getprops.return_value = {
        'ro.build.id': 'AB42',
        'ro.build.type': 'userdebug',
        'ro.debuggable': '1',
    }
    mock_adb_proxy.has_shell_command.side_effect = lambda command: {
        'logpersist.start': False,
        'logpersist.stop': False,
    }[command]
    mock_adb_proxy.shell.side_effect = adb_shell_helper
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._enable_logpersist()
    mock_adb_proxy.shell.assert_not_called()
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock.MagicMock())
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  def test__enable_logpersist_with_missing_logpersist_stop(
      self, MockFastboot, MockAdbProxy):
    # logpersist.start exists but logpersist.stop does not: the stop is still
    # attempted (its failure is tolerated) before starting.
    def adb_shell_helper(command):
      if command == 'logpersist.stop --clear':
        raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
      else:
        return b''
    mock_serial = '1'
    mock_adb_proxy = MockAdbProxy.return_value
    mock_adb_proxy.getprops.return_value = {
        'ro.build.id': 'AB42',
        'ro.build.type': 'userdebug',
        'ro.debuggable': '1',
    }
    mock_adb_proxy.has_shell_command.side_effect = lambda command: {
        'logpersist.start': True,
        'logpersist.stop': False,
    }[command]
    mock_adb_proxy.shell.side_effect = adb_shell_helper
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._enable_logpersist()
    # Only verifies the stop attempt happened; assert_has_calls does not
    # exclude other calls.
    mock_adb_proxy.shell.assert_has_calls([
        mock.call('logpersist.stop --clear'),
    ])
  @mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
              return_value=mock.MagicMock())
  @mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
              return_value=mock_android_device.MockFastbootProxy('1'))
  def test__enable_logpersist_with_missing_logpersist_start(
      self, MockFastboot, MockAdbProxy):
    # logpersist.start is missing: the whole enable sequence is skipped, so
    # not even logpersist.stop should be invoked.
    def adb_shell_helper(command):
      if command == 'logpersist.start':
        raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
      else:
        return b''
    mock_serial = '1'
    mock_adb_proxy = MockAdbProxy.return_value
    mock_adb_proxy.getprops.return_value = {
        'ro.build.id': 'AB42',
        'ro.build.type': 'userdebug',
        'ro.debuggable': '1',
    }
    mock_adb_proxy.has_shell_command.side_effect = lambda command: {
        'logpersist.start': False,
        'logpersist.stop': True,
    }[command]
    mock_adb_proxy.shell.side_effect = adb_shell_helper
    ad = android_device.AndroidDevice(serial=mock_serial)
    logcat_service = logcat.Logcat(ad)
    logcat_service._enable_logpersist()
    mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy')
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_clear_adb_log(self, MockFastboot, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.adb.logcat = mock.MagicMock()
ad.adb.logcat.side_effect = adb.AdbError(
cmd='cmd', stdout=b'', stderr=b'failed to clear "main" log', ret_code=1)
logcat_service = logcat.Logcat(ad)
logcat_service.clear_adb_log()
if __name__ == '__main__':
  # Allow running this test module directly, e.g. `python logcat_test.py`.
  unittest.main()
| 44.590818 | 82 | 0.696643 |
795bf0ef250c611dab4dc311ae629fbeb3bfc042 | 371 | py | Python | src/auxiliary/simplifiers.py | yesanton/Comprehensive-Process-Drift-Detection-with-Visual-Analytics | dd6e3cd2f285dabc15a967a161d895f3f44a4250 | [
"MIT"
] | 6 | 2019-07-09T09:12:13.000Z | 2021-03-01T15:48:52.000Z | src/auxiliary/simplifiers.py | yesanton/Comprehensive-Process-Drift-Detection-with-Visual-Analytics | dd6e3cd2f285dabc15a967a161d895f3f44a4250 | [
"MIT"
] | 1 | 2019-12-13T09:37:57.000Z | 2019-12-19T15:16:00.000Z | src/auxiliary/simplifiers.py | yesanton/Process-Drift-Visualization-With-Declare | dd6e3cd2f285dabc15a967a161d895f3f44a4250 | [
"MIT"
] | null | null | null | from src.auxiliary.constant_definitions import NAME_FILE_SEPARATOR
def compose_name(*argv):
    """Join the given parts into a file name using NAME_FILE_SEPARATOR.

    If the last argument starts with '.' it is treated as a file extension:
    it is appended directly (no separator) after the part that precedes it.

    Args:
        *argv: Name components; each is converted with str().

    Returns:
        The composed name as a single string.
    """
    name = ''
    # Correctly identify the extension of the file: if the final part is an
    # extension ('.xyz'), exclude it AND the part before it from separator
    # joining so the result ends with 'part.xyz', not 'part<SEP>.xyz'.
    end = len(argv) - 1
    if argv[-1][0] == '.':
        end -= 1
    for arg in argv[:end]:
        name += str(arg) + NAME_FILE_SEPARATOR
    for arg in argv[end:]:
        name += str(arg)
    return name
795bf2213c08710782589089a691c79ae53e067b | 5,481 | py | Python | ldif/datasets/process_element.py | trisct/ldif | f166c13e0cdfdbbf98e8c785b093455834d93be2 | [
"Apache-2.0"
] | 242 | 2020-06-17T06:16:23.000Z | 2022-03-30T17:25:17.000Z | ldif/datasets/process_element.py | Sumith1896/ldif | 3dfa33c88b15178eebac3c7d93e5de1ca2682d23 | [
"Apache-2.0"
] | 20 | 2020-06-16T00:43:42.000Z | 2022-03-12T00:35:22.000Z | ldif/datasets/process_element.py | Sumith1896/ldif | 3dfa33c88b15178eebac3c7d93e5de1ca2682d23 | [
"Apache-2.0"
] | 28 | 2020-06-17T08:11:40.000Z | 2022-02-13T09:51:35.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A local tf.Dataset wrapper for LDIF."""
import os
import sys
import time
import tensorflow as tf
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.inference import example
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
def load_example_dict(example_directory, log_level=None):
  """Loads an example from disk and makes a str:numpy dictionary out of it.

  Args:
    example_directory: Path to the on-disk directory of a single example.
    log_level: Optional log level to set before loading.

  Returns:
    A dict mapping 'bounding_box_samples', 'depth_renders', 'mesh_name',
    'near_surface_samples', 'grid', 'world2grid' and
    'surface_point_samples' to the corresponding example data.
  """
  if log_level:
    log.set_level(log_level)
  entry_t = time.time()
  start_t = entry_t  # Keep the function entry time around for a cumulative print.

  def _lap(label):
    """Logs the time elapsed since the previous lap under *label*."""
    nonlocal start_t
    end_t = time.time()
    log.verbose(f'{label}: {end_t - start_t}')
    start_t = end_t

  e = example.InferenceExample.from_directory(example_directory, verbose=False)
  _lap('Make example')
  # The from_directory method should probably optionally take in a synset.
  bounding_box_samples = e.uniform_samples
  _lap('Bounding box')
  # TODO(kgenova) There is a pitfall here where the depth is divided by 1000,
  # after this. So if some other depth images are provided, they would either
  # need to also be stored in the GAPS format or be artificially multiplied
  # by 1000.
  depth_renders = e.depth_images  # [20, 224, 224, 1]. 1 or 1000? trailing 1?
  assert depth_renders.shape[0] == 1
  depth_renders = depth_renders[0, ...]
  _lap('Depth renders')
  mesh_name = e.mesh_name
  _lap('Mesh name')
  log.verbose(f'Loading {mesh_name} from split {e.split}')
  near_surface_samples = e.near_surface_samples
  _lap('NSS')
  grid = e.grid
  _lap('Grid')
  world2grid = e.world2grid
  _lap('world2grid')
  surface_point_samples = e.precomputed_surface_samples_from_dodeca
  _lap('surface points')
  log.verbose(f'load_example_dict total time: {time.time() - entry_t}')
  return {
      'bounding_box_samples': bounding_box_samples,
      'depth_renders': depth_renders,
      'mesh_name': mesh_name,
      'near_surface_samples': near_surface_samples,
      'grid': grid,
      'world2grid': world2grid,
      'surface_point_samples': surface_point_samples,
  }
def _float_feature(value):
  """Wraps a numpy array as a flattened float-list tf.train.Feature."""
  flat_values = value.flatten()
  return tf.train.Feature(float_list=tf.train.FloatList(value=flat_values))
def _bytes_feature(value):
  """Wraps a str/bytes value as a tf.train.Feature, encoding str as UTF-8."""
  encoded = value.encode('utf-8') if isinstance(value, str) else value
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded]))
def make_tf_example(d):
  """Serializes an example dict (see load_example_dict) to a tf.Example string."""
  feature = {}
  # All fields are float features except 'mesh_name', which is bytes.
  # Insertion order matches the original literal for deterministic protos.
  for key in ('bounding_box_samples', 'depth_renders'):
    feature[key] = _float_feature(d[key])
  feature['mesh_name'] = _bytes_feature(d['mesh_name'])
  for key in ('near_surface_samples', 'grid', 'world2grid',
              'surface_point_samples'):
    feature[key] = _float_feature(d[key])
  example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
  return example_proto.SerializeToString()
def full_featurespec():
  """Returns the fixed-length feature spec matching make_tf_example's layout."""
  float_shapes = {
      'bounding_box_samples': [100000, 4],
      'depth_renders': [20, 224, 224, 1],
      'near_surface_samples': [100000, 4],
      'grid': [32, 32, 32],
      'world2grid': [4, 4],
      'surface_point_samples': [10000, 6],
  }
  spec = {
      name: tf.io.FixedLenFeature(shape, tf.float32)
      for name, shape in float_shapes.items()
  }
  # 'mesh_name' is the single scalar string field.
  spec['mesh_name'] = tf.io.FixedLenFeature([], tf.string)
  return spec
def parse_tf_example(example_proto):
  """Parses one serialized example into the canonical 7-tuple of tensors."""
  parsed = tf.io.parse_single_example(example_proto, full_featurespec())
  field_order = ('bounding_box_samples', 'depth_renders', 'mesh_name',
                 'near_surface_samples', 'grid', 'world2grid',
                 'surface_point_samples')
  return tuple(parsed[key] for key in field_order)
def _example_dict_tf_func_wrapper(mesh_orig_path):
  """py_func body: maps a mesh_orig.ply path to the example's data tuple.

  Args:
    mesh_orig_path: A bytes path ending in '/mesh_orig.ply'; its parent
      directory is the example directory.

  Returns:
    The 7-tuple of numpy values matching full_featurespec()'s field order.
  """
  mesh_orig_path = mesh_orig_path.decode(sys.getdefaultencoding())
  suffix = '/mesh_orig.ply'
  assert suffix in mesh_orig_path
  # str.replace would remove *every* occurrence of the suffix anywhere in the
  # path; strip only the trailing one (fall back to replace otherwise).
  if mesh_orig_path.endswith(suffix):
    example_directory = mesh_orig_path[:-len(suffix)]
  else:
    example_directory = mesh_orig_path.replace(suffix, '')
  d = load_example_dict(example_directory)
  return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'],
          d['near_surface_samples'], d['grid'], d['world2grid'],
          d['surface_point_samples'])
def parse_example(filename):
  """A tensorflow function to return a dataset element when mapped"""
  output_types = [tf.float32, tf.float32, tf.string, tf.float32, tf.float32,
                  tf.float32, tf.float32]
  return tf.py_func(_example_dict_tf_func_wrapper, [filename], output_types)
| 36.059211 | 82 | 0.717935 |
795bf2ed829c1f22f26bb65c35577e021976b8a7 | 10,802 | py | Python | core/datautils.py | sap9433/sound-classification-with-tf-keras | fcc83db86ae46e4b6204f5a55663540d17a67ab9 | [
"MIT"
] | null | null | null | core/datautils.py | sap9433/sound-classification-with-tf-keras | fcc83db86ae46e4b6204f5a55663540d17a67ab9 | [
"MIT"
] | null | null | null | core/datautils.py | sap9433/sound-classification-with-tf-keras | fcc83db86ae46e4b6204f5a55663540d17a67ab9 | [
"MIT"
] | null | null | null | '''
datautils.py: Just some routines that we use for moving data around
'''
from __future__ import print_function
import numpy as np
import librosa
import os
from os.path import isfile, splitext
from imageio import imread, imwrite
import glob
def listdir_nohidden(path, subdirs_only=False, skip_csv=True):
    '''
    Generator over the entries of *path*, skipping hidden files (names
    starting with '.').  Call site should wrap in list() if a list is needed.

    Args:
        path: directory to list.
        subdirs_only: if True, yield only subdirectories, ignoring regular files.
        skip_csv: if True, skip entries with a '.csv' extension.
    '''
    for entry in os.listdir(path):
        if entry.startswith('.'):       # skip hidden files/directories
            continue
        # os.path.join is separator-safe, unlike manual '+ "/" +' concatenation.
        if subdirs_only and not os.path.isdir(os.path.join(path, entry)):
            continue
        if skip_csv and os.path.splitext(entry)[1] == '.csv':
            continue
        yield entry
# class names are subdirectory names in Preproc/ directory
def get_class_names(path="Preproc/Train/", sort=True):
    """Return the class names, i.e. the subdirectory names under *path*.

    Args:
        path: dataset directory whose subdirectories name the classes.
        sort: if True, return them sorted alphabetically for consistency
            with the "ls" command.

    Returns:
        A list of class-name strings.
    """
    # Both branches restrict to subdirectories (the unsorted branch
    # previously omitted subdirs_only=True, which could also return files),
    # and both now return a list for a consistent return type.
    names = listdir_nohidden(path, subdirs_only=True)
    if sort:
        return sorted(names)
    return list(names)  # directory order, not alphabetical
def get_total_files(class_names, path="Preproc/Train/"):
    """Count the files across all class subdirectories of *path*.

    Args:
        class_names: iterable of subdirectory names (one per class).
        path: parent dataset directory (trailing slash optional).

    Returns:
        Total number of directory entries across all class subdirectories.
    """
    # os.path.join makes this robust to a missing trailing slash on *path*.
    return sum(
        len(os.listdir(os.path.join(path, subdir))) for subdir in class_names)
def save_melgram(outfile, melgram, out_format='npz'):
    """Save a mel-spectrogram to disk as float16.

    Args:
        outfile: destination path.
        melgram: 4-d array; axis 1 is treated as the channel axis here
            (NOTE(review): make_layered_melgram produces channels on axis 3 —
            confirm which layout callers actually pass).
        out_format: 'npz' (default, compressed), 'npy', or 'jpeg'/'png'
            (image formats only supported for <= 4 channels).
    """
    channels = melgram.shape[1]
    melgram = melgram.astype(np.float16)
    if (('jpeg' == out_format) or ('png' == out_format)) and (channels <= 4):
        melgram = np.moveaxis(melgram, 1, 3).squeeze()  # we use the 'channels_first' in tensorflow, but images have channels_first. squeeze removes unit-size axes
        melgram = np.flip(melgram, 0)  # flip spectrogram image right-side-up before saving, for viewing
        if (2 == channels):  # special case: 1=greyscale, 3=RGB, 4=RGBA, ..no 2. so...?
            # pad a channel of zeros (for blue) so the image has 3 channels
            b = np.zeros((melgram.shape[0], melgram.shape[1], 3))  # 3-channel array of zeros
            b[:, :, :-1] = melgram  # fill the zeros on the 1st 2 channels
            imwrite(outfile, b, format=out_format)
        else:
            imwrite(outfile, melgram, format=out_format)
    elif ('npy' == out_format):
        # BUG FIX: np.save takes the array positionally; the previous
        # np.save(outfile, melgram=melgram) raised TypeError.
        np.save(outfile, melgram)
    else:
        np.savez_compressed(outfile, melgram=melgram)  # default is compressed npz file
    return
def load_audio(audio_path, mono=None, sr=None, convertOSXaliases=True): # wrapper for librosa.load
    """Load an audio file via librosa.load, with an OS X alias-file fallback.

    NOTE(review): `NoBackendError`, `platform` and `resolve_osx_alias` are not
    imported in this module's visible header — if the except path is ever
    taken, this raises NameError. Confirm those imports exist elsewhere.
    """
    try:
        signal, sr = librosa.load(audio_path, mono=mono, sr=sr)
    except NoBackendError as e:
        if ('Darwin' == platform.system()): # handle OS X alias files gracefully
            source = resolve_osx_alias(audio_path, convert=convertOSXaliases, already_checked_os=True) # convert to symlinks for next time
            try:
                signal, sr = librosa.load(source, mono=mono, sr=sr)
            except NoBackendError as e:
                print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
                raise e
        else:
            print("\n*** ERROR: Could not open audio file {}".format(audio_path),"\n",flush=True)
            raise e
    return signal, sr
def load_melgram(file_path):
    """Load a mel-spectrogram saved by save_melgram, auto-detecting by extension.

    Args:
        file_path: path ending in .npy, .npz (preferred), .png or .jpeg.

    Returns:
        The melgram array (4-d, (1, 1, freq, time) for image inputs).

    Raises:
        ValueError: if the file extension is not recognized.
    """
    name, extension = os.path.splitext(file_path)
    if ('.npy' == extension):
        melgram = np.load(file_path)
    elif ('.npz' == extension):  # compressed npz file (preferred)
        with np.load(file_path) as data:
            melgram = data['melgram']
    elif ('.png' == extension) or ('.jpeg' == extension):
        arr = imread(file_path)
        melgram = np.reshape(arr, (1, 1, arr.shape[0], arr.shape[1]))  # convert 2-d image
        # Images are stored 'rightside up'; librosa internally presents them
        # 'upside down'. After the reshape the frequency axis is axis 2.
        # BUG FIX: previously flipped axis 0 (the size-1 batch axis), a no-op.
        melgram = np.flip(melgram, 2)
    else:
        # Previously printed an error and fell through to an
        # UnboundLocalError; raise an explicit error instead.
        raise ValueError(
            "load_melgram: Error: unrecognized file extension '" + extension +
            "' for file " + file_path)
    return melgram
def get_sample_dimensions(class_names, path='Preproc/Train/'):
    """Peek at one preprocessed file of the first class and return its shape."""
    first_class_dir = path + class_names[0] + '/'
    first_file = os.listdir(first_class_dir)[0]
    sample = load_melgram(first_class_dir + first_file)
    print(" get_sample_dimensions: " + first_file + ": melgram.shape = ",
          sample.shape)
    return sample.shape
def encode_class(class_name, class_names):
    """Return a one-hot vector for *class_name*, or None if it is unknown."""
    if class_name not in class_names:
        return None
    one_hot = np.zeros(len(class_names))
    one_hot[class_names.index(class_name)] = 1
    return one_hot
def decode_class(vec, class_names):
    """Index of the hot entry in *vec* (class_names kept for interface only)."""
    hot_index = np.argmax(vec)
    return int(hot_index)
def shuffle_XY_paths(X, Y, paths):
    """Apply one random permutation to X, Y and paths, keeping them aligned.

    Args:
        X: data array, first axis indexes samples (any rank; the old loop
            implicitly required rank 3).
        Y: label array with the same first-axis length as X.
        paths: list of file paths, same length.

    Returns:
        (newX, newY, newpaths): permuted copies; inputs are not modified.
    """
    assert (X.shape[0] == Y.shape[0])
    idx = np.random.permutation(Y.shape[0])
    # Vectorized fancy indexing replaces the old per-row Python loop; it
    # returns fresh copies, matching the previous np.copy semantics.
    newX = X[idx]
    newY = Y[idx]
    newpaths = [paths[i] for i in idx]
    return newX, newY, newpaths
def make_melgram(mono_sig, sr, n_mels=128):  # @keunwoochoi upgraded form 96 to 128 mel bins in kapre
    """Mel-spectrogram in dB with shape (1, n_mels, time, 1)."""
    mel_power = librosa.feature.melspectrogram(mono_sig, sr=sr, n_mels=n_mels)
    mel_db = librosa.amplitude_to_db(mel_power)
    # Add batch (axis 0) and channel (axis 3) dims; the trailing channel axis
    # is because tensorflow wants 'channels_last' order.
    # (librosa's older logamplitude was deprecated in favor of
    # amplitude_to_db; the librosa docs also show a perceptual-CQT variant.)
    return mel_db[np.newaxis, :, :, np.newaxis]
def make_phase_gram(mono_sig, sr, n_bins=128):
    """Phase spectrogram resized to n_bins rows, shape (1, n_bins, time, 1)."""
    spectrum = librosa.stft(mono_sig)  # , n_fft = (2*n_bins)-1)
    _magnitude, phase = librosa.magphase(spectrum)  # magnitude is discarded
    # Resample the phase array to match n_bins rows.
    resized_phase = np.resize(phase, (n_bins, phase.shape[1]))
    return resized_phase[np.newaxis, :, :, np.newaxis]
# turn multichannel audio as multiple melgram layers
def make_layered_melgram(signal, sr, mels=128, phase=False):
    """Stack per-channel melgrams (and optionally phasegrams) along axis 3."""
    if signal.ndim == 1:  # given the way the preprocessing code is now, this may not get called
        signal = np.reshape(signal, (1, signal.shape[0]))
    # Axis 0 stays free for keras batches; axis 3 is 'channels_last'.
    # Per channel the order is melgram then (optionally) phasegram.
    layer_list = []
    for channel in range(signal.shape[0]):
        layer_list.append(make_melgram(signal[channel], sr, n_mels=mels))
        if phase:
            layer_list.append(make_phase_gram(signal[channel], sr, n_bins=mels))
    return np.concatenate(layer_list, axis=3)
def nearest_multiple(a, b):
    """Largest multiple of *b* that is <= *a*.

    Uses integer floor division: the previous float expression
    int(a/b)*b loses precision for large integers and could even return a
    value LARGER than *a* (e.g. a = 10**17 - 1, b = 10).
    """
    return (int(a) // b) * b
# can be used for test dataset as well
def build_dataset(path="Preproc/Train/", load_frac=1.0, batch_size=None, tile=False):
    """Load preprocessed melgrams from *path* into memory.

    Args:
        path: dataset directory with one subdirectory per class.
        load_frac: fraction of the files to load (applied per class and overall).
        batch_size: if given, the total load is truncated to a multiple of it
            (keras requires dataset size to be a multiple of batch_size).
        tile: if True, tile single-channel melgrams to 3 channels.

    Returns:
        (X, Y, paths, class_names): data array, one-hot labels, file paths
        (all shuffled together), and the sorted class names.
    """
    class_names = get_class_names(path=path)
    print("class_names = ",class_names)
    nb_classes = len(class_names)
    total_files = get_total_files(class_names, path=path)
    total_load = int(total_files * load_frac)
    if (batch_size is not None): # keras gets particular: dataset size must be mult. of batch_size
        total_load = nearest_multiple( total_load, batch_size)
    print(" total files = ",total_files,", going to load total_load = ",total_load)
    print("total files = ",total_files,", going to load total_load = ",total_load)
    # pre-allocate memory for speed (old method used np.concatenate, slow)
    mel_dims = get_sample_dimensions(class_names, path=path)  # get dims of sample data file
    if (tile):
        # Force 3 channels on the last axis (e.g. for RGB-style network inputs).
        ldims = list(mel_dims)
        ldims[3] = 3
        mel_dims = tuple(ldims)
    print(" melgram dimensions: ",mel_dims)
    X = np.zeros((total_load, mel_dims[1], mel_dims[2], mel_dims[3]))
    Y = np.zeros((total_load, nb_classes))
    paths = []
    load_count = 0
    for idx, classname in enumerate(class_names):
        print("")
        this_Y = np.array(encode_class(classname,class_names) )
        this_Y = this_Y[np.newaxis,:]
        class_files = os.listdir(path+classname)
        n_files = len(class_files)
        n_load = int(n_files * load_frac)  # n_load is how many files of THIS CLASS are expected to be loaded
        printevery = 100
        file_list = class_files[0:n_load]
        for idx2, infilename in enumerate(file_list):    # Load files in a particular class
            audio_path = path + classname + '/' + infilename
            if (0 == idx2 % printevery) or (idx2+1 == len(class_files)):
                print("\r Loading class ",idx+1,"/",nb_classes,": \'",classname,
                   "\', File ",idx2+1,"/", n_load,": ",audio_path,"                  ",
                   sep="",end="")
            #auto-detect load method based on filename extension
            melgram = load_melgram(audio_path)
            if (tile) and (melgram.shape != mel_dims):
                melgram = np.tile(melgram, 3)
            elif (melgram.shape != mel_dims):
                print("\n\n    ERROR: mel_dims = ",mel_dims,", melgram.shape = ",melgram.shape)
            X[load_count,:,:] = melgram
            Y[load_count,:] = this_Y
            paths.append(audio_path)
            load_count += 1
            if (load_count >= total_load):   # Abort loading files after last even multiple of batch size
                break
        if (load_count >= total_load):      # Second break needed to get out of loop over classes
            break
    print("")
    if ( load_count != total_load ):  # check to make sure we loaded everything we thought we would
        raise Exception("Loaded "+str(load_count)+" files but was expecting "+str(total_load) )
    X, Y, paths = shuffle_XY_paths(X,Y,paths) # mix up classes, & files within classes
    return X, Y, paths, class_names
795bf3bf174cbd3f7dd66dc5c2f681716a4e7276 | 7,006 | py | Python | Agents/AFDDAgent/afdd/Archive/afdd2.py | kruthikarshankar/bemoss_os | 460a5a41b38240bb9f6dacc23d373ae1942259a8 | [
"Unlicense"
] | 3 | 2018-11-25T01:09:55.000Z | 2021-08-24T01:56:36.000Z | Agents/AFDDAgent/afdd/Archive/afdd2.py | kwarodom/bemoss_os_1.2 | 460a5a41b38240bb9f6dacc23d373ae1942259a8 | [
"Unlicense"
] | null | null | null | Agents/AFDDAgent/afdd/Archive/afdd2.py | kwarodom/bemoss_os_1.2 | 460a5a41b38240bb9f6dacc23d373ae1942259a8 | [
"Unlicense"
] | 3 | 2018-11-09T03:38:09.000Z | 2020-02-24T06:26:10.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
# BUG FIX: `from math import abs` raises ImportError -- abs() is a builtin,
# not a member of the math module -- so this module could never be imported.
# Import the whole math module instead so math.* helpers keep working.
import math
import settings
class TemperatureSensor:
    """
    Proactive AFDD diagnostic that identifies faulty temperature sensors on a
    rooftop unit (RTU) by commanding the outdoor-air damper fully closed and
    fully open and comparing mixed-air temperature against return-air and
    outdoor-air temperatures.

    Fault codes returned by run():
        20 - no temperature-sensor fault detected
        21 - outdoor-air temperature sensor problem
        22 - return-air temperature sensor problem
        23 - mixed-air temperature sensor problem
        29 - lock not received from controller (damper command refused)

    NOTE(review): this module logs through a module-level `_log`, which is not
    defined in the visible portion of the file -- confirm a logger exists.
    """
    def __init__(self, parent):
        """parent is the afddagent or whatever object creating an instance of this class
        """
        self._parent = parent
    def run(self, voltron_data):
        """Execute the AFDD2 diagnostic and return a numeric fault code.

        The voltron_data argument is immediately replaced with fresh data from
        the parent agent (matching original behavior).
        """
        self.log_message("Rob: AFDD2 is running...")
        voltron_data = self._parent.get_new_data()
        # configuration file: afdd_config.ini
        seconds_to_steady_state = settings.seconds_to_steady_state
        afdd2_temperature_sensor_threshold = settings.afdd2_temperature_sensor_threshold
        # Main Algorithm
        # BUG FIX: was `sensor_error_check(self)` -- a NameError, because
        # sensor_error_check is a method of this class, not a module function.
        status = self.sensor_error_check()
        if (status):
            status1 = self.command_damper(0)
            if not status1:
                afdd2 = 29
                self.log_status("Lock not Received from controller to close damper")
                return afdd2
            self.sleep(seconds_to_steady_state)
            voltron_data = self._parent.get_new_data()
            return_air_temperature = float(voltron_data["ReturnAirTemperature"])
            mixed_air_temperature = float(voltron_data["MixedAirTemperature"])
            # Damper closed: mixed air should track return air.  If it does,
            # the anomaly seen by sensor_error_check points at the OA sensor.
            # BUG FIX: math.fabs was unusable because the module only did
            # `from math import abs` (itself an ImportError); the builtin
            # abs() is equivalent here and needs no import.
            sensorcondition_1 = abs(mixed_air_temperature - return_air_temperature)
            if sensorcondition_1 < afdd2_temperature_sensor_threshold:
                afdd2 = 21
                self.log_status("Outdoor-air temperature sensor problem")
                return afdd2
            status1 = self.command_damper(100)
            if not status1:
                afdd2 = 29
                self.log_status("Lock not Received from controller to open damper")
                return afdd2
            self.sleep(seconds_to_steady_state)
            voltron_data = self._parent.get_new_data()
            # NOTE(review): run() reads key "OutAirTemperature" while
            # sensor_error_check() reads "OutsideAirTemperature" -- confirm
            # which key the controller actually publishes.
            outdoor_air_temperature = float(voltron_data["OutAirTemperature"])
            mixed_air_temperature = float(voltron_data["MixedAirTemperature"])
            # Damper fully open: mixed air should track outdoor air.
            sensorcondition_2 = abs(mixed_air_temperature - outdoor_air_temperature)
            if sensorcondition_2 < afdd2_temperature_sensor_threshold:
                afdd2 = 22
                self.log_status("Return-air temperature sensor problem")
                return afdd2
            # If it comes here => both tests fail
            afdd2 = 23
            # BUG FIX: was `self.log.status(...)` which raised AttributeError;
            # the method is log_status.
            self.log_status("Mixed-air temperature sensor problem")
            return afdd2
        afdd2 = 20
        self.log_status("No Temperature Sensor faults detected")
        return afdd2
    def sensor_error_check(self):
        """Return 1 if the mixed-air temperature lies outside the interval
        bounded by the outdoor-air and return-air temperatures (physically
        implausible for mixed air), else 0."""
        status = 0
        voltron_data = self._parent.get_new_data()
        return_air_temperature = float(voltron_data["ReturnAirTemperature"])
        outdoor_air_temperature = float(voltron_data["OutsideAirTemperature"])
        mixed_air_temperature = float(voltron_data["MixedAirTemperature"])
        if (mixed_air_temperature < outdoor_air_temperature and mixed_air_temperature < return_air_temperature):
            status = 1
            return status
        if (mixed_air_temperature > outdoor_air_temperature and mixed_air_temperature > return_air_temperature):
            status = 1
            return status
        return status
    def log_message(self, msg):
        # BUG FIX: was `_log.debug(code)` -- `code` is not defined in this
        # scope; log the msg parameter instead.
        _log.debug(msg)
    def sleep(self, sleeptime):
        """Delegate sleeping to the parent agent."""
        self._parent.sleep(sleeptime)
    def log_status(self, code):
        # print or log code and exit ll
        # need to release OAtemp_vpoint and CoolCall1
        _log.debug(code)
    def command_damper(self, command):
        """ Command outdoor air damper to a new position """
        status = self._parent.command_equip('DamperSignal', command)
        if not status:
            return False
        return True
| 41.211765 | 139 | 0.69883 |
795bf40ad9957238ab5bb87f4f62ea393cb14723 | 7,447 | py | Python | pySUMMA/simulate/binary.py | ahsen1402/pySUMMA | 3412e1282bc2cc9b9076f38bf64ef1674d69d770 | [
"Apache-2.0"
] | 1 | 2020-01-11T19:56:05.000Z | 2020-01-11T19:56:05.000Z | pySUMMA/simulate/binary.py | learn-ensemble/PY-SUMMA | 1b82237be5dc33af57b5c2e2c16aee4d568ddb77 | [
"Apache-2.0"
] | null | null | null | pySUMMA/simulate/binary.py | learn-ensemble/PY-SUMMA | 1b82237be5dc33af57b5c2e2c16aee4d568ddb77 | [
"Apache-2.0"
] | null | null | null | """Generate synthetic binary data [-1, 1].
Generate random samples of synthetic base classifier predictions. User
can provide either:
1) both the the true positive (tpr) and true negative rates (tnr), the
corresponding balanced accuracy (ba) is then computed by
ba = 0.5 * (tpr + tnr)
2) the boundary for uniformly sampling balanced accuracies of base classifiers.
In this setting, tpr is uniformly sampled on a calculated interval
that guarantees that tpr and tnr are on the interval [0, 1], and lastly
tnr is computed from the sampled values of ba and tpr by
tnr = 2 * ba - tpr
3) no input. Here classifier parameters are sampled as in (2) with the
default limits of [0.35, 0.9] for the balanced accuracy.
Available classes:
- Binary
"""
import numpy as np
def sample_TPR_given_BA(ba):
    """
    Uniformly sample a true positive rate (TPR) for each balanced accuracy
    (BA) value, constrained so that TPR, TNR and BA all stay in [0, 1].

    Since BA = 0.5 * (TPR + TNR):
      * TPR is largest when TNR = 0, so tpr_max = min(1, 2*ba);
      * TPR is smallest when TNR = 1, so tpr_min = max(0, 2*ba - 1).
    TPR is then drawn uniformly on [tpr_min, tpr_max].

    Input
    -----
    ba : ndarray
        (M,) length ndarray of balanced accuracies

    Returns
    -------
    tpr : ndarray
        (M,) length ndarray of randomly sampled true positive rates

    Raises
    ------
    ValueError
        If any computed tpr_min exceeds tpr_max (only possible for BA values
        outside [0, 1]).  Unlike the original per-element loop, validation now
        happens before any random numbers are drawn.
    """
    ba = np.asarray(ba)
    # Vectorized bounds (the original computed these one element at a time in
    # a Python loop; the values are identical).
    tpr_min = np.maximum(0.0, 2.0 * ba - 1.0)
    tpr_max = np.minimum(1.0, 2.0 * ba)
    if np.any(tpr_min > tpr_max):
        raise ValueError("TPR min > TPR max")
    # np.random.rand(n) consumes the same RNG stream as n scalar rand() calls,
    # so seeded results match the original loop implementation exactly.
    return np.random.rand(ba.size) * (tpr_max - tpr_min) + tpr_min
class Binary:
    """Simulate binary ({-1, +1}) predictions from an ensemble of base
    classifiers.

    Parameters
    ----------
    M : int
        Number of base classifiers.
    N : int
        Total number of samples.
    N1 : int
        Number of positive-class samples (the first N1 labels are +1).
    ba_lims : tuple of float, optional
        (low, high) bounds, each in [0, 1] with low < high, used to sample
        balanced accuracies uniformly when explicit rates are not supplied.
    rates : dict, optional
        If given, must be a dict with keys 'tpr' and 'tnr' mapping to (M,)
        arrays of true positive / true negative rates, every entry in [0, 1].

    Attributes
    ----------
    tpr, tnr : ndarray
        Per-classifier true positive / true negative rates.
    ba : ndarray
        Sampled balanced accuracies (set only when `rates` is None; with
        explicit rates use get_ba() instead).
    labels : ndarray
        (N,) ground-truth labels: N1 ones followed by N - N1 minus-ones.
    """

    def __init__(self, M, N, N1,
                 ba_lims=(0.35, 0.9),
                 rates=None):
        self.M = M
        self.N = N
        self.N1 = N1
        if rates is None:
            # No explicit rates: draw balanced accuracies and derive tpr/tnr.
            self.ba, self.tpr, self.tnr = self._sample_rates_from_ba_lims(ba_lims)
        else:
            if type(rates) is not dict:
                raise TypeError("Rates must be a dictionary with keys\n 'tpr' and 'tnr' and the values as M length\nnumpy arrays of rates.")
            else:
                # Validate every supplied rate before accepting the dict.
                for idx in range(self.M):
                    if rates["tpr"][idx] < 0 or rates["tpr"][idx] > 1:
                        raise ValueError("Each tpr must be between 0 and 1")
                    if rates["tnr"][idx] < 0 or rates["tnr"][idx] > 1:
                        raise ValueError("Each tnr must be between 0 and 1")
                self.tpr = rates["tpr"]
                self.tnr = rates["tnr"]
        # Ground truth: first N1 samples positive, remainder negative.
        self.labels = np.concatenate((np.ones(N1), -np.ones(N - N1)))

    def _sample_rates_from_ba_lims(self, ba_lims):
        """Uniformly sample balanced accuracies on `ba_lims`, sample tpr
        given each ba, and compute tnr = 2*ba - tpr.

        Returns [ba, tpr, tnr], each an (M,) ndarray.
        """
        if ba_lims[0] >= ba_lims[1]:
            raise ValueError("ba_lims[0] must be less than ba_lims[1]")
        elif ba_lims[0] < 0 or ba_lims[1] > 1:
            raise ValueError("B.A. limits must be between 0 and 1")
        width = ba_lims[1] - ba_lims[0]
        ba = np.random.rand(self.M) * width + ba_lims[0]
        tpr = sample_TPR_given_BA(ba)
        tnr = 2 * ba - tpr
        return [ba, tpr, tnr]

    def sim(self):
        """Generate the (M, N) prediction matrix and store it as self.data.

        Entry (j, i) is classifier j's {-1, +1} prediction for sample i,
        drawn from tpr[j] on positive samples and tnr[j] on negatives.
        """
        self.data = np.zeros(shape=(self.M, self.N))
        for j in range(self.M):
            for i, label in enumerate(self.labels):
                u = np.random.rand()
                if label == 1:
                    # Positive sample: correct (+1) with probability tpr[j].
                    prediction = 1 if u <= self.tpr[j] else -1
                else:
                    # Negative sample: correct (-1) with probability tnr[j].
                    prediction = -1 if u <= self.tnr[j] else 1
                self.data[j, i] = prediction

    def get_ba(self):
        """Return the balanced accuracy 0.5 * (tpr + tnr) of all M classifiers."""
        return 0.5 * (self.tpr + self.tnr)
| 32.378261 | 140 | 0.573385 |
795bf5b3a183c6908807d95c7fca88b8de2146d4 | 4,435 | py | Python | napari/_qt/qt_chunk_receiver.py | mkitti/napari | 4e954d30b5a1b70c5e495db1b8f48a3bdda1ff86 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/qt_chunk_receiver.py | mkitti/napari | 4e954d30b5a1b70c5e495db1b8f48a3bdda1ff86 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/qt_chunk_receiver.py | mkitti/napari | 4e954d30b5a1b70c5e495db1b8f48a3bdda1ff86 | [
"BSD-3-Clause"
] | null | null | null | """QtChunkReceiver and QtGuiEvent classes.
"""
import logging
from qtpy.QtCore import QObject, Signal
from ..components.chunk import chunk_loader
from ..utils.events import EmitterGroup, Event, EventEmitter
LOGGER = logging.getLogger('napari.async')
class QtGuiEvent(QObject):
    """Fires an event in the GUI thread.

    Listens to an event in any thread. When that event fires, it uses a Qt
    Signal/Slot to fire a gui_event in the GUI thread. If the original
    event is already in the GUI thread that's fine, the gui_event will
    be immediately fired in the GUI thread.

    Parameters
    ----------
    parent : QObject
        Parent Qt object.
    emitter : EventEmitter
        The event we are listening to.

    Attributes
    ----------
    emitter : EventEmitter
        The event we are listening to.
    events : EmitterGroup
        The only event we report is events.gui_event.

    Notes
    -----
    Qt's signal/slot mechanism is the only way we know of to "call" from a
    worker thread to the GUI thread. When Qt signals from a worker thread
    it posts a message to the GUI thread; when the GUI thread next processes
    messages it receives that message and calls into the Slot to deliver the
    event. If the original event was already in the GUI thread, the resulting
    event is simply triggered right away.
    """

    # Qt signal used to hop threads: emitted from any thread, delivered in
    # the GUI thread.
    signal = Signal(Event)

    def __init__(self, parent: QObject, emitter: EventEmitter):
        super().__init__(parent)
        emitter.connect(self._on_event)
        self.emitter = emitter
        self.events = EmitterGroup(
            source=self, auto_connect=True, gui_event=None
        )
        self.signal.connect(self._slot)

    def _on_event(self, event) -> None:
        """Event was fired, we could be in any thread."""
        self.signal.emit(event)

    def _slot(self, event) -> None:
        """Slot is always called in the GUI thread."""
        self.events.gui_event(original_event=event)

    def close(self):
        """Viewer is closing: stop re-emitting and stop listening.

        BUG FIX: the original called ``self.gui_event.disconnect()``, but no
        ``gui_event`` attribute exists on this object (the emitter lives at
        ``self.events.gui_event``), so close() always raised AttributeError.
        We also disconnect only our own callback from the source emitter
        rather than wiping every listener attached to it.
        """
        self.events.gui_event.disconnect()
        self.emitter.disconnect(self._on_event)
class QtChunkReceiver:
    """Forward chunks loaded by the ChunkLoader to their owning layer.

    Parameters
    ----------
    parent : QObject
        Parent Qt object.

    Attributes
    ----------
    gui_event : QtGuiEvent
        Re-fires the ChunkLoader's chunk_loaded event in the GUI thread so
        that _on_chunk_loaded_gui() always runs there.

    Notes
    -----
    The concurrent.futures done-callback that fires chunk_loaded may run in
    a worker thread, but our model code is not thread safe: the layer must
    receive its chunk from the GUI thread.  QtGuiEvent performs the thread
    hop via a Qt Signal/Slot; if chunk_loaded already fires in the GUI
    thread, everything simply runs there directly.
    """

    def __init__(self, parent: QObject):
        chunk_loaded = chunk_loader.events.chunk_loaded
        self.gui_event = QtGuiEvent(parent, chunk_loaded)
        self.gui_event.events.gui_event.connect(self._on_chunk_loaded_gui)

    def _on_chunk_loaded_gui(self, event) -> None:
        """A chunk was loaded; always called in the GUI thread.

        Parameters
        ----------
        event : Event
            Wrapper carrying the original chunk_loaded event.
        """
        original = event.original_event
        LOGGER.info(
            "QtChunkReceiver._on_chunk_loaded_gui: data_id=%d",
            original.request.key.data_id,
        )
        # Hand the finished request to the layer that asked for it.
        original.layer.on_chunk_loaded(original.request)

    def close(self):
        """Viewer is closing."""
        self.gui_event.close()
| 32.137681 | 80 | 0.67283 |
795bf738f299ba7144659afc76bdcfead65d03b4 | 1,300 | py | Python | output/models/nist_data/atomic/duration/schema_instance/nistschema_sv_iv_atomic_duration_enumeration_5_xsd/nistschema_sv_iv_atomic_duration_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/duration/schema_instance/nistschema_sv_iv_atomic_duration_enumeration_5_xsd/nistschema_sv_iv_atomic_duration_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/duration/schema_instance/nistschema_sv_iv_atomic_duration_enumeration_5_xsd/nistschema_sv_iv_atomic_duration_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from xsdata.models.datatype import XmlDuration
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-duration-enumeration-5-NS"
class NistschemaSvIvAtomicDurationEnumeration5Type(Enum):
    """Enumerated xs:duration values for the NIST atomic-duration test schema.

    Auto-generated (presumably by xsdata, given the imports) from the
    corresponding XSD: each member name encodes a duration literal and each
    value is the parsed XmlDuration.
    """
    P2004_Y09_M11_DT17_H07_M38_S = XmlDuration("P2004Y09M11DT17H07M38S")
    P2002_Y03_M13_DT22_H40_M25_S = XmlDuration("P2002Y03M13DT22H40M25S")
    P1995_Y02_M03_DT12_H24_M43_S = XmlDuration("P1995Y02M03DT12H24M43S")
    P2002_Y11_M07_DT03_H22_M59_S = XmlDuration("P2002Y11M07DT03H22M59S")
    P1970_Y01_M27_DT04_H00_M33_S = XmlDuration("P1970Y01M27DT04H00M33S")
    P1974_Y01_M22_DT17_H35_M48_S = XmlDuration("P1974Y01M22DT17H35M48S")
    P2012_Y01_M30_DT22_H51_M53_S = XmlDuration("P2012Y01M30DT22H51M53S")
    P2024_Y05_M28_DT11_H34_M44_S = XmlDuration("P2024Y05M28DT11H34M44S")
    P1987_Y03_M14_DT08_H37_M46_S = XmlDuration("P1987Y03M14DT08H37M46S")
@dataclass
class NistschemaSvIvAtomicDurationEnumeration5:
    """Root element binding for the atomic-duration enumeration-5 test case.

    Auto-generated XML binding class; `value` must be one of the enumerated
    durations and is marked required in the schema metadata.
    """
    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "NISTSchema-SV-IV-atomic-duration-enumeration-5"
        namespace = "NISTSchema-SV-IV-atomic-duration-enumeration-5-NS"

    # Element content; None until populated, but required by the schema.
    value: Optional[NistschemaSvIvAtomicDurationEnumeration5Type] = field(
        default=None,
        metadata={
            "required": True,
        }
    )
| 39.393939 | 74 | 0.789231 |
795bf7f5314728725244f2987e14789202362018 | 63 | py | Python | hello-world1.py | go2m/s_ilib | 7e90b4504fbb6bb02fedf145728987e2fd4eb3d9 | [
"MIT"
] | null | null | null | hello-world1.py | go2m/s_ilib | 7e90b4504fbb6bb02fedf145728987e2fd4eb3d9 | [
"MIT"
] | null | null | null | hello-world1.py | go2m/s_ilib | 7e90b4504fbb6bb02fedf145728987e2fd4eb3d9 | [
"MIT"
] | null | null | null | #C:\Users\Uwe\Documents\GitHub\ilib
# Emit the demo greeting (unchanged output).
message = 'Hello World 1 !!!'
print(message)
| 21 | 35 | 0.698413 |
795bf8186ad37324429e78dcb268293772e679a1 | 926 | py | Python | setup.py | UniStuttgart-VISUS/LilyPads | a61d4cc9fd1230749cb2d035ad23afe59c9b5f44 | [
"Apache-2.0"
] | 1 | 2021-10-04T12:25:45.000Z | 2021-10-04T12:25:45.000Z | setup.py | UniStuttgart-VISUS/LilyPads | a61d4cc9fd1230749cb2d035ad23afe59c9b5f44 | [
"Apache-2.0"
] | null | null | null | setup.py | UniStuttgart-VISUS/LilyPads | a61d4cc9fd1230749cb2d035ad23afe59c9b5f44 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
import subprocess
import re
def get_version():
    """Derive a version string from ``git describe``.

    A tag like ``v1.2.3`` becomes ``1.2.3``; a describe output such as
    ``v1.2.3-4-gabcdef`` becomes ``1.2.3+4.gabcdef``.
    """
    described = subprocess.check_output(
        ['git', 'describe', '--tags', '--match', 'v[[:digit:]]*.[[:digit:]]*.[[:digit:]]*'],
        encoding='utf-8',
    )
    described = described.strip()
    # The first '-' separates the tag from commit info -> '+';
    # any remaining '-' become '.'.
    described = described.replace('-', '+', 1).replace('-', '.')
    # Drop the leading 'v' of the tag name.
    if described.startswith('v'):
        described = described[1:]
    return described
# Package metadata; the version is computed from `git describe` at build time.
setup(
    name='lilypads',
    version=get_version(),
    packages=find_packages(),
    include_package_data=True,  # ship non-Python package data too
    zip_safe=False,
    # Runtime dependencies: a Flask web application served by gunicorn.
    install_requires=[
        'flask',
        'flask_login',
        'passlib',
        'gunicorn',
        'Werkzeug'
    ],
)
| 25.722222 | 140 | 0.600432 |
795bfa4550f82c915e74ee088b8aae359dcc6267 | 16,562 | py | Python | baselines/common/distributions.py | Mihir-Mavalankar/DeepRL_SymmetricAntGait | 6f1e14f53b2b1369dbc218ac888dfbd2ecbe9ffa | [
"MIT"
] | null | null | null | baselines/common/distributions.py | Mihir-Mavalankar/DeepRL_SymmetricAntGait | 6f1e14f53b2b1369dbc218ac888dfbd2ecbe9ffa | [
"MIT"
] | null | null | null | baselines/common/distributions.py | Mihir-Mavalankar/DeepRL_SymmetricAntGait | 6f1e14f53b2b1369dbc218ac888dfbd2ecbe9ffa | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.a2c.utils import fc, fc_wshare,fc_double, quad_mirror_action_layer
from tensorflow.python.ops import math_ops
class Pd(object):
    """Interface for a concrete probability distribution.

    Subclasses supply the flat parameter tensor (flatparam), a point
    estimate (mode), log-density helpers (neglogp), KL divergence,
    entropy and sampling; logp/shape are derived here.
    """
    def flatparam(self):
        """Flat tensor of distribution parameters."""
        raise NotImplementedError
    def mode(self):
        """Most likely value under the distribution."""
        raise NotImplementedError
    def neglogp(self, x):
        # Usually it's easier to define the negative logprob
        raise NotImplementedError
    def kl(self, other):
        """KL divergence KL(self || other)."""
        raise NotImplementedError
    def entropy(self):
        raise NotImplementedError
    def sample(self):
        raise NotImplementedError
    def logp(self, x):
        """Log-probability of x, derived from neglogp."""
        return -self.neglogp(x)
    def get_shape(self):
        return self.flatparam().shape
    @property
    def shape(self):
        """Shape of the flat parameter tensor."""
        return self.get_shape()
    def __getitem__(self, idx):
        # Index into the batch dimension by slicing the flat parameters.
        return type(self)(self.flatparam()[idx])
class PdType(object):
    """A parametrized family of probability distributions.

    Instances build a concrete Pd from a flat parameter tensor
    (pdfromflat) or from a latent network output (pdfromlatent), and
    describe the parameter/sample tensor shapes and dtypes.
    """
    def pdclass(self):
        """Concrete Pd subclass produced by this family."""
        raise NotImplementedError
    def pdfromflat(self, flat):
        """Instantiate the concrete distribution from flat parameters."""
        return self.pdclass()(flat)
    def pdfromlatent(self, latent_vector, init_scale, init_bias):
        raise NotImplementedError
    def param_shape(self):
        raise NotImplementedError
    def sample_shape(self):
        raise NotImplementedError
    def sample_dtype(self):
        raise NotImplementedError
    def param_placeholder(self, prepend_shape, name=None):
        """float32 placeholder shaped for batched distribution parameters."""
        return tf.placeholder(dtype=tf.float32, shape=prepend_shape + self.param_shape(), name=name)
    def sample_placeholder(self, prepend_shape, name=None):
        """Placeholder shaped and typed for batched samples."""
        return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape + self.sample_shape(), name=name)
    def __eq__(self, other):
        # Equal when the same family class carries the same configuration.
        return type(self) == type(other) and self.__dict__ == other.__dict__
class CategoricalPdType(PdType):
    """Family of categorical distributions over ``ncat`` discrete choices."""
    def __init__(self, ncat):
        self.ncat = ncat
    def pdclass(self):
        return CategoricalPd
    def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # Project the latent features to one logit per category.
        logits = _matching_fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias)
        return self.pdfromflat(logits), logits
    def param_shape(self):
        return [self.ncat]
    def sample_shape(self):
        # Samples are scalar category indices.
        return []
    def sample_dtype(self):
        return tf.int32
class MultiCategoricalPdType(PdType):
    """Family of products of independent categorical distributions.

    ``nvec[k]`` is the number of categories in the k-th factor.
    """
    def __init__(self, nvec):
        self.ncats = nvec.astype('int32')
        assert (self.ncats > 0).all()  # every factor needs >= 1 category
    def pdclass(self):
        return MultiCategoricalPd
    def pdfromflat(self, flat):
        return MultiCategoricalPd(self.ncats, flat)
    def pdfromlatent(self, latent, init_scale=1.0, init_bias=0.0):
        # A single fc layer emits the concatenated logits of all factors.
        logits = _matching_fc(latent, 'pi', self.ncats.sum(), init_scale=init_scale, init_bias=init_bias)
        return self.pdfromflat(logits), logits
    def param_shape(self):
        return [sum(self.ncats)]
    def sample_shape(self):
        return [len(self.ncats)]
    def sample_dtype(self):
        return tf.int32
class DiagGaussianPdType(PdType):
    # Family of diagonal-covariance Gaussians over `size` action dimensions.
    # The flat parameter vector is [mean, logstd] concatenated (length 2*size).
    def __init__(self, size):
        self.size = size
    def pdclass(self):
        return DiagGaussianPd
    def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # Mean is a fully-connected projection of the latent vector; logstd is
        # a free, state-independent variable broadcast across the batch via
        # `mean * 0.0 + logstd`.
        mean = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        return self.pdfromflat(pdparam), mean
    #New function for symmetric input output in last policy net layer#
    def pdfromlatent_sym(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # Same as pdfromlatent, but the mean additionally passes through
        # quad_mirror_action_layer (imported from baselines.a2c.utils).
        mean = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        mean = quad_mirror_action_layer(mean)
        logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        return self.pdfromflat(pdparam), mean
    ##########################################################
    #New function for MVDP NET#
    def pdfromlatent_mvdp(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # NOTE(review): _matching_fc_mvdp is not defined in the visible part of
        # this file -- confirm the helper exists before calling this method.
        mean = _matching_fc_mvdp(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        mean = quad_mirror_action_layer(mean)
        logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        return self.pdfromflat(pdparam), mean
    ##########################################################
    #New function for symmetric input output and weight sharing in last policy net layer#
    def pdfromlatent_sym_wshare(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # NOTE(review): _matching_fc_wshare is likewise not visible here
        # (fc_wshare is imported, suggesting a matching helper should exist).
        mean = _matching_fc_wshare(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        mean = quad_mirror_action_layer(mean)
        logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        return self.pdfromflat(pdparam), mean
    ##########################################################
    def param_shape(self):
        return [2*self.size]
    def sample_shape(self):
        return [self.size]
    def sample_dtype(self):
        return tf.float32
class BernoulliPdType(PdType):
    """Family of vectors of ``size`` independent Bernoulli variables."""
    def __init__(self, size):
        self.size = size
    def pdclass(self):
        return BernoulliPd
    def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        # One logit per Bernoulli component.
        logits = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        return self.pdfromflat(logits), logits
    def param_shape(self):
        return [self.size]
    def sample_shape(self):
        return [self.size]
    def sample_dtype(self):
        return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
    # Categorical distribution parametrized by unnormalized logits.
    def __init__(self, logits):
        self.logits = logits
    def flatparam(self):
        return self.logits
    def mode(self):
        # Most likely category = arg-max logit.
        return tf.argmax(self.logits, axis=-1)
    @property
    def mean(self):
        # Category probabilities.
        return tf.nn.softmax(self.logits)
    def neglogp(self, x):
        # return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
        # Note: we can't use sparse_softmax_cross_entropy_with_logits because
        # the implementation does not allow second-order derivatives...
        if x.dtype in {tf.uint8, tf.int32, tf.int64}:
            # one-hot encoding
            x_shape_list = x.shape.as_list()
            logits_shape_list = self.logits.get_shape().as_list()[:-1]
            # Only statically-known dimensions can be validated here.
            for xs, ls in zip(x_shape_list, logits_shape_list):
                if xs is not None and ls is not None:
                    assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls)
            x = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
        else:
            # already encoded
            assert x.shape.as_list() == self.logits.shape.as_list()
        return tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.logits,
            labels=x)
    def kl(self, other):
        # KL(self || other) computed with the max-subtraction trick so the
        # exp/log-sum-exp stays numerically stable.
        a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
        a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        ea1 = tf.exp(a1)
        z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
        z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)
    def entropy(self):
        # Entropy via the same stabilized softmax computation.
        a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
    def sample(self):
        # Gumbel-max trick: argmax(logits + Gumbel noise) yields an exact
        # categorical sample without materializing the softmax.
        u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)
        return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
class MultiCategoricalPd(Pd):
    """Product of independent categorical distributions.

    The flat logits are split into per-factor chunks of sizes ``nvec``,
    each chunk backing one CategoricalPd.
    """
    def __init__(self, nvec, flat):
        self.flat = flat
        chunks = tf.split(flat, np.array(nvec, dtype=np.int32), axis=-1)
        self.categoricals = [CategoricalPd(chunk) for chunk in chunks]
    def flatparam(self):
        return self.flat
    def mode(self):
        modes = [cat.mode() for cat in self.categoricals]
        return tf.cast(tf.stack(modes, axis=-1), tf.int32)
    def neglogp(self, x):
        # Independence: negative log-probabilities add across factors.
        per_factor = [cat.neglogp(component)
                      for cat, component in zip(self.categoricals, tf.unstack(x, axis=-1))]
        return tf.add_n(per_factor)
    def kl(self, other):
        return tf.add_n([cat.kl(o) for cat, o in zip(self.categoricals, other.categoricals)])
    def entropy(self):
        return tf.add_n([cat.entropy() for cat in self.categoricals])
    def sample(self):
        draws = [cat.sample() for cat in self.categoricals]
        return tf.cast(tf.stack(draws, axis=-1), tf.int32)
    @classmethod
    def fromflat(cls, flat):
        # Factor sizes cannot be recovered from the flat tensor alone.
        raise NotImplementedError
class DiagGaussianPd(Pd):
    # Multivariate Gaussian with diagonal covariance; the flat parameter
    # tensor is [mean, logstd] concatenated along the last axis.
    def __init__(self, flat):
        self.flat = flat
        mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
        self.mean = mean
        self.logstd = logstd
        self.std = tf.exp(logstd)
    def flatparam(self):
        return self.flat
    def mode(self):
        # For a Gaussian the mode equals the mean.
        return self.mean
    def neglogp(self, x):
        # Diagonal-Gaussian negative log-density:
        # 0.5*sum(((x-mu)/sigma)^2) + 0.5*k*log(2*pi) + sum(log sigma).
        return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \
               + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
               + tf.reduce_sum(self.logstd, axis=-1)
    def kl(self, other):
        assert isinstance(other, DiagGaussianPd)
        # Closed-form KL between two diagonal Gaussians, summed over dims.
        return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
    def entropy(self):
        # Entropy of a diagonal Gaussian: sum(logstd + 0.5*log(2*pi*e)).
        return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
    def sample(self):
        # Reparameterized draw: mu + sigma * eps, with eps ~ N(0, I).
        return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
class BernoulliPd(Pd):
    """Vector of independent Bernoulli variables parameterized by logits."""

    def __init__(self, logits):
        self.logits = logits
        self.ps = tf.sigmoid(logits)

    def flatparam(self):
        return self.logits

    @property
    def mean(self):
        return self.ps

    def mode(self):
        # Rounding the probability gives the most likely 0/1 outcome.
        return tf.round(self.ps)

    def neglogp(self, x):
        per_dim = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.logits, labels=tf.to_float(x))
        return tf.reduce_sum(per_dim, axis=-1)

    def kl(self, other):
        # KL(p||q) = H(p, q) - H(p), written with cross-entropy terms.
        cross = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=other.logits, labels=self.ps)
        own = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.logits, labels=self.ps)
        return (tf.reduce_sum(cross, axis=-1) -
                tf.reduce_sum(own, axis=-1))

    def entropy(self):
        ent = tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.logits, labels=self.ps)
        return tf.reduce_sum(ent, axis=-1)

    def sample(self):
        noise = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(noise, self.ps))

    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
def make_pdtype(ac_space):
    """Return the probability-distribution type matching a gym action space.

    Raises NotImplementedError for unsupported space types.
    """
    from gym import spaces
    if isinstance(ac_space, spaces.Box):
        # Only flat (rank-1) continuous action spaces are supported.
        assert len(ac_space.shape) == 1
        return DiagGaussianPdType(ac_space.shape[0])
    if isinstance(ac_space, spaces.Discrete):
        return CategoricalPdType(ac_space.n)
    if isinstance(ac_space, spaces.MultiDiscrete):
        return MultiCategoricalPdType(ac_space.nvec)
    if isinstance(ac_space, spaces.MultiBinary):
        return BernoulliPdType(ac_space.n)
    raise NotImplementedError
def shape_el(v, i):
    """Size of dimension ``i`` of tensor ``v``: the static value when it is
    known at graph-construction time, otherwise the dynamic ``tf.shape``
    element."""
    static_size = v.get_shape()[i]
    if static_size is None:
        return tf.shape(v)[i]
    return static_size
@U.in_session
def test_probtypes():
    """Exercise every distribution type through validate_probtype with
    fixed parameter vectors."""
    np.random.seed(0)

    # Diagonal Gaussian: flat params concatenate [mean..., logstd...].
    pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8])
    diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101
    validate_probtype(diag_gauss, pdparam_diag_gauss)

    # Single categorical over three classes (params are logits).
    pdparam_categorical = np.array([-.2, .3, .5])
    categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101
    validate_probtype(categorical, pdparam_categorical)

    # Joint categorical with per-factor sizes 1, 2 and 3 (1+2+3 = 6 logits).
    nvec = [1,2,3]
    pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1])
    multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101
    validate_probtype(multicategorical, pdparam_multicategorical)

    # Independent Bernoullis (params are logits).
    pdparam_bernoulli = np.array([-.2, .3, .5])
    bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101
    validate_probtype(bernoulli, pdparam_bernoulli)
def validate_probtype(probtype, pdparam):
    """Statistically validate a probtype: the empirical negative
    log-likelihood of its own samples must match its analytic entropy, and
    the analytic KL to a perturbed distribution must match the Monte-Carlo
    KL estimate (both within 3 standard errors over N samples)."""
    N = 100000
    # Check to see if mean negative log likelihood == differential entropy
    Mval = np.repeat(pdparam[None, :], N, axis=0)
    M = probtype.param_placeholder([N])
    X = probtype.sample_placeholder([N])
    pd = probtype.pdfromflat(M)
    calcloglik = U.function([X, M], pd.logp(X))
    calcent = U.function([M], pd.entropy())
    Xval = tf.get_default_session().run(pd.sample(), feed_dict={M:Mval})
    logliks = calcloglik(Xval, Mval)
    entval_ll = - logliks.mean() #pylint: disable=E1101
    entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
    entval = calcent(Mval).mean() #pylint: disable=E1101
    assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas

    # Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
    M2 = probtype.param_placeholder([N])
    pd2 = probtype.pdfromflat(M2)
    # q is p's parameter vector plus small Gaussian noise.
    q = pdparam + np.random.randn(pdparam.size) * 0.1
    Mval2 = np.repeat(q[None, :], N, axis=0)
    calckl = U.function([M, M2], pd.kl(pd2))
    klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101
    # Reuse the samples drawn from p to estimate E_p[log q].
    logliks = calcloglik(Xval, Mval2)
    klval_ll = - entval - logliks.mean() #pylint: disable=E1101
    klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
    assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
    print('ok on', probtype, pdparam)
def _matching_fc(tensor, name, size, init_scale, init_bias):
    """Return ``tensor`` unchanged when its last dimension already equals
    ``size``; otherwise project it through a fully connected layer."""
    if tensor.shape[-1] != size:
        return fc(tensor, name, size, init_scale=init_scale,
                  init_bias=init_bias)
    return tensor
#New function added here######################
def _matching_fc_mvdp(tensor, name, size, init_scale, init_bias):
    """Like _matching_fc but using a double forward pass (fc_double) and the
    MVDP recombination of the two outputs.

    NOTE(review): assumes ``tensor`` is rank-2 (batch, features) with an even
    feature count -- the integer-division splits silently truncate otherwise;
    confirm against callers.
    """
    if tensor.shape[-1] == size:
        return tensor
    else:
        batch = tensor.get_shape()[0].value
        nin = tensor.get_shape()[1].value
        #Split the combined tensor for a final fully connected double forward pass layer
        t1 = tf.slice(tensor,[0,0],[batch,nin//2])
        t2 = tf.slice(tensor,[0,nin//2],[batch,nin//2])
        r1,r2 = fc_double(t1,t2, name, size, init_scale=init_scale, init_bias=init_bias)
        assert r1.shape == r2.shape
        #Recombine the results in a manner defined in the MVDP paper
        # NOTE(review): both slices take the *first* half of each output;
        # presumably this is the MVDP recombination rule -- confirm against
        # the paper before changing.
        o1 = tf.slice(r1,[0,0],[batch,r1.shape[1]//2])
        o2 = tf.slice(r2,[0,0],[batch,r1.shape[1]//2])
        return tf.concat([o1,o2],1)
##############################################
#New function added here######################
def _matching_fc_wshare(tensor, name, size, init_scale, init_bias):
    """Weight-sharing variant of _matching_fc: identity when widths match,
    otherwise a weight-shared fully connected layer."""
    if tensor.shape[-1] != size:
        return fc_wshare(tensor, name, size, init_scale=init_scale,
                         init_bias=init_bias)
    return tensor
##############################################
| 39.908434 | 217 | 0.643159 |
795bfac3b609b01545bc4cb8f1775336593c921f | 9,532 | py | Python | kubernetes_asyncio/client/models/v1beta1_resource_attributes.py | jnschaeffer/kubernetes_asyncio | 05f42510e7acb8d229bf7c2d4e2455e6328486a6 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta1_resource_attributes.py | jnschaeffer/kubernetes_asyncio | 05f42510e7acb8d229bf7c2d4e2455e6328486a6 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta1_resource_attributes.py | jnschaeffer/kubernetes_asyncio | 05f42510e7acb8d229bf7c2d4e2455e6328486a6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.11
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1ResourceAttributes(object):
    """Authorization attributes available for resource requests to the
    Authorizer interface.

    NOTE: originally auto-generated by OpenAPI Generator; the public
    interface (constructor, properties, ``to_dict``/``to_str``, equality)
    is unchanged -- only the layout is condensed.
    """

    # Maps attribute name -> declared OpenAPI type.
    openapi_types = {
        'group': 'str',
        'name': 'str',
        'namespace': 'str',
        'resource': 'str',
        'subresource': 'str',
        'verb': 'str',
        'version': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'group': 'group',
        'name': 'name',
        'namespace': 'namespace',
        'resource': 'resource',
        'subresource': 'subresource',
        'verb': 'verb',
        'version': 'version'
    }

    def __init__(self, group=None, name=None, namespace=None, resource=None,
                 subresource=None, verb=None, version=None,
                 local_vars_configuration=None):  # noqa: E501
        """V1beta1ResourceAttributes - a model defined in OpenAPI"""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing storage for every declared attribute starts out as None.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)

        # Route provided values through the property setters; unset
        # arguments leave the backing field as None.
        provided = {'group': group, 'name': name, 'namespace': namespace,
                    'resource': resource, 'subresource': subresource,
                    'verb': verb, 'version': version}
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def _plain_property(attr, doc):  # class-body helper, deleted below
        """Build a pass-through property backed by ``_<attr>``."""
        def _get(self):
            return getattr(self, '_' + attr)

        def _set(self, value):
            setattr(self, '_' + attr, value)

        return property(_get, _set, doc=doc)

    group = _plain_property(
        'group',
        "Group is the API Group of the Resource. \"*\" means all.")
    name = _plain_property(
        'name',
        "Name is the name of the resource being requested for a \"get\" or "
        "deleted for a \"delete\". \"\" (empty) means all.")
    namespace = _plain_property(
        'namespace',
        "Namespace is the namespace of the action being requested. \"\" "
        "(empty) is defaulted for LocalSubjectAccessReviews, empty for "
        "cluster-scoped resources, and means \"all\" for namespace-scoped "
        "resources from a SubjectAccessReview or SelfSubjectAccessReview.")
    resource = _plain_property(
        'resource',
        "Resource is one of the existing resource types. \"*\" means all.")
    subresource = _plain_property(
        'subresource',
        "Subresource is one of the existing resource types. \"\" means "
        "none.")
    verb = _plain_property(
        'verb',
        "Verb is a kubernetes resource API verb, like: get, list, watch, "
        "create, update, delete, proxy. \"*\" means all.")
    version = _plain_property(
        'version',
        "Version is the API Version of the Resource. \"*\" means all.")

    del _plain_property

    def to_dict(self):
        """Return the model's properties as a dict, converting nested
        models (anything exposing ``to_dict``) one level deep, matching the
        generated converter."""
        def _convert(value):
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal contents."""
        if not isinstance(other, V1beta1ResourceAttributes):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``; non-instances always compare unequal."""
        if not isinstance(other, V1beta1ResourceAttributes):
            return True
        return self.to_dict() != other.to_dict()
| 32.756014 | 374 | 0.609421 |
795bfce338925a783f19e9fcc4d77aa3e9c91a4c | 823 | py | Python | app/core/tests/test_commands.py | bezalel001/recipes-api | 59d2ae6d1925df21e6f0b13a9bad909f4435591f | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | bezalel001/recipes-api | 59d2ae6d1925df21e6f0b13a9bad909f4435591f | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | bezalel001/recipes-api | 59d2ae6d1925df21e6f0b13a9bad909f4435591f | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    """Tests for the custom ``wait_for_db`` management command."""

    def test_wait_for_db_ready(self):
        """The command returns after one check when the db is reachable."""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as getitem:
            getitem.return_value = True
            call_command('wait_for_db')
            self.assertEqual(getitem.call_count, 1)

    @patch('time.sleep', return_value=True)
    def test_wait_for_db(self, mock_sleep):
        """The command keeps retrying until the db stops raising."""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as getitem:
            # Five failures followed by a success -> six attempts total.
            getitem.side_effect = [OperationalError] * 5 + [True]
            call_command('wait_for_db')
            self.assertEqual(getitem.call_count, 6)
795bfe7a73541d0812df4cf6ab6d0993f076ec1d | 112,092 | py | Python | pandas/core/indexes/multi.py | maxim-lian/pandas | 17a6bc56e5ab6ad3dab12d3a8b20ed69a5830b6f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/multi.py | maxim-lian/pandas | 17a6bc56e5ab6ad3dab12d3a8b20ed69a5830b6f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/multi.py | maxim-lian/pandas | 17a6bc56e5ab6ad3dab12d3a8b20ed69a5830b6f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # pylint: disable=E1101,E1103,W0232
from collections import OrderedDict
import datetime
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import (
Timestamp, algos as libalgos, index as libindex, lib, tslibs)
import pandas.compat as compat
from pandas.compat import lrange, lzip, map, range, zip
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_categorical_dtype, is_hashable,
is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar,
pandas_dtype)
from pandas.core.dtypes.dtypes import ExtensionDtype, PandasExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.config import get_option
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs, ensure_index)
from pandas.core.indexes.frozen import FrozenList, _ensure_frozen
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
# Specialize the shared Index docstring-template substitutions for MultiIndex.
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
    dict(klass='MultiIndex',
         target_klass='MultiIndex or list of tuples'))
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine,
                           libindex.UInt64Engine):
    """
    This class manages a MultiIndex by mapping label combinations to positive
    integers.
    """
    _base = libindex.UInt64Engine

    def _codes_to_ints(self, codes):
        """
        Transform combination(s) of uint64 in one uint64 (each), in a strictly
        monotonic way (i.e. respecting the lexicographic order of integer
        combinations): see BaseMultiIndexCodesEngine documentation.

        Parameters
        ----------
        codes : 1- or 2-dimensional array of dtype uint64
            Combinations of integers (one per row)

        Returns
        ------
        int_keys : scalar or 1-dimensional array, of dtype uint64
            Integer(s) representing one combination (each)
        """
        # Shift the representation of each level by the pre-calculated number
        # of bits:
        # NOTE(review): the in-place shift mutates the caller's array --
        # presumably callers always pass a private copy; confirm before
        # reusing arrays across calls.
        codes <<= self.offsets

        # Now sum and OR are in fact interchangeable. This is a simple
        # composition of the (disjunct) significant bits of each level (i.e.
        # each column in "codes") in a single positive integer:
        if codes.ndim == 1:
            # Single key
            return np.bitwise_or.reduce(codes)

        # Multiple keys
        return np.bitwise_or.reduce(codes, axis=1)
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine,
                            libindex.ObjectEngine):
    """
    This class manages those (extreme) cases in which the number of possible
    label combinations overflows the 64 bits integers, and uses an ObjectEngine
    containing Python integers.
    """
    _base = libindex.ObjectEngine

    def _codes_to_ints(self, codes):
        """
        Transform combination(s) of uint64 in one Python integer (each), in a
        strictly monotonic way (i.e. respecting the lexicographic order of
        integer combinations): see BaseMultiIndexCodesEngine documentation.

        Parameters
        ----------
        codes : 1- or 2-dimensional array of dtype uint64
            Combinations of integers (one per row)

        Returns
        ------
        int_keys : int, or 1-dimensional array of dtype object
            Integer(s) representing one combination (each)
        """
        # Shift the representation of each level by the pre-calculated number
        # of bits. Since this can overflow uint64, first make sure we are
        # working with Python integers:
        # (unlike the uint64 engine, ``astype`` copies, so the caller's
        # array is left untouched)
        codes = codes.astype('object') << self.offsets

        # Now sum and OR are in fact interchangeable. This is a simple
        # composition of the (disjunct) significant bits of each level (i.e.
        # each column in "codes") in a single positive integer (per row):
        if codes.ndim == 1:
            # Single key
            return np.bitwise_or.reduce(codes)

        # Multiple keys
        return np.bitwise_or.reduce(codes, axis=1)
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
labels : sequence of arrays
Integers for each level designating which label at each location.
.. deprecated:: 0.24.0
Use ``codes`` instead
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ['names']
rename = Index.set_names
# --------------------------------------------------------------------
# Constructors
    @deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
    def __new__(cls, levels=None, codes=None, sortorder=None, names=None,
                dtype=None, copy=False, name=None,
                verify_integrity=True, _set_identity=True):
        """Construct a MultiIndex from parallel ``levels`` and ``codes``
        sequences; see the class docstring for the usual helper
        constructors."""
        # compat with Index
        if name is not None:
            names = name
        if levels is None or codes is None:
            raise TypeError("Must pass both levels and codes")
        if len(levels) != len(codes):
            raise ValueError('Length of levels and codes must be the same.')
        if len(levels) == 0:
            raise ValueError('Must pass non-zero number of levels/codes')

        result = object.__new__(MultiIndex)

        # we've already validated levels and codes, so shortcut here
        result._set_levels(levels, copy=copy, validate=False)
        result._set_codes(codes, copy=copy, validate=False)

        if names is not None:
            # handles name validation
            result._set_names(names)

        # sortorder may legitimately be None (unknown sortedness).
        if sortorder is not None:
            result.sortorder = int(sortorder)
        else:
            result.sortorder = sortorder

        # Cross-check levels against codes only after both are attached.
        if verify_integrity:
            result._verify_integrity()
        if _set_identity:
            result._reset_identity()

        return result
def _verify_integrity(self, codes=None, levels=None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
"""
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must match. NOTE:"
" this index is in an inconsistent state.")
codes_length = len(self.codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError("Unequal code lengths: %s" %
([len(code_) for code_ in codes]))
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError("On level %d, code max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, level_codes.max(),
len(level)))
if not level.is_unique:
raise ValueError("Level values must be unique: {values} on "
"level {level}".format(
values=[value for value in level],
level=i))
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
"""
if not is_list_like(arrays):
raise TypeError("Input must be a list / sequence of array-likes.")
elif is_iterator(arrays):
arrays = list(arrays)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.arrays.categorical import _factorize_from_iterables
codes, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, codes=codes, sortorder=sortorder,
names=names, verify_integrity=False)
    @classmethod
    def from_tuples(cls, tuples, sortorder=None, names=None):
        """
        Convert list of tuples to MultiIndex.

        Parameters
        ----------
        tuples : list / sequence of tuple-likes
            Each tuple is the index of one row/column.
        sortorder : int or None
            Level of sortedness (must be lexicographically sorted by that
            level).
        names : list / sequence of str, optional
            Names for the levels in the index.

        Returns
        -------
        index : MultiIndex

        See Also
        --------
        MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
        MultiIndex.from_product : Make a MultiIndex from cartesian product
            of iterables.
        MultiIndex.from_frame : Make a MultiIndex from a DataFrame.

        Examples
        --------
        >>> tuples = [(1, u'red'), (1, u'blue'),
        ...           (2, u'red'), (2, u'blue')]
        >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
        MultiIndex(levels=[[1, 2], ['blue', 'red']],
                   codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
                   names=['number', 'color'])
        """
        if not is_list_like(tuples):
            raise TypeError('Input must be a list / sequence of tuple-likes.')
        elif is_iterator(tuples):
            tuples = list(tuples)

        if len(tuples) == 0:
            # An empty list carries no level information, so names are
            # required to know how many (empty) levels to build.
            if names is None:
                msg = 'Cannot infer number of levels from empty list'
                raise TypeError(msg)
            arrays = [[]] * len(names)
        elif isinstance(tuples, (np.ndarray, Index)):
            if isinstance(tuples, Index):
                tuples = tuples._values
            # Fast C path: transpose an object array of tuples into columns.
            arrays = list(lib.tuples_to_object_array(tuples).T)
        elif isinstance(tuples, list):
            arrays = list(lib.to_object_array_tuples(tuples).T)
        else:
            # Generic fallback for other sequence types.
            arrays = lzip(*tuples)

        return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], ['green', 'purple']],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=['number', 'color'])
"""
from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = _factorize_from_iterables(iterables)
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = lzip(*df.iteritems())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
    @property
    def levels(self):
        # FrozenList of Index objects holding the unique labels per level.
        return self._levels
    @property
    def _values(self):
        # We override here, since our parent uses _data, which we don't use.
        return self.values
    @property
    def array(self):
        """
        Raises a ValueError for `MultiIndex` because there's no single
        array backing a MultiIndex.

        Raises
        ------
        ValueError
        """
        # Deliberately always raises: a MultiIndex is backed by multiple
        # level arrays, so ``.array`` has no well-defined answer.
        msg = ("MultiIndex has no single backing array. Use "
               "'MultiIndex.to_numpy()' to get a NumPy array of tuples.")
        raise ValueError(msg)
@property
def _is_homogeneous_type(self):
"""Whether the levels of a MultiIndex all have the same dtype.
This looks at the dtypes of the levels.
See Also
--------
Index._is_homogeneous_type
DataFrame._is_homogeneous_type
Examples
--------
>>> MultiIndex.from_tuples([
... ('a', 'b'), ('a', 'c')])._is_homogeneous_type
True
>>> MultiIndex.from_tuples([
... ('a', 1), ('a', 2)])._is_homogeneous_type
False
"""
return len({x.dtype for x in self.levels}) <= 1
    def _set_levels(self, levels, level=None, copy=False, validate=True,
                    verify_integrity=False):
        """Internal setter for ``levels``; mutates this index in place and
        resets dependent caches."""
        # This is NOT part of the levels property because it should be
        # externally not allowed to set levels. User beware if you change
        # _levels directly
        if validate and len(levels) == 0:
            raise ValueError('Must set non-zero number of levels.')
        if validate and level is None and len(levels) != self.nlevels:
            raise ValueError('Length of levels must match number of levels.')
        if validate and level is not None and len(levels) != len(level):
            raise ValueError('Length of levels must match length of level.')

        if level is None:
            # Replace every level wholesale.
            new_levels = FrozenList(
                ensure_index(lev, copy=copy)._shallow_copy()
                for lev in levels)
        else:
            # Replace only the requested levels, addressed by position.
            level = [self._get_level_number(l) for l in level]
            new_levels = list(self._levels)
            for l, v in zip(level, levels):
                new_levels[l] = ensure_index(v, copy=copy)._shallow_copy()
            new_levels = FrozenList(new_levels)

        # Validate against the prospective levels before committing them.
        if verify_integrity:
            self._verify_integrity(levels=new_levels)

        names = self.names
        self._levels = new_levels
        if any(names):
            # Re-attach names, since replacing the levels dropped them.
            self._set_names(names)

        # Invalidate cached tuples and other level-dependent caches.
        self._tuples = None
        self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
               verify_integrity=True):
    """
    Set new levels on MultiIndex. Defaults to returning
    new index.

    Parameters
    ----------
    levels : sequence or list of sequence
        new level(s) to apply
    level : int, level name, or sequence of int/level names (default None)
        level(s) to set (None for all levels)
    inplace : bool
        if True, mutates in place
    verify_integrity : bool (default True)
        if True, checks that levels and codes are compatible

    Returns
    -------
    new index (of same type and class...etc)

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
                                         (2, u'one'), (2, u'two')],
                                        names=['foo', 'bar'])
    >>> idx.set_levels([['a','b'], [1,2]])
    MultiIndex(levels=[[u'a', u'b'], [1, 2]],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_levels(['a','b'], level=0)
    MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_levels(['a','b'], level='bar')
    MultiIndex(levels=[[1, 2], [u'a', u'b']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
    MultiIndex(levels=[[u'a', u'b'], [1, 2]],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
               names=[u'foo', u'bar'])
    """
    if is_list_like(levels) and not isinstance(levels, Index):
        levels = list(levels)

    if level is not None and not is_list_like(level):
        # a single level position: levels must be a single flat list-like
        if not is_list_like(levels):
            raise TypeError("Levels must be list-like")
        if is_list_like(levels[0]):
            raise TypeError("Levels must be list-like")
        level = [level]
        levels = [levels]
    elif level is None or is_list_like(level):
        # multiple (or all) levels: levels must be a list of list-likes
        if not is_list_like(levels) or not is_list_like(levels[0]):
            raise TypeError("Levels must be list of lists-like")

    if inplace:
        idx = self
    else:
        idx = self._shallow_copy()
    idx._reset_identity()
    idx._set_levels(levels, level=level, validate=True,
                    verify_integrity=verify_integrity)
    if not inplace:
        return idx
@property
def codes(self):
    # FrozenList of integer code arrays, one per level; -1 marks NaN.
    return self._codes

@property
def labels(self):
    """Deprecated alias for :attr:`codes` (renamed in 0.24.0)."""
    warnings.warn((".labels was deprecated in version 0.24.0. "
                   "Use .codes instead."),
                  FutureWarning, stacklevel=2)
    return self.codes
def _set_codes(self, codes, level=None, copy=False, validate=True,
               verify_integrity=False):
    """
    Replace level codes in place (internal use only).

    Parameters
    ----------
    codes : sequence of array-likes
        Replacement integer codes.
    level : sequence of int or level names, optional
        Positions of the levels whose codes are replaced; None for all.
    copy : bool, default False
        Whether to copy the incoming code data.
    validate : bool, default True
        Check that lengths are consistent before mutating.
    verify_integrity : bool, default False
        Run the full levels/codes integrity check.
    """
    if validate and level is None and len(codes) != self.nlevels:
        raise ValueError("Length of codes must match number of levels")
    if validate and level is not None and len(codes) != len(level):
        raise ValueError('Length of codes must match length of levels.')

    if level is None:
        # wholesale replacement: re-freeze each code array against its level
        new_codes = FrozenList(
            _ensure_frozen(level_codes, lev, copy=copy)._shallow_copy()
            for lev, level_codes in zip(self.levels, codes))
    else:
        # selective replacement at the requested positions
        level = [self._get_level_number(l) for l in level]
        new_codes = list(self._codes)
        for lev_idx, level_codes in zip(level, codes):
            lev = self.levels[lev_idx]
            new_codes[lev_idx] = _ensure_frozen(
                level_codes, lev, copy=copy)._shallow_copy()
        new_codes = FrozenList(new_codes)

    if verify_integrity:
        self._verify_integrity(codes=new_codes)

    self._codes = new_codes

    # invalidate the cached tuple representation and other caches
    self._tuples = None
    self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
               verify_integrity=True):
    """
    Deprecated alias for :meth:`set_codes`.

    .. deprecated:: 0.24.0
        Use ``set_codes`` instead.
    """
    deprecation_msg = (".set_labels was deprecated in version 0.24.0. "
                       "Use .set_codes instead.")
    warnings.warn(deprecation_msg, FutureWarning, stacklevel=2)
    # delegate directly, mapping the old 'labels' argument onto 'codes'
    return self.set_codes(codes=labels, level=level, inplace=inplace,
                          verify_integrity=verify_integrity)
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def set_codes(self, codes, level=None, inplace=False,
              verify_integrity=True):
    """
    Set new codes on MultiIndex. Defaults to returning
    new index.

    .. versionadded:: 0.24.0

       New name for deprecated method `set_labels`.

    Parameters
    ----------
    codes : sequence or list of sequence
        new codes to apply
    level : int, level name, or sequence of int/level names (default None)
        level(s) to set (None for all levels)
    inplace : bool
        if True, mutates in place
    verify_integrity : bool (default True)
        if True, checks that levels and codes are compatible

    Returns
    -------
    new index (of same type and class...etc)

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
                                         (2, u'one'), (2, u'two')],
                                        names=['foo', 'bar'])
    >>> idx.set_codes([[1,0,1,0], [0,0,1,1]])
    MultiIndex(levels=[[1, 2], [u'one', u'two']],
               codes=[[1, 0, 1, 0], [0, 0, 1, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_codes([1,0,1,0], level=0)
    MultiIndex(levels=[[1, 2], [u'one', u'two']],
               codes=[[1, 0, 1, 0], [0, 1, 0, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_codes([0,0,1,1], level='bar')
    MultiIndex(levels=[[1, 2], [u'one', u'two']],
               codes=[[0, 0, 1, 1], [0, 0, 1, 1]],
               names=[u'foo', u'bar'])
    >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1])
    MultiIndex(levels=[[1, 2], [u'one', u'two']],
               codes=[[1, 0, 1, 0], [0, 0, 1, 1]],
               names=[u'foo', u'bar'])
    """
    if level is not None and not is_list_like(level):
        # a single level position: codes must be a single flat list-like
        if not is_list_like(codes):
            raise TypeError("Codes must be list-like")
        if is_list_like(codes[0]):
            raise TypeError("Codes must be list-like")
        level = [level]
        codes = [codes]
    elif level is None or is_list_like(level):
        # multiple (or all) levels: codes must be a list of list-likes
        if not is_list_like(codes) or not is_list_like(codes[0]):
            raise TypeError("Codes must be list of lists-like")

    if inplace:
        idx = self
    else:
        idx = self._shallow_copy()
    idx._reset_identity()
    idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
    if not inplace:
        return idx
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def copy(self, names=None, dtype=None, levels=None, codes=None,
         deep=False, _set_identity=False, **kwargs):
    """
    Make a copy of this object. Names, dtype, levels and codes can be
    passed and will be set on new copy.

    Parameters
    ----------
    names : sequence, optional
    dtype : numpy dtype or pandas type, optional
    levels : sequence, optional
    codes : sequence, optional

    Returns
    -------
    copy : MultiIndex

    Notes
    -----
    In most cases, there should be no functional difference from using
    ``deep``, but if ``deep`` is passed it will attempt to deepcopy.
    This could be potentially expensive on large MultiIndex objects.
    """
    names = self._validate_names(name=kwargs.get('name'), names=names,
                                 deep=deep)

    # pick how existing attributes are carried over: deep copies or
    # direct references
    if deep:
        from copy import deepcopy
        transfer = deepcopy
    else:
        def transfer(obj):
            return obj

    if levels is None:
        levels = transfer(self.levels)
    if codes is None:
        codes = transfer(self.codes)

    return MultiIndex(levels=levels, codes=codes, names=names,
                      sortorder=self.sortorder, verify_integrity=False,
                      _set_identity=_set_identity)
def __array__(self, dtype=None):
    """ the array interface, return my values """
    # dtype accepted for the numpy protocol but not used here
    return self.values

def view(self, cls=None):
    """ this is defined as a copy with the same identity """
    result = self.copy()
    result._id = self._id
    return result

def _shallow_copy_with_infer(self, values, **kwargs):
    # On equal MultiIndexes the difference is empty.
    # Therefore, an empty MultiIndex is returned GH13490
    if len(values) == 0:
        return MultiIndex(levels=[[] for _ in range(self.nlevels)],
                          codes=[[] for _ in range(self.nlevels)],
                          **kwargs)
    return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def __contains__(self, key):
    # fail fast with TypeError for unhashable keys
    hash(key)
    try:
        self.get_loc(key)
        return True
    except (LookupError, TypeError):
        return False

contains = __contains__

@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
    if values is not None:
        # 'names' takes priority over a singular 'name'
        names = kwargs.pop('names', kwargs.pop('name', self.names))
        # discards freq
        kwargs.pop('freq', None)
        return MultiIndex.from_tuples(values, names=names, **kwargs)
    return self.view()
@cache_readonly
def dtype(self):
    # a MultiIndex is always seen as object dtype (tuples)
    return np.dtype('O')

def _is_memory_usage_qualified(self):
    """ return a boolean if we need a qualified .info display """
    def f(l):
        # inferred types that warrant a '+' memory qualifier
        return 'mixed' in l or 'string' in l or 'unicode' in l
    return any(f(l) for l in self._inferred_type_levels)

@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
    # we are overwriting our base class to avoid
    # computing .values here which could materialize
    # a tuple representation unnecessarily
    return self._nbytes(deep)

@cache_readonly
def nbytes(self):
    """ return the number of bytes in the underlying data """
    return self._nbytes(False)
def _nbytes(self, deep=False):
    """
    return the number of bytes in the underlying data
    deeply introspect the level data if deep=True

    include the engine hashtable

    *this is in internal routine*
    """
    # fallback object size for implementations with no useful
    # getsizeof (PyPy)
    objsize = 24

    total = sum(level.memory_usage(deep=deep) for level in self.levels)
    total += sum(code_arr.nbytes for code_arr in self.codes)
    total += sum(getsizeof(name, objsize) for name in self.names)

    # include our engine hashtable
    total += self._engine.sizeof(deep=deep)
    return total
# --------------------------------------------------------------------
# Rendering Methods

def _format_attrs(self):
    """
    Return a list of tuples of the (attr,formatted_value)
    """
    attrs = [
        ('levels', ibase.default_pprint(self._levels,
                                        max_seq_items=False)),
        ('codes', ibase.default_pprint(self._codes,
                                       max_seq_items=False))]
    # names/sortorder are only shown when actually set
    if com._any_not_none(*self.names):
        attrs.append(('names', ibase.default_pprint(self.names)))
    if self.sortorder is not None:
        attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
    return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self, name=None):
    # we are formatting thru the attributes
    return None
def _format_native_types(self, na_rep='nan', **kwargs):
    """
    Format each level's values as native (string) types, substituting
    ``na_rep`` for missing entries, and return the reconstructed values.
    """
    new_levels = []
    new_codes = []

    # go through the levels and format them
    for level, level_codes in zip(self.levels, self.codes):
        level = level._format_native_types(na_rep=na_rep, **kwargs)
        # add nan values, if there are any
        mask = (level_codes == -1)
        if mask.any():
            nan_index = len(level)
            level = np.append(level, na_rep)
            # NOTE(review): .values() here appears to yield a mutable
            # ndarray view of the frozen codes, allowing the in-place
            # assignment below — confirm against FrozenNDArray
            level_codes = level_codes.values()
            level_codes[mask] = nan_index
        new_levels.append(level)
        new_codes.append(level_codes)

    if len(new_levels) == 1:
        return Index(new_levels[0])._format_native_types()
    else:
        # reconstruct the multi-index
        mi = MultiIndex(levels=new_levels, codes=new_codes,
                        names=self.names, sortorder=self.sortorder,
                        verify_integrity=False)
        return mi.values
def format(self, space=2, sparsify=None, adjoin=True, names=False,
           na_rep=None, formatter=None):
    """
    Render the MultiIndex as strings: either one joined string per row
    (``adjoin=True``) or a list of string columns, one per level.
    """
    if len(self) == 0:
        return []

    stringified_levels = []
    for lev, level_codes in zip(self.levels, self.codes):
        na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)

        if len(lev) > 0:
            formatted = lev.take(level_codes).format(formatter=formatter)

            # we have some NA
            mask = level_codes == -1
            if mask.any():
                formatted = np.array(formatted, dtype=object)
                formatted[mask] = na
                formatted = formatted.tolist()

        else:
            # weird all NA case
            formatted = [pprint_thing(na if isna(x) else x,
                                      escape_chars=('\t', '\r', '\n'))
                         for x in algos.take_1d(lev._values, level_codes)]
        stringified_levels.append(formatted)

    result_levels = []
    for lev, name in zip(stringified_levels, self.names):
        level = []

        if names:
            # prepend the level name (or '' placeholder) as a header
            level.append(pprint_thing(name,
                                      escape_chars=('\t', '\r', '\n'))
                         if name is not None else '')

        level.extend(np.array(lev, dtype=object))
        result_levels.append(level)

    if sparsify is None:
        sparsify = get_option("display.multi_sparse")

    if sparsify:
        sentinel = ''
        # GH3547
        # use value of sparsify as sentinel, unless it's an obvious
        # "Truthey" value
        if sparsify not in [True, 1]:
            sentinel = sparsify
        # little bit of a kludge job for #1217
        result_levels = _sparsify(result_levels, start=int(names),
                                  sentinel=sentinel)

    if adjoin:
        from pandas.io.formats.format import _get_adjustment
        adj = _get_adjustment()
        return adj.adjoin(space, *result_levels).split('\n')
    else:
        return result_levels
# --------------------------------------------------------------------

def __len__(self):
    # all code arrays share the same length; level 0 is representative
    return len(self.codes[0])

def _get_names(self):
    # names live on the individual level Index objects
    return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
    """
    Set new names on index. Each name has to be a hashable type.

    Parameters
    ----------
    values : str or sequence
        name(s) to set
    level : int, level name, or sequence of int/level names (default None)
        If the index is a MultiIndex (hierarchical), level(s) to set (None
        for all levels). Otherwise level must be None
    validate : boolean, default True
        validate that the names match level lengths

    Raises
    ------
    TypeError if each name is not hashable.

    Notes
    -----
    sets names on levels. WARNING: mutates!

    Note that you generally want to set this *after* changing levels, so
    that it only acts on copies
    """
    # GH 15110
    # Don't allow a single string for names in a MultiIndex
    if names is not None and not is_list_like(names):
        raise ValueError('Names should be list-like for a MultiIndex')
    names = list(names)

    if validate and level is not None and len(names) != len(level):
        raise ValueError('Length of names must match length of level.')
    if validate and level is None and len(names) != self.nlevels:
        raise ValueError('Length of names must match number of levels in '
                         'MultiIndex.')

    if level is None:
        level = range(self.nlevels)
    else:
        level = [self._get_level_number(l) for l in level]

    # set the name
    for l, name in zip(level, names):
        if name is not None:
            # GH 20527
            # All items in 'names' need to be hashable:
            if not is_hashable(name):
                raise TypeError('{}.name must be a hashable type'
                                .format(self.__class__.__name__))
        # name lives on the level Index; rename mutates in place
        self.levels[l].rename(name, inplace=True)

# read/write access to per-level names via the level Index objects
names = property(fset=_set_names, fget=_get_names,
                 doc="Names of levels in MultiIndex")
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
    indexer = self.codes[level]
    level_index = self.levels[level]

    if mapper is not None:
        # Handle group mapping function and return
        level_values = self.levels[level].take(indexer)
        grouper = level_values.map(mapper)
        return grouper, None, None

    codes, uniques = algos.factorize(indexer, sort=True)

    if len(uniques) > 0 and uniques[0] == -1:
        # Handle NAs: factorize only the non-NA entries, then
        # scatter back, leaving -1 markers in place
        mask = indexer != -1
        ok_codes, uniques = algos.factorize(indexer[mask], sort=True)

        codes = np.empty(len(indexer), dtype=indexer.dtype)
        codes[mask] = ok_codes
        codes[~mask] = -1

    if len(uniques) < len(level_index):
        # Remove unobserved levels from level_index
        level_index = level_index.take(uniques)

    grouper = level_index.take(codes)

    return grouper, codes, level_index
@property
def _constructor(self):
    # tuples are the natural scalar input when reconstructing
    return MultiIndex.from_tuples

@cache_readonly
def inferred_type(self):
    # a MultiIndex always reports 'mixed'
    return 'mixed'
def _get_level_number(self, level):
    """
    Translate a level name or (possibly negative) integer position into
    a zero-based level number, raising if ambiguous or out of range.
    """
    count = self.names.count(level)
    if (count > 1) and not is_integer(level):
        raise ValueError('The name %s occurs multiple times, use a '
                         'level number' % level)
    try:
        level = self.names.index(level)
    except ValueError:
        # not a name: treat as an integer position
        if not is_integer(level):
            raise KeyError('Level %s not found' % str(level))
        elif level < 0:
            level += self.nlevels
            if level < 0:
                orig_level = level - self.nlevels
                raise IndexError('Too many levels: Index has only %d '
                                 'levels, %d is not a valid level number' %
                                 (self.nlevels, orig_level))
        # Note: levels are zero-based
        elif level >= self.nlevels:
            raise IndexError('Too many levels: Index has only %d levels, '
                             'not %d' % (self.nlevels, level + 1))
    return level

# cached tuple representation, built lazily by the `values` property
_tuples = None
@cache_readonly
def _engine(self):
    """
    Build a lookup engine by packing all level codes into single
    integers, choosing a uint64 or Python-int engine by total bit width.
    """
    # Calculate the number of bits needed to represent labels in each
    # level, as log2 of their sizes (including -1 for NaN):
    sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))

    # Sum bit counts, starting from the _right_....
    lev_bits = np.cumsum(sizes[::-1])[::-1]

    # ... in order to obtain offsets such that sorting the combination of
    # shifted codes (one for each level, resulting in a unique integer) is
    # equivalent to sorting lexicographically the codes themselves. Notice
    # that each level needs to be shifted by the number of bits needed to
    # represent the _previous_ ones:
    offsets = np.concatenate([lev_bits[1:], [0]]).astype('uint64')

    # Check the total number of bits needed for our representation:
    if lev_bits[0] > 64:
        # The levels would overflow a 64 bit uint - use Python integers:
        return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
    return MultiIndexUIntEngine(self.levels, self.codes, offsets)
@property
def values(self):
    """
    Materialize (and cache) the index as an ndarray of tuples.
    """
    if self._tuples is not None:
        return self._tuples

    values = []

    for i in range(self.nlevels):
        vals = self._get_level_values(i)
        if is_categorical_dtype(vals):
            vals = vals.get_values()
        # box extension / datetime-like values as plain objects before
        # zipping into tuples
        if (isinstance(vals.dtype, (PandasExtensionDtype, ExtensionDtype))
                or hasattr(vals, '_box_values')):
            vals = vals.astype(object)
        vals = np.array(vals, copy=False)
        values.append(vals)

    self._tuples = lib.fast_zip(values)
    return self._tuples

@property
def _has_complex_internals(self):
    # to disable groupby tricks
    return True
@cache_readonly
def is_monotonic_increasing(self):
    """
    return if the index is monotonic increasing (only equal or
    increasing) values.
    """
    # reversed() because lexsort() wants the most significant key last.
    values = [self._get_level_values(i).values
              for i in reversed(range(len(self.levels)))]
    try:
        sort_order = np.lexsort(values)
        return Index(sort_order).is_monotonic
    except TypeError:

        # we have mixed types and np.lexsort is not happy
        return Index(self.values).is_monotonic

@cache_readonly
def is_monotonic_decreasing(self):
    """
    return if the index is monotonic decreasing (only equal or
    decreasing) values.
    """
    # monotonic decreasing if and only if reverse is monotonic increasing
    return self[::-1].is_monotonic_increasing

@cache_readonly
def _have_mixed_levels(self):
    """ return a boolean list indicated if we have mixed levels """
    return ['mixed' in l for l in self._inferred_type_levels]

@cache_readonly
def _inferred_type_levels(self):
    """ return a list of the inferred types, one for each level """
    return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
    """ return a uint64 ndarray of my hashed values """
    from pandas.core.util.hashing import hash_tuples
    return hash_tuples(self)

def _hashed_indexing_key(self, key):
    """
    validate and return the hash for the provided key

    *this is internal for use for the cython routines*

    Parameters
    ----------
    key : string or tuple

    Returns
    -------
    np.uint64

    Notes
    -----
    we need to stringify if we have mixed levels
    """
    from pandas.core.util.hashing import hash_tuples, hash_tuple

    if not isinstance(key, tuple):
        return hash_tuples(key)

    if not len(key) == self.nlevels:
        raise KeyError

    def f(k, stringify):
        # stringify components belonging to mixed-type levels so the
        # hash matches _hashed_values
        if stringify and not isinstance(k, compat.string_types):
            k = str(k)
        return k
    key = tuple(f(k, stringify)
                for k, stringify in zip(key, self._have_mixed_levels))
    return hash_tuple(key)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
    from pandas.core.sorting import get_group_index
    from pandas._libs.hashtable import duplicated_int64

    # collapse the per-level codes into a single group id per row, then
    # detect duplicate ids
    shape = map(len, self.levels)
    ids = get_group_index(self.codes, shape, sort=False, xnull=False)

    return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
    """
    fillna is not implemented for MultiIndex.

    The parameters are accepted only for signature compatibility with
    ``Index.fillna``; they are never used.

    Raises
    ------
    NotImplementedError
        Always.
    """
    # Bug fix: the message previously said "isna is not defined" —
    # an apparent copy-paste from MultiIndex.isna; it now names the
    # method the caller actually invoked.
    raise NotImplementedError('fillna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
    # a code of -1 marks a missing value at that level
    nan_masks = [level_codes == -1 for level_codes in self.codes]
    if how == 'any':
        drop_mask = np.any(nan_masks, axis=0)
    elif how == 'all':
        drop_mask = np.all(nan_masks, axis=0)
    else:
        raise ValueError("invalid how option: {0}".format(how))

    keep = ~drop_mask
    new_codes = [level_codes[keep] for level_codes in self.codes]
    return self.copy(codes=new_codes, deep=True)
def get_value(self, series, key):
    """
    Fast scalar lookup of `key` in `series`, falling back through
    several strategies (engine, partial MultiIndex lookup, positional).
    """
    # somewhat broken encapsulation
    from pandas.core.indexing import maybe_droplevels

    # Label-based
    s = com.values_from_object(series)
    k = com.values_from_object(key)

    def _try_mi(k):
        # TODO: what if a level contains tuples??
        loc = self.get_loc(k)
        new_values = series._values[loc]
        new_index = self[loc]
        new_index = maybe_droplevels(new_index, k)
        return series._constructor(new_values, index=new_index,
                                   name=series.name).__finalize__(self)

    try:
        return self._engine.get_value(s, k)
    except KeyError as e1:
        # engine miss: try a (possibly partial) MultiIndex lookup
        try:
            return _try_mi(key)
        except KeyError:
            pass

        # last resort: positional access
        try:
            return libindex.get_value_at(s, k)
        except IndexError:
            raise
        except TypeError:
            # generator/iterator-like
            if is_iterator(key):
                raise InvalidIndexError(key)
            else:
                raise e1
        except Exception:  # pragma: no cover
            raise e1

    except TypeError:

        # a Timestamp will raise a TypeError in a multi-index
        # rather than a KeyError, try it here
        # note that a string that 'looks' like a Timestamp will raise
        # a KeyError! (GH5725)
        if (isinstance(key, (datetime.datetime, np.datetime64)) or
                (compat.PY3 and isinstance(key, compat.string_types))):
            try:
                return _try_mi(key)
            except KeyError:
                raise
            except (IndexError, ValueError, TypeError):
                pass

            # coerce to Timestamp and retry
            try:
                return _try_mi(Timestamp(key))
            except (KeyError, TypeError,
                    IndexError, ValueError, tslibs.OutOfBoundsDatetime):
                pass

        raise InvalidIndexError(key)
def _get_level_values(self, level, unique=False):
    """
    Return vector of label values for requested level,
    equal to the length of the index

    **this is an internal method**

    Parameters
    ----------
    level : int level
    unique : bool, default False
        if True, drop duplicated values

    Returns
    -------
    values : ndarray
    """
    values = self.levels[level]
    level_codes = self.codes[level]
    if unique:
        level_codes = algos.unique(level_codes)
    # expand codes into actual values; -1 codes become the level's NA value
    filled = algos.take_1d(values._values, level_codes,
                           fill_value=values._na_value)
    values = values._shallow_copy(filled)
    return values
def get_level_values(self, level):
    """
    Return vector of label values for requested level,
    equal to the length of the index.

    Parameters
    ----------
    level : int or str
        ``level`` is either the integer position of the level in the
        MultiIndex, or the name of the level.

    Returns
    -------
    values : Index
        ``values`` is a level of this MultiIndex converted to
        a single :class:`Index` (or subclass thereof).

    Examples
    --------
    Create a MultiIndex:

    >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
    >>> mi.names = ['level_1', 'level_2']

    Get level values by supplying level as either integer or name:

    >>> mi.get_level_values(0)
    Index(['a', 'b', 'c'], dtype='object', name='level_1')
    >>> mi.get_level_values('level_2')
    Index(['d', 'e', 'f'], dtype='object', name='level_2')
    """
    level = self._get_level_number(level)
    values = self._get_level_values(level)
    return values

@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):

    if level is None:
        return super(MultiIndex, self).unique()
    else:
        # unique values of a single level only
        level = self._get_level_number(level)
        return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
    """ convert to object if we are a categorical """
    return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
    """
    Create a DataFrame with the levels of the MultiIndex as columns.

    Column ordering is determined by the DataFrame constructor with data as
    a dict.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    index : boolean, default True
        Set the index of the returned DataFrame as the original MultiIndex.

    name : list / sequence of strings, optional
        The passed names should substitute index level names.

    Returns
    -------
    DataFrame : a DataFrame containing the original MultiIndex data.

    See Also
    --------
    DataFrame
    """
    from pandas import DataFrame
    if name is not None:
        if not is_list_like(name):
            raise TypeError("'name' must be a list / sequence "
                            "of column names.")

        if len(name) != len(self.levels):
            raise ValueError("'name' should have same length as "
                             "number of levels on index.")
        idx_names = name
    else:
        idx_names = self.names

    # Guarantee resulting column order
    result = DataFrame(
        OrderedDict([
            ((level if lvlname is None else lvlname),
             self._get_level_values(level))
            for lvlname, level in zip(idx_names, range(len(self.levels)))
        ]),
        copy=False
    )

    if index:
        result.index = self
    return result
def to_hierarchical(self, n_repeat, n_shuffle=1):
    """
    Return a MultiIndex reshaped to conform to the
    shapes given by n_repeat and n_shuffle.

    .. deprecated:: 0.24.0

    Useful to replicate and rearrange a MultiIndex for combination
    with another Index with n_repeat items.

    Parameters
    ----------
    n_repeat : int
        Number of times to repeat the labels on self
    n_shuffle : int
        Controls the reordering of the labels. If the result is going
        to be an inner level in a MultiIndex, n_shuffle will need to be
        greater than one. The size of each label must divisible by
        n_shuffle.

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
                                        (2, u'one'), (2, u'two')])
    >>> idx.to_hierarchical(3)
    MultiIndex(levels=[[1, 2], [u'one', u'two']],
               codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                      [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
    """
    levels = self.levels
    codes = [np.repeat(level_codes, n_repeat) for
             level_codes in self.codes]
    # Assumes that each level_codes is divisible by n_shuffle
    codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]
    names = self.names
    warnings.warn("Method .to_hierarchical is deprecated and will "
                  "be removed in a future version",
                  FutureWarning, stacklevel=2)
    return MultiIndex(levels=levels, codes=codes, names=names)
def to_flat_index(self):
    """
    Convert a MultiIndex to an Index of Tuples containing the level values.

    .. versionadded:: 0.24.0

    Returns
    -------
    pd.Index
        Index with the MultiIndex data represented in Tuples.

    Notes
    -----
    This method will simply return the caller if called by anything other
    than a MultiIndex.

    Examples
    --------
    >>> index = pd.MultiIndex.from_product(
    ...     [['foo', 'bar'], ['baz', 'qux']],
    ...     names=['a', 'b'])
    >>> index.to_flat_index()
    Index([('foo', 'baz'), ('foo', 'qux'),
           ('bar', 'baz'), ('bar', 'qux')],
          dtype='object')
    """
    return Index(self.values, tupleize_cols=False)

@property
def is_all_dates(self):
    # a MultiIndex is never treated as an all-dates index
    return False
def is_lexsorted(self):
    """
    Return True if the codes are lexicographically sorted
    """
    return self.lexsort_depth == self.nlevels

@cache_readonly
def lexsort_depth(self):
    """
    Number of leading levels (from the left) that are lexsorted.
    """
    if self.sortorder is not None:
        # a known sortorder short-circuits the check; only a fully
        # lexsorted index (sortorder == 0) counts here
        if self.sortorder == 0:
            return self.nlevels
        else:
            return 0

    int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
    # largest k such that the first k levels are lexsorted
    for k in range(self.nlevels, 0, -1):
        if libalgos.is_lexsorted(int64_codes[:k]):
            return k
    return 0
def _sort_levels_monotonic(self):
    """
    .. versionadded:: 0.20.0

    This is an *internal* function.

    Create a new MultiIndex from the current to monotonically sorted
    items IN the levels. This does not actually make the entire MultiIndex
    monotonic, JUST the levels.

    The resulting MultiIndex will have the same outward
    appearance, meaning the same .values and ordering. It will also
    be .equals() to the original.

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
                          codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
    >>> i
    MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

    >>> i.sort_monotonic()
    MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
               codes=[[0, 0, 1, 1], [1, 0, 1, 0]])
    """

    if self.is_lexsorted() and self.is_monotonic:
        return self

    new_levels = []
    new_codes = []

    for lev, level_codes in zip(self.levels, self.codes):

        if not lev.is_monotonic:
            try:
                # indexer to reorder the levels
                indexer = lev.argsort()
            except TypeError:
                # unsortable level values: leave this level untouched
                pass
            else:
                lev = lev.take(indexer)

                # indexer to reorder the level codes
                indexer = ensure_int64(indexer)
                ri = lib.get_reverse_indexer(indexer, len(indexer))
                level_codes = algos.take_1d(ri, level_codes)

        new_levels.append(lev)
        new_codes.append(level_codes)

    return MultiIndex(new_levels, new_codes,
                      names=self.names, sortorder=self.sortorder,
                      verify_integrity=False)
def remove_unused_levels(self):
    """
    Create a new MultiIndex from the current that removes
    unused levels, meaning that they are not expressed in the labels.

    The resulting MultiIndex will have the same outward
    appearance, meaning the same .values and ordering. It will also
    be .equals() to the original.

    .. versionadded:: 0.20.0

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

    >>> i[2:]
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[1, 1], [0, 1]])

    The 0 from the first level is not represented
    and can be removed

    >>> i[2:].remove_unused_levels()
    MultiIndex(levels=[[1], ['a', 'b']],
               codes=[[0, 0], [0, 1]])
    """

    new_levels = []
    new_codes = []

    changed = False
    for lev, level_codes in zip(self.levels, self.codes):

        # Since few levels are typically unused, bincount() is more
        # efficient than unique() - however it only accepts positive values
        # (and drops order):
        uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
        has_na = int(len(uniques) and (uniques[0] == -1))

        if len(uniques) != len(lev) + has_na:
            # We have unused levels
            changed = True

            # Recalculate uniques, now preserving order.
            # Can easily be cythonized by exploiting the already existing
            # "uniques" and stop parsing "level_codes" when all items
            # are found:
            uniques = algos.unique(level_codes)
            if has_na:
                na_idx = np.where(uniques == -1)[0]
                # Just ensure that -1 is in first position:
                uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]

            # codes get mapped from uniques to 0:len(uniques)
            # -1 (if present) is mapped to last position
            code_mapping = np.zeros(len(lev) + has_na)
            # ... and reassigned value -1:
            code_mapping[uniques] = np.arange(len(uniques)) - has_na

            level_codes = code_mapping[level_codes]

            # new levels are simple
            lev = lev.take(uniques[has_na:])

        new_levels.append(lev)
        new_codes.append(level_codes)

    result = self._shallow_copy()

    if changed:
        result._reset_identity()
        result._set_levels(new_levels, validate=False)
        result._set_codes(new_codes, validate=False)

    return result
@property
def nlevels(self):
    """Integer number of levels in this MultiIndex."""
    return len(self.levels)

@property
def levshape(self):
    """A tuple with the length of each level."""
    return tuple(len(x) for x in self.levels)
def __reduce__(self):
    """Necessary for making this object picklable"""
    d = dict(levels=[lev for lev in self.levels],
             codes=[level_codes for level_codes in self.codes],
             sortorder=self.sortorder, names=list(self.names))
    return ibase._new_Index, (self.__class__, d), None

def __setstate__(self, state):
    """Necessary for making this object picklable"""

    if isinstance(state, dict):
        # current-format pickle payload
        levels = state.get('levels')
        codes = state.get('codes')
        sortorder = state.get('sortorder')
        names = state.get('names')

    elif isinstance(state, tuple):
        # legacy pickle payload: (nd_state, own_state)
        nd_state, own_state = state
        levels, codes, sortorder, names = own_state

    self._set_levels([Index(x) for x in levels], validate=False)
    self._set_codes(codes)
    self._set_names(names)
    self.sortorder = sortorder
    self._verify_integrity()
    self._reset_identity()
def __getitem__(self, key):
    """
    Scalar keys return a tuple of level values; array-like keys return
    a new MultiIndex selecting those positions.
    """
    if is_scalar(key):
        key = com.cast_scalar_indexer(key)

        retval = []
        for lev, level_codes in zip(self.levels, self.codes):
            if level_codes[key] == -1:
                # -1 code marks a missing value at this level
                retval.append(np.nan)
            else:
                retval.append(lev[level_codes[key]])

        return tuple(retval)
    else:
        if com.is_bool_indexer(key):
            key = np.asarray(key, dtype=bool)
            # boolean masks preserve order, so the sortorder survives
            sortorder = self.sortorder
        else:
            # cannot be sure whether the result will be sorted
            sortorder = None

            if isinstance(key, Index):
                key = np.asarray(key)

        new_codes = [level_codes[key] for level_codes in self.codes]

        return MultiIndex(levels=self.levels, codes=new_codes,
                          names=self.names, sortorder=sortorder,
                          verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
         fill_value=None, **kwargs):
    nv.validate_take(tuple(), kwargs)
    indices = ensure_platform_int(indices)
    taken = self._assert_take_fillable(self.codes, indices,
                                       allow_fill=allow_fill,
                                       fill_value=fill_value,
                                       na_value=-1)
    return MultiIndex(levels=self.levels, codes=taken,
                      names=self.names, verify_integrity=False)

def _assert_take_fillable(self, values, indices, allow_fill=True,
                          fill_value=None, na_value=None):
    """ Internal method to handle NA filling of take """
    # only fill if we are passing a non-None fill_value
    if allow_fill and fill_value is not None:
        if (indices < -1).any():
            msg = ('When allow_fill=True and fill_value is not None, '
                   'all indices must be >= -1')
            raise ValueError(msg)
        taken = [lab.take(indices) for lab in self.codes]
        mask = indices == -1
        if mask.any():
            masked = []
            for new_label in taken:
                # NOTE(review): .values() here appears to yield a mutable
                # ndarray view of the frozen codes, enabling the in-place
                # masked assignment below — confirm against FrozenNDArray
                label_values = new_label.values()
                label_values[mask] = na_value
                masked.append(np.asarray(label_values))
            taken = masked
    else:
        taken = [lab.take(indices) for lab in self.codes]
    return taken
    def append(self, other):
        """
        Append a collection of Index options together
        Parameters
        ----------
        other : Index or list/tuple of indices
        Returns
        -------
        appended : Index
            A MultiIndex when every appended index is a MultiIndex with at
            least as many levels as self; otherwise a flat Index of tuples.
        """
        if not isinstance(other, (list, tuple)):
            other = [other]
        if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
               for o in other):
            # fast path: append level-by-level, preserving our level count
            arrays = []
            for i in range(self.nlevels):
                label = self._get_level_values(i)
                appended = [o._get_level_values(i) for o in other]
                arrays.append(label.append(appended))
            return MultiIndex.from_arrays(arrays, names=self.names)
        # fallback: concatenate materialized tuple values
        to_concat = (self.values, ) + tuple(k._values for k in other)
        new_tuples = np.concatenate(to_concat)
        # if all(isinstance(x, MultiIndex) for x in other):
        try:
            return MultiIndex.from_tuples(new_tuples, names=self.names)
        except (TypeError, IndexError):
            # contents cannot be re-tupled; return a plain object Index
            return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
    @Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)
    def repeat(self, repeats, axis=None):
        # numpy-compat: ``axis`` must be None for Index.repeat
        nv.validate_repeat(tuple(), dict(axis=axis))
        # repeating the codes repeats the rows; the levels are unchanged
        return MultiIndex(levels=self.levels,
                          codes=[level_codes.view(np.ndarray).repeat(repeats)
                                 for level_codes in self.codes],
                          names=self.names, sortorder=self.sortorder,
                          verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
    @deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
    def drop(self, codes, level=None, errors='raise'):
        """
        Make new MultiIndex with passed list of codes deleted
        Parameters
        ----------
        codes : array-like
            Must be a list of tuples
        level : int or level name, default None
        errors : str, default 'raise'
            If 'ignore', missing labels are silently skipped.
        Returns
        -------
        dropped : MultiIndex
        """
        if level is not None:
            return self._drop_from_level(codes, level)
        try:
            # vectorized pre-check: report labels missing from the axis
            if not isinstance(codes, (np.ndarray, Index)):
                codes = com.index_labels_to_array(codes)
            indexer = self.get_indexer(codes)
            mask = indexer == -1
            if mask.any():
                if errors != 'ignore':
                    raise ValueError('codes %s not contained in axis' %
                                     codes[mask])
        except Exception:
            # NOTE(review): broad except preserved — get_indexer can fail on
            # partial/non-tuple keys, which are handled per-label below
            pass
        inds = []
        for level_codes in codes:
            try:
                loc = self.get_loc(level_codes)
                # get_loc returns either an integer, a slice, or a boolean
                # mask
                if isinstance(loc, int):
                    inds.append(loc)
                elif isinstance(loc, slice):
                    inds.extend(lrange(loc.start, loc.stop))
                elif com.is_bool_indexer(loc):
                    if self.lexsort_depth == 0:
                        warnings.warn('dropping on a non-lexsorted multi-index'
                                      ' without a level parameter may impact '
                                      'performance.',
                                      PerformanceWarning,
                                      stacklevel=3)
                    # convert boolean mask to integer positions
                    loc = loc.nonzero()[0]
                    inds.extend(loc)
                else:
                    msg = 'unsupported indexer of type {}'.format(type(loc))
                    raise AssertionError(msg)
            except KeyError:
                if errors != 'ignore':
                    raise
        return self.delete(inds)
def _drop_from_level(self, codes, level):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi.swaplevel(0, 1)
MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
    def _get_codes_for_sorting(self):
        """
        we categorizing our codes by using the
        available categories (all, not just observed)
        excluding any missing ones (-1); this is in preparation
        for sorting, where we need to disambiguate that -1 is not
        a valid value
        """
        from pandas.core.arrays import Categorical
        def cats(level_codes):
            # categories 0..max(code); empty codes give an empty range
            return np.arange(np.array(level_codes).max() + 1 if
                             len(level_codes) else 0,
                             dtype=level_codes.dtype)
        # ordered Categoricals so that sorting respects code order
        return [Categorical.from_codes(level_codes, cats(level_codes),
                                       ordered=True)
                for level_codes in self.codes]
    def sortlevel(self, level=0, ascending=True, sort_remaining=True):
        """
        Sort MultiIndex at the requested level. The result will respect the
        original ordering of the associated factor at that level.
        Parameters
        ----------
        level : list-like, int or str, default 0
            If a string is given, must be a name of the level
            If list-like must be names or ints of levels.
        ascending : boolean, default True
            False to sort in descending order
            Can also be a list to specify a directed ordering
        sort_remaining : sort by the remaining levels after level
        Returns
        -------
        sorted_index : pd.MultiIndex
            Resulting index
        indexer : np.ndarray
            Indices of output values in original index
        """
        from pandas.core.sorting import indexer_from_factorized
        if isinstance(level, (compat.string_types, int)):
            level = [level]
        level = [self._get_level_number(lev) for lev in level]
        sortorder = None
        # we have a directed ordering via ascending
        if isinstance(ascending, list):
            if not len(level) == len(ascending):
                raise ValueError("level must have same length as ascending")
            from pandas.core.sorting import lexsort_indexer
            indexer = lexsort_indexer([self.codes[lev] for lev in level],
                                      orders=ascending)
        # level ordering
        else:
            codes = list(self.codes)
            shape = list(self.levshape)
            # partition codes and shape
            # (``lev - i`` compensates for earlier pops shifting positions)
            primary = tuple(codes.pop(lev - i) for i, lev in enumerate(level))
            primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
            if sort_remaining:
                # NOTE(review): primary/primshp are duplicated here; repeated
                # sort keys do not change the lexsort order — confirm intent
                primary += primary + tuple(codes)
                primshp += primshp + tuple(shape)
            else:
                sortorder = level[0]
            indexer = indexer_from_factorized(primary, primshp,
                                              compress=False)
            if not ascending:
                indexer = indexer[::-1]
        indexer = ensure_platform_int(indexer)
        new_codes = [level_codes.take(indexer) for level_codes in self.codes]
        new_index = MultiIndex(codes=new_codes, levels=self.levels,
                               names=self.names, sortorder=sortorder,
                               verify_integrity=False)
        return new_index, indexer
    def _convert_listlike_indexer(self, keyarr, kind=None):
        """
        Parameters
        ----------
        keyarr : list-like
            Indexer to convert.
        Returns
        -------
        tuple (indexer, keyarr)
            indexer is an ndarray or None if cannot convert
            keyarr are tuple-safe keys
        """
        indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
            keyarr, kind=kind)
        # are we indexing a specific level
        if indexer is None and len(keyarr) and not isinstance(keyarr[0],
                                                              tuple):
            # non-tuple keys address level 0 only
            level = 0
            _, indexer = self.reindex(keyarr, level=level)
            # take all
            if indexer is None:
                indexer = np.arange(len(self))
            # any key missing from level 0 is an error
            check = self.levels[0].get_indexer(keyarr)
            mask = check == -1
            if mask.any():
                raise KeyError('%s not in index' % keyarr[mask])
        return indexer, keyarr
    @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
    def get_indexer(self, target, method=None, limit=None, tolerance=None):
        method = missing.clean_reindex_fill_method(method)
        target = ensure_index(target)
        # empty indexer
        if is_list_like(target) and not len(target):
            return ensure_platform_int(np.array([]))
        if not isinstance(target, MultiIndex):
            try:
                target = MultiIndex.from_tuples(target)
            except (TypeError, ValueError):
                # let's instead try with a straight Index
                if method is None:
                    return Index(self.values).get_indexer(target,
                                                          method=method,
                                                          limit=limit,
                                                          tolerance=tolerance)
        if not self.is_unique:
            raise ValueError('Reindexing only valid with uniquely valued '
                             'Index objects')
        if method == 'pad' or method == 'backfill':
            if tolerance is not None:
                raise NotImplementedError("tolerance not implemented yet "
                                          'for MultiIndex')
            indexer = self._engine.get_indexer(target, method, limit)
        elif method == 'nearest':
            raise NotImplementedError("method='nearest' not implemented yet "
                                      'for MultiIndex; see GitHub issue 9365')
        else:
            # exact matching; -1 marks targets not found in self
            indexer = self._engine.get_indexer(target)
        return ensure_platform_int(indexer)
    @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
    def get_indexer_non_unique(self, target):
        # delegate entirely to the base Index implementation; only the
        # docstring (via Appender) is MultiIndex-specific
        return super(MultiIndex, self).get_indexer_non_unique(target)
    def reindex(self, target, method=None, level=None, limit=None,
                tolerance=None):
        """
        Create index with target's values (move/add/delete values as necessary)
        Returns
        -------
        new_index : pd.MultiIndex
            Resulting index
        indexer : np.ndarray or None
            Indices of output values in original index
        """
        # GH6552: preserve names when reindexing to non-named target
        # (i.e. neither Index nor Series).
        preserve_names = not hasattr(target, 'names')
        if level is not None:
            if method is not None:
                raise TypeError('Fill method not supported if level passed')
            # GH7774: preserve dtype/tz if target is empty and not an Index.
            # target may be an iterator
            target = ibase._ensure_has_len(target)
            if len(target) == 0 and not isinstance(target, Index):
                # empty non-Index target: adopt the level's type/attributes
                idx = self.levels[level]
                attrs = idx._get_attributes_dict()
                attrs.pop('freq', None)  # don't preserve freq
                target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
                                               **attrs)
            else:
                target = ensure_index(target)
            target, indexer, _ = self._join_level(target, level, how='right',
                                                  return_indexers=True,
                                                  keep_order=False)
        else:
            target = ensure_index(target)
            if self.equals(target):
                # identical labels: no reordering needed
                indexer = None
            else:
                if self.is_unique:
                    indexer = self.get_indexer(target, method=method,
                                               limit=limit,
                                               tolerance=tolerance)
                else:
                    raise ValueError("cannot handle a non-unique multi-index!")
        if not isinstance(target, MultiIndex):
            if indexer is None:
                target = self
            elif (indexer >= 0).all():
                target = self.take(indexer)
            else:
                # hopefully?
                target = MultiIndex.from_tuples(target)
        if (preserve_names and target.nlevels == self.nlevels and
                target.names != self.names):
            target = target.copy(deep=False)
            target.names = self.names
        return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
    def slice_locs(self, start=None, end=None, step=None, kind=None):
        """
        For an ordered MultiIndex, compute the slice locations for input
        labels.
        The input labels can be tuples representing partial levels, e.g. for a
        MultiIndex with 3 levels, you can pass a single value (corresponding to
        the first level), or a 1-, 2-, or 3-tuple.
        Parameters
        ----------
        start : label or tuple, default None
            If None, defaults to the beginning
        end : label or tuple
            If None, defaults to the end
        step : int or None
            Slice step
        kind : string, optional, default None
            Passed through to the parent implementation.
        Returns
        -------
        (start, end) : (int, int)
        Notes
        -----
        This method only works if the MultiIndex is properly lexsorted. So,
        if only the first 2 levels of a 3-level MultiIndex are lexsorted,
        you can only pass two levels to ``.slice_locs``.
        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
        ...                                names=['A', 'B'])
        Get the slice locations from the beginning of 'b' in the first level
        until the end of the multiindex:
        >>> mi.slice_locs(start='b')
        (1, 4)
        Like above, but stop at the end of 'b' in the first level and 'f' in
        the second level:
        >>> mi.slice_locs(start='b', end=('b', 'f'))
        (1, 3)
        See Also
        --------
        MultiIndex.get_loc : Get location for a label or a tuple of labels.
        MultiIndex.get_locs : Get location for a label/slice/list/mask or a
            sequence of such.
        """
        # This function adds nothing to its parent implementation (the magic
        # happens in get_slice_bound method), but it adds meaningful doc.
        return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
    def _partial_tup_index(self, tup, side='left'):
        """
        Binary-search the position of a (possibly partial) key tuple,
        narrowing a [start, end) window one level at a time.  Requires the
        index to be lexsorted at least ``len(tup)`` levels deep.
        """
        if len(tup) > self.lexsort_depth:
            raise UnsortedIndexError(
                'Key length (%d) was greater than MultiIndex'
                ' lexsort depth (%d)' %
                (len(tup), self.lexsort_depth))
        n = len(tup)
        start, end = 0, len(self)
        zipped = zip(tup, self.levels, self.codes)
        for k, (lab, lev, labs) in enumerate(zipped):
            # only search the codes inside the current window
            section = labs[start:end]
            if lab not in lev:
                if not lev.is_type_compatible(lib.infer_dtype([lab],
                                                              skipna=False)):
                    raise TypeError('Level type mismatch: %s' % lab)
                # short circuit
                # label absent from the level: find where its code would sit
                loc = lev.searchsorted(lab, side=side)
                if side == 'right' and loc >= 0:
                    loc -= 1
                return start + section.searchsorted(loc, side=side)
            idx = lev.get_loc(lab)
            if k < n - 1:
                # narrow the window and descend to the next level
                end = start + section.searchsorted(idx, side='right')
                start = start + section.searchsorted(idx, side='left')
            else:
                # last provided level: return the requested bound
                return start + section.searchsorted(idx, side=side)
    def get_loc(self, key, method=None):
        """
        Get location for a label or a tuple of labels as an integer, slice or
        boolean mask.
        Parameters
        ----------
        key : label or tuple of labels (one for each level)
        method : None
        Returns
        -------
        loc : int, slice object or boolean mask
            If the key is past the lexsort depth, the return may be a
            boolean mask array, otherwise it is always a slice or int.
        Examples
        ---------
        >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
        >>> mi.get_loc('b')
        slice(1, 3, None)
        >>> mi.get_loc(('b', 'e'))
        1
        Notes
        ------
        The key cannot be a slice, list of same-level labels, a boolean mask,
        or a sequence of such. If you want to use those, use
        :meth:`MultiIndex.get_locs` instead.
        See Also
        --------
        Index.get_loc : The get_loc method for (single-level) index.
        MultiIndex.slice_locs : Get slice location given start label(s) and
            end label(s).
        MultiIndex.get_locs : Get location for a label/slice/list/mask or a
            sequence of such.
        """
        if method is not None:
            raise NotImplementedError('only the default get_loc method is '
                                      'currently supported for MultiIndex')
        def _maybe_to_slice(loc):
            """convert integer indexer to boolean mask or slice if possible"""
            if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
                return loc
            loc = lib.maybe_indices_to_slice(loc, len(self))
            if isinstance(loc, slice):
                return loc
            # non-contiguous positions: express as a boolean mask
            mask = np.empty(len(self), dtype='bool')
            mask.fill(False)
            mask[loc] = True
            return mask
        if not isinstance(key, tuple):
            # single label addresses level 0 only
            loc = self._get_level_indexer(key, level=0)
            return _maybe_to_slice(loc)
        keylen = len(key)
        if self.nlevels < keylen:
            raise KeyError('Key length ({0}) exceeds index depth ({1})'
                           ''.format(keylen, self.nlevels))
        if keylen == self.nlevels and self.is_unique:
            # full key on a unique index: hash-engine lookup
            return self._engine.get_loc(key)
        # -- partial selection or non-unique index
        # break the key into 2 parts based on the lexsort_depth of the index;
        # the first part returns a continuous slice of the index; the 2nd part
        # needs linear search within the slice
        i = self.lexsort_depth
        lead_key, follow_key = key[:i], key[i:]
        start, stop = (self.slice_locs(lead_key, lead_key)
                       if lead_key else (0, len(self)))
        if start == stop:
            raise KeyError(key)
        if not follow_key:
            return slice(start, stop)
        warnings.warn('indexing past lexsort depth may impact performance.',
                      PerformanceWarning, stacklevel=10)
        loc = np.arange(start, stop, dtype='int64')
        for i, k in enumerate(follow_key, len(lead_key)):
            # linear filter within the slice, one unsorted level at a time
            mask = self.codes[i][loc] == self.levels[i].get_loc(k)
            if not mask.all():
                loc = loc[mask]
        if not len(loc):
            raise KeyError(key)
        return (_maybe_to_slice(loc) if len(loc) != stop - start else
                slice(start, stop))
    def get_loc_level(self, key, level=0, drop_level=True):
        """
        Get both the location for the requested label(s) and the
        resulting sliced index.
        Parameters
        ----------
        key : label or sequence of labels
        level : int/level name or list thereof, optional
        drop_level : bool, default True
            if ``False``, the resulting index will not drop any level.
        Returns
        -------
        loc : A 2-tuple where the elements are:
              Element 0: int, slice object or boolean array
              Element 1: The resulting sliced multiindex/index. If the key
              contains all levels, this will be ``None``.
        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
        ...                                names=['A', 'B'])
        >>> mi.get_loc_level('b')
        (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
        >>> mi.get_loc_level('e', level='B')
        (array([False,  True, False], dtype=bool),
        Index(['b'], dtype='object', name='A'))
        >>> mi.get_loc_level(['b', 'e'])
        (1, None)
        See Also
        ---------
        MultiIndex.get_loc : Get location for a label or a tuple of labels.
        MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                              sequence of such.
        """
        def maybe_droplevels(indexer, levels, drop_level):
            # slice self by indexer, then drop the fully-specified levels
            if not drop_level:
                return self[indexer]
            # kludgearound
            orig_index = new_index = self[indexer]
            levels = [self._get_level_number(i) for i in levels]
            for i in sorted(levels, reverse=True):
                try:
                    new_index = new_index.droplevel(i)
                except ValueError:
                    # no dropping here
                    return orig_index
            return new_index
        if isinstance(level, (tuple, list)):
            # multiple levels given: intersect the per-level indexers
            if len(key) != len(level):
                raise AssertionError('Key for location must have same '
                                     'length as number of levels')
            result = None
            for lev, k in zip(level, key):
                loc, new_index = self.get_loc_level(k, level=lev)
                if isinstance(loc, slice):
                    # normalize to a boolean mask so results can be AND-ed
                    mask = np.zeros(len(self), dtype=bool)
                    mask[loc] = True
                    loc = mask
                result = loc if result is None else result & loc
            return result, maybe_droplevels(result, level, drop_level)
        level = self._get_level_number(level)
        # kludge for #1796
        if isinstance(key, list):
            key = tuple(key)
        if isinstance(key, tuple) and level == 0:
            try:
                # the whole tuple may itself be a level-0 label
                if key in self.levels[0]:
                    indexer = self._get_level_indexer(key, level=level)
                    new_index = maybe_droplevels(indexer, [0], drop_level)
                    return indexer, new_index
            except TypeError:
                pass
            if not any(isinstance(k, slice) for k in key):
                # partial selection
                # optionally get indexer to avoid re-calculation
                def partial_selection(key, indexer=None):
                    if indexer is None:
                        indexer = self.get_loc(key)
                    ilevels = [i for i in range(len(key))
                               if key[i] != slice(None, None)]
                    return indexer, maybe_droplevels(indexer, ilevels,
                                                     drop_level)
                if len(key) == self.nlevels and self.is_unique:
                    # Complete key in unique index -> standard get_loc
                    return (self._engine.get_loc(key), None)
                else:
                    return partial_selection(key)
            else:
                # tuple containing slices: resolve level by level
                indexer = None
                for i, k in enumerate(key):
                    if not isinstance(k, slice):
                        k = self._get_level_indexer(k, level=i)
                        if isinstance(k, slice):
                            # everything
                            if k.start == 0 and k.stop == len(self):
                                k = slice(None, None)
                        else:
                            k_index = k
                    if isinstance(k, slice):
                        if k == slice(None, None):
                            continue
                        else:
                            raise TypeError(key)
                    if indexer is None:
                        indexer = k_index
                    else:  # pragma: no cover
                        indexer &= k_index
                if indexer is None:
                    indexer = slice(None, None)
                ilevels = [i for i in range(len(key))
                           if key[i] != slice(None, None)]
                return indexer, maybe_droplevels(indexer, ilevels, drop_level)
        else:
            # scalar (or tuple at level != 0) keyed on a single level
            indexer = self._get_level_indexer(key, level=level)
            return indexer, maybe_droplevels(indexer, [level], drop_level)
    def _get_level_indexer(self, key, level=0, indexer=None):
        # return an indexer, boolean array or a slice showing where the key is
        # in the totality of values
        # if the indexer is provided, then use this
        level_index = self.levels[level]
        level_codes = self.codes[level]
        def convert_indexer(start, stop, step, indexer=indexer,
                            codes=level_codes):
            # given the inputs and the codes/indexer, compute an indexer set
            # if we have a provided indexer, then this need not consider
            # the entire labels set
            r = np.arange(start, stop, step)
            if indexer is not None and len(indexer) != len(codes):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for the
                # entire set) otherwise this is wasteful so we only need to
                # examine locations that are in this set the only magic here is
                # that the result are the mappings to the set that we have
                # selected
                from pandas import Series
                mapper = Series(indexer)
                indexer = codes.take(ensure_platform_int(indexer))
                result = Series(Index(indexer).isin(r).nonzero()[0])
                m = result.map(mapper)._ndarray_values
            else:
                # full-length boolean mask over all codes
                m = np.zeros(len(codes), dtype=bool)
                m[np.in1d(codes, r,
                          assume_unique=Index(codes).is_unique)] = True
            return m
        if isinstance(key, slice):
            # handle a slice, returning a slice if we can
            # otherwise a boolean indexer
            try:
                if key.start is not None:
                    start = level_index.get_loc(key.start)
                else:
                    start = 0
                if key.stop is not None:
                    stop = level_index.get_loc(key.stop)
                else:
                    stop = len(level_index) - 1
                step = key.step
            except KeyError:
                # we have a partial slice (like looking up a partial date
                # string)
                start = stop = level_index.slice_indexer(key.start, key.stop,
                                                         key.step, kind='loc')
                step = start.step
            if isinstance(start, slice) or isinstance(stop, slice):
                # we have a slice for start and/or stop
                # a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was a string sliced)
                return convert_indexer(start.start, stop.stop, step)
            elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need to have like semantics here to right
                # searching as when we are using a slice
                # so include the stop+1 (so we include stop)
                return convert_indexer(start, stop + 1, step)
            else:
                # sorted, so can return slice object -> view
                i = level_codes.searchsorted(start, side='left')
                j = level_codes.searchsorted(stop, side='right')
                return slice(i, j, step)
        else:
            code = level_index.get_loc(key)
            if level > 0 or self.lexsort_depth == 0:
                # Desired level is not sorted
                locs = np.array(level_codes == code, dtype=bool, copy=False)
                if not locs.any():
                    # The label is present in self.levels[level] but unused:
                    raise KeyError(key)
                return locs
            # level 0 of a lexsorted index: codes are sorted, binary search
            i = level_codes.searchsorted(code, side='left')
            j = level_codes.searchsorted(code, side='right')
            if i == j:
                # The label is present in self.levels[level] but unused:
                raise KeyError(key)
            return slice(i, j)
    def get_locs(self, seq):
        """
        Get location for a given label/slice/list/mask or a sequence of such as
        an array of integers.
        Parameters
        ----------
        seq : label/slice/list/mask or a sequence of such
           You should use one of the above for each level.
           If a level should not be used, set it to ``slice(None)``.
        Returns
        -------
        locs : array of integers suitable for passing to iloc
        Examples
        ---------
        >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
        >>> mi.get_locs('b')
        array([1, 2], dtype=int64)
        >>> mi.get_locs([slice(None), ['e', 'f']])
        array([1, 2], dtype=int64)
        >>> mi.get_locs([[True, False, True], slice('e', 'f')])
        array([2], dtype=int64)
        See Also
        --------
        MultiIndex.get_loc : Get location for a label or a tuple of labels.
        MultiIndex.slice_locs : Get slice location given start label(s) and
                                end label(s).
        """
        from .numeric import Int64Index
        # must be lexsorted to at least as many levels
        true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
        if true_slices and true_slices[-1] >= self.lexsort_depth:
            raise UnsortedIndexError('MultiIndex slicing requires the index '
                                     'to be lexsorted: slicing on levels {0}, '
                                     'lexsort depth {1}'
                                     .format(true_slices, self.lexsort_depth))
        # indexer
        # this is the list of all values that we want to select
        n = len(self)
        indexer = None
        def _convert_to_indexer(r):
            # return an indexer
            # normalize slices and boolean masks to integer positions
            if isinstance(r, slice):
                m = np.zeros(n, dtype=bool)
                m[r] = True
                r = m.nonzero()[0]
            elif com.is_bool_indexer(r):
                if len(r) != n:
                    raise ValueError("cannot index with a boolean indexer "
                                     "that is not the same length as the "
                                     "index")
                r = r.nonzero()[0]
            return Int64Index(r)
        def _update_indexer(idxr, indexer=indexer):
            # AND the running indexer with this level's selection
            if indexer is None:
                indexer = Index(np.arange(n))
            if idxr is None:
                return indexer
            return indexer & idxr
        for i, k in enumerate(seq):
            if com.is_bool_indexer(k):
                # a boolean indexer, must be the same length!
                k = np.asarray(k)
                indexer = _update_indexer(_convert_to_indexer(k),
                                          indexer=indexer)
            elif is_list_like(k):
                # a collection of labels to include from this level (these
                # are or'd)
                indexers = None
                for x in k:
                    try:
                        idxrs = _convert_to_indexer(
                            self._get_level_indexer(x, level=i,
                                                    indexer=indexer))
                        indexers = (idxrs if indexers is None
                                    else indexers | idxrs)
                    except KeyError:
                        # ignore not founds
                        continue
                if indexers is not None:
                    indexer = _update_indexer(indexers, indexer=indexer)
                else:
                    # no matches we are done
                    return Int64Index([])._ndarray_values
            elif com.is_null_slice(k):
                # empty slice
                indexer = _update_indexer(None, indexer=indexer)
            elif isinstance(k, slice):
                # a slice, include BOTH of the labels
                indexer = _update_indexer(_convert_to_indexer(
                    self._get_level_indexer(k, level=i, indexer=indexer)),
                    indexer=indexer)
            else:
                # a single label
                indexer = _update_indexer(_convert_to_indexer(
                    self.get_loc_level(k, level=i, drop_level=False)[0]),
                    indexer=indexer)
        # empty indexer
        if indexer is None:
            return Int64Index([])._ndarray_values
        return indexer._ndarray_values
    def truncate(self, before=None, after=None):
        """
        Slice index between two labels / tuples, return new MultiIndex
        Parameters
        ----------
        before : label or tuple, can be partial. Default None
            None defaults to start
        after : label or tuple, can be partial. Default None
            None defaults to end
        Returns
        -------
        truncated : MultiIndex
        """
        # NOTE(review): a falsy label (e.g. 0 or '') skips this validation
        # because of the truthiness test — confirm whether that is intended
        if after and before and after < before:
            raise ValueError('after < before')
        # positions within level 0, and within the full index
        i, j = self.levels[0].slice_locs(before, after)
        left, right = self.slice_locs(before, after)
        new_levels = list(self.levels)
        new_levels[0] = new_levels[0][i:j]
        new_codes = [level_codes[left:right] for level_codes in self.codes]
        # re-base level-0 codes after trimming its level values
        new_codes[0] = new_codes[0] - i
        return MultiIndex(levels=new_levels, codes=new_codes,
                          verify_integrity=False)
    def equals(self, other):
        """
        Determines if two MultiIndex objects have the same labeling information
        (the levels themselves do not necessarily have to be the same)
        See Also
        --------
        equal_levels
        """
        if self.is_(other):
            return True
        if not isinstance(other, Index):
            return False
        if not isinstance(other, MultiIndex):
            # compare against a flat index via the materialized tuples
            other_vals = com.values_from_object(ensure_index(other))
            return array_equivalent(self._ndarray_values, other_vals)
        if self.nlevels != other.nlevels:
            return False
        if len(self) != len(other):
            return False
        for i in range(self.nlevels):
            # compare observed (non-missing) values level by level
            self_codes = self.codes[i]
            self_codes = self_codes[self_codes != -1]
            self_values = algos.take_nd(np.asarray(self.levels[i]._values),
                                        self_codes, allow_fill=False)
            other_codes = other.codes[i]
            other_codes = other_codes[other_codes != -1]
            other_values = algos.take_nd(
                np.asarray(other.levels[i]._values),
                other_codes, allow_fill=False)
            # since we use NaT both datetime64 and timedelta64
            # we can have a situation where a level is typed say
            # timedelta64 in self (IOW it has other values than NaT)
            # but types datetime64 in other (where its all NaT)
            # but these are equivalent
            if len(self_values) == 0 and len(other_values) == 0:
                continue
            if not array_equivalent(self_values, other_values):
                return False
        return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
    def union(self, other):
        """
        Form the union of two MultiIndex objects, sorting if possible
        Parameters
        ----------
        other : MultiIndex or array / Index of tuples
        Returns
        -------
        Index
        >>> index.union(index2)
        """
        self._assert_can_do_setop(other)
        other, result_names = self._convert_can_do_setop(other)
        # trivial cases: nothing to add
        if len(other) == 0 or self.equals(other):
            return self
        # dedupe across the concatenated tuple values (keeps sorted order)
        uniq_tuples = lib.fast_unique_multiple([self._ndarray_values,
                                                other._ndarray_values])
        return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
                                      names=result_names)
    def intersection(self, other):
        """
        Form the intersection of two MultiIndex objects, sorting if possible
        Parameters
        ----------
        other : MultiIndex or array / Index of tuples
        Returns
        -------
        Index
        """
        self._assert_can_do_setop(other)
        other, result_names = self._convert_can_do_setop(other)
        if self.equals(other):
            return self
        # tuple-level set intersection on the materialized values
        self_tuples = self._ndarray_values
        other_tuples = other._ndarray_values
        uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
        if len(uniq_tuples) == 0:
            # empty result keeps the original levels (with no codes)
            return MultiIndex(levels=self.levels,
                              codes=[[]] * self.nlevels,
                              names=result_names, verify_integrity=False)
        else:
            return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
                                          names=result_names)
    def difference(self, other, sort=True):
        """
        Compute sorted set difference of two MultiIndex objects
        Parameters
        ----------
        other : MultiIndex
        sort : bool, default True
            Sort the resulting MultiIndex if possible
            .. versionadded:: 0.24.0
        Returns
        -------
        diff : MultiIndex
        """
        self._assert_can_do_setop(other)
        other, result_names = self._convert_can_do_setop(other)
        if len(other) == 0:
            return self
        if self.equals(other):
            # identical indexes: empty difference, preserving levels
            return MultiIndex(levels=self.levels,
                              codes=[[]] * self.nlevels,
                              names=result_names, verify_integrity=False)
        this = self._get_unique_index()
        indexer = this.get_indexer(other)
        # keep only positions actually found in ``other``
        indexer = indexer.take((indexer != -1).nonzero()[0])
        label_diff = np.setdiff1d(np.arange(this.size), indexer,
                                  assume_unique=True)
        difference = this.values.take(label_diff)
        if sort:
            difference = sorted(difference)
        if len(difference) == 0:
            return MultiIndex(levels=[[]] * self.nlevels,
                              codes=[[]] * self.nlevels,
                              names=result_names, verify_integrity=False)
        else:
            return MultiIndex.from_tuples(difference, sortorder=0,
                                          names=result_names)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = '> 1 ndim Categorical are not supported at this time'
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
msg = ('Setting {cls} dtype to anything other than object '
'is not supported').format(cls=self.__class__)
raise TypeError(msg)
elif copy is True:
return self._shallow_copy()
return self
    def _convert_can_do_setop(self, other):
        """
        Coerce ``other`` to a MultiIndex for a set operation and decide the
        result names (kept only when both sides agree).
        """
        result_names = self.names
        if not hasattr(other, 'names'):
            if len(other) == 0:
                # empty input: an empty MultiIndex with matching level count
                other = MultiIndex(levels=[[]] * self.nlevels,
                                   codes=[[]] * self.nlevels,
                                   verify_integrity=False)
            else:
                msg = 'other must be a MultiIndex or a list of tuples'
                try:
                    other = MultiIndex.from_tuples(other)
                except TypeError:
                    raise TypeError(msg)
        else:
            result_names = self.names if self.names == other.names else None
        return other, result_names
    def insert(self, loc, item):
        """
        Make new MultiIndex inserting new item at location
        Parameters
        ----------
        loc : int
        item : tuple
            Must be same length as number of levels in the MultiIndex
        Returns
        -------
        new_index : Index
        """
        # Pad the key with empty strings if lower levels of the key
        # aren't specified:
        if not isinstance(item, tuple):
            item = (item, ) + ('', ) * (self.nlevels - 1)
        elif len(item) != self.nlevels:
            raise ValueError('Item must have length equal to number of '
                             'levels.')
        new_levels = []
        new_codes = []
        for k, level, level_codes in zip(item, self.levels, self.codes):
            if k not in level:
                # have to insert into level
                # must insert at end otherwise you have to recompute all the
                # other codes
                lev_loc = len(level)
                level = level.insert(lev_loc, k)
            else:
                # value already in the level: reuse its existing code
                lev_loc = level.get_loc(k)
            new_levels.append(level)
            new_codes.append(np.insert(
                ensure_int64(level_codes), loc, lev_loc))
        return MultiIndex(levels=new_levels, codes=new_codes,
                          names=self.names, verify_integrity=False)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(levels=self.levels, codes=new_codes,
names=self.names, verify_integrity=False)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
level_codes = self.codes[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(level_codes), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(level_codes, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
| 35.880922 | 79 | 0.542635 |
795bfe868ed25e81d6ed87127acc685549736e52 | 841 | py | Python | captive_portal/ijson.py | 84KaliPleXon3/esp32-uPyPortal | 7e5130b94b372a975fd38777db8779b548f86231 | [
"Apache-2.0"
] | 38 | 2017-11-13T20:44:21.000Z | 2022-03-31T17:23:33.000Z | captive_portal/ijson.py | 84KaliPleXon3/esp32-uPyPortal | 7e5130b94b372a975fd38777db8779b548f86231 | [
"Apache-2.0"
] | 1 | 2020-05-15T17:15:48.000Z | 2020-05-15T17:15:48.000Z | captive_portal/ijson.py | 84KaliPleXon3/esp32-uPyPortal | 7e5130b94b372a975fd38777db8779b548f86231 | [
"Apache-2.0"
] | 8 | 2017-12-28T02:57:39.000Z | 2021-10-03T10:26:50.000Z | type_gen = type((lambda: (yield))())
def conv(it):
yield '"'
#yield from it
for i in it:
yield i.replace('"', "'").replace("\n", "\\n")
yield '"'
def idumps(obj):
if isinstance(obj, str):
yield '"'
yield obj
yield '"'
elif isinstance(obj, int):
yield str(obj)
elif isinstance(obj, dict):
comma = False
yield '{'
for k, v in obj.items():
if comma:
yield ',\n'
yield from idumps(k)
yield ': '
yield from idumps(v)
comma = True
yield '}'
elif isinstance(obj, type_gen):
yield from conv(obj)
else:
assert 0, repr(obj)
if __name__ == "__main__":
def templ():
yield from ["1", "2", "3"]
print(list(idumps({"foo": templ()})))
| 21.025 | 54 | 0.472057 |
795bff7b03c692d0e1921b6a7f5c8058bf41de28 | 16,476 | py | Python | cirq/testing/circuit_compare.py | kunalq/Cirq | e73c9bef672e83143ab04e7f169988149055d630 | [
"Apache-2.0"
] | null | null | null | cirq/testing/circuit_compare.py | kunalq/Cirq | e73c9bef672e83143ab04e7f169988149055d630 | [
"Apache-2.0"
] | null | null | null | cirq/testing/circuit_compare.py | kunalq/Cirq | e73c9bef672e83143ab04e7f169988149055d630 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Iterable, Optional, Sequence, TYPE_CHECKING, Type, cast
from collections import defaultdict
import itertools
import numpy as np
import sympy
from cirq import circuits, ops, linalg, protocols, EigenGate
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import Dict, List
def highlight_text_differences(actual: str, expected: str) -> str:
diff = ""
for actual_line, desired_line in itertools.zip_longest(
actual.splitlines(), expected.splitlines(),
fillvalue=""):
diff += "".join(a if a == b else "█"
for a, b in itertools.zip_longest(
actual_line, desired_line, fillvalue="")) + "\n"
return diff
def _measurement_subspaces(
measured_qubits: Iterable[ops.Qid],
n_qubits: int
) -> Sequence[Sequence[int]]:
"""Computes subspaces associated with projective measurement.
The function computes a partitioning of the computational basis such
that the subspace spanned by each partition corresponds to a distinct
measurement outcome. In particular, if all qubits are measured then
2**n singleton partitions are returned. If no qubits are measured then
a single partition consisting of all basis states is returned.
Args:
measured_qubits: Qubits subject to measurement
n_qubits: Total number of qubits in circuit
Returns:
Sequence of subspaces where each subspace is a sequence of
computational basis states in order corresponding to qubit_order
"""
# Consider projective measurement in the computational basis on a subset
# of qubits. Each projection operator associated with the measurement is
# uniquely determined by its range, here called a measurement subspace.
#
# Suppose that qubit q is not measured. Then computational basis states
# whose indices have binary representations that differ only at position
# q belong to the same measurement subspace. Generally, if computational
# basis states a and b are such that
#
# a & measurement_mask == b & measurement_mask
#
# then a and b belong to the same measurement subspace. In this case the
# value of the expression on either side in the formula above is the
# computational basis state in the measurement subspace containing
# a and b which has the lowest index.
measurement_mask = 0
for i, _ in enumerate(sorted(measured_qubits)):
measurement_mask |= 1 << i
# Keyed by computational basis state with lowest index.
measurement_subspaces = defaultdict(list) # type: Dict[int, List[int]]
computational_basis = range(1 << n_qubits)
for basis_state in computational_basis:
subspace_key = basis_state & measurement_mask
measurement_subspaces[subspace_key].append(basis_state)
subspaces = list(measurement_subspaces.values())
# Verify this is a partitioning (i.e. full coverage, no overlaps).
assert sorted(itertools.chain(*subspaces)) == list(computational_basis)
return subspaces
def assert_circuits_with_terminal_measurements_are_equivalent(
actual: circuits.Circuit,
reference: circuits.Circuit,
atol: float) -> None:
"""Determines if two circuits have equivalent effects.
The circuits can contain measurements, but the measurements must be at the
end of the circuit. Circuits are equivalent if, for all possible inputs,
their outputs (classical bits for lines terminated with measurement and
qubits for lines without measurement) are observationally indistinguishable
up to a tolerance. Note that under this definition of equivalence circuits
that differ solely in the overall phase of the post-measurement state of
measured qubits are considered equivalent.
For example, applying an extra Z gate to an unmeasured qubit changes the
effect of a circuit. But inserting a Z gate operation just before a
measurement does not.
Args:
actual: The circuit that was actually computed by some process.
reference: A circuit with the correct function.
atol: Absolute error tolerance.
"""
measured_qubits_actual = {qubit
for op in actual.all_operations()
if protocols.is_measurement(op)
for qubit in op.qubits}
measured_qubits_reference = {qubit
for op in reference.all_operations()
if protocols.is_measurement(op)
for qubit in op.qubits}
assert actual.are_all_measurements_terminal()
assert reference.are_all_measurements_terminal()
assert measured_qubits_actual == measured_qubits_reference
all_qubits = actual.all_qubits().union(reference.all_qubits())
matrix_actual = actual.unitary(qubits_that_should_be_present=all_qubits)
matrix_reference = reference.unitary(
qubits_that_should_be_present=all_qubits)
n_qubits = len(all_qubits)
n = matrix_actual.shape[0]
assert n == 1 << n_qubits
assert matrix_actual.shape == matrix_reference.shape == (n, n)
# Consider the action of the two circuits Ca and Cr on state |x>:
#
# |ya> = Ca|x>
# |yr> = Cr|x>
#
# Ca and Cr are equivalent according to the definition above iff
# for each |x>:
# - probability of each measurement outcome is the same for |ya>
# and |yr> (across measured qubits),
# - amplitudes of each post-measurement state are the same for |ya>
# and |yr> except perhaps for an overall phase factor.
#
# These conditions are satisfied iff the matrices of the two circuits
# are identical except perhaps for an overall phase factor for each
# rectangular block spanning rows corresponding to the measurement
# subspaces and all columns.
#
# Note two special cases of the rule above:
# - if no qubits are measured then the circuits are equivalent if
# their matrices are identical except for the global phase factor,
# - if all qubits are measured then the circuits are equivalent if
# their matrices differ by a diagonal unitary factor.
subspaces = _measurement_subspaces(measured_qubits_actual, n_qubits)
for subspace in subspaces:
block_actual = matrix_actual[subspace, :]
block_reference = matrix_reference[subspace, :]
assert linalg.allclose_up_to_global_phase(
block_actual, block_reference, atol=atol), (
"Circuit's effect differs from the reference circuit.\n"
'\n'
'Diagram of actual circuit:\n'
'{}\n'
'\n'
'Diagram of reference circuit with desired function:\n'
'{}\n'.format(actual, reference))
def assert_same_circuits(actual: circuits.Circuit,
expected: circuits.Circuit,
) -> None:
"""Asserts that two circuits are identical, with a descriptive error.
Args:
actual: A circuit computed by some code under test.
expected: The circuit that should have been computed.
"""
assert actual == expected, (
"Actual circuit differs from expected circuit.\n"
"\n"
"Diagram of actual circuit:\n"
"{}\n"
"\n"
"Diagram of expected circuit:\n"
"{}\n"
"\n"
"Index of first differing moment:\n"
"{}\n"
"\n"
"Full repr of actual circuit:\n"
"{!r}\n"
"\n"
"Full repr of expected circuit:\n"
"{!r}\n").format(actual,
expected,
_first_differing_moment_index(actual, expected),
actual,
expected)
def _first_differing_moment_index(circuit1: circuits.Circuit,
circuit2: circuits.Circuit) -> Optional[int]:
for i, (m1, m2) in enumerate(itertools.zip_longest(circuit1, circuit2)):
if m1 != m2:
return i
return None # coverage: ignore
def assert_has_diagram(
actual: circuits.Circuit,
desired: str,
**kwargs) -> None:
"""Determines if a given circuit has the desired text diagram.
Args:
actual: The circuit that was actually computed by some process.
desired: The desired text diagram as a string. Newlines at the
beginning and whitespace at the end are ignored.
**kwargs: Keyword arguments to be passed to actual.to_text_diagram().
"""
actual_diagram = actual.to_text_diagram(**kwargs).lstrip("\n").rstrip()
desired_diagram = desired.lstrip("\n").rstrip()
assert actual_diagram == desired_diagram, (
"Circuit's text diagram differs from the desired diagram.\n"
'\n'
'Diagram of actual circuit:\n'
'{}\n'
'\n'
'Desired text diagram:\n'
'{}\n'
'\n'
'Highlighted differences:\n'
'{}\n'.format(actual_diagram, desired_diagram,
highlight_text_differences(actual_diagram,
desired_diagram))
)
def assert_has_consistent_apply_unitary(
val: Any,
*,
qubit_count: Optional[int] = None,
atol: float=1e-8) -> None:
"""Tests whether a value's _apply_unitary_ is correct.
Contrasts the effects of the value's `_apply_unitary_` with the
matrix returned by the value's `_unitary_` method.
Args:
val: The value under test. Should have a `__pow__` method.
qubit_count: Usually inferred. The number of qubits the value acts on.
This argument isn't needed if the gate has a unitary matrix or
implements `cirq.SingleQubitGate`/`cirq.TwoQubitGate`/
`cirq.ThreeQubitGate`.
atol: Absolute error tolerance.
"""
expected = protocols.unitary(val, default=None)
qubit_counts = [
qubit_count,
expected.shape[0].bit_length() - 1 if expected is not None else None,
_infer_qubit_count(val)
]
qubit_counts = [e for e in qubit_counts if e is not None]
if not qubit_counts:
raise NotImplementedError(
'Failed to infer qubit count of <{!r}>. Specify it.'.format(
val))
assert len(set(qubit_counts)) == 1, (
'Inconsistent qubit counts from different methods: {}'.format(
qubit_counts))
n = cast(int, qubit_counts[0])
eye = np.eye(2 << n, dtype=np.complex128).reshape((2,) * (2 * n + 2))
actual = protocols.apply_unitary(
unitary_value=val,
args=protocols.ApplyUnitaryArgs(
target_tensor=eye,
available_buffer=np.ones_like(eye) * float('nan'),
axes=list(range(1, n + 1))),
default=None)
# If you don't have a unitary, you shouldn't be able to apply a unitary.
if expected is None:
assert actual is None
else:
expected = np.kron(np.eye(2), expected)
# If you applied a unitary, it should match the one you say you have.
if actual is not None:
np.testing.assert_allclose(
actual.reshape(2 << n, 2 << n),
expected,
atol=atol)
def assert_eigen_gate_has_consistent_apply_unitary(
eigen_gate_type: Type[EigenGate],
*,
exponents=(0, 1, -1, 0.5, 0.25, -0.5, 0.1, sympy.Symbol('s')),
global_shifts=(0, 0.5, -0.5, 0.1),
qubit_count: Optional[int] = None) -> None:
"""Tests whether an EigenGate type's _apply_unitary_ is correct.
Contrasts the effects of the gate's `_apply_unitary_` with the
matrix returned by the gate's `_unitary_` method, trying various values for
the gate exponent and global shift.
Args:
eigen_gate_type: The type of gate to test. The type must have an
__init__ method that takes an exponent and a global_shift.
exponents: The exponents to try. Defaults to a variety of special and
arbitrary angles, as well as a parameterized angle (a symbol).
global_shifts: The global shifts to try. Defaults to a variety of
special angles.
qubit_count: The qubit count to use for the gate. This argument isn't
needed if the gate has a unitary matrix or implements
`cirq.SingleQubitGate`/`cirq.TwoQubitGate`/`cirq.ThreeQubitGate`; it
will be inferred.
"""
for exponent in exponents:
for shift in global_shifts:
assert_has_consistent_apply_unitary(
eigen_gate_type(exponent=exponent, global_shift=shift),
qubit_count=qubit_count)
def assert_has_consistent_apply_unitary_for_various_exponents(
val: Any,
*,
exponents=(0, 1, -1, 0.5, 0.25, -0.5, 0.1, sympy.Symbol('s')),
qubit_count: Optional[int] = None) -> None:
"""Tests whether a value's _apply_unitary_ is correct.
Contrasts the effects of the value's `_apply_unitary_` with the
matrix returned by the value's `_unitary_` method. Attempts this after
attempting to raise the value to several exponents.
Args:
val: The value under test. Should have a `__pow__` method.
exponents: The exponents to try. Defaults to a variety of special and
arbitrary angles, as well as a parameterized angle (a symbol). If
the value's `__pow__` returns `NotImplemented` for any of these,
they are skipped.
qubit_count: A minimum qubit count for the test system. This argument
isn't needed if the gate has a unitary matrix or implements
`cirq.SingleQubitGate`/`cirq.TwoQubitGate`/`cirq.ThreeQubitGate`; it
will be inferred.
"""
for exponent in exponents:
gate = protocols.pow(val, exponent, default=None)
if gate is not None:
assert_has_consistent_apply_unitary(
gate,
qubit_count=qubit_count)
def assert_has_consistent_qid_shape(val: Any,
qubit_count: Optional[int] = None) -> None:
"""Tests whether a value's `_qid_shape_` and `_num_qubits_` are correct and
consistent.
Verifies that the entries in the shape are all positive integers and the
length of shape equals `_num_qubits_` (and also equals `len(qubits)` if
`val` has `qubits`.
Args:
val: The value under test. Should have `_qid_shape_` and/or
`num_qubits_` methods. Can optionally have a `qubits` property.
qubit_count: The expected number of qubits val should use.
"""
default = (-1,)
qid_shape = protocols.qid_shape(val, default)
num_qubits = protocols.num_qubits(val, default)
if qid_shape is default or num_qubits is default:
return # Nothing to check
assert all(d >= 1 for d in qid_shape), (
f'Not all entries in qid_shape are positive: {qid_shape}')
assert len(qid_shape) == num_qubits, (
f'Length of qid_shape and num_qubits disagree: {qid_shape}, '
f'{num_qubits}')
if qubit_count is not None:
assert qubit_count == num_qubits, (
f'Expected qubits and num_qubits disagree: {qubit_count}, '
f'{num_qubits}')
infer_qubit_count = _infer_qubit_count(val)
if infer_qubit_count is not None:
assert infer_qubit_count == num_qubits, (
f'Length of qubits and num_qubits disagree: {infer_qubit_count}, '
f'{num_qubits}')
def _infer_qubit_count(val: Any) -> Optional[int]:
if isinstance(val, ops.Operation):
return len(val.qubits)
if isinstance(val, ops.Gate):
return protocols.num_qubits(val)
return None
| 40.283619 | 80 | 0.648762 |
795bffe3054510f4c4f128afafb8674a3073f45d | 3,083 | py | Python | TorNodesMap/TorNodesMap/settings/base.py | spantons/tor-nodes-map | 0bdd6cb2e747c3a6a5aa311214fa8683d0453bd3 | [
"MIT"
] | null | null | null | TorNodesMap/TorNodesMap/settings/base.py | spantons/tor-nodes-map | 0bdd6cb2e747c3a6a5aa311214fa8683d0453bd3 | [
"MIT"
] | null | null | null | TorNodesMap/TorNodesMap/settings/base.py | spantons/tor-nodes-map | 0bdd6cb2e747c3a6a5aa311214fa8683d0453bd3 | [
"MIT"
] | null | null | null | """
Django settings for TorNodesMap project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-*f_*m8^6e75sxr_0qv^y#^mngw1hnqu=_c+it#br(+=avms7a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'TorNodesMap.base',
'TorNodesMap.tor',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TorNodesMap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TorNodesMap.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
| 26.808696 | 91 | 0.705806 |
795c00c1d6ee5bc34b7bf9a5d547e19d69aa857a | 1,494 | py | Python | identidock/app/identidock.py | moskalenk/docker_practice | 10a8a742b86530e45df851cf30cce97e16054ee0 | [
"Apache-2.0"
] | null | null | null | identidock/app/identidock.py | moskalenk/docker_practice | 10a8a742b86530e45df851cf30cce97e16054ee0 | [
"Apache-2.0"
] | null | null | null | identidock/app/identidock.py | moskalenk/docker_practice | 10a8a742b86530e45df851cf30cce97e16054ee0 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, Response, request
import requests
import hashlib
import redis
import html
app = Flask(__name__)
cache = redis.StrictRedis(host='redis', port=6379, db=0)
default_name = 'Konstantin Moskalenko'
salt = "UNIQUE_SALT"
@app.route('/', methods=['GET', 'POST'])
def mainpage():
name = default_name
if request.method == 'POST':
# name = request.form['name']
name = html.escape(request.form['name'], quote=True)
salted_name = salt + name
name_hash = hashlib.sha256(salted_name.encode()).hexdigest()
header = '<html><head><title>Identidock</title></head><body>'
body = '''<form method="POST">
Hello <input type="text" name="name" value="{0}">
<input type="submit" value="submit">
</form>
<p>You look like a:
<img src="/monster/{1}"/>
'''.format(name, name_hash)
footer = '</body></html>'
return header + body + footer
@app.route('/monster/<name>')
def get_identicon(name):
name = html.escape(name, quote=True)
image = cache.get(name)
if image is None:
print("Cache miss !!!!!!!!!!!!!!!!!!!!!!!!")
r = requests.get('http://dnmonster:8080/monster/' + name + '?size=80')
image = r.content
cache.set(name, image)
return Response(image, mimetype='image/png')
@app.route('/kostya')
def test():
return 'HEY'
if __name__ == '__main__':
app.run(host='0.0.0.0') | 25.322034 | 78 | 0.579652 |
795c0228f27f2d105eeb1d6de06874e70c193e44 | 5,565 | py | Python | graphsage0/supervised_models.py | sapphireh/HOPE | 72d697f8d96a7aa0e29898c66f5654e79cff410d | [
"MIT"
] | null | null | null | graphsage0/supervised_models.py | sapphireh/HOPE | 72d697f8d96a7aa0e29898c66f5654e79cff410d | [
"MIT"
] | null | null | null | graphsage0/supervised_models.py | sapphireh/HOPE | 72d697f8d96a7aa0e29898c66f5654e79cff410d | [
"MIT"
] | null | null | null | import tensorflow as tf
import graphsage0.models as models
import graphsage0.layers as layers
from graphsage0.aggregators import MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, SeqAggregator, GCNAggregator, MLPAggregator
flags = tf.app.flags
FLAGS = flags.FLAGS
class SupervisedGraphsage(models.SampleAndAggregate):
"""Implementation of supervised GraphSAGE."""
def __init__(self, num_classes,
placeholders, features, adj, degrees,
layer_infos, concat=True, aggregator_type="mean",
model_size="small", sigmoid_loss=False, identity_dim=0,
**kwargs):
'''
Args:
- placeholders: Stanford TensorFlow placeholder object.
- features: Numpy array with node features.
- adj: Numpy array with adjacency lists (padded with random re-samples)
- degrees: Numpy array with node degrees.
- layer_infos: List of SAGEInfo namedtuples that describe the parameters of all
the recursive layers. See SAGEInfo definition above.
- concat: whether to concatenate during recursive iterations
- aggregator_type: how to aggregate neighbor information
- model_size: one of "small" and "big"
- sigmoid_loss: Set to true if nodes can belong to multiple classes
'''
models.GeneralizedModel.__init__(self, **kwargs)
if aggregator_type == "mean":
self.aggregator_cls = MeanAggregator
elif aggregator_type == "mlp":
self.aggregator_cls = MLPAggregator
elif aggregator_type == "seq":
self.aggregator_cls = SeqAggregator
elif aggregator_type == "meanpool":
self.aggregator_cls = MeanPoolingAggregator
elif aggregator_type == "maxpool":
self.aggregator_cls = MaxPoolingAggregator
elif aggregator_type == "gcn":
self.aggregator_cls = GCNAggregator
else:
raise Exception("Unknown aggregator: ", self.aggregator_cls)
# get info from placeholders...
self.inputs1 = placeholders["batch"]
self.model_size = model_size
self.adj_info = adj
if identity_dim > 0:
self.embeds = tf.get_variable("node_embeddings", [adj.get_shape().as_list()[0], identity_dim])
else:
self.embeds = None
if features is None:
if identity_dim == 0:
raise Exception("Must have a positive value for identity feature dimension if no input features given.")
self.features = self.embeds
else:
self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)
if not self.embeds is None:
self.features = tf.concat([self.embeds, self.features], axis=1)
self.degrees = degrees
self.concat = concat
self.num_classes = num_classes
self.sigmoid_loss = sigmoid_loss
self.dims = [(0 if features is None else features.shape[1]) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])
self.batch_size = placeholders["batch_size"]
self.placeholders = placeholders
self.layer_infos = layer_infos
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
def build(self):
samples1, support_sizes1 = self.sample(self.inputs1, self.layer_infos)
num_samples = [layer_info.num_samples for layer_info in self.layer_infos]
self.outputs1, self.aggregators = self.aggregate(samples1, [self.features], self.dims, num_samples,
support_sizes1, concat=self.concat, model_size=self.model_size)
dim_mult = 2 if self.concat else 1
self.outputs1 = tf.nn.l2_normalize(self.outputs1, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.num_classes,
dropout=self.placeholders['dropout'],
act=lambda x : x)
# TF graph management
self.node_preds = self.node_pred(self.outputs1)
self._loss()
grads_and_vars = self.optimizer.compute_gradients(self.loss)
clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
for grad, var in grads_and_vars]
self.grad, _ = clipped_grads_and_vars[0]
self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
self.preds = self.predict()
def _loss(self):
# Weight decay loss
for aggregator in self.aggregators:
for var in aggregator.vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
for var in self.node_pred.vars.values():
self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# classification loss
if self.sigmoid_loss:
self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.node_preds,
labels=self.placeholders['labels']))
else:
self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.node_preds,
labels=self.placeholders['labels']))
tf.summary.scalar('loss', self.loss)
def predict(self):
    """Map node logits to probabilities.

    Uses element-wise sigmoid in multi-label mode and softmax otherwise,
    mirroring the cross-entropy chosen in `_loss`.
    """
    activation = tf.nn.sigmoid if self.sigmoid_loss else tf.nn.softmax
    return activation(self.node_preds)
| 43.139535 | 139 | 0.638455 |
795c02680c57dd9f14795bbab7a91b9f72ffb8c0 | 48,612 | py | Python | src/python/grpcio/grpc_core_dependencies.py | LaudateCorpus1/grpc-SwiftPM | 4049fc878c23a3dab401ceedce3fd34148feb0b0 | [
"Apache-2.0"
] | 3 | 2020-10-07T14:20:21.000Z | 2021-10-08T14:49:17.000Z | src/python/grpcio/grpc_core_dependencies.py | LaudateCorpus1/grpc-SwiftPM | 4049fc878c23a3dab401ceedce3fd34148feb0b0 | [
"Apache-2.0"
] | 1 | 2021-03-04T02:33:56.000Z | 2021-03-04T02:33:56.000Z | src/python/grpcio/grpc_core_dependencies.py | LaudateCorpus1/grpc-SwiftPM | 4049fc878c23a3dab401ceedce3fd34148feb0b0 | [
"Apache-2.0"
] | 5 | 2021-02-19T09:46:00.000Z | 2022-03-13T17:33:34.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/service_config.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/xds/xds_api.cc',
'src/core/ext/filters/client_channel/xds/xds_bootstrap.cc',
'src/core/ext/filters/client_channel/xds/xds_channel_secure.cc',
'src/core/ext/filters/client_channel/xds/xds_client.cc',
'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
'src/core/ext/filters/workarounds/workaround_utils.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/authority.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/filter.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/address.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/base.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/discovery.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/eds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/lds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/listener.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/rds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route/route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route/route_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/srds.upb.c',
'src/core/ext/upb-generated/envoy/config/filter/accesslog/v2/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v2/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v2/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.c',
'src/core/ext/upb-generated/envoy/type/http.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/string.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v2/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/range.upb.c',
'src/core/ext/upb-generated/envoy/type/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v2/custom_tag.upb.c',
'src/core/ext/upb-generated/gogoproto/gogo.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/lib/avl/avl.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_args.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/compression/stream_compression.cc',
'src/core/lib/compression/stream_compression_gzip.cc',
'src/core/lib/compression/stream_compression_identity.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tls_pthread.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_uv.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/sockaddr_utils.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_uv.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_uv.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/timer_uv.cc',
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/credentials_metadata.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/target_authority_table.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_secure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/metadata.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
'third_party/abseil-cpp/absl/base/dynamic_annotations.cc',
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh/check.c',
'third_party/boringssl-with-bazel/src/crypto/dh/dh.c',
'third_party/boringssl-with-bazel/src/crypto/dh/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/is_fips.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_r2x.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
'third_party/boringssl-with-bazel/src/third_party/fiat/curve25519.c',
'third_party/cares/cares/ares__close_sockets.c',
'third_party/cares/cares/ares__get_hostent.c',
'third_party/cares/cares/ares__read_line.c',
'third_party/cares/cares/ares__timeval.c',
'third_party/cares/cares/ares_cancel.c',
'third_party/cares/cares/ares_create_query.c',
'third_party/cares/cares/ares_data.c',
'third_party/cares/cares/ares_destroy.c',
'third_party/cares/cares/ares_expand_name.c',
'third_party/cares/cares/ares_expand_string.c',
'third_party/cares/cares/ares_fds.c',
'third_party/cares/cares/ares_free_hostent.c',
'third_party/cares/cares/ares_free_string.c',
'third_party/cares/cares/ares_getenv.c',
'third_party/cares/cares/ares_gethostbyaddr.c',
'third_party/cares/cares/ares_gethostbyname.c',
'third_party/cares/cares/ares_getnameinfo.c',
'third_party/cares/cares/ares_getopt.c',
'third_party/cares/cares/ares_getsock.c',
'third_party/cares/cares/ares_init.c',
'third_party/cares/cares/ares_library_init.c',
'third_party/cares/cares/ares_llist.c',
'third_party/cares/cares/ares_mkquery.c',
'third_party/cares/cares/ares_nowarn.c',
'third_party/cares/cares/ares_options.c',
'third_party/cares/cares/ares_parse_a_reply.c',
'third_party/cares/cares/ares_parse_aaaa_reply.c',
'third_party/cares/cares/ares_parse_mx_reply.c',
'third_party/cares/cares/ares_parse_naptr_reply.c',
'third_party/cares/cares/ares_parse_ns_reply.c',
'third_party/cares/cares/ares_parse_ptr_reply.c',
'third_party/cares/cares/ares_parse_soa_reply.c',
'third_party/cares/cares/ares_parse_srv_reply.c',
'third_party/cares/cares/ares_parse_txt_reply.c',
'third_party/cares/cares/ares_platform.c',
'third_party/cares/cares/ares_process.c',
'third_party/cares/cares/ares_query.c',
'third_party/cares/cares/ares_search.c',
'third_party/cares/cares/ares_send.c',
'third_party/cares/cares/ares_strcasecmp.c',
'third_party/cares/cares/ares_strdup.c',
'third_party/cares/cares/ares_strerror.c',
'third_party/cares/cares/ares_strsplit.c',
'third_party/cares/cares/ares_timeout.c',
'third_party/cares/cares/ares_version.c',
'third_party/cares/cares/ares_writev.c',
'third_party/cares/cares/bitncmp.c',
'third_party/cares/cares/inet_net_pton.c',
'third_party/cares/cares/inet_ntop.c',
'third_party/cares/cares/windows_port.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/port.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/upb.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
| 58.568675 | 118 | 0.751893 |
795c02851651b577c4fffe9c35e6c48459bc79bb | 1,375 | py | Python | demos/face_recognition_demo/python/utils.py | mzegla/open_model_zoo | 092576b4c598c1e301ebc38ad74b323972e54f3e | [
"Apache-2.0"
] | null | null | null | demos/face_recognition_demo/python/utils.py | mzegla/open_model_zoo | 092576b4c598c1e301ebc38ad74b323972e54f3e | [
"Apache-2.0"
] | null | null | null | demos/face_recognition_demo/python/utils.py | mzegla/open_model_zoo | 092576b4c598c1e301ebc38ad74b323972e54f3e | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from model_api.models.utils import resize_image
def crop(frame, roi):
    """Return the sub-image of `frame` covered by `roi`, clipped to the frame.

    `roi.position` is the (x, y) top-left corner and `roi.size` the (w, h)
    extent; both are float arrays and are truncated to int pixel indices
    before clipping to the frame bounds.
    """
    limits = [frame.shape[1], frame.shape[0]]  # max (x, y) in pixel coords
    top_left = np.clip(roi.position.astype(int), [0, 0], limits)
    bottom_right = np.clip((roi.position + roi.size).astype(int), [0, 0], limits)
    return frame[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
def cut_rois(frame, rois):
    """Crop `frame` once per ROI and return the crops in input order."""
    crops = []
    for region in rois:
        crops.append(crop(frame, region))
    return crops
def resize_input(image, target_shape, nchw_layout):
    """Resize an HWC image to the model's input shape and match its layout.

    :param image: source image in HWC order (as produced by OpenCV).
    :param target_shape: 4-D model input shape, NCHW or NHWC.
    :param nchw_layout: True when `target_shape` is (N, C, H, W).
    """
    if nchw_layout:
        _, _, height, width = target_shape
    else:
        _, height, width, _ = target_shape
    result = resize_image(image, (width, height))
    if nchw_layout:
        result = result.transpose((2, 0, 1))  # HWC -> CHW
    return result.reshape(target_shape)
| 31.25 | 73 | 0.697455 |
795c02e2074c79f69dd5d8cdaf47c1d53a6e271f | 124 | py | Python | environment/lib/python3.8/site-packages/numba/dataflow.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | null | null | null | environment/lib/python3.8/site-packages/numba/dataflow.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 2 | 2021-05-11T16:00:55.000Z | 2021-08-23T20:45:22.000Z | environment/lib/python3.8/site-packages/numba/dataflow.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | null | null | null | import sys
from numba.core.errors import _MovedModule
sys.modules[__name__] = _MovedModule(locals(), "numba.core.dataflow")
| 31 | 69 | 0.798387 |
795c03c06c7af682405e750d521994a211a1a5e3 | 1,912 | py | Python | verpy/pybin3/statistics.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2021-04-23T04:08:58.000Z | 2021-04-23T04:08:58.000Z | verpy/pybin3/statistics.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | null | null | null | verpy/pybin3/statistics.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | null | null | null |
import logs
from module_class import support_set
def help_main(Env):
    """Count instances of every cell/module type across the design and report.

    `Env.Modules` maps module names to parsed module objects; counts are
    accumulated over all modules and dumped by reportInstances.
    """
    Types = {}  # type name -> total instance count across all modules
    for Module in Env.Modules:
        Mod = Env.Modules[Module]
        logs.log_info('scan %s' % Module)
        gatherInstances(Mod,Types)
    reportInstances(Types)
    # connectivity(Mod)
def connectivity(Mod):
    """Build the global CONNS net -> endpoints map for `Mod`, then log it."""
    buildConns(Mod)
    reportConns()
def reportConns():
    """Log every net in CONNS (sorted by name) with its endpoint count.

    Fanout lists longer than 10 entries are truncated to the first 10.
    """
    for net in sorted(CONNS):
        endpoints = CONNS[net]
        shown = endpoints[:10] if len(endpoints) > 10 else endpoints
        logs.log_info('%5d %30s : %s' % (len(endpoints), net, shown))
def buildConns(Mod):
    """Record every endpoint touching every net of `Mod` into CONNS.

    Two endpoint sources: module ports (attributed to the module itself)
    and instance pins (attributed to the instance and its type).
    """
    # Module-level ports: a net whose declared direction string mentions a
    # port keyword is connected to the module under that keyword.
    for net in Mod.nets:
        direction, _ = Mod.nets[net]
        for kind in ('input', 'output', 'inout'):
            if kind in direction:
                connect(net, kind, Mod.Module, Mod.Module)
    # Instance pins: every net in the support set of a pin's connection
    # expression gets an endpoint (pin, instance type, instance name).
    for inst in Mod.insts:
        obj = Mod.insts[inst]
        for pin in obj.conns:
            for net in support_set(obj.conns[pin]):
                connect(net, pin, obj.Type, inst)
CONNS = {}  # net name -> list of (pin, type, instance) endpoints, in insert order


def connect(Net, Pin, Type, Inst):
    """Append the endpoint (`Pin`, `Type`, `Inst`) to `Net`'s entry in CONNS."""
    CONNS.setdefault(Net, []).append((Pin, Type, Inst))
def gatherInstances(Mod, Types):
    """Accumulate a Type -> instance-count histogram for `Mod` into `Types`.

    `Types` is mutated in place so counts aggregate across repeated calls.
    """
    for obj in Mod.insts.values():
        Types[obj.Type] = Types.get(obj.Type, 0) + 1
def reportInstances(Types):
    """Log and dump (to mod.csv) instance counts, most frequent type first.

    Each output row carries: rank, count for that type, running total of
    instances so far, and the type name.
    """
    ranked = sorted(((count, typ) for typ, count in Types.items()), reverse=True)
    running_total = 0
    with open('mod.csv', 'w') as fout:
        for rank, (count, typ) in enumerate(ranked):
            running_total += count
            logs.log_info('%5d %6d / %6d %s' % (rank, count, running_total, typ))
            fout.write('%d,%d,%d,%s,,,\n' % (rank, count, running_total, typ))
| 26.191781 | 80 | 0.556485 |
795c05e5afe0fc6b291eb748bd54f011a91bbf5a | 70 | py | Python | indecisive/game/client/scenes/util/__init__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 15 | 2020-04-17T12:02:14.000Z | 2022-03-16T03:01:34.000Z | indecisive/game/client/scenes/util/__init__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 9 | 2020-04-25T01:57:16.000Z | 2020-04-29T11:42:34.000Z | indecisive/game/client/scenes/util/__init__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | [
"MIT"
] | 55 | 2020-04-17T12:01:11.000Z | 2021-12-28T10:14:02.000Z | from . colours import *
from . keys import *
from . elements import *
| 17.5 | 24 | 0.7 |
795c06bbe010dded93a723cc9bdd657d56a6dd61 | 9,518 | py | Python | whoosh/query/positional.py | rokartnaz/WhooshSearch | 084b6b7bf3a2b44d57eaf045f07f094a1048646f | [
"BSD-2-Clause"
] | 3 | 2017-03-04T14:19:51.000Z | 2017-03-28T08:39:44.000Z | whoosh/query/positional.py | rokartnaz/WhooshSearch | 084b6b7bf3a2b44d57eaf045f07f094a1048646f | [
"BSD-2-Clause"
] | null | null | null | whoosh/query/positional.py | rokartnaz/WhooshSearch | 084b6b7bf3a2b44d57eaf045f07f094a1048646f | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
from WhooshSearch.whoosh import matching
from WhooshSearch.whoosh.analysis import Token
from WhooshSearch.whoosh.compat import u
from WhooshSearch.whoosh.query import qcore, terms, compound
class Sequence(compound.CompoundQuery):
    """Matches documents containing a list of sub-queries in adjacent
    positions.

    This object has no sanity check to prevent you from using queries in
    different fields.
    """

    JOINT = " NEAR "
    intersect_merge = True

    def __init__(self, subqueries, slop=1, ordered=True, boost=1.0):
        """
        :param subqueries: a list of :class:`whoosh.query.Query` objects to
            match in sequence.
        :param slop: the maximum difference in position allowed between the
            subqueries.
        :param ordered: if True, the position differences between subqueries
            must be positive (that is, each subquery in the list must appear
            after the previous subquery in the document).
        :param boost: a boost factor to add to the score of documents matching
            this query.
        """
        compound.CompoundQuery.__init__(self, subqueries, boost=boost)
        self.slop = slop
        self.ordered = ordered

    def __eq__(self, other):
        # NOTE(review): slop/ordered are excluded from equality, but slop IS
        # folded into __hash__ below, so "equal" objects with different slop
        # hash differently -- confirm this asymmetry is intended upstream.
        return (other and type(self) is type(other)
                and self.subqueries == other.subqueries
                and self.boost == other.boost)

    def __repr__(self):
        return "%s(%r, slop=%d, boost=%f)" % (self.__class__.__name__,
                                              self.subqueries, self.slop,
                                              self.boost)

    def __hash__(self):
        # XOR-fold the hashes of slop, boost and every subquery.
        h = hash(self.slop) ^ hash(self.boost)
        for q in self.subqueries:
            h ^= hash(q)
        return h

    def normalize(self):
        # Because the subqueries are in sequence, we can't do the fancy merging
        # that CompoundQuery does
        return self.__class__([q.normalize() for q in self.subqueries],
                              self.slop, self.ordered, self.boost)

    def _and_query(self):
        # Size estimates below approximate this sequence as a plain AND.
        return compound.And(self.subqueries)

    def estimate_size(self, ixreader):
        return self._and_query().estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self._and_query().estimate_min_size(ixreader)

    def _matcher(self, subs, searcher, context):
        from WhooshSearch.whoosh.query.spans import SpanNear

        # Tell the sub-queries this matcher will need the current match to get
        # spans
        context = context.set(needs_current=True)
        m = self._tree_matcher(subs, SpanNear.SpanNearMatcher, searcher,
                               context, None, slop=self.slop,
                               ordered=self.ordered)
        return m
class Ordered(Sequence):
    """Matches documents containing a list of sub-queries in the given order.
    """

    JOINT = " BEFORE "

    def _matcher(self, subs, searcher, context):
        from WhooshSearch.whoosh.query.spans import SpanBefore

        # Unlike Sequence._matcher, this passes no slop/ordered kwargs and
        # does not set needs_current on the context.
        return self._tree_matcher(subs, SpanBefore._Matcher, searcher,
                                  context, None)
class Phrase(qcore.Query):
    """Matches documents containing a given phrase."""

    def __init__(self, fieldname, words, slop=1, boost=1.0, char_ranges=None):
        """
        :param fieldname: the field to search.
        :param words: a list of words (unicode strings) in the phrase.
        :param slop: the number of words allowed between each "word" in the
            phrase; the default of 1 means the phrase must match exactly.
        :param boost: a boost factor that to apply to the raw score of
            documents matched by this query.
        :param char_ranges: if a Phrase object is created by the query parser,
            it will set this attribute to a list of (startchar, endchar) pairs
            corresponding to the words in the phrase
        """
        self.fieldname = fieldname
        self.words = words
        self.slop = slop
        self.boost = boost
        self.char_ranges = char_ranges

    def __eq__(self, other):
        # char_ranges (parser bookkeeping) does not take part in equality.
        return (other and self.__class__ is other.__class__
                and self.fieldname == other.fieldname
                and self.words == other.words
                and self.slop == other.slop
                and self.boost == other.boost)

    def __repr__(self):
        return "%s(%r, %r, slop=%s, boost=%f)" % (self.__class__.__name__,
                                                  self.fieldname, self.words,
                                                  self.slop, self.boost)

    def __unicode__(self):
        return u('%s:"%s"') % (self.fieldname, u(" ").join(self.words))

    # Python 2/3 compatibility: str() delegates to the unicode rendering.
    __str__ = __unicode__

    def __hash__(self):
        # XOR-fold field, slop, boost and every word of the phrase.
        h = hash(self.fieldname) ^ hash(self.slop) ^ hash(self.boost)
        for w in self.words:
            h ^= hash(w)
        return h

    def has_terms(self):
        return True

    def terms(self, phrases=False):
        # Phrase words are only yielded when the caller explicitly asks for
        # phrase terms and this query is bound to a field.
        if phrases and self.field():
            for word in self.words:
                yield (self.field(), word)

    def tokens(self, boost=1.0):
        char_ranges = self.char_ranges
        startchar = endchar = None
        for i, word in enumerate(self.words):
            if char_ranges:
                startchar, endchar = char_ranges[i]

            yield Token(fieldname=self.fieldname, text=word,
                        boost=boost * self.boost, startchar=startchar,
                        endchar=endchar, chars=True)

    def normalize(self):
        if not self.words:
            return qcore.NullQuery
        if len(self.words) == 1:
            # A one-word phrase degenerates to a plain Term query.
            t = terms.Term(self.fieldname, self.words[0])
            if self.char_ranges:
                t.startchar, t.endchar = self.char_ranges[0]
            return t

        words = [w for w in self.words if w is not None]
        return self.__class__(self.fieldname, words, slop=self.slop,
                              boost=self.boost, char_ranges=self.char_ranges)

    def replace(self, fieldname, oldtext, newtext):
        # NOTE(review): copy.copy is shallow, so q.words aliases self.words
        # and the substitution below also mutates the original query's word
        # list -- confirm this is intended.
        q = copy.copy(self)
        if q.fieldname == fieldname:
            for i, word in enumerate(q.words):
                if word == oldtext:
                    q.words[i] = newtext
        return q

    def _and_query(self):
        # Used only for index-size estimates: approximate the phrase as an
        # AND of its words.
        return compound.And([terms.Term(self.fieldname, word)
                             for word in self.words])

    def estimate_size(self, ixreader):
        return self._and_query().estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self._and_query().estimate_min_size(ixreader)

    def matcher(self, searcher, context=None):
        from WhooshSearch.whoosh.query import Term, SpanNear2

        fieldname = self.fieldname
        if fieldname not in searcher.schema:
            return matching.NullMatcher()
        field = searcher.schema[fieldname]
        if not field.format or not field.format.supports("positions"):
            raise qcore.QueryError("Phrase search: %r field has no positions"
                                   % self.fieldname)

        # This local shadows the module-level `terms` import for the rest of
        # this method; only `Term` (imported above) is needed from here on.
        terms = []
        # Build a list of Term queries from the words in the phrase
        reader = searcher.reader()
        for word in self.words:
            try:
                word = field.to_bytes(word)
            except ValueError:
                return matching.NullMatcher()

            if (fieldname, word) not in reader:
                # Shortcut the query if one of the words doesn't exist.
                return matching.NullMatcher()
            terms.append(Term(fieldname, word))

        # Create the equivalent SpanNear2 query from the terms
        q = SpanNear2(terms, slop=self.slop, ordered=True, mindist=1)
        # Get the matcher
        m = q.matcher(searcher, context)

        if self.boost != 1.0:
            m = matching.WrappingMatcher(m, boost=self.boost)
        return m
| 38.072 | 79 | 0.624186 |
795c06bd9fee5706fb27bff1b92b8f53381659a1 | 359 | py | Python | conftest.py | RonnyPfannschmidt/apipkg | 277cfb45798870034d695f9761488cfd136e4cbd | [
"MIT"
] | null | null | null | conftest.py | RonnyPfannschmidt/apipkg | 277cfb45798870034d695f9761488cfd136e4cbd | [
"MIT"
] | null | null | null | conftest.py | RonnyPfannschmidt/apipkg | 277cfb45798870034d695f9761488cfd136e4cbd | [
"MIT"
] | null | null | null | import pathlib
import apipkg
LOCAL_APIPKG = pathlib.Path(__file__).parent.joinpath("src/apipkg/__init__.py")
# BUG FIX: apipkg.__file__ is a str, and a str never compares equal to a
# pathlib.Path (both __eq__ implementations return NotImplemented across the
# two types), so the original comparison was always False and INSTALL_TYPE
# could never report "editable".  Normalize both sides to Path first.
INSTALL_TYPE = "editable" if pathlib.Path(apipkg.__file__) == LOCAL_APIPKG else "full"


def pytest_report_header(startdir):
    """Show in pytest's header whether apipkg is an editable or full install."""
    return "apipkg {install_type} install version={version}".format(
        install_type=INSTALL_TYPE, version=apipkg.__version__
    )
| 27.615385 | 79 | 0.766017 |
795c081e68659400b3505e0bdac4c4e686c546b4 | 1,278 | py | Python | crowdevaluation/evaluation-results/Arabic/Editor/ar-editor-preprocessing.py | www2018-550/Submission550 | d707d0e6eed98097569e4d5e3ffc4aea27d17c1f | [
"Apache-2.0"
] | 1 | 2018-05-23T07:41:53.000Z | 2018-05-23T07:41:53.000Z | crowdevaluation/evaluation-results/Arabic/Editor/ar-editor-preprocessing.py | www2018-550/Submission550 | d707d0e6eed98097569e4d5e3ffc4aea27d17c1f | [
"Apache-2.0"
] | null | null | null | crowdevaluation/evaluation-results/Arabic/Editor/ar-editor-preprocessing.py | www2018-550/Submission550 | d707d0e6eed98097569e4d5e3ffc4aea27d17c1f | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import pandas
from math import isnan
import editdistance
import numpy
# NOTE: Python 2 script (uses `print` statements).
# Output file mapping generated short column ids back to their source prompts.
key_file = open('./processed/ar-editor-keys.csv', 'w')

# Load the raw crowd-evaluation export and drop fully empty columns.
df = pandas.read_csv('ar-editor.csv')
df = df.dropna(axis=1, how='all')
print 'Participants started: ' + str(len(df.index))

# Keep the participant id plus only the editor-task columns, identified by
# the Arabic task prompt embedded in each column header.
df = df[['Participant ID']+[i for i in df.columns if 'من فضلك قم بكتابة فقرة من الممكن أن تستخدم كأول فقرة في صفحة الويكيبيديا' in i]]
# Strip the full task prompt and the '-- -- -- --' separator from headers.
df.rename(columns=lambda x: x.replace('من فضلك قم بكتابة فقرة من الممكن أن تستخدم كأول فقرة في صفحة الويكيبيديا الخاصة بهذا الموضوع باستخدام المعلومات المعطاة لك فقط.', ''), inplace=True)
df.rename(columns=lambda x: x.replace('-- -- -- --', ''), inplace=True)
df.rename(columns=lambda x: x.strip(), inplace=True)

# Keep only rows with the id plus at least one answered task column.
df = df.dropna(thresh=2)
df = df.dropna(axis=1, how='all')
print 'Participants counted: ' + str(len(df.index))

# Rename each task column to a short id ('<c>_q') and record the mapping
# (sentence text and its source triples) in the key file.
c = 0
for column in df.columns:
    if not column == 'Participant ID':
        df.rename(columns={column: str(c) + '_q'}, inplace=True)
        tmp = column.split('-- --')
        sentence = tmp[0]
        triples = tmp[1:]
        key_file.write(str(c) + '_q' + '\t' + sentence + '\n')
        key_file.write(str(c) + '_q_triples' + '\t' + ';'.join(triples) + '\n')
        # NOTE(review): indentation was lost in this copy; the counter is
        # assumed to advance only for task columns -- confirm against repo.
        c += 1
df.to_csv('./processed/ar-editor.csv', encoding='utf-8') | 36.514286 | 187 | 0.643192 |
795c082501d6d40077fd1e033a88349521b68f1b | 8,716 | py | Python | Python Basics.py | Supertatortots/Python-Practice | a08db0ddbd90a4184c89dbbdc232d55e1d151a32 | [
"MIT"
] | null | null | null | Python Basics.py | Supertatortots/Python-Practice | a08db0ddbd90a4184c89dbbdc232d55e1d151a32 | [
"MIT"
] | null | null | null | Python Basics.py | Supertatortots/Python-Practice | a08db0ddbd90a4184c89dbbdc232d55e1d151a32 | [
"MIT"
] | null | null | null | # Comments in Python use the '#' symbol
''' Multi-line comments are denoted via three single or three double quotes
And are ended with the matching set of three single or double quotes '''
''' Let's start off by defining some variables
In Python, there's no `let`, `const`, or `var` keyword to declare variables
Python is also not statically typed, so you don't declare the variable type when assigning a variable
Variables assigned outside the scope of any functions have global scope '''
a = 1
b = 2
# Also note that expressions don't end with semicolons
# EXERCISE: Define a global variable PI and have it store as many digits of pi as you can muster
PI = 3.14
# FUNCTIONS
# Functions in Python are defined via the `def` keyword
# Also, there are no braces in Python; whitespace actually matters!
''' After closing the parentheses where function parameters go,
instead of an opening curly brace, a colon is used; '''
def example_function(new_input_to_append, a_list=None):
# Same thing with if statements; end them with a colon
if a_list == None:
a_list = []
a_list.append(new_input_to_append)
return a_list
# A function body is closed by a new line
''' Functions are invoked the same way as in JavaScript and C, with parens
Here, we're invoking our example function with the variable `c` you
defined earlier and nothing as the second argument, meaning the function
will utilize the default parameter that was given in the function definition '''
# NOTE: the string above mentions a variable `c`, but the call below
# actually passes the PI global defined earlier.
print("Our example function returns: ", example_function(PI))
# LOOPS
''' For loops in Python do not follow the C-style loops you are used to writing in
both JavaScript and C. Python opts for a much cleaner syntax with regards to loops. '''
# We iterate a specified number of times using the `range` function
for x in range(5):
    print(x)  # prints 0 through 4, one per line
# The body of the loop is denoted by indentation, so you need to delineate your whitespace correctly
# Given a list (arrays are called lists in Python), we iterate through it like so
fibonacci = [1, 1, 2, 3, 5, 8, 13]
for number in fibonacci:
    print(number)
# You can loop over an element along with its index using the `enumerate` function like so:
for index, number in enumerate(fibonacci):
    print(index, number)
# While loops work exactly the way you're used to working with them in JS and C
# Note however that ++ and -- are not valid ways of incrementing and decrementing in Python
count = 0
while count < 5:
    print(count)
    count += 1
''' EXERCISE: Loop through and print out all the even numbers from the numbers list given
below. Ensure that the numbers are printed in the same order in which they appear in the
numbers list. Don't print any numbers that come after 237 in the sequence. '''
numbers = [
    951, 402, 984, 651, 360, 69, 408, 319, 601, 485, 980, 507, 725, 547, 544,
    615, 83, 165, 141, 501, 263, 617, 865, 575, 219, 390, 984, 592, 236, 105, 942, 941,
    386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
    399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
    815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
    958, 609, 842, 451, 688, 753, 854, 685, 93, 857, 440, 380, 126, 721, 328, 753, 470,
    743, 527
]
# Your code here:
# Print evens in order; stop once 237 (itself odd, so never printed) is seen.
for number in numbers:
    if number % 2 == 0:
        print(number)
    if number == 237:
        break
# STRINGS
# Given a list of strings, we want to join them all together into one large string
colors = ['red', 'blue', 'green', 'yellow']
# DON'T do this:
result = ''
for s in colors:
    result += s
# This is extremely inefficient because strings in Python are immutable, unlike in JS and C
# We can't just mutate them willy-nilly
# Instead, join them using the built-in string `.join` method like so:
result = ''.join(colors)
print("Result of calling .join on colors list: ", result)
# If you want spaces between your substrings, you can do this:
result = ' '.join(colors)
print("Result of calling .join on colors list with spaces: ", result)
# Or if you want your substrings separated via commans:
result = ', '.join(colors)
print("Result of calling .join on colors list with commas: ", result)
# EXERCISE: Write a function to reverse an input string; feel free to look this up
def reverse_string(s):
    """Return *s* with its characters in reverse order."""
    return ''.join(reversed(s))
print("reverse_string function returns: ", reverse_string('hello world'))
# STRING FORMATTING
''' Python's `%` operator works like C's `sprintf` function. If you don't know what that does,
let's illustrate with an example: '''
name = 'David'
messages = 3
text = ('Hello %s, you have %i messages' %(name, messages))
print(text)
''' The `%s` means "insert a string here", and the `%i` means "convert an integer into a string and
insert here". '''
# DICTIONARIES
''' JavaScript objects are called dictionaries in Python, but they're both just implementations
of hash tables. You work with them exactly as how you work with JS objects. The one exception is
that using dot notation to access a dict value is not a thing in Python. Bracket notation must
be used. '''
''' Here's a neat little example of building a dict from two lists, one a list of keys,
the other a list of values, using the `zip` function to conglomerate these two lists '''
first_names = ['John', 'Eric', 'Terry', 'Michael']
last_names = ['Cleese', 'Idle', 'Gilliam', 'Pallor']
full_names = dict(zip(first_names, last_names))
print("full_names dict: ", full_names)
# full_names.John doesn't work; use full_names['John'] instead
# Dicts come with the `.keys` and `.values` methods you're used to on JS objects
print("full_names dict keys: ", full_names.keys())
print("full_names dict values: ", full_names.values())
# LISTS
''' Python lists, akin to JS arrays, pretty much also work as you're used to.
Not much to say here, except the way you find the length of a list is by using
the `len()` function instead of accessing a property on the list'''
print("The length of this list is: ", len([4,6,8,4,1,4,6,7,5,4,3,2,2,8,10]))
# COMPREHENSIONS
''' Comprehensions are one of the coolest features of Python. Most of the time you'll
use them with lists, so let's illustrate that first. '''
# We have the following code:
squares = []
for x in range(10):
    squares.append(x**2)
''' Looks fine. Does what you'd expect it to. In Python, though, this can be very
concisely one-lined via a list comprehension: '''
squares = [x**2 for x in range(10)]
print("Squares list comprehension returns: ", squares)
''' A list comprehension consists of brackets containing an expression followed by a
`for` clause, then zero or more `for` or `if` clauses. The result will be a new list
resulting from evaluating the expression in the context of the `for` and `if` clauses
which follow it. Another example: '''
stuff = [(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]
print("Stuff list comprehension returns: ", stuff)
''' The above list comprehension combines the elements of two lists into a new list if
elements at matching indexes do not match '''
# Comprehensions are only limited to lists; they work with dicts as well!
# The following comprehension builds a dict mapping each number to its square
# (note: keys are the numbers themselves, not square roots):
squares_dict = {x: x**2 for x in range(10)}
print("squares_dict list comprehension returns: ", squares_dict)
''' EXERCISE: Write a dict comprehension to populate a dict with all the letters of
the alphabet as values with their keys being their index in the alphabet: '''
from string import ascii_lowercase
# 1-based index -> letter, e.g. {1: 'a', 2: 'b', ...}
alphabet_dict = {index + 1: c for index, c in enumerate(ascii_lowercase)}
print("alphabet_dict list comprehension returns: ", alphabet_dict)
# LAMBDA FUNCTIONS
'''Anonymous functions in Python are called lambdas. They are denoted by the `lambda` keyword.
Here's a simple example: '''
f = lambda x, y : x + y
print("Lambda function `f` returns: ", f(1, 1))
''' Lambdas are most effective when passed to functions such as `map`, `filter`, and `reduce`.
Just like how these three functions accept callbacks that specify how you want each function
to behave, in Python, lambdas do the same thing as callbacks in this case.
Here's an example using the `filter` function which filters out the non-even fibonacci numbers: '''
fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = list(filter(lambda x: x % 2 == 0, fib))
print("Result of filtering out non-even fib numbers: ", result)
''' EXERCISE: Use `reduce` to find the maximum value in the given `ints` list.
But first, we need to import `reduce` from the `functools` module: '''
from functools import reduce
ints = [47, 11, 42, 102, 13]
# reduce keeps the larger value of each pair, leaving the maximum of the
# list (equivalent to the built-in max(ints)).
result = reduce(lambda a, b: a if a > b else b, ints)
# The original final line had dataset metadata fused onto it
# ("... result) | 38.061135 | 101 | ..."), which made it a syntax error.
print("Result of reducing a list to find the max: ", result)
795c08dd1c98f2247b61a508e5869227624d3e7d | 16,762 | py | Python | homeassistant/helpers/config_entry_oauth2_flow.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 1 | 2021-04-28T09:51:08.000Z | 2021-04-28T09:51:08.000Z | homeassistant/helpers/config_entry_oauth2_flow.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 60 | 2020-08-03T07:32:56.000Z | 2022-03-31T06:02:07.000Z | homeassistant/helpers/config_entry_oauth2_flow.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """Config Flow using OAuth2.
This module exists of the following parts:
- OAuth2 config flow which supports multiple OAuth2 implementations
- OAuth2 implementation that works with local provided client ID/secret
"""
from __future__ import annotations
from abc import ABC, ABCMeta, abstractmethod
import asyncio
import logging
import secrets
import time
from typing import Any, Awaitable, Callable, Dict, cast
from aiohttp import client, web
import async_timeout
import jwt
import voluptuous as vol
from yarl import URL
from homeassistant import config_entries
from homeassistant.components import http
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.network import NoURLAvailableError
from .aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)

# hass.data keys used by this module.
DATA_JWT_SECRET = "oauth2_jwt_secret"  # secret used to sign flow-state JWTs
DATA_VIEW_REGISTERED = "oauth2_view_reg"  # whether the callback view was registered
DATA_IMPLEMENTATIONS = "oauth2_impl"  # statically registered implementations
DATA_PROVIDERS = "oauth2_providers"  # dynamic implementation providers

AUTH_CALLBACK_PATH = "/auth/external/callback"
HEADER_FRONTEND_BASE = "HA-Frontend-Base"

# Margin (seconds) applied when deciding whether a token has expired, to
# tolerate clock skew between this host and the OAuth provider.
CLOCK_OUT_OF_SYNC_MAX_SEC = 20
class AbstractOAuth2Implementation(ABC):
    """Base class to abstract OAuth2 authentication."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of the implementation."""

    @property
    @abstractmethod
    def domain(self) -> str:
        """Domain that is providing the implementation."""

    @abstractmethod
    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize.

        Called when a config flow starts; must return the vendor URL where
        the user grants Home Assistant access. The implementation is
        responsible for getting notified once the user authorizes and for
        resuming the given config flow. Keep the notification handler
        light; heavy lifting belongs in async_resolve_external_data.

        Resume the flow with:

            await hass.config_entries.flow.async_configure(
                flow_id=flow_id, user_input={'code': 'abcd', 'state': { … }
            )
        """

    @abstractmethod
    async def async_resolve_external_data(self, external_data: Any) -> dict:
        """Resolve external data to tokens.

        Convert the payload the implementation handed to the config flow
        into OAuth tokens; the result is stored under 'token' in the
        config entry data.
        """

    async def async_refresh_token(self, token: dict) -> dict:
        """Refresh a token and update expires info."""
        refreshed = await self._async_refresh_token(token)
        # Some non-compliant providers return expires_in as a string;
        # normalize it to an int before computing the absolute expiry.
        expires_in = int(refreshed["expires_in"])
        refreshed["expires_in"] = expires_in
        refreshed["expires_at"] = time.time() + expires_in
        return refreshed

    @abstractmethod
    async def _async_refresh_token(self, token: dict) -> dict:
        """Refresh a token."""
class LocalOAuth2Implementation(AbstractOAuth2Implementation):
    """Local OAuth2 implementation.

    Runs the authorization-code grant against a provider using a locally
    supplied client id/secret, with this Home Assistant instance acting
    as the redirect target.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        domain: str,
        client_id: str,
        client_secret: str,
        authorize_url: str,
        token_url: str,
    ) -> None:
        """Initialize local auth implementation.

        domain: integration domain this implementation belongs to.
        authorize_url / token_url: the provider's OAuth2 endpoints.
        """
        self.hass = hass
        self._domain = domain
        self.client_id = client_id
        # NOTE(review): despite the `str` annotation, _token_request checks
        # `client_secret is not None`, so None appears tolerated — confirm.
        self.client_secret = client_secret
        self.authorize_url = authorize_url
        self.token_url = token_url

    @property
    def name(self) -> str:
        """Name of the implementation."""
        return "Configuration.yaml"

    @property
    def domain(self) -> str:
        """Domain providing the implementation."""
        return self._domain

    @property
    def redirect_uri(self) -> str:
        """Return the redirect uri.

        Built from the frontend base URL carried by the active HTTP
        request in the HA-Frontend-Base header. Raises RuntimeError when
        called outside a request context or when the header is absent.
        """
        req = http.current_request.get()

        if req is None:
            raise RuntimeError("No current request in context")

        ha_host = req.headers.get(HEADER_FRONTEND_BASE)

        if ha_host is None:
            raise RuntimeError("No header in request")

        return f"{ha_host}{AUTH_CALLBACK_PATH}"

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {}

    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize.

        The flow id and redirect uri are wrapped in a signed JWT and sent
        as the OAuth2 `state` parameter so the callback view can match
        the redirect back to this flow.
        """
        redirect_uri = self.redirect_uri
        return str(
            URL(self.authorize_url)
            .with_query(
                {
                    "response_type": "code",
                    "client_id": self.client_id,
                    "redirect_uri": redirect_uri,
                    "state": _encode_jwt(
                        self.hass, {"flow_id": flow_id, "redirect_uri": redirect_uri}
                    ),
                }
            )
            .update_query(self.extra_authorize_data)
        )

    async def async_resolve_external_data(self, external_data: Any) -> dict:
        """Resolve the authorization code to tokens."""
        return await self._token_request(
            {
                "grant_type": "authorization_code",
                "code": external_data["code"],
                # Must match the redirect_uri used in the authorize request.
                "redirect_uri": external_data["state"]["redirect_uri"],
            }
        )

    async def _async_refresh_token(self, token: dict) -> dict:
        """Refresh tokens."""
        new_token = await self._token_request(
            {
                "grant_type": "refresh_token",
                "client_id": self.client_id,
                "refresh_token": token["refresh_token"],
            }
        )
        # Merge so fields the provider omits in the refresh response
        # (e.g. refresh_token) survive from the old token.
        return {**token, **new_token}

    async def _token_request(self, data: dict) -> dict:
        """Make a token request."""
        session = async_get_clientsession(self.hass)

        data["client_id"] = self.client_id

        if self.client_secret is not None:
            data["client_secret"] = self.client_secret

        resp = await session.post(self.token_url, data=data)
        if resp.status >= 400 and _LOGGER.isEnabledFor(logging.DEBUG):
            # Only read/log the error body at debug level.
            body = await resp.text()
            _LOGGER.debug(
                "Token request failed with status=%s, body=%s",
                resp.status,
                body,
            )

        resp.raise_for_status()
        return cast(dict, await resp.json())
class AbstractOAuth2FlowHandler(config_entries.ConfigFlow, metaclass=ABCMeta):
    """Handle a config flow.

    Subclasses must set DOMAIN and provide a logger. The flow goes:
    pick_implementation -> auth (external step at the provider) ->
    creation (exchange external data for tokens, create the entry).
    """

    DOMAIN = ""  # must be overridden by the integration subclass
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN

    def __init__(self) -> None:
        """Instantiate config flow."""
        if self.DOMAIN == "":
            raise TypeError(
                f"Can't instantiate class {self.__class__.__name__} without DOMAIN being set"
            )

        self.external_data: Any = None
        self.flow_impl: AbstractOAuth2Implementation = None  # type: ignore

    @property
    @abstractmethod
    def logger(self) -> logging.Logger:
        """Return logger."""

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {}

    async def async_step_pick_implementation(
        self, user_input: dict | None = None
    ) -> dict:
        """Handle a flow start.

        Aborts when no implementation is configured; auto-picks a sole
        implementation on user interaction; otherwise shows a chooser.
        """
        implementations = await async_get_implementations(self.hass, self.DOMAIN)

        if user_input is not None:
            self.flow_impl = implementations[user_input["implementation"]]
            return await self.async_step_auth()

        if not implementations:
            return self.async_abort(reason="missing_configuration")

        req = http.current_request.get()
        if len(implementations) == 1 and req is not None:
            # Pick first implementation if we have only one, but only
            # if this is triggered by a user interaction (request).
            self.flow_impl = list(implementations.values())[0]
            return await self.async_step_auth()

        return self.async_show_form(
            step_id="pick_implementation",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        "implementation", default=list(implementations)[0]
                    ): vol.In({key: impl.name for key, impl in implementations.items()})
                }
            ),
        )

    async def async_step_auth(
        self, user_input: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Create an entry for auth."""
        # Flow has been triggered by external data
        if user_input:
            self.external_data = user_input
            return self.async_external_step_done(next_step_id="creation")

        # Generating the authorize URL may call out (e.g. cloud-provided
        # implementations), so bound it with a timeout.
        try:
            with async_timeout.timeout(10):
                url = await self.flow_impl.async_generate_authorize_url(self.flow_id)
        except asyncio.TimeoutError:
            return self.async_abort(reason="authorize_url_timeout")
        except NoURLAvailableError:
            return self.async_abort(
                reason="no_url_available",
                description_placeholders={
                    "docs_url": "https://www.home-assistant.io/more-info/no-url-available"
                },
            )

        url = str(URL(url).update_query(self.extra_authorize_data))

        return self.async_external_step(step_id="auth", url=url)

    async def async_step_creation(
        self, user_input: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """Create config entry from external data."""
        token = await self.flow_impl.async_resolve_external_data(self.external_data)
        # Force int for non-compliant oauth2 providers
        try:
            token["expires_in"] = int(token["expires_in"])
        except ValueError as err:
            _LOGGER.warning("Error converting expires_in to int: %s", err)
            return self.async_abort(reason="oauth_error")
        token["expires_at"] = time.time() + token["expires_in"]

        self.logger.info("Successfully authenticated")

        return await self.async_oauth_create_entry(
            {"auth_implementation": self.flow_impl.domain, "token": token}
        )

    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Create an entry for the flow.

        Ok to override if you want to fetch extra info or even add another step.
        """
        return self.async_create_entry(title=self.flow_impl.name, data=data)

    # Starting the flow as a user begins at implementation picking.
    async_step_user = async_step_pick_implementation

    @classmethod
    def async_register_implementation(
        cls, hass: HomeAssistant, local_impl: LocalOAuth2Implementation
    ) -> None:
        """Register a local implementation."""
        async_register_implementation(hass, cls.DOMAIN, local_impl)
@callback
def async_register_implementation(
    hass: HomeAssistant, domain: str, implementation: AbstractOAuth2Implementation
) -> None:
    """Register an OAuth2 flow implementation for an integration."""
    # The redirect callback view is only needed for local implementations
    # and must only be registered once per instance.
    needs_view = isinstance(implementation, LocalOAuth2Implementation)
    if needs_view and not hass.data.get(DATA_VIEW_REGISTERED, False):
        hass.http.register_view(OAuth2AuthorizeCallbackView())  # type: ignore
        hass.data[DATA_VIEW_REGISTERED] = True

    by_domain = hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).setdefault(domain, {})
    by_domain[implementation.domain] = implementation
async def async_get_implementations(
    hass: HomeAssistant, domain: str
) -> dict[str, AbstractOAuth2Implementation]:
    """Return OAuth2 implementations for specified domain."""
    static_impls = hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).get(domain, {})
    registered = cast(Dict[str, AbstractOAuth2Implementation], static_impls)

    if DATA_PROVIDERS not in hass.data:
        return registered

    # Work on a copy so provider results never mutate the registry itself.
    merged = dict(registered)
    for provider_domain, get_impl in hass.data[DATA_PROVIDERS].items():
        provided = await get_impl(hass, domain)
        if provided is not None:
            merged[provider_domain] = provided
    return merged
async def async_get_config_entry_implementation(
    hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> AbstractOAuth2Implementation:
    """Return the implementation for this config entry."""
    available = await async_get_implementations(hass, config_entry.domain)
    wanted = config_entry.data["auth_implementation"]
    implementation = available.get(wanted)
    if implementation is None:
        raise ValueError("Implementation not available")
    return implementation
@callback
def async_add_implementation_provider(
    hass: HomeAssistant,
    provider_domain: str,
    async_provide_implementation: Callable[
        [HomeAssistant, str], Awaitable[AbstractOAuth2Implementation | None]
    ],
) -> None:
    """Add an implementation provider.

    If no implementation found, return None.
    """
    providers = hass.data.setdefault(DATA_PROVIDERS, {})
    providers[provider_domain] = async_provide_implementation
class OAuth2AuthorizeCallbackView(http.HomeAssistantView):
    """OAuth2 Authorization Callback View.

    Receives the provider's redirect at /auth/external/callback, decodes
    the signed `state` JWT back into the originating flow id, and resumes
    that config flow with the authorization code.
    """

    requires_auth = False  # the provider redirects here unauthenticated
    url = AUTH_CALLBACK_PATH
    name = "auth:external:callback"

    async def get(self, request: web.Request) -> web.Response:
        """Receive authorization code."""
        if "code" not in request.query or "state" not in request.query:
            return web.Response(
                text=f"Missing code or state parameter in {request.url}"
            )

        hass = request.app["hass"]

        state = _decode_jwt(hass, request.query["state"])

        if state is None:
            return web.Response(text="Invalid state")

        # Resume the waiting config flow with the received code.
        await hass.config_entries.flow.async_configure(
            flow_id=state["flow_id"],
            user_input={"state": state, "code": request.query["code"]},
        )

        # Close the popup window the frontend opened for the provider.
        return web.Response(
            headers={"content-type": "text/html"},
            text="<script>window.close()</script>",
        )
class OAuth2Session:
    """Session to make requests authenticated with OAuth2.

    Wraps a config entry's stored token and its implementation, refreshing
    the token transparently before each request.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: AbstractOAuth2Implementation,
    ) -> None:
        """Initialize an OAuth2 session."""
        self.hass = hass
        self.config_entry = config_entry
        self.implementation = implementation

    @property
    def token(self) -> dict:
        """Return the token."""
        return cast(dict, self.config_entry.data["token"])

    @property
    def valid_token(self) -> bool:
        """Return if token is still valid."""
        # Treat tokens that expire within CLOCK_OUT_OF_SYNC_MAX_SEC as
        # already expired, to tolerate clock skew with the provider.
        return (
            cast(float, self.token["expires_at"])
            > time.time() + CLOCK_OUT_OF_SYNC_MAX_SEC
        )

    async def async_ensure_token_valid(self) -> None:
        """Ensure that the current token is valid."""
        if self.valid_token:
            return

        new_token = await self.implementation.async_refresh_token(self.token)

        # Persist the refreshed token back onto the config entry.
        self.hass.config_entries.async_update_entry(
            self.config_entry, data={**self.config_entry.data, "token": new_token}
        )

    async def async_request(
        self, method: str, url: str, **kwargs: Any
    ) -> client.ClientResponse:
        """Make a request."""
        await self.async_ensure_token_valid()
        return await async_oauth2_request(
            self.hass, self.config_entry.data["token"], method, url, **kwargs
        )
async def async_oauth2_request(
    hass: HomeAssistant, token: dict, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
    """Make an OAuth2 authenticated request.

    This method will not refresh tokens. Use OAuth2 session for that.

    Any caller-supplied "headers" are merged with the bearer token. The
    caller's entry is popped out of kwargs first: the original passed
    **kwargs alongside an explicit headers= keyword, which raised
    TypeError ("got multiple values for keyword argument 'headers'")
    whenever a caller supplied its own headers.
    """
    session = async_get_clientsession(hass)
    headers = {
        **(kwargs.pop("headers", None) or {}),
        "authorization": f"Bearer {token['access_token']}",
    }
    return await session.request(method, url, **kwargs, headers=headers)
@callback
def _encode_jwt(hass: HomeAssistant, data: dict) -> str:
    """JWT encode data.

    The signing secret is generated on first use and kept in hass.data,
    so tokens only stay verifiable for this instance's lifetime.
    """
    secret = hass.data.get(DATA_JWT_SECRET)

    if secret is None:
        secret = hass.data[DATA_JWT_SECRET] = secrets.token_hex()

    # NOTE(review): .decode() assumes jwt.encode returns bytes (PyJWT 1.x);
    # PyJWT 2.x returns str, where this raises AttributeError — confirm the
    # pinned PyJWT version.
    return jwt.encode(data, secret, algorithm="HS256").decode()
@callback
def _decode_jwt(hass: HomeAssistant, encoded: str) -> dict | None:
    """JWT decode data, returning None when the token is invalid."""
    # Secret may be absent if nothing was encoded yet; jwt.decode then
    # fails signature verification and we fall into the except branch.
    secret = cast(str, hass.data.get(DATA_JWT_SECRET))

    try:
        return jwt.decode(encoded, secret, algorithms=["HS256"])
    except jwt.InvalidTokenError:
        return None
| 32.547573 | 93 | 0.641332 |
795c0a7fb21fda5147b73781d9be4aeb80144354 | 1,723 | py | Python | web/doc/_themes/odoodoc/odoo_pygments.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/doc/_themes/odoodoc/odoo_pygments.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/doc/_themes/odoodoc/odoo_pygments.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import imp
import sys
from pygments.style import Style
from pygments.token import *
# extracted from getbootstrap.com
class OdooStyle(Style):
    """Pygments style reproducing the code colours of getbootstrap.com.

    Registered below as the `pygments.styles.odoo` module so Sphinx can
    reference it as the "odoo" highlight style.
    """

    # Code-block background and highlighted-line colours.
    background_color = '#ffffcc'
    highlight_color = '#fcf8e3'

    # Mapping of Pygments token types to colour/format rules.
    styles = {
        Whitespace: '#BBB',
        Error: 'bg:#FAA #A00',
        Keyword: '#069',
        Keyword.Type: '#078',
        Name.Attribute: '#4F9FCF',
        Name.Builtin: '#366',
        Name.Class: '#0A8',
        Name.Constant: '#360',
        Name.Decorator: '#99F',
        Name.Entity: '#999',
        Name.Exception: '#C00',
        Name.Function: '#C0F',
        Name.Label: '#99F',
        Name.Namespace: '#0CF',
        Name.Tag: '#2F6F9F',
        Name.Variable: '#033',
        String: '#d44950',
        String.Backtick: '#C30',
        String.Char: '#C30',
        String.Doc: 'italic #C30',
        String.Double: '#C30',
        String.Escape: '#C30',
        String.Heredoc: '#C30',
        # Fixed from the original's `String.Interol`: the Pygments token
        # is String.Interpol; token attribute access auto-creates subtypes,
        # so the typo silently styled a nonexistent token.
        String.Interpol: '#C30',
        String.Other: '#C30',
        String.Regex: '#3AA',
        String.Single: '#C30',
        String.Symbol: '#FC3',
        Number: '#F60',
        Operator: '#555',
        Operator.Word: '#000',
        Comment: '#999',
        Comment.Preproc: '#099',
        Generic.Deleted: 'bg:#FCC border:#c00',
        Generic.Emph: 'italic',
        Generic.Error: '#F00',
        Generic.Heading: '#030',
        Generic.Inserted: 'bg:#CFC border:#0C0',
        Generic.Output: '#AAA',
        Generic.Prompt: '#009',
        Generic.Strong: '',
        Generic.Subheading: '#030',
        Generic.Traceback: '#9C6',
    }
# Expose OdooStyle under a synthetic `pygments.styles.odoo` module so that
# Pygments/Sphinx can load it by the style name "odoo".
# Uses types.ModuleType instead of the original imp.new_module: the `imp`
# module is deprecated and was removed in Python 3.12; the two calls are
# equivalent (both return a fresh, empty module object).
import types

modname = 'pygments.styles.odoo'
m = types.ModuleType(modname)
m.OdooStyle = OdooStyle
sys.modules[modname] = m
| 24.267606 | 48 | 0.531631 |
795c0abae720727f0d96187c95051c5302c87e65 | 2,700 | py | Python | PyCreator/UI/settings.py | Zachacious/PyCreator | 7c52be80ee3fef9ec00ad2fad52ab06b64d84029 | [
"MIT"
] | 3 | 2015-04-27T18:46:21.000Z | 2021-11-08T10:46:05.000Z | PyCreator/UI/settings.py | Zachacious/PyCreator | 7c52be80ee3fef9ec00ad2fad52ab06b64d84029 | [
"MIT"
] | null | null | null | PyCreator/UI/settings.py | Zachacious/PyCreator | 7c52be80ee3fef9ec00ad2fad52ab06b64d84029 | [
"MIT"
] | null | null | null | """
This module provides an easy access to the application settings
"""
import json
from pyqode.qt.QtCore import QSettings, QByteArray, QSize, QPoint
class Settings(object):
    """Persistent application settings backed by QSettings.

    Values are stored under the 'Zach Moore' / 'DeliciousPy' organization
    and application keys.
    """

    # Indexes into the interpreterConfig list value.
    IConfigOption = 0
    IConfigPath = 1
    # Values for the iconfig_option entry.
    IConfigOptAuto = 0
    IConfigOptManual = 1
    # Indexes into the mainWindowState list value.
    MWState = 0
    MWSize = 1
    MWPosition = 2
    MWMaximized = 3

    def __init__(self):
        self.settings = QSettings('Zach Moore', 'DeliciousPy')

    @property
    def mainWindowState(self):
        """Return [state, size, position, maximized] for the main window.

        The type= arguments force QSettings to coerce stored values back
        into the expected Qt types; missing keys yield empty defaults.
        """
        state = [self.settings.value('mainWindow_state',
                                     QByteArray(),
                                     type=QByteArray),
                 self.settings.value('mainWindow_size',
                                     QSize(),
                                     type=QSize),
                 self.settings.value('mainWindow_position',
                                     QPoint(),
                                     type=QPoint),
                 self.settings.value('mainWindow_maximized', False)]
        return state

    @mainWindowState.setter
    def mainWindowState(self, value):
        # `value` is indexed with the MW* constants defined above.
        self.settings.setValue('mainWindow_state', value[self.MWState])
        self.settings.setValue('mainWindow_size', value[self.MWSize])
        self.settings.setValue('mainWindow_position', value[self.MWPosition])
        self.settings.setValue('mainWindow_maximized', value[self.MWMaximized])

    @property
    def interpreterConfig(self):
        """Return [option, path] for the Python interpreter selection."""
        # NOTE(review): no type= coercion here, so on some platforms
        # QSettings may return the option as a string — confirm callers
        # compare against IConfigOptAuto/Manual appropriately.
        iconfig = [self.settings.value('iconfig_option', 0),
                   self.settings.value('iconfig_path', '')]
        return iconfig

    @interpreterConfig.setter
    def interpreterConfig(self, value):
        self.settings.setValue('iconfig_option', value[self.IConfigOption])
        self.settings.setValue('iconfig_path', value[self.IConfigPath])

    @property
    def run_configs(self):
        """
        Returns the dictionary of run configurations. A run configuration is
        just a list of arguments to append to the run command.
        This is internally stored as a json object
        """
        string = self.settings.value('run_configs', '{}')
        return json.loads(string)

    @run_configs.setter
    def run_configs(self, value):
        self.settings.setValue('run_configs', json.dumps(value))

    def get_run_config_for_file(self, filename):
        """Return the run config for *filename*, creating an empty one on first use."""
        try:
            dic = self.run_configs
            config = dic[filename]
        except KeyError:
            # First time this file is run: persist an empty config.
            config = []
            self.set_run_config_for_file(filename, config)
        return config

    def set_run_config_for_file(self, filename, config):
        """Persist *config* (a list of run arguments) for *filename*."""
        dic = self.run_configs
        dic[filename] = config
        self.run_configs = dic
| 32.926829 | 79 | 0.60037 |
795c0b4166facae93fffa76834c63fe601fa29f6 | 1,227 | py | Python | py/leetcode/MaxSumRectangle.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | py/leetcode/MaxSumRectangle.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | py/leetcode/MaxSumRectangle.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | '''
Leetcode problem No 363 Max Sum of Rectangle No Larger Than K
Solution written by Xuqiang Fang on 12 July, 2018
'''
from bisect import bisect_left, insort
class Solution(object):
    """LeetCode 363: Max Sum of Rectangle No Larger Than K."""

    def maxSumSubmatrix(self, matrix, k):
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int

        Reduce to 1-D: for each pair of rows (of the smaller dimension),
        collapse them into column sums, then use sorted prefix sums and
        binary search to find the best subarray sum <= k.

        Fixes over the original: removed leftover debug prints from the
        inner loop and guarded against a matrix with empty rows.
        """
        if not matrix or not matrix[0]:
            return 0
        # Enumerate row-pairs over the smaller dimension, scan the larger.
        m = min(len(matrix), len(matrix[0]))
        n = max(len(matrix), len(matrix[0]))
        ans = float('-inf')
        for i in range(m):
            s = [0] * n
            for j in range(i, m):
                # Collapse rows (or columns) i..j into 1-D sums.
                for l in range(n):
                    s[l] += matrix[j][l] if m == len(matrix) else matrix[l][j]
                # For each prefix sum `accum`, the best subarray ending here
                # with sum <= k uses the smallest prior prefix >= accum - k.
                accum_set, accum = [0], 0
                for t in s:
                    accum += t
                    it = bisect_left(accum_set, accum - k)
                    if it != len(accum_set):
                        ans = max(ans, accum - accum_set[it])
                    insort(accum_set, accum)
        return ans
def main():
    """Run the sample from the problem statement and print the answer."""
    solver = Solution()
    sample = [[1, 0, 1], [0, -2, 3]]
    print(solver.maxSumSubmatrix(sample, 2))


if __name__ == '__main__':
    main()
| 29.926829 | 79 | 0.475143 |
795c0c8ed786614c335a5015ba53299201970619 | 52,257 | py | Python | sdk/python/pulumi_azure/datafactory/linked_service_azure_sql_database.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/datafactory/linked_service_azure_sql_database.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/datafactory/linked_service_azure_sql_database.py | roderik/pulumi-azure | f6d0c058d6f9111a709bc5f1515d1638f9d615f0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['LinkedServiceAzureSqlDatabaseArgs', 'LinkedServiceAzureSqlDatabase']
@pulumi.input_type
class LinkedServiceAzureSqlDatabaseArgs:
    # NOTE: tfgen-generated input type (see file header) — regenerate from the
    # provider schema rather than hand-editing the accessors below.
    def __init__(__self__, *,
                 data_factory_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 additional_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 integration_runtime_name: Optional[pulumi.Input[str]] = None,
                 key_vault_connection_string: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']] = None,
                 key_vault_password: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 service_principal_id: Optional[pulumi.Input[str]] = None,
                 service_principal_key: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 use_managed_identity: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a LinkedServiceAzureSqlDatabase resource.
        :param pulumi.Input[str] data_factory_name: The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_properties: A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] annotations: List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] connection_string: The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        :param pulumi.Input[str] description: The description for the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] integration_runtime_name: The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs'] key_vault_connection_string: A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        :param pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs'] key_vault_password: A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
        :param pulumi.Input[str] name: Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
               factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] service_principal_id: The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
        :param pulumi.Input[str] service_principal_key: The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
        :param pulumi.Input[str] tenant_id: The tenant id or name in which to authenticate against the Azure SQL Database.
        :param pulumi.Input[bool] use_managed_identity: Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
        """
        # Required arguments are always registered in the pulumi property bag.
        pulumi.set(__self__, "data_factory_name", data_factory_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only registered when explicitly supplied,
        # so unset properties are absent rather than None.
        if additional_properties is not None:
            pulumi.set(__self__, "additional_properties", additional_properties)
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if connection_string is not None:
            pulumi.set(__self__, "connection_string", connection_string)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if integration_runtime_name is not None:
            pulumi.set(__self__, "integration_runtime_name", integration_runtime_name)
        if key_vault_connection_string is not None:
            pulumi.set(__self__, "key_vault_connection_string", key_vault_connection_string)
        if key_vault_password is not None:
            pulumi.set(__self__, "key_vault_password", key_vault_password)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if service_principal_id is not None:
            pulumi.set(__self__, "service_principal_id", service_principal_id)
        if service_principal_key is not None:
            pulumi.set(__self__, "service_principal_key", service_principal_key)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
        if use_managed_identity is not None:
            pulumi.set(__self__, "use_managed_identity", use_managed_identity)

    # Typed accessors over the pulumi property bag; `pulumi.getter(name=...)`
    # maps each snake_case attribute to its camelCase wire name.
    @property
    @pulumi.getter(name="dataFactoryName")
    def data_factory_name(self) -> pulumi.Input[str]:
        """
        The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "data_factory_name")

    @data_factory_name.setter
    def data_factory_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_factory_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="additionalProperties")
    def additional_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "additional_properties")

    @additional_properties.setter
    def additional_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "additional_properties", value)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[pulumi.Input[str]]:
        """
        The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "connection_string")

    @connection_string.setter
    def connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_string", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description for the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="integrationRuntimeName")
    def integration_runtime_name(self) -> Optional[pulumi.Input[str]]:
        """
        The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "integration_runtime_name")

    @integration_runtime_name.setter
    def integration_runtime_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "integration_runtime_name", value)

    @property
    @pulumi.getter(name="keyVaultConnectionString")
    def key_vault_connection_string(self) -> Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]:
        """
        A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "key_vault_connection_string")

    @key_vault_connection_string.setter
    def key_vault_connection_string(self, value: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]):
        pulumi.set(self, "key_vault_connection_string", value)

    @property
    @pulumi.getter(name="keyVaultPassword")
    def key_vault_password(self) -> Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]:
        """
        A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
        """
        return pulumi.get(self, "key_vault_password")

    @key_vault_password.setter
    def key_vault_password(self, value: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]):
        pulumi.set(self, "key_vault_password", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
        factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
        """
        return pulumi.get(self, "service_principal_id")

    @service_principal_id.setter
    def service_principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_principal_id", value)

    @property
    @pulumi.getter(name="servicePrincipalKey")
    def service_principal_key(self) -> Optional[pulumi.Input[str]]:
        """
        The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
        """
        return pulumi.get(self, "service_principal_key")

    @service_principal_key.setter
    def service_principal_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_principal_key", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The tenant id or name in which to authenticate against the Azure SQL Database.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)

    @property
    @pulumi.getter(name="useManagedIdentity")
    def use_managed_identity(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
        """
        return pulumi.get(self, "use_managed_identity")

    @use_managed_identity.setter
    def use_managed_identity(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_managed_identity", value)
@pulumi.input_type
class _LinkedServiceAzureSqlDatabaseState:
    # NOTE: tfgen-generated state type (see file header) — regenerate from the
    # provider schema rather than hand-editing. Unlike the Args type, every
    # field here is optional because state is used for lookup/filtering.
    def __init__(__self__, *,
                 additional_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 data_factory_name: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 integration_runtime_name: Optional[pulumi.Input[str]] = None,
                 key_vault_connection_string: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']] = None,
                 key_vault_password: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_principal_id: Optional[pulumi.Input[str]] = None,
                 service_principal_key: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 use_managed_identity: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering LinkedServiceAzureSqlDatabase resources.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_properties: A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] annotations: List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] connection_string: The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        :param pulumi.Input[str] data_factory_name: The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
        :param pulumi.Input[str] description: The description for the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] integration_runtime_name: The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs'] key_vault_connection_string: A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        :param pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs'] key_vault_password: A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
        :param pulumi.Input[str] name: Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
               factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_principal_id: The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
        :param pulumi.Input[str] service_principal_key: The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
        :param pulumi.Input[str] tenant_id: The tenant id or name in which to authenticate against the Azure SQL Database.
        :param pulumi.Input[bool] use_managed_identity: Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
        """
        # Every field is optional; only explicitly supplied values are
        # registered in the pulumi property bag.
        if additional_properties is not None:
            pulumi.set(__self__, "additional_properties", additional_properties)
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if connection_string is not None:
            pulumi.set(__self__, "connection_string", connection_string)
        if data_factory_name is not None:
            pulumi.set(__self__, "data_factory_name", data_factory_name)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if integration_runtime_name is not None:
            pulumi.set(__self__, "integration_runtime_name", integration_runtime_name)
        if key_vault_connection_string is not None:
            pulumi.set(__self__, "key_vault_connection_string", key_vault_connection_string)
        if key_vault_password is not None:
            pulumi.set(__self__, "key_vault_password", key_vault_password)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if service_principal_id is not None:
            pulumi.set(__self__, "service_principal_id", service_principal_id)
        if service_principal_key is not None:
            pulumi.set(__self__, "service_principal_key", service_principal_key)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
        if use_managed_identity is not None:
            pulumi.set(__self__, "use_managed_identity", use_managed_identity)

    # Typed accessors over the pulumi property bag; `pulumi.getter(name=...)`
    # maps each snake_case attribute to its camelCase wire name.
    @property
    @pulumi.getter(name="additionalProperties")
    def additional_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "additional_properties")

    @additional_properties.setter
    def additional_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "additional_properties", value)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[pulumi.Input[str]]:
        """
        The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "connection_string")

    @connection_string.setter
    def connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_string", value)

    @property
    @pulumi.getter(name="dataFactoryName")
    def data_factory_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "data_factory_name")

    @data_factory_name.setter
    def data_factory_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_factory_name", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description for the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="integrationRuntimeName")
    def integration_runtime_name(self) -> Optional[pulumi.Input[str]]:
        """
        The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "integration_runtime_name")

    @integration_runtime_name.setter
    def integration_runtime_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "integration_runtime_name", value)

    @property
    @pulumi.getter(name="keyVaultConnectionString")
    def key_vault_connection_string(self) -> Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]:
        """
        A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "key_vault_connection_string")

    @key_vault_connection_string.setter
    def key_vault_connection_string(self, value: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]):
        pulumi.set(self, "key_vault_connection_string", value)

    @property
    @pulumi.getter(name="keyVaultPassword")
    def key_vault_password(self) -> Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]:
        """
        A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
        """
        return pulumi.get(self, "key_vault_password")

    @key_vault_password.setter
    def key_vault_password(self, value: Optional[pulumi.Input['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]):
        pulumi.set(self, "key_vault_password", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
        factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
        """
        return pulumi.get(self, "service_principal_id")

    @service_principal_id.setter
    def service_principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_principal_id", value)

    @property
    @pulumi.getter(name="servicePrincipalKey")
    def service_principal_key(self) -> Optional[pulumi.Input[str]]:
        """
        The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
        """
        return pulumi.get(self, "service_principal_key")

    @service_principal_key.setter
    def service_principal_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_principal_key", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The tenant id or name in which to authenticate against the Azure SQL Database.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)

    @property
    @pulumi.getter(name="useManagedIdentity")
    def use_managed_identity(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
        """
        return pulumi.get(self, "use_managed_identity")

    @use_managed_identity.setter
    def use_managed_identity(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_managed_identity", value)
class LinkedServiceAzureSqlDatabase(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
data_factory_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
integration_runtime_name: Optional[pulumi.Input[str]] = None,
key_vault_connection_string: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]] = None,
key_vault_password: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_principal_id: Optional[pulumi.Input[str]] = None,
service_principal_key: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_managed_identity: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Manages a Linked Service (connection) between Azure SQL Database and Azure Data Factory.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_factory = azure.datafactory.Factory("exampleFactory",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_linked_service_azure_sql_database = azure.datafactory.LinkedServiceAzureSqlDatabase("exampleLinkedServiceAzureSqlDatabase",
resource_group_name=example_resource_group.name,
data_factory_name=example_factory.name,
connection_string="data source=serverhostname;initial catalog=master;user id=testUser;Password=test;integrated security=False;encrypt=True;connection timeout=30")
```
## Import
Data Factory Azure SQL Database Linked Service's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:datafactory/linkedServiceAzureSqlDatabase:LinkedServiceAzureSqlDatabase example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.DataFactory/factories/example/linkedservices/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_properties: A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] annotations: List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] connection_string: The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
:param pulumi.Input[str] data_factory_name: The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] integration_runtime_name: The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']] key_vault_connection_string: A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
:param pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']] key_vault_password: A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
:param pulumi.Input[str] name: Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_principal_id: The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
:param pulumi.Input[str] service_principal_key: The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
:param pulumi.Input[str] tenant_id: The tenant id or name in which to authenticate against the Azure SQL Database.
:param pulumi.Input[bool] use_managed_identity: Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LinkedServiceAzureSqlDatabaseArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Linked Service (connection) between Azure SQL Database and Azure Data Factory.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_factory = azure.datafactory.Factory("exampleFactory",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_linked_service_azure_sql_database = azure.datafactory.LinkedServiceAzureSqlDatabase("exampleLinkedServiceAzureSqlDatabase",
resource_group_name=example_resource_group.name,
data_factory_name=example_factory.name,
connection_string="data source=serverhostname;initial catalog=master;user id=testUser;Password=test;integrated security=False;encrypt=True;connection timeout=30")
```
## Import
Data Factory Azure SQL Database Linked Service's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:datafactory/linkedServiceAzureSqlDatabase:LinkedServiceAzureSqlDatabase example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.DataFactory/factories/example/linkedservices/example
```
:param str resource_name: The name of the resource.
:param LinkedServiceAzureSqlDatabaseArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LinkedServiceAzureSqlDatabaseArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
data_factory_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
integration_runtime_name: Optional[pulumi.Input[str]] = None,
key_vault_connection_string: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]] = None,
key_vault_password: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_principal_id: Optional[pulumi.Input[str]] = None,
service_principal_key: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_managed_identity: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LinkedServiceAzureSqlDatabaseArgs.__new__(LinkedServiceAzureSqlDatabaseArgs)
__props__.__dict__["additional_properties"] = additional_properties
__props__.__dict__["annotations"] = annotations
__props__.__dict__["connection_string"] = connection_string
if data_factory_name is None and not opts.urn:
raise TypeError("Missing required property 'data_factory_name'")
__props__.__dict__["data_factory_name"] = data_factory_name
__props__.__dict__["description"] = description
__props__.__dict__["integration_runtime_name"] = integration_runtime_name
__props__.__dict__["key_vault_connection_string"] = key_vault_connection_string
__props__.__dict__["key_vault_password"] = key_vault_password
__props__.__dict__["name"] = name
__props__.__dict__["parameters"] = parameters
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_principal_id"] = service_principal_id
__props__.__dict__["service_principal_key"] = service_principal_key
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["use_managed_identity"] = use_managed_identity
super(LinkedServiceAzureSqlDatabase, __self__).__init__(
'azure:datafactory/linkedServiceAzureSqlDatabase:LinkedServiceAzureSqlDatabase',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
additional_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
annotations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
data_factory_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
integration_runtime_name: Optional[pulumi.Input[str]] = None,
key_vault_connection_string: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']]] = None,
key_vault_password: Optional[pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_principal_id: Optional[pulumi.Input[str]] = None,
service_principal_key: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_managed_identity: Optional[pulumi.Input[bool]] = None) -> 'LinkedServiceAzureSqlDatabase':
"""
Get an existing LinkedServiceAzureSqlDatabase resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] additional_properties: A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[Sequence[pulumi.Input[str]]] annotations: List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] connection_string: The connection string in which to authenticate with Azure SQL Database. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
:param pulumi.Input[str] data_factory_name: The Data Factory name in which to associate the Linked Service with. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] integration_runtime_name: The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultConnectionStringArgs']] key_vault_connection_string: A `key_vault_connection_string` block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or `key_vault_connection_string` is required.
:param pulumi.Input[pulumi.InputType['LinkedServiceAzureSqlDatabaseKeyVaultPasswordArgs']] key_vault_password: A `key_vault_password` block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service.
:param pulumi.Input[str] name: Specifies the name of the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created. Must be unique within a data
factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: A map of parameters to associate with the Data Factory Linked Service Azure SQL Database.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Data Factory Linked Service Azure SQL Database. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_principal_id: The service principal id in which to authenticate against the Azure SQL Database. Required if `service_principal_key` is set.
:param pulumi.Input[str] service_principal_key: The service principal key in which to authenticate against the Azure SQL Database. Required if `service_principal_id` is set.
:param pulumi.Input[str] tenant_id: The tenant id or name in which to authenticate against the Azure SQL Database.
:param pulumi.Input[bool] use_managed_identity: Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with `service_principal_id` and `service_principal_key`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LinkedServiceAzureSqlDatabaseState.__new__(_LinkedServiceAzureSqlDatabaseState)
__props__.__dict__["additional_properties"] = additional_properties
__props__.__dict__["annotations"] = annotations
__props__.__dict__["connection_string"] = connection_string
__props__.__dict__["data_factory_name"] = data_factory_name
__props__.__dict__["description"] = description
__props__.__dict__["integration_runtime_name"] = integration_runtime_name
__props__.__dict__["key_vault_connection_string"] = key_vault_connection_string
__props__.__dict__["key_vault_password"] = key_vault_password
__props__.__dict__["name"] = name
__props__.__dict__["parameters"] = parameters
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_principal_id"] = service_principal_id
__props__.__dict__["service_principal_key"] = service_principal_key
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["use_managed_identity"] = use_managed_identity
return LinkedServiceAzureSqlDatabase(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="additionalProperties")
    def additional_properties(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of additional properties to associate with the Data Factory
        Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "additional_properties")
    @property
    @pulumi.getter
    def annotations(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of tags that can be used for describing the Data Factory
        Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "annotations")
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> pulumi.Output[Optional[str]]:
        """
        The connection string used to authenticate with Azure SQL Database.
        Exactly one of either `connection_string` or `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "connection_string")
    @property
    @pulumi.getter(name="dataFactoryName")
    def data_factory_name(self) -> pulumi.Output[str]:
        """
        The Data Factory name with which to associate the Linked Service.
        Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "data_factory_name")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description for the Data Factory Linked Service Azure SQL
        Database.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="integrationRuntimeName")
    def integration_runtime_name(self) -> pulumi.Output[Optional[str]]:
        """
        The integration runtime reference to associate with the Data Factory
        Linked Service Azure SQL Database.
        """
        return pulumi.get(self, "integration_runtime_name")
    @property
    @pulumi.getter(name="keyVaultConnectionString")
    def key_vault_connection_string(self) -> pulumi.Output[Optional['outputs.LinkedServiceAzureSqlDatabaseKeyVaultConnectionString']]:
        """
        A `key_vault_connection_string` block as defined below. Use this argument to store the
        Azure SQL Database connection string in an existing Key Vault. It needs an existing
        Key Vault Data Factory Linked Service. Exactly one of either `connection_string` or
        `key_vault_connection_string` is required.
        """
        return pulumi.get(self, "key_vault_connection_string")
    @property
    @pulumi.getter(name="keyVaultPassword")
    def key_vault_password(self) -> pulumi.Output[Optional['outputs.LinkedServiceAzureSqlDatabaseKeyVaultPassword']]:
        """
        A `key_vault_password` block as defined below. Use this argument to store the
        SQL Server password in an existing Key Vault. It needs an existing Key Vault
        Data Factory Linked Service.
        """
        return pulumi.get(self, "key_vault_password")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the Data Factory Linked Service Azure SQL Database.
        Changing this forces a new resource to be created. Must be unique within a
        data factory. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of parameters to associate with the Data Factory Linked Service
        Azure SQL Database.
        """
        return pulumi.get(self, "parameters")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group in which to create the Data Factory Linked
        Service Azure SQL Database. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> pulumi.Output[Optional[str]]:
        """
        The service principal id used to authenticate against the Azure SQL Database.
        Required if `service_principal_key` is set.
        """
        return pulumi.get(self, "service_principal_id")
    @property
    @pulumi.getter(name="servicePrincipalKey")
    def service_principal_key(self) -> pulumi.Output[Optional[str]]:
        """
        The service principal key used to authenticate against the Azure SQL Database.
        Required if `service_principal_id` is set.
        """
        return pulumi.get(self, "service_principal_key")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Output[Optional[str]]:
        """
        The tenant id or name used to authenticate against the Azure SQL
        Database.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter(name="useManagedIdentity")
    def use_managed_identity(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether to use the Data Factory's managed identity to authenticate against the
        Azure SQL Database. Incompatible with `service_principal_id` and
        `service_principal_key`.
        """
        return pulumi.get(self, "use_managed_identity")
| 60.412717 | 429 | 0.713493 |
795c0caddfdfd2ebe6ac64abc3fcca5c6e3f7725 | 553 | py | Python | setup.py | Renedyn/Fun_package | c6269c4a86d4d1b8014e5589bebbc84a9a02a478 | [
"MIT"
] | 1 | 2020-05-05T16:47:14.000Z | 2020-05-05T16:47:14.000Z | setup.py | Renedyn/Fun_package | c6269c4a86d4d1b8014e5589bebbc84a9a02a478 | [
"MIT"
] | 1 | 2021-12-24T18:02:56.000Z | 2021-12-24T18:02:56.000Z | setup.py | Renedyn/Fun_package | c6269c4a86d4d1b8014e5589bebbc84a9a02a478 | [
"MIT"
] | 2 | 2020-09-08T17:34:47.000Z | 2021-12-24T18:07:46.000Z | import setuptools
# Package metadata; keyword meanings per the setuptools setup() reference.
setuptools.setup(
    name="Huy",  # distribution name used on the package index
    version="1.2",
    author="Petr Srakovorodnikov",
    author_email="petya.obosralsya@mail.ru",
    description="A package to draw dicks",
    url="https://github.com/Torrentov/Fun_package",
    packages=setuptools.find_packages(),  # auto-discover all packages in the source tree
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    package_data={'': ['huy.py']},  # ship huy.py as data for every package
    include_package_data=True,
)
| 27.65 | 51 | 0.640145 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.