code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import Dict
class FakeContext:
    """Test double that serves canned payloads for fixed URLs.

    Both ``get_text`` and ``get_bytes`` simply look the URL up in the
    mapping supplied at construction time.
    """

    def __init__(self, urls: Dict[str, str]):
        self._urls = urls

    def _lookup(self, url):
        # Resolves the TODOs below: keep raising KeyError (so existing
        # callers' except clauses still match) but with a helpful message.
        try:
            return self._urls[url]
        except KeyError:
            raise KeyError(f"FakeContext has no canned response for URL: {url!r}") from None

    def get_text(self, url, **kwargs):
        """Return the canned text for ``url``; raises KeyError if unregistered."""
        return self._lookup(url)

    def get_bytes(self, url, **kwargs):
        """Return the canned bytes for ``url``; raises KeyError if unregistered."""
        return self._lookup(url)
def get_test_data(path: str) -> bytes:
    """Read a test fixture file relative to RSSFLY_TEST_DATA_ROOT.

    The root defaults to the current directory when the environment
    variable is unset. Returns the raw file contents as bytes — the
    previous ``-> str`` annotation was wrong, since the file is opened
    in binary mode.
    """
    root = Path(os.environ.get("RSSFLY_TEST_DATA_ROOT", ".")) / path
    return root.read_bytes()
| [
"os.environ.get"
] | [((992, 1036), 'os.environ.get', 'os.environ.get', (['"""RSSFLY_TEST_DATA_ROOT"""', '"""."""'], {}), "('RSSFLY_TEST_DATA_ROOT', '.')\n", (1006, 1036), False, 'import os\n')] |
import os
import sys
from typing import Iterable
from jinja2 import Environment, FileSystemLoader, Template
import config as cfg
from . import app_root_dir, doc_root_dir, resource_dir, template_dir
_usage = "Usage: generate.py <onprem|aws|gcp|azure|k8s|alibabacloud|oci|programming|saas>"
def load_tmpl(tmpl: str) -> Template:
    """Load a Jinja2 template from the project's template directory,
    with the custom ``up_or_title`` filter registered on the environment."""
    env = Environment(loader=FileSystemLoader(template_dir()))
    env.filters["up_or_title"] = up_or_title
    return env.get_template(tmpl)
def up_or_title(pvd: str, s: str) -> str:
    """Case a word for provider ``pvd``: all-caps if it is a configured
    acronym, a configured special title if one exists, else ``str.title``."""
    if s in cfg.UPPER_WORDS.get(pvd, ()):
        return s.upper()
    special_titles = cfg.TITLE_WORDS.get(pvd, {})
    if s in special_titles:
        return special_titles[s]
    return s.title()
def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str:
    """Generate all service node classes based on resources paths with class templates."""
    tmpl = load_tmpl(cfg.TMPL_MODULE)

    # TODO: extract the gen class metas for sharing
    # TODO: independent function for generating all pvd/typ/paths pairs
    def _gen_class_meta(path: str) -> dict:
        # "foo-bar.png" -> class name "FooBar" (per-provider casing rules).
        base = os.path.splitext(path)[0]
        name = "".join([up_or_title(pvd, s) for s in base.split("-")])
        return {"name": name, "icon": path}

    metas = map(_gen_class_meta, paths)
    # Aliases are optional per provider/type; fall back to no aliases.
    aliases = cfg.ALIASES[pvd][typ] if typ in cfg.ALIASES[pvd] else {}
    return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases)
def gen_apidoc(pvd: str, typ_paths: dict) -> str:
    """Render the API documentation page for one provider from its
    type -> resource-paths mapping."""
    tmpl = load_tmpl(cfg.TMPL_APIDOC)

    # TODO: remove
    def _gen_class_name(path: str) -> str:
        # "foo-bar.png" -> "FooBar" (per-provider casing rules).
        base = os.path.splitext(path)[0]
        name = "".join([up_or_title(pvd, s) for s in base.split("-")])
        return name

    typ_classes = {}
    for typ, paths in sorted(typ_paths.items()):
        typ_classes[typ] = []
        for name in map(_gen_class_name, paths):
            # Alias is None when the provider/type has no alias configured.
            alias = cfg.ALIASES[pvd].get(typ, {}).get(name)
            typ_classes[typ].append({"name": name, "alias": alias})
    return tmpl.render(pvd=pvd, typ_classes=typ_classes)
def make_module(pvd: str, typ: str, classes: str) -> None:
    """Write the generated class source for one provider/type to its module file."""
    target = os.path.join(app_root_dir(pvd), f"{typ}.py")
    with open(target, "w+") as fout:
        fout.write(classes)
def make_apidoc(pvd: str, content: str) -> None:
    """Write the rendered API documentation page for one provider."""
    target = os.path.join(doc_root_dir(), f"{pvd}.md")
    with open(target, "w+") as fout:
        fout.write(content)
def generate(pvd: str) -> None:
    """Generates a service node classes."""
    typ_paths = {}
    # Walk the provider's resource tree; each sub-directory is one "type".
    for root, _, files in os.walk(resource_dir(pvd)):
        # Extract the names and paths from resources.
        files.sort()
        pngs = list(filter(lambda f: f.endswith(".png"), files))
        # Drop the "rounded" icon variants.
        paths = list(filter(lambda f: "rounded" not in f, pngs))
        # Skip the top-root directory.
        typ = os.path.basename(root)
        if typ == pvd:
            continue
        classes = gen_classes(pvd, typ, paths)
        make_module(pvd, typ, classes)
        typ_paths[typ] = paths
    # Build API documentation
    apidoc = gen_apidoc(pvd, typ_paths)
    make_apidoc(pvd, apidoc)
if __name__ == "__main__":
pvd = sys.argv[1]
if pvd not in cfg.PROVIDERS:
sys.exit()
generate(pvd)
| [
"os.path.splitext",
"os.path.basename",
"sys.exit",
"config.TITLE_WORDS.get",
"config.UPPER_WORDS.get"
] | [((530, 558), 'config.UPPER_WORDS.get', 'cfg.UPPER_WORDS.get', (['pvd', '()'], {}), '(pvd, ())\n', (549, 558), True, 'import config as cfg\n'), ((597, 625), 'config.TITLE_WORDS.get', 'cfg.TITLE_WORDS.get', (['pvd', '{}'], {}), '(pvd, {})\n', (616, 625), True, 'import config as cfg\n'), ((2847, 2869), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (2863, 2869), False, 'import os\n'), ((3224, 3234), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3232, 3234), False, 'import sys\n'), ((1068, 1090), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1084, 1090), False, 'import os\n'), ((1560, 1582), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1576, 1582), False, 'import os\n')] |
# Generated by Django 3.2.8 on 2022-05-31 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the help_text on
    # EapUser.is_active to Django's standard "active" wording.

    dependencies = [
        ("eap_api", "0004_auto_20220531_0935"),
    ]

    operations = [
        migrations.AlterField(
            model_name="eapuser",
            name="is_active",
            field=models.BooleanField(
                default=True,
                help_text=(
                    "Designates whether this user should be treated as active. "
                    "Unselect this instead of deleting accounts."
                ),
                verbose_name="active",
            ),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((340, 522), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (359, 522), False, 'from django.db import migrations, models\n')] |
import unittest
import numpy as np
from op_test import OpTest
def modified_huber_loss_forward(val):
    """Scalar modified Huber loss of a margin value.

    Piecewise: 0 for val >= 1, the squared hinge (1 - val)^2 on [-1, 1),
    and the linear branch -4 * val below -1.
    """
    if val >= 1:
        return 0
    if val >= -1:
        return (1 - val) ** 2
    return -4 * val
class TestModifiedHuberLossOp(OpTest):
    # Output/gradient check for the 'modified_huber_loss' operator against
    # the pure-Python reference above (OpTest harness — presumably
    # PaddlePaddle's; confirm against the op_test module).

    def setUp(self):
        self.op_type = 'modified_huber_loss'
        samples_num = 32
        # X: uniform scores in [-1, 1); Y: binary labels, both (samples_num, 1).
        self.inputs = {
            'X': np.random.uniform(-1, 1., (samples_num, 1)).astype('float32'),
            'Y': np.random.choice([0, 1], samples_num).reshape((samples_num, 1))
        }
        # X * (2Y - 1) converts {0, 1} labels into signed margins.
        product_res = self.inputs['X'] * (2 * self.inputs['Y'] - 1)
        loss = np.vectorize(modified_huber_loss_forward)(product_res)
        self.outputs = {
            'IntermediateVal': product_res,
            'Out': loss.reshape((samples_num, 1))
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.005)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.random.choice",
"numpy.vectorize",
"numpy.random.uniform"
] | [((1011, 1026), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1024, 1026), False, 'import unittest\n'), ((635, 676), 'numpy.vectorize', 'np.vectorize', (['modified_huber_loss_forward'], {}), '(modified_huber_loss_forward)\n', (647, 676), True, 'import numpy as np\n'), ((398, 442), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1.0)', '(samples_num, 1)'], {}), '(-1, 1.0, (samples_num, 1))\n', (415, 442), True, 'import numpy as np\n'), ((478, 515), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', 'samples_num'], {}), '([0, 1], samples_num)\n', (494, 515), True, 'import numpy as np\n')] |
"""The WaveBlocks Project
Plot some quadrature rules.
@author: <NAME>
@copyright: Copyright (C) 2010, 2011 <NAME>
@license: Modified BSD License
"""
from numpy import squeeze
from matplotlib.pyplot import *
from WaveBlocks import GaussHermiteQR
# Orders of the quadrature rules to plot.
tests = (2, 3, 4, 7, 32, 64, 128)

for I in tests:
    # Build the Gauss-Hermite rule of this order and plot its nodes vs.
    # weights as a stem diagram, saved as qr_order_<order>.png.
    Q = GaussHermiteQR(I)
    print(Q)

    N = Q.get_nodes()
    N = squeeze(N)
    W = Q.get_weights()
    W = squeeze(W)

    fig = figure()
    ax = fig.gca()
    ax.stem(N, W)
    ax.set_xlabel(r"$\gamma_i$")
    ax.set_ylabel(r"$\omega_i$")
    ax.set_title(r"Gauss-Hermite quadrature with $"+str(Q.get_number_nodes())+r"$ nodes")
    fig.savefig("qr_order_"+str(Q.get_order())+".png")
| [
"WaveBlocks.GaussHermiteQR",
"numpy.squeeze"
] | [((315, 332), 'WaveBlocks.GaussHermiteQR', 'GaussHermiteQR', (['I'], {}), '(I)\n', (329, 332), False, 'from WaveBlocks import GaussHermiteQR\n'), ((382, 392), 'numpy.squeeze', 'squeeze', (['N'], {}), '(N)\n', (389, 392), False, 'from numpy import squeeze\n'), ((438, 448), 'numpy.squeeze', 'squeeze', (['W'], {}), '(W)\n', (445, 448), False, 'from numpy import squeeze\n')] |
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy
ROWS = 64
COLS = 64
CHANNELS = 3
TRAIN_DIR = 'Train_data/'
TEST_DIR = 'Test_data/'
train_images = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]
test_images = [TEST_DIR+i for i in os.listdir(TEST_DIR)]
def read_image(file_path):
    # Load a color image with OpenCV (BGR channel order) and resize it to
    # the fixed network input size (ROWS x COLS) with bicubic interpolation.
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)
    return cv2.resize(img, (ROWS, COLS), interpolation=cv2.INTER_CUBIC)
def prepare_data(images):
    # Build the image tensor X of shape (m, ROWS, COLS, CHANNELS) and the
    # label row vector y of shape (1, m): 1 for "dog" filenames, 0 for
    # "cat" — and 0 for anything else, since y starts zeroed.
    m = len(images)
    X = np.zeros((m, ROWS, COLS, CHANNELS), dtype=np.uint8)
    y = np.zeros((1, m))
    for i, image_file in enumerate(images):
        X[i, :] = read_image(image_file)
        if 'dog' in image_file.lower():
            y[0, i] = 1
        elif 'cat' in image_file.lower():
            y[0, i] = 0
    return X, y
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-z))
train_set_x, train_set_y = prepare_data(train_images)
test_set_x, test_set_y = prepare_data(test_images)
train_set_x_flatten = train_set_x.reshape(train_set_x.shape[0], ROWS*COLS*CHANNELS).T
test_set_x_flatten = test_set_x.reshape(test_set_x.shape[0], -1).T
train_set_x = train_set_x_flatten/255
test_set_x = test_set_x_flatten/255
#train_set_x_flatten shape: (12288, 6002)
#train_set_y shape: (1, 6002)
def initialize_parameters(input_layer, hidden_layer, output_layer):
    """Initialize a 2-layer network: small random Gaussian weights
    (scaled by 0.01 to keep tanh in its linear regime) and zero biases.

    Returns a dict with keys "W1", "b1", "W2", "b2".
    """
    W1 = 0.01 * np.random.randn(hidden_layer, input_layer)
    b1 = np.zeros((hidden_layer, 1))
    W2 = 0.01 * np.random.randn(output_layer, hidden_layer)
    b2 = np.zeros((output_layer, 1))
    return {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
def forward_propagation(X, parameters):
    """One forward pass: tanh hidden layer, sigmoid output layer.

    Returns the output activations A2 and a cache of intermediates
    needed by backpropagation.
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
def compute_cost(A2, Y, parameters):
    """Binary cross-entropy cost averaged over the m examples.

    ``parameters`` is accepted for interface compatibility but unused.
    Returns a 0-d value (squeezed out of its array wrapper).
    """
    m = Y.shape[1]
    logprobs = Y * np.log(A2) + (1 - Y) * np.log(1 - A2)
    cost = -1 / m * np.sum(logprobs)
    return np.squeeze(cost)
def backward_propagation(parameters, cache, X, Y):
    """Backward pass for the 2-layer net: gradients of the cross-entropy
    cost w.r.t. W1, b1, W2, b2, averaged over the m examples."""
    m = X.shape[1]
    W2 = parameters["W2"]
    A1, A2 = cache["A1"], cache["A2"]

    # Output layer: sigmoid + cross-entropy simplifies to A2 - Y.
    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    # Hidden layer: tanh derivative is 1 - A1^2.
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
def update_parameters(parameters, grads, learning_rate=0.1):
    """One vanilla gradient-descent step: p <- p - learning_rate * dp
    for each of W1, b1, W2, b2. Returns the updated parameter dict."""
    return {
        "W1": parameters["W1"] - learning_rate * grads["dW1"],
        "b1": parameters["b1"] - learning_rate * grads["db1"],
        "W2": parameters["W2"] - learning_rate * grads["dW2"],
        "b2": parameters["b2"] - learning_rate * grads["db2"],
    }
def predict(parameters, X):
    """Hard 0/1 predictions: forward-propagate and threshold A2 at 0.5."""
    A2, _ = forward_propagation(X, parameters)
    Y_prediction = np.zeros((1, X.shape[1]))
    for i in range(A2.shape[1]):
        Y_prediction[0, i] = 1 if A2[0, i] > 0.5 else 0
    return Y_prediction
def nn_model(X_train, Y_train, X_test, Y_test, n_h, num_iterations = 1000, learning_rate = 0.05, print_cost=False):
    """Train the 2-layer network with full-batch gradient descent.

    Prints train/test accuracy and returns the learned parameters dict,
    augmented with the recorded cost history ("costs") and hidden size
    ("n_h").
    """
    n_x = X_train.shape[0]
    n_y = Y_train.shape[0]
    # Initialize parameters with nputs: "n_x, n_h, n_y"
    parameters = initialize_parameters(n_x, n_h, n_y)
    # Retrieve W1, b1, W2, b2
    # NOTE(review): these four locals are never used — update_parameters
    # returns fresh values each iteration.
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    b1 = parameters["b1"]
    b2 = parameters["b2"]
    costs = []
    for i in range(0, num_iterations):
        # Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
        A2, cache = forward_propagation(X_train, parameters)
        # Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
        cost = compute_cost(A2, Y_train, parameters)
        # Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
        grads = backward_propagation(parameters, cache, X_train, Y_train)
        # Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
        parameters = update_parameters(parameters, grads, learning_rate)
        # Print the cost every 200 iterations
        if print_cost and i % 200 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
        # Record the cost
        if i % 100 == 0:
            costs.append(cost)
    # Predict test/train set examples
    Y_prediction_test = predict(parameters, X_test)
    Y_prediction_train = predict(parameters, X_train)
    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    parameters.update({"costs": costs, "n_h": n_h})
    return parameters
#nn_model(train_set_x, train_set_y, test_set_x, test_set_y, n_h = 10, num_iterations = 3000, learning_rate = 0.05, print_cost = True)
# Sweep several hidden-layer sizes, train one model per size, then plot
# the recorded cost curves on a shared axis for comparison.
hidden_layer = [10, 50, 100, 200, 400]
models = {}
for i in hidden_layer:
    print ("hidden layer is: ",i)
    models[i] = nn_model(train_set_x, train_set_y, test_set_x, test_set_y, n_h = i, num_iterations = 10000, learning_rate = 0.05, print_cost = True)
    print ("-------------------------------------------------------")

for i in hidden_layer:
    # Costs were sampled every 100 iterations inside nn_model.
    plt.plot(np.squeeze(models[i]["costs"]), label= str(models[i]["n_h"]))

plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
| [
"numpy.abs",
"os.listdir",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"numpy.log",
"numpy.tanh",
"numpy.squeeze",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.random.randn",
"numpy.dot",
"cv2.resize",
"cv2.imread",
"matplotlib.pyplot.legend",
"matplotli... | [((6966, 6984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (6976, 6984), True, 'import matplotlib.pyplot as plt\n'), ((6985, 7020), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (hundreds)"""'], {}), "('iterations (hundreds)')\n", (6995, 7020), True, 'import matplotlib.pyplot as plt\n'), ((7031, 7074), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'shadow': '(True)'}), "(loc='upper center', shadow=True)\n", (7041, 7074), True, 'import matplotlib.pyplot as plt\n'), ((7130, 7140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7138, 7140), True, 'import matplotlib.pyplot as plt\n'), ((327, 366), 'cv2.imread', 'cv2.imread', (['file_path', 'cv2.IMREAD_COLOR'], {}), '(file_path, cv2.IMREAD_COLOR)\n', (337, 366), False, 'import cv2\n'), ((378, 438), 'cv2.resize', 'cv2.resize', (['img', '(ROWS, COLS)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (ROWS, COLS), interpolation=cv2.INTER_CUBIC)\n', (388, 438), False, 'import cv2\n'), ((494, 545), 'numpy.zeros', 'np.zeros', (['(m, ROWS, COLS, CHANNELS)'], {'dtype': 'np.uint8'}), '((m, ROWS, COLS, CHANNELS), dtype=np.uint8)\n', (502, 545), True, 'import numpy as np\n'), ((554, 570), 'numpy.zeros', 'np.zeros', (['(1, m)'], {}), '((1, m))\n', (562, 570), True, 'import numpy as np\n'), ((1503, 1530), 'numpy.zeros', 'np.zeros', (['(hidden_layer, 1)'], {}), '((hidden_layer, 1))\n', (1511, 1530), True, 'import numpy as np\n'), ((1702, 1729), 'numpy.zeros', 'np.zeros', (['(output_layer, 1)'], {}), '((output_layer, 1))\n', (1710, 1729), True, 'import numpy as np\n'), ((2189, 2200), 'numpy.tanh', 'np.tanh', (['Z1'], {}), '(Z1)\n', (2196, 2200), True, 'import numpy as np\n'), ((2753, 2769), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (2763, 2769), True, 'import numpy as np\n'), ((4309, 4334), 'numpy.zeros', 'np.zeros', (['(1, X.shape[1])'], {}), '((1, X.shape[1]))\n', (4317, 4334), True, 'import numpy as np\n'), 
((208, 229), 'os.listdir', 'os.listdir', (['TRAIN_DIR'], {}), '(TRAIN_DIR)\n', (218, 229), False, 'import os\n'), ((267, 287), 'os.listdir', 'os.listdir', (['TEST_DIR'], {}), '(TEST_DIR)\n', (277, 287), False, 'import os\n'), ((1405, 1447), 'numpy.random.randn', 'np.random.randn', (['hidden_layer', 'input_layer'], {}), '(hidden_layer, input_layer)\n', (1420, 1447), True, 'import numpy as np\n'), ((1603, 1646), 'numpy.random.randn', 'np.random.randn', (['output_layer', 'hidden_layer'], {}), '(output_layer, hidden_layer)\n', (1618, 1646), True, 'import numpy as np\n'), ((2160, 2173), 'numpy.dot', 'np.dot', (['W1', 'X'], {}), '(W1, X)\n', (2166, 2173), True, 'import numpy as np\n'), ((2210, 2224), 'numpy.dot', 'np.dot', (['W2', 'A1'], {}), '(W2, A1)\n', (2216, 2224), True, 'import numpy as np\n'), ((2647, 2663), 'numpy.sum', 'np.sum', (['logprobs'], {}), '(logprobs)\n', (2653, 2663), True, 'import numpy as np\n'), ((3174, 3191), 'numpy.dot', 'np.dot', (['dZ2', 'A1.T'], {}), '(dZ2, A1.T)\n', (3180, 3191), True, 'import numpy as np\n'), ((3207, 3241), 'numpy.sum', 'np.sum', (['dZ2'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ2, axis=1, keepdims=True)\n', (3213, 3241), True, 'import numpy as np\n'), ((3254, 3271), 'numpy.dot', 'np.dot', (['W2.T', 'dZ2'], {}), '(W2.T, dZ2)\n', (3260, 3271), True, 'import numpy as np\n'), ((3311, 3327), 'numpy.dot', 'np.dot', (['dZ1', 'X.T'], {}), '(dZ1, X.T)\n', (3317, 3327), True, 'import numpy as np\n'), ((3343, 3377), 'numpy.sum', 'np.sum', (['dZ1'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ1, axis=1, keepdims=True)\n', (3349, 3377), True, 'import numpy as np\n'), ((6903, 6933), 'numpy.squeeze', 'np.squeeze', (["models[i]['costs']"], {}), "(models[i]['costs'])\n", (6913, 6933), True, 'import numpy as np\n'), ((831, 841), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (837, 841), True, 'import numpy as np\n'), ((2581, 2591), 'numpy.log', 'np.log', (['A2'], {}), '(A2)\n', (2587, 2591), True, 'import numpy as np\n'), ((2610, 2624), 
'numpy.log', 'np.log', (['(1 - A2)'], {}), '(1 - A2)\n', (2616, 2624), True, 'import numpy as np\n'), ((3279, 3294), 'numpy.power', 'np.power', (['A1', '(2)'], {}), '(A1, 2)\n', (3287, 3294), True, 'import numpy as np\n'), ((6185, 6221), 'numpy.abs', 'np.abs', (['(Y_prediction_train - Y_train)'], {}), '(Y_prediction_train - Y_train)\n', (6191, 6221), True, 'import numpy as np\n'), ((6284, 6318), 'numpy.abs', 'np.abs', (['(Y_prediction_test - Y_test)'], {}), '(Y_prediction_test - Y_test)\n', (6290, 6318), True, 'import numpy as np\n')] |
"""
A module for generating a map with 10 nearest movies
"""
from data_reader import read_data, select_year
from locations_finder import coord_finder, find_nearest_movies
from map_generator import generate_map
def start():
    """Interactively build a map of the 10 nearest movie locations for a year.

    Prompts for a year and a "lat, long" location, renders map.html via
    generate_map, and returns a short hint message.
    """
    year = int(input("Please enter a year you would like to have a map for:"))
    # Coordinates are fractional — parse as float; int() rejected inputs
    # like "49.8, 24.0" with a ValueError.
    user_location = tuple(float(loc) for loc in input("Please enter your location (format: lat, long):").split(','))
    movie_data = read_data("smaller_locations.list")
    n_year_movies = select_year(movie_data, year)
    all_movie_locations = coord_finder(n_year_movies)
    nearest_movie_locations = find_nearest_movies(all_movie_locations, user_location)
    generate_map(nearest_movie_locations, user_location)
    return "Open map.html to enjoy the map."
if __name__=="__main__":
start() | [
"locations_finder.coord_finder",
"data_reader.read_data",
"map_generator.generate_map",
"data_reader.select_year",
"locations_finder.find_nearest_movies"
] | [((435, 470), 'data_reader.read_data', 'read_data', (['"""smaller_locations.list"""'], {}), "('smaller_locations.list')\n", (444, 470), False, 'from data_reader import read_data, select_year\n'), ((491, 520), 'data_reader.select_year', 'select_year', (['movie_data', 'year'], {}), '(movie_data, year)\n', (502, 520), False, 'from data_reader import read_data, select_year\n'), ((546, 573), 'locations_finder.coord_finder', 'coord_finder', (['n_year_movies'], {}), '(n_year_movies)\n', (558, 573), False, 'from locations_finder import coord_finder, find_nearest_movies\n'), ((604, 659), 'locations_finder.find_nearest_movies', 'find_nearest_movies', (['all_movie_locations', 'user_location'], {}), '(all_movie_locations, user_location)\n', (623, 659), False, 'from locations_finder import coord_finder, find_nearest_movies\n'), ((664, 716), 'map_generator.generate_map', 'generate_map', (['nearest_movie_locations', 'user_location'], {}), '(nearest_movie_locations, user_location)\n', (676, 716), False, 'from map_generator import generate_map\n')] |
import math
import re
from typing import List, Dict
from wbtools.db.generic import WBGenericDBManager
from wbtools.lib.nlp.common import EntityType
from wbtools.lib.nlp.literature_index.abstract_index import AbstractLiteratureIndex
ALL_VAR_REGEX = r'({designations}|m|p|It)(_)?([A-z]+)?([0-9]+)([a-zA-Z]{{1,4}}[0-9]*)?(\[[0-9]+\])?([a-zA-Z]{{1,4}}' \
r'[0-9]*)?(\[.+\])?'
NEW_VAR_REGEX = r'[\(\s]({designations}|m|p)([0-9]+)((?:{designations}|m|p|ts|gf|lf|d|sd|am|cs)[0-9]+)?[\)\s\[]'
STRAIN_REGEX = r'[\(\s,\.:;\'\"]({designations})([0-9]+)[\)\s\,\.:;\'\"]'
OPENING_REGEX_STR = "[\\.\\n\\t\\'\\/\\(\\)\\[\\]\\{\\}:;\\,\\!\\?> ]"
CLOSING_REGEX_STR = "[\\.\\n\\t\\'\\/\\(\\)\\[\\]\\{\\}:;\\,\\!\\?> ]"
OPENING_CLOSING_REGEXES = {
EntityType.VARIATION: [r'[\(\s](', r')[\)\s\[]'],
EntityType.STRAIN: [r'[\(\s,\.:;\'\"](', r')[\)\s,\.:;\'\"]']
}
class NttExtractor:
    """Extract entity mentions (variations, strains, species) from free text,
    using regexes built from WormBase curation data and optional tf-idf
    filtering against a literature index."""

    def __init__(self, db_manager: WBGenericDBManager = None):
        # NOTE(review): db_manager is dereferenced unconditionally below, so
        # despite the `= None` default a real DB manager is required.
        self.db_manager = db_manager
        # Lazily-populated cache of curated entity names, one slot per type.
        self.curated_entities = {}
        for entity_type in EntityType:
            self.curated_entities[entity_type] = None
        # Specialize the regex templates with the designations curated in the DB.
        allele_designations = self.db_manager.get_allele_designations()
        new_var_regex = NEW_VAR_REGEX.format(designations="|".join(allele_designations))
        strain_regex = STRAIN_REGEX.format(designations="|".join(self.db_manager.get_strain_designations()))
        self.entity_type_regex_map = {
            EntityType.VARIATION: new_var_regex,
            EntityType.STRAIN: strain_regex
        }

    def get_curated_entities(self, entity_type: EntityType, exclude_id_used_as_name: bool = True):
        """Return (and memoize) the curated entity names for entity_type."""
        if not self.curated_entities[entity_type]:
            self.curated_entities[entity_type] = self.db_manager.get_curated_entities(
                entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name)
        return self.curated_entities[entity_type]

    @staticmethod
    def match_entities_regex(text, regex):
        """Return all matches of regex in text, joining each match's groups.

        The text is padded with spaces so boundary-anchored patterns can
        match at the very start/end of the input."""
        res = re.findall(regex, " " + text + " ")
        return ["".join(entity_arr) for entity_arr in res]

    @staticmethod
    def count_keyword_matches_regex(keyword, text, case_sensitive: bool = True,
                                    match_uppercase: bool = False) -> int:
        """Count delimited occurrences of keyword in text; optionally also
        count the all-uppercase variant (skipped when the keyword is
        already all-uppercase)."""
        keyword = keyword if case_sensitive else keyword.upper()
        text = text if case_sensitive else text.upper()
        match_uppercase = False if keyword.upper() == keyword else match_uppercase
        if keyword in text or match_uppercase and keyword.upper() in text:
            try:
                match_count = len(re.findall(OPENING_REGEX_STR + re.escape(keyword) + CLOSING_REGEX_STR, text))
                if match_uppercase:
                    match_count += len(re.findall(OPENING_REGEX_STR + re.escape(keyword.upper()) +
                                                  CLOSING_REGEX_STR, text))
                return match_count
            except:
                # NOTE(review): bare except silently maps any failure to a
                # count of 0 — consider narrowing to re.error.
                pass
        return 0

    @staticmethod
    def is_entity_meaningful(entity_keywords: List[str], text, lit_index: AbstractLiteratureIndex,
                             match_uppercase: bool = False, min_num_occurrences: int = 1,
                             tfidf_threshold: float = 0.0) -> bool:
        """True when the keywords occur at least min_num_occurrences times in
        text and, if a positive tfidf_threshold is given, score above it."""
        min_num_occurrences = 1 if min_num_occurrences < 1 else min_num_occurrences
        raw_count = sum(NttExtractor.count_keyword_matches_regex(keyword=keyword, text=text,
                                                                  match_uppercase=match_uppercase) for
                        keyword in entity_keywords)
        return True if raw_count >= min_num_occurrences and (
                tfidf_threshold <= 0 or 0 < tfidf_threshold < NttExtractor.tfidf(entity_keywords=entity_keywords,
                                                                                  raw_count=raw_count,
                                                                                  lit_index=lit_index)) else False

    @staticmethod
    def tfidf(entity_keywords: List[str], raw_count, lit_index: AbstractLiteratureIndex) -> float:
        """tf-idf style score: raw_count * log(N / document frequency).

        A zero document frequency is smoothed to 0.5 to avoid division by zero."""
        doc_counter = sum(lit_index.count_matching_documents(keyword) for keyword in entity_keywords)
        idf = math.log(float(lit_index.num_documents()) / (doc_counter if doc_counter > 0 else 0.5))
        return raw_count * idf

    @staticmethod
    def extract_meaningful_entities_by_keywords(keywords: List[str], text: str,
                                                lit_index: AbstractLiteratureIndex = None,
                                                match_uppercase: bool = False, min_matches: int = 1,
                                                tfidf_threshold: float = 0.0,
                                                blacklist: List[str] = None) -> List[str]:
        """Filter keywords down to those that are meaningful mentions in text
        (per is_entity_meaningful) and not blacklisted."""
        blacklist = set(blacklist) if blacklist else set()
        return [keyword for keyword in set(keywords) if keyword not in blacklist and
                NttExtractor.is_entity_meaningful(
                    entity_keywords=[keyword], text=text, match_uppercase=match_uppercase, min_num_occurrences=min_matches,
                    tfidf_threshold=tfidf_threshold, lit_index=lit_index)]

    def extract_species_regex(self, text: str, taxon_id_name_map: Dict[str, List[str]] = None,
                              blacklist: List[str] = None,
                              whitelist: List[str] = None, min_matches: int = 1, tfidf_threshold: float = 0.0,
                              lit_index: AbstractLiteratureIndex = None):
        """Extract species names mentioned in text.

        Whitelisted taxa are always included, blacklisted ones always
        excluded; the rest must pass is_entity_meaningful."""
        blacklist = set(blacklist) if blacklist else set()
        whitelist = set(whitelist) if whitelist else set()
        if taxon_id_name_map is None:
            taxon_id_name_map = self.db_manager.get_taxon_id_names_map()
        return [regex_list[0].replace("\\", "") for taxon_id, regex_list in taxon_id_name_map.items() if
                taxon_id not in blacklist and (taxon_id in whitelist or
                                               NttExtractor.is_entity_meaningful(entity_keywords=regex_list, text=text,
                                                                                 match_uppercase=False,
                                                                                 lit_index=lit_index,
                                                                                 min_num_occurrences=min_matches,
                                                                                 tfidf_threshold=tfidf_threshold))]

    @staticmethod
    def get_entity_ids_from_names(entity_names: List[str], entity_name_id_map: Dict[str, str]):
        """Map entity names to a de-duplicated list of (id, name) pairs."""
        return list(set([(entity_name_id_map[entity_name], entity_name) for entity_name in entity_names]))

    def extract_all_entities_by_type(self, text: str, entity_type: EntityType, include_new: bool = True,
                                     match_curated: bool = False, exclude_curated: bool = False,
                                     match_entities: List[str] = None, exclude_entities: List[str] = None,
                                     exclude_id_used_as_name: bool = True):
        """
        extract entities mentioned in text

        Args:
            text (str): the input text
            entity_type (EntityType): the type of entities to extract
            include_new (bool): whether to include possibly new entities not yet in the curation database
            match_curated (bool): whether to extract curated entities obtained from the provided DB manager
            exclude_curated (bool): whether to remove curated entities obtained from the provided DB manager from the
                                    extracted ones
            match_entities (List[str]): match the provided entities
            exclude_entities (List[str]): exclude the provided entities from the results
            exclude_id_used_as_name (bool): do not extract entity ids when used as names in the DB

        Returns:
            list: the list of entities extracted from text
        """
        entities = set()
        if include_new:
            entities.update(NttExtractor.match_entities_regex(text, self.entity_type_regex_map[entity_type]))
        if match_curated:
            entities.update(NttExtractor.match_entities_regex(
                text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(self.db_manager.get_curated_entities(
                    entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name)) +
                OPENING_CLOSING_REGEXES[entity_type][1]))
        if exclude_curated:
            entities -= set(self.get_curated_entities(entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name))
        if match_entities:
            entities.update(NttExtractor.match_entities_regex(
                text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(match_entities) +
                OPENING_CLOSING_REGEXES[entity_type][1]))
        if exclude_entities:
            entities -= set(exclude_entities)
        return sorted(list(entities))
| [
"re.findall",
"re.escape"
] | [((1992, 2027), 're.findall', 're.findall', (['regex', "(' ' + text + ' ')"], {}), "(regex, ' ' + text + ' ')\n", (2002, 2027), False, 'import re\n'), ((2622, 2640), 're.escape', 're.escape', (['keyword'], {}), '(keyword)\n', (2631, 2640), False, 'import re\n')] |
"""Test motif.features.bitteli
"""
import unittest
import numpy as np
from motif.feature_extractors import bitteli
def array_equal(array1, array2):
    """True when the arrays match elementwise within 1e-7 absolute tolerance
    (plus np.isclose's default relative tolerance)."""
    return np.isclose(array1, array2, atol=1e-7).all()
class TestBitteliFeatures(unittest.TestCase):
    """Unit tests for motif's BitteliFeatures extractor: default parameter
    values, the feature vector on a constant-pitch contour, and the
    published feature-name list."""

    def setUp(self):
        self.ftr = bitteli.BitteliFeatures()

    def test_ref_hz(self):
        expected = 55.0
        actual = self.ftr.ref_hz
        self.assertEqual(expected, actual)

    def test_poly_degree(self):
        expected = 5
        actual = self.ftr.poly_degree
        self.assertEqual(expected, actual)

    def test_min_freq(self):
        expected = 3
        actual = self.ftr.min_freq
        self.assertEqual(expected, actual)

    def test_max_freq(self):
        expected = 30
        actual = self.ftr.max_freq
        self.assertEqual(expected, actual)

    def test_freq_step(self):
        expected = 0.1
        actual = self.ftr.freq_step
        self.assertEqual(expected, actual)

    def test_vibrato_threshold(self):
        expected = 0.25
        actual = self.ftr.vibrato_threshold
        self.assertEqual(expected, actual)

    def test_get_feature_vector(self):
        # A flat 440 Hz contour at constant salience: no vibrato, no
        # variation — almost all features should come out zero.
        times = np.linspace(0, 1, 2000)
        freqs_hz = 440.0 * np.ones((2000, ))
        salience = 0.5 * np.ones((2000, ))
        sample_rate = 2000
        actual = self.ftr.get_feature_vector(
            times, freqs_hz, salience, sample_rate
        )
        expected = np.array([
            0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            3600.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0,
            0.0, 0.5, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0
        ])
        self.assertTrue(array_equal(expected, actual))
        self.assertEqual(len(actual), len(self.ftr.feature_names))

    def test_get_feature_names(self):
        expected = [
            'vibrato rate',
            'vibrato extent',
            'vibrato coverage',
            'vibrato coverage - beginning',
            'vibrato coverage - middle',
            'vibrato coverage - end',
            '0th polynomial coeff - freq',
            '1st polynomial coeff - freq',
            '2nd polynomial coeff - freq',
            '3rd polynomial coeff - freq',
            '4th polynomial coeff - freq',
            '5th polynomial coeff - freq',
            'polynomial fit residual - freq',
            'overall model fit residual - freq',
            '0th polynomial coeff - salience',
            '1st polynomial coeff - salience',
            '2nd polynomial coeff - salience',
            '3rd polynomial coeff - salience',
            '4th polynomial coeff - salience',
            '5th polynomial coeff - salience',
            'polynomial fit residual - salience',
            'duration',
            'pitch stddev (cents)',
            'pitch range (cents)',
            'pitch average variation',
            'salience stdev',
            'salience range',
            'salience average variation'
        ]
        actual = self.ftr.feature_names
        self.assertEqual(expected, actual)

    def test_get_id(self):
        expected = 'bitteli'
        actual = self.ftr.get_id()
        self.assertEqual(expected, actual)
| [
"numpy.isclose",
"numpy.ones",
"motif.feature_extractors.bitteli.BitteliFeatures",
"numpy.array",
"numpy.linspace"
] | [((169, 207), 'numpy.isclose', 'np.isclose', (['array1', 'array2'], {'atol': '(1e-07)'}), '(array1, array2, atol=1e-07)\n', (179, 207), True, 'import numpy as np\n'), ((297, 322), 'motif.feature_extractors.bitteli.BitteliFeatures', 'bitteli.BitteliFeatures', ([], {}), '()\n', (320, 322), False, 'from motif.feature_extractors import bitteli\n'), ((1184, 1207), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(2000)'], {}), '(0, 1, 2000)\n', (1195, 1207), True, 'import numpy as np\n'), ((1449, 1612), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3600.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0])\n', (1457, 1612), True, 'import numpy as np\n'), ((1235, 1251), 'numpy.ones', 'np.ones', (['(2000,)'], {}), '((2000,))\n', (1242, 1251), True, 'import numpy as np\n'), ((1278, 1294), 'numpy.ones', 'np.ones', (['(2000,)'], {}), '((2000,))\n', (1285, 1294), True, 'import numpy as np\n')] |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta
class RegularDateTimeRule(object):
"""
RegularDateTimeRule is a helper class for TimeEvents. It has a convenience method for calculating
next trigger time for events which occur on certain date/time on regular basis (e.g. each day at 9:30,
each first day of a month, etc.).
"""
def __init__(self, year: int = None, month: int = None, day: int = None, weekday: int = None, hour: int = None,
minute: int = None, second: int = None, microsecond: int = None):
self.trigger_time = RelativeDelta(
year=year, month=month, day=day, weekday=weekday, hour=hour, minute=minute,
second=second, microsecond=microsecond
)
def next_trigger_time(self, now: datetime) -> datetime:
next_trigger_time = now + self.trigger_time
# check if next_trigger_time is in the past and if it is, it needs to be adjusted so that it's in the future
if next_trigger_time <= now:
next_trigger_time = self._get_next_trigger_time_after(next_trigger_time)
return next_trigger_time
def _get_next_trigger_time_after(self, start_time: datetime):
# calculate proper adjustment (time shift):
# if the month is important for the trigger time, than we should go to the next year
# for getting the next occurrence, if it is unimportant but day is important,
# then we should go to the next month etc.
time_adjustment = None
if self.trigger_time.year is not None:
# nothing can be done if the year is important. No way of getting next occurrence (there will never be
# the same year again)
raise ArithmeticError(
"Cannot get next occurrence of the event with `year` specified "
"(there will never be the same year again)."
)
elif self.trigger_time.month is not None:
time_adjustment = RelativeDelta(years=1)
elif self.trigger_time.day is not None:
time_adjustment = RelativeDelta(months=1)
elif self.trigger_time.weekday is not None:
time_adjustment = RelativeDelta(weeks=1)
elif self.trigger_time.hour is not None:
time_adjustment = RelativeDelta(days=1)
elif self.trigger_time.minute is not None:
time_adjustment = RelativeDelta(hours=1)
elif self.trigger_time.second is not None:
time_adjustment = RelativeDelta(minutes=1)
elif self.trigger_time.microsecond is not None:
time_adjustment = RelativeDelta(seconds=1)
next_trigger_time = start_time + time_adjustment
return next_trigger_time
| [
"qf_lib.common.utils.dateutils.relative_delta.RelativeDelta"
] | [((1293, 1426), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'year': 'year', 'month': 'month', 'day': 'day', 'weekday': 'weekday', 'hour': 'hour', 'minute': 'minute', 'second': 'second', 'microsecond': 'microsecond'}), '(year=year, month=month, day=day, weekday=weekday, hour=hour,\n minute=minute, second=second, microsecond=microsecond)\n', (1306, 1426), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((2693, 2715), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'years': '(1)'}), '(years=1)\n', (2706, 2715), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((2794, 2817), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'months': '(1)'}), '(months=1)\n', (2807, 2817), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((2900, 2922), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'weeks': '(1)'}), '(weeks=1)\n', (2913, 2922), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((3002, 3023), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'days': '(1)'}), '(days=1)\n', (3015, 3023), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((3105, 3127), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3118, 3127), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((3209, 3233), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3222, 3233), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n'), ((3320, 3344), 'qf_lib.common.utils.dateutils.relative_delta.RelativeDelta', 'RelativeDelta', ([], {'seconds': '(1)'}), 
'(seconds=1)\n', (3333, 3344), False, 'from qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\n')] |
# Generated by Django 2.2.6 on 2019-10-16 02:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='title')),
('content', models.TextField(blank=True, null=True)),
('author', models.ForeignKey(blank=True, help_text='作者id', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='author', to='blog.Author', verbose_name='作者')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='UserFavorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('blog', models.ForeignKey(blank=True, help_text='博客id', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='blog', to='blog.Blog', verbose_name='博客')),
('user', models.ForeignKey(help_text='收藏人id', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户收藏',
'verbose_name_plural': '用户收藏',
'ordering': ['id'],
'unique_together': {('user', 'blog')},
},
),
]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((435, 528), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (451, 528), False, 'from django.db import migrations, models\n'), ((558, 597), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (578, 597), False, 'from django.db import migrations, models\n'), ((632, 667), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (652, 667), False, 'from django.db import migrations, models\n'), ((695, 726), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (711, 726), False, 'from django.db import migrations, models\n'), ((755, 788), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (772, 788), False, 'from django.db import migrations, models\n'), ((991, 1084), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1007, 1084), False, 'from django.db import migrations, models\n'), ((1114, 1153), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1134, 1153), False, 'from django.db import migrations, models\n'), ((1188, 1223), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1208, 1223), False, 'from django.db import 
migrations, models\n'), ((1252, 1329), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""title"""'}), "(blank=True, max_length=255, null=True, verbose_name='title')\n", (1268, 1329), False, 'from django.db import migrations, models\n'), ((1360, 1399), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1376, 1399), False, 'from django.db import migrations, models\n'), ((1429, 1602), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""作者id"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""author"""', 'to': '"""blog.Author"""', 'verbose_name': '"""作者"""'}), "(blank=True, help_text='作者id', null=True, on_delete=django\n .db.models.deletion.SET_NULL, related_name='author', to='blog.Author',\n verbose_name='作者')\n", (1446, 1602), False, 'from django.db import migrations, models\n'), ((1804, 1897), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1820, 1897), False, 'from django.db import migrations, models\n'), ((1927, 1966), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1947, 1966), False, 'from django.db import migrations, models\n'), ((2001, 2036), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2021, 2036), False, 'from django.db import migrations, models\n'), ((2064, 2233), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""博客id"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""blog"""', 'to': '"""blog.Blog"""', 
'verbose_name': '"""博客"""'}), "(blank=True, help_text='博客id', null=True, on_delete=django\n .db.models.deletion.SET_NULL, related_name='blog', to='blog.Blog',\n verbose_name='博客')\n", (2081, 2233), False, 'from django.db import migrations, models\n'), ((2252, 2398), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""收藏人id"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""用户"""'}), "(help_text='收藏人id', null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')\n", (2269, 2398), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""
# 3D Image Data Synthesis.
# Copyright (C) 2021 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Liceense at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please refer to the documentation for more information about the software
# as well as for installation instructions.
#
"""
import os
import glob
import csv
import numpy as np
def get_files(folders, data_root='', descriptor='', filetype='tif'):
filelist = []
for folder in folders:
files = glob.glob(os.path.join(data_root, folder, '*'+descriptor+'*.'+filetype))
filelist.extend([os.path.join(folder, os.path.split(f)[-1]) for f in files])
return filelist
def read_csv(list_path, data_root=''):
filelist = []
with open(list_path, 'r') as f:
reader = csv.reader(f, delimiter=';')
for row in reader:
if len(row)==0 or np.sum([len(r) for r in row])==0: continue
row = [os.path.join(data_root, r) for r in row]
filelist.append(row)
return filelist
def create_csv(data_list, save_path='list_folder/experiment_name', test_split=0.2, val_split=0.1, shuffle=False):
if shuffle:
np.random.shuffle(data_list)
# Get number of files for each split
num_files = len(data_list)
num_test_files = int(test_split*num_files)
num_val_files = int((num_files-num_test_files)*val_split)
num_train_files = num_files - num_test_files - num_val_files
# Get file indices
file_idx = np.arange(num_files)
# Save csv files
if num_test_files > 0:
test_idx = sorted(np.random.choice(file_idx, size=num_test_files, replace=False))
with open(save_path+'_test.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in test_idx:
writer.writerow(data_list[idx])
else:
test_idx = []
if num_val_files > 0:
val_idx = sorted(np.random.choice(list(set(file_idx)-set(test_idx)), size=num_val_files, replace=False))
with open(save_path+'_val.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in val_idx:
writer.writerow(data_list[idx])
else:
val_idx = []
if num_train_files > 0:
train_idx = sorted(list(set(file_idx) - set(test_idx) - set(val_idx)))
with open(save_path+'_train.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in train_idx:
writer.writerow(data_list[idx]) | [
"numpy.random.choice",
"csv.writer",
"os.path.join",
"os.path.split",
"csv.reader",
"numpy.arange",
"numpy.random.shuffle"
] | [((2046, 2066), 'numpy.arange', 'np.arange', (['num_files'], {}), '(num_files)\n', (2055, 2066), True, 'import numpy as np\n'), ((1316, 1344), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (1326, 1344), False, 'import csv\n'), ((1723, 1751), 'numpy.random.shuffle', 'np.random.shuffle', (['data_list'], {}), '(data_list)\n', (1740, 1751), True, 'import numpy as np\n'), ((997, 1064), 'os.path.join', 'os.path.join', (['data_root', 'folder', "('*' + descriptor + '*.' + filetype)"], {}), "(data_root, folder, '*' + descriptor + '*.' + filetype)\n", (1009, 1064), False, 'import os\n'), ((2146, 2208), 'numpy.random.choice', 'np.random.choice', (['file_idx'], {'size': 'num_test_files', 'replace': '(False)'}), '(file_idx, size=num_test_files, replace=False)\n', (2162, 2208), True, 'import numpy as np\n'), ((2284, 2313), 'csv.writer', 'csv.writer', (['fh'], {'delimiter': '""";"""'}), "(fh, delimiter=';')\n", (2294, 2313), False, 'import csv\n'), ((2648, 2677), 'csv.writer', 'csv.writer', (['fh'], {'delimiter': '""";"""'}), "(fh, delimiter=';')\n", (2658, 2677), False, 'import csv\n'), ((2976, 3005), 'csv.writer', 'csv.writer', (['fh'], {'delimiter': '""";"""'}), "(fh, delimiter=';')\n", (2986, 3005), False, 'import csv\n'), ((1464, 1490), 'os.path.join', 'os.path.join', (['data_root', 'r'], {}), '(data_root, r)\n', (1476, 1490), False, 'import os\n'), ((1106, 1122), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (1119, 1122), False, 'import os\n')] |
import errno
import os.path
import yaml
from parameterized import param, parameterized
from salt.exceptions import CommandExecutionError
from salttesting.mixins import LoaderModuleMockMixin
from salttesting.unit import TestCase
from salttesting.mock import MagicMock, mock_open, patch
import metalk8s_solutions
from tests.unit import utils
YAML_TESTS_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"files", "test_metalk8s_solutions.yaml"
)
with open(YAML_TESTS_FILE) as fd:
YAML_TESTS_CASES = yaml.safe_load(fd)
class Metalk8sSolutionsTestCase(TestCase, LoaderModuleMockMixin):
"""
TestCase for `metalk8s_solutions` module
"""
loader_module = metalk8s_solutions
def test_virtual_success(self):
"""
Tests the return of `__virtual__` function, success
"""
dict_patch = {'metalk8s.archive_info_from_iso': MagicMock()}
with patch.dict(metalk8s_solutions.__salt__, dict_patch):
self.assertEqual(
metalk8s_solutions.__virtual__(), 'metalk8s_solutions'
)
def test_virtual_missing_metalk8s_module(self):
"""
Tests the return of `__virtual__` function,
when metalk8s module is missing
"""
self.assertEqual(
metalk8s_solutions.__virtual__(),
(False, "Failed to load 'metalk8s' module.")
)
@utils.parameterized_from_cases(YAML_TESTS_CASES["read_config"])
def test_read_config(self, create=False, config=None, result=None,
raises=False):
"""
Tests the return of `read_config` function
"""
open_mock = mock_open(read_data=config)
if not config:
open_mock.side_effect = IOError(
errno.ENOENT, "No such file or directory"
)
with patch("metalk8s_solutions.open", open_mock), \
patch("metalk8s_solutions._write_config_file", MagicMock()):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
result,
metalk8s_solutions.read_config
)
else:
if create:
self.assertEqual(
metalk8s_solutions.read_config(create),
result
)
else:
self.assertEqual(
metalk8s_solutions.read_config(),
result
)
@utils.parameterized_from_cases(YAML_TESTS_CASES["configure_archive"])
def test_configure_archive(self, archive, removed=None, config=None,
result=None, raises=False):
"""
Tests the return of `configure_archive` function
"""
def _write_config_file_mock(new_config):
if raises:
raise CommandExecutionError(
"Failed to write Solutions config file"
)
config = new_config
read_config_mock = MagicMock(return_value=config)
write_config_file_mock = MagicMock(side_effect=_write_config_file_mock)
with patch("metalk8s_solutions.read_config", read_config_mock), \
patch("metalk8s_solutions._write_config_file",
write_config_file_mock):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
"Failed to write Solutions config file",
metalk8s_solutions.configure_archive,
archive
)
else:
self.assertEqual(
metalk8s_solutions.configure_archive(
archive, removed=removed
),
True
)
self.assertEqual(config, result)
@utils.parameterized_from_cases(YAML_TESTS_CASES["activate_solution"])
def test_activate_solution(self, solution, version=None, config=None,
result=None, available=None, raises=False):
"""
Tests the return of `activate_solution` function
"""
def _yaml_safe_dump_mock(data, _):
if raises:
raise Exception("Something bad happened! :/")
config = data
list_available_mock = MagicMock(return_value=available or {})
read_config_mock = MagicMock(return_value=config)
yaml_safe_dump_mock = MagicMock(side_effect=_yaml_safe_dump_mock)
with patch("metalk8s_solutions.list_available", list_available_mock), \
patch("metalk8s_solutions.read_config", read_config_mock), \
patch("metalk8s_solutions.open", mock_open()), \
patch("yaml.safe_dump", yaml_safe_dump_mock):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
result,
metalk8s_solutions.activate_solution,
solution,
version
)
else:
if version:
self.assertEqual(
metalk8s_solutions.activate_solution(
solution, version
),
True
)
else:
self.assertEqual(
metalk8s_solutions.activate_solution(solution),
True
)
self.assertEqual(config, result)
@utils.parameterized_from_cases(YAML_TESTS_CASES["deactivate_solution"])
def test_deactivate_solution(self, solution, config=None, raises=False,
result=None):
"""
Tests the return of `deactivate_solution` function
"""
def _yaml_safe_dump_mock(data, _):
if raises:
raise Exception("Something bad happened! :/")
config = data
read_config_mock = MagicMock(return_value=config)
yaml_safe_dump_mock = MagicMock(side_effect=_yaml_safe_dump_mock)
with patch("metalk8s_solutions.read_config", read_config_mock), \
patch("yaml.safe_dump", yaml_safe_dump_mock), \
patch("metalk8s_solutions.open", mock_open()):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
"Failed to write Solutions config file",
metalk8s_solutions.deactivate_solution,
solution
)
else:
self.assertEqual(
metalk8s_solutions.deactivate_solution(solution),
True
)
self.assertEqual(config, result)
@utils.parameterized_from_cases(YAML_TESTS_CASES["list_solution_images"])
def test_list_solution_images(self, images=None, result=None,
raises=False):
"""
Tests the return of `list_solution_images` function
"""
mountpoint = '/srv/scality/my-solution/'
image_dir_prefix_len = len(os.path.join(mountpoint, 'images'))
if not images:
images = {}
def _get_image_name_and_version(path):
version = None
basename = path[image_dir_prefix_len:].lstrip('/')
try:
name, version = basename.split('/')
except ValueError:
name = basename
return name, version
def _path_isdir_mock(path):
name, version = _get_image_name_and_version(path)
return images and (not name or images[name]) and \
(not version or images[name][version])
def _listdir_mock(path):
name, version = _get_image_name_and_version(path)
if not name:
return images.keys()
return images[name].keys()
path_isdir_mock = MagicMock(side_effect=_path_isdir_mock)
listdir_mock = MagicMock(side_effect=_listdir_mock)
with patch("os.path.isdir", path_isdir_mock), \
patch("os.listdir", listdir_mock):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
result,
metalk8s_solutions.list_solution_images,
mountpoint
)
else:
self.assertItemsEqual(
metalk8s_solutions.list_solution_images(mountpoint),
result
)
@utils.parameterized_from_cases(YAML_TESTS_CASES["read_solution_config"])
def test_read_solution_config(self, config=None, result=None,
raises=False):
"""
Tests the return of `read_solution_config` function
"""
path_isfile_mock = MagicMock(return_value=config is not None)
list_solution_images_mock = MagicMock(return_value=[])
fopen_mock = mock_open(read_data=config)
read_solution_config_args = [
'/srv/scality/my-solution', 'my-solution', '1.0.0'
]
with patch("os.path.isfile", path_isfile_mock), \
patch("salt.utils.files.fopen", fopen_mock), \
patch("metalk8s_solutions.list_solution_images",
list_solution_images_mock):
if raises:
self.assertRaisesRegexp(
CommandExecutionError,
result,
metalk8s_solutions.read_solution_config,
*read_solution_config_args
)
else:
self.assertEqual(
metalk8s_solutions.read_solution_config(
*read_solution_config_args
),
result
)
@utils.parameterized_from_cases(YAML_TESTS_CASES["list_available"])
def test_list_available(self, mountpoints=None, archive_infos=None,
result=None, raises=False):
"""
Tests the return of `list_available` function
"""
def _archive_info_from_tree(path):
if archive_infos:
return archive_infos
raise Exception('Path has no "product.txt"')
if not mountpoints:
mountpoints = {}
if not result:
result = {}
mount_active_mock = MagicMock(return_value=mountpoints)
archive_info_from_tree_mock = MagicMock(
side_effect=_archive_info_from_tree
)
read_solution_config_mock = MagicMock(return_value=None)
salt_dict_patch = {
'mount.active': mount_active_mock,
'metalk8s.archive_info_from_tree': archive_info_from_tree_mock,
}
with patch.dict(metalk8s_solutions.__salt__, salt_dict_patch), \
patch("metalk8s_solutions.read_solution_config",
read_solution_config_mock):
if raises:
self.assertRaisesRegexp(
Exception,
'Path has no "product.txt"',
metalk8s_solutions.list_available
)
else:
self.assertEqual(
metalk8s_solutions.list_available(),
result
)
| [
"salttesting.mock.patch.dict",
"metalk8s_solutions.__virtual__",
"salttesting.mock.mock_open",
"metalk8s_solutions.list_solution_images",
"metalk8s_solutions.read_solution_config",
"metalk8s_solutions.read_config",
"metalk8s_solutions.deactivate_solution",
"salttesting.mock.MagicMock",
"metalk8s_sol... | [((530, 548), 'yaml.safe_load', 'yaml.safe_load', (['fd'], {}), '(fd)\n', (544, 548), False, 'import yaml\n'), ((1402, 1465), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['read_config']"], {}), "(YAML_TESTS_CASES['read_config'])\n", (1432, 1465), False, 'from tests.unit import utils\n'), ((2559, 2628), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['configure_archive']"], {}), "(YAML_TESTS_CASES['configure_archive'])\n", (2589, 2628), False, 'from tests.unit import utils\n'), ((3945, 4014), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['activate_solution']"], {}), "(YAML_TESTS_CASES['activate_solution'])\n", (3975, 4014), False, 'from tests.unit import utils\n'), ((5665, 5736), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['deactivate_solution']"], {}), "(YAML_TESTS_CASES['deactivate_solution'])\n", (5695, 5736), False, 'from tests.unit import utils\n'), ((6927, 6999), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['list_solution_images']"], {}), "(YAML_TESTS_CASES['list_solution_images'])\n", (6957, 6999), False, 'from tests.unit import utils\n'), ((8745, 8817), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['read_solution_config']"], {}), "(YAML_TESTS_CASES['read_solution_config'])\n", (8775, 8817), False, 'from tests.unit import utils\n'), ((10046, 10112), 'tests.unit.utils.parameterized_from_cases', 'utils.parameterized_from_cases', (["YAML_TESTS_CASES['list_available']"], {}), "(YAML_TESTS_CASES['list_available'])\n", (10076, 10112), False, 'from tests.unit import utils\n'), ((1672, 1699), 'salttesting.mock.mock_open', 'mock_open', ([], {'read_data': 'config'}), '(read_data=config)\n', (1681, 1699), False, 'from salttesting.mock 
import MagicMock, mock_open, patch\n'), ((3097, 3127), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': 'config'}), '(return_value=config)\n', (3106, 3127), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((3161, 3207), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_write_config_file_mock'}), '(side_effect=_write_config_file_mock)\n', (3170, 3207), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4430, 4469), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': '(available or {})'}), '(return_value=available or {})\n', (4439, 4469), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4497, 4527), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': 'config'}), '(return_value=config)\n', (4506, 4527), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4558, 4601), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_yaml_safe_dump_mock'}), '(side_effect=_yaml_safe_dump_mock)\n', (4567, 4601), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((6125, 6155), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': 'config'}), '(return_value=config)\n', (6134, 6155), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((6186, 6229), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_yaml_safe_dump_mock'}), '(side_effect=_yaml_safe_dump_mock)\n', (6195, 6229), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((8111, 8150), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_path_isdir_mock'}), '(side_effect=_path_isdir_mock)\n', (8120, 8150), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((8174, 8210), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_listdir_mock'}), '(side_effect=_listdir_mock)\n', (8183, 8210), False, 'from salttesting.mock import MagicMock, mock_open, 
patch\n'), ((9044, 9086), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': '(config is not None)'}), '(return_value=config is not None)\n', (9053, 9086), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((9123, 9149), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (9132, 9149), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((9171, 9198), 'salttesting.mock.mock_open', 'mock_open', ([], {'read_data': 'config'}), '(read_data=config)\n', (9180, 9198), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((10620, 10655), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': 'mountpoints'}), '(return_value=mountpoints)\n', (10629, 10655), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((10694, 10740), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'side_effect': '_archive_info_from_tree'}), '(side_effect=_archive_info_from_tree)\n', (10703, 10740), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((10799, 10827), 'salttesting.mock.MagicMock', 'MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (10808, 10827), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((894, 905), 'salttesting.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (903, 905), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((920, 971), 'salttesting.mock.patch.dict', 'patch.dict', (['metalk8s_solutions.__salt__', 'dict_patch'], {}), '(metalk8s_solutions.__salt__, dict_patch)\n', (930, 971), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((1295, 1327), 'metalk8s_solutions.__virtual__', 'metalk8s_solutions.__virtual__', ([], {}), '()\n', (1325, 1327), False, 'import metalk8s_solutions\n'), ((1854, 1897), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.open"""', 'open_mock'], {}), "('metalk8s_solutions.open', open_mock)\n", (1859, 
1897), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((3222, 3279), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.read_config"""', 'read_config_mock'], {}), "('metalk8s_solutions.read_config', read_config_mock)\n", (3227, 3279), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((3299, 3369), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions._write_config_file"""', 'write_config_file_mock'], {}), "('metalk8s_solutions._write_config_file', write_config_file_mock)\n", (3304, 3369), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4616, 4679), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.list_available"""', 'list_available_mock'], {}), "('metalk8s_solutions.list_available', list_available_mock)\n", (4621, 4679), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4699, 4756), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.read_config"""', 'read_config_mock'], {}), "('metalk8s_solutions.read_config', read_config_mock)\n", (4704, 4756), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((4841, 4885), 'salttesting.mock.patch', 'patch', (['"""yaml.safe_dump"""', 'yaml_safe_dump_mock'], {}), "('yaml.safe_dump', yaml_safe_dump_mock)\n", (4846, 4885), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((6244, 6301), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.read_config"""', 'read_config_mock'], {}), "('metalk8s_solutions.read_config', read_config_mock)\n", (6249, 6301), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((6321, 6365), 'salttesting.mock.patch', 'patch', (['"""yaml.safe_dump"""', 'yaml_safe_dump_mock'], {}), "('yaml.safe_dump', yaml_safe_dump_mock)\n", (6326, 6365), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((8225, 8264), 'salttesting.mock.patch', 'patch', (['"""os.path.isdir"""', 'path_isdir_mock'], {}), 
"('os.path.isdir', path_isdir_mock)\n", (8230, 8264), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((8284, 8317), 'salttesting.mock.patch', 'patch', (['"""os.listdir"""', 'listdir_mock'], {}), "('os.listdir', listdir_mock)\n", (8289, 8317), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((9324, 9365), 'salttesting.mock.patch', 'patch', (['"""os.path.isfile"""', 'path_isfile_mock'], {}), "('os.path.isfile', path_isfile_mock)\n", (9329, 9365), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((9385, 9428), 'salttesting.mock.patch', 'patch', (['"""salt.utils.files.fopen"""', 'fopen_mock'], {}), "('salt.utils.files.fopen', fopen_mock)\n", (9390, 9428), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((9448, 9523), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.list_solution_images"""', 'list_solution_images_mock'], {}), "('metalk8s_solutions.list_solution_images', list_solution_images_mock)\n", (9453, 9523), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((11003, 11059), 'salttesting.mock.patch.dict', 'patch.dict', (['metalk8s_solutions.__salt__', 'salt_dict_patch'], {}), '(metalk8s_solutions.__salt__, salt_dict_patch)\n', (11013, 11059), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((11079, 11154), 'salttesting.mock.patch', 'patch', (['"""metalk8s_solutions.read_solution_config"""', 'read_solution_config_mock'], {}), "('metalk8s_solutions.read_solution_config', read_solution_config_mock)\n", (11084, 11154), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((1019, 1051), 'metalk8s_solutions.__virtual__', 'metalk8s_solutions.__virtual__', ([], {}), '()\n', (1049, 1051), False, 'import metalk8s_solutions\n'), ((1964, 1975), 'salttesting.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1973, 1975), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((2936, 2998), 
'salt.exceptions.CommandExecutionError', 'CommandExecutionError', (['"""Failed to write Solutions config file"""'], {}), "('Failed to write Solutions config file')\n", (2957, 2998), False, 'from salt.exceptions import CommandExecutionError\n'), ((4809, 4820), 'salttesting.mock.mock_open', 'mock_open', ([], {}), '()\n', (4818, 4820), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((6418, 6429), 'salttesting.mock.mock_open', 'mock_open', ([], {}), '()\n', (6427, 6429), False, 'from salttesting.mock import MagicMock, mock_open, patch\n'), ((3737, 3799), 'metalk8s_solutions.configure_archive', 'metalk8s_solutions.configure_archive', (['archive'], {'removed': 'removed'}), '(archive, removed=removed)\n', (3773, 3799), False, 'import metalk8s_solutions\n'), ((6779, 6827), 'metalk8s_solutions.deactivate_solution', 'metalk8s_solutions.deactivate_solution', (['solution'], {}), '(solution)\n', (6817, 6827), False, 'import metalk8s_solutions\n'), ((8641, 8692), 'metalk8s_solutions.list_solution_images', 'metalk8s_solutions.list_solution_images', (['mountpoint'], {}), '(mountpoint)\n', (8680, 8692), False, 'import metalk8s_solutions\n'), ((9880, 9947), 'metalk8s_solutions.read_solution_config', 'metalk8s_solutions.read_solution_config', (['*read_solution_config_args'], {}), '(*read_solution_config_args)\n', (9919, 9947), False, 'import metalk8s_solutions\n'), ((11466, 11501), 'metalk8s_solutions.list_available', 'metalk8s_solutions.list_available', ([], {}), '()\n', (11499, 11501), False, 'import metalk8s_solutions\n'), ((2289, 2327), 'metalk8s_solutions.read_config', 'metalk8s_solutions.read_config', (['create'], {}), '(create)\n', (2319, 2327), False, 'import metalk8s_solutions\n'), ((2466, 2498), 'metalk8s_solutions.read_config', 'metalk8s_solutions.read_config', ([], {}), '()\n', (2496, 2498), False, 'import metalk8s_solutions\n'), ((5264, 5319), 'metalk8s_solutions.activate_solution', 'metalk8s_solutions.activate_solution', (['solution', 'version'], 
{}), '(solution, version)\n', (5300, 5319), False, 'import metalk8s_solutions\n'), ((5510, 5556), 'metalk8s_solutions.activate_solution', 'metalk8s_solutions.activate_solution', (['solution'], {}), '(solution)\n', (5546, 5556), False, 'import metalk8s_solutions\n')] |
import pytest
from unittest.mock import MagicMock
import logging
from ecs_crd.canaryReleaseInfos import CanaryReleaseInfos
from ecs_crd.prepareDeploymentContainerDefinitionsStep import PrepareDeploymentContainerDefinitionsStep
from ecs_crd.canaryReleaseInfos import ScaleInfos
# Shared fixtures for every test below: a throw-away logger, minimal canary
# release infos, and the step instance under test.
# NOTE(review): logging.Logger('mock') bypasses logging.getLogger(); acceptable
# here since the logger is only a dummy collaborator.
logger = logging.Logger('mock')
infos = CanaryReleaseInfos(action='test')
step = PrepareDeploymentContainerDefinitionsStep(infos, logger)
def test_process_container_name_valid():
    """_process_container_name falls back to 'default' and honours an explicit name.

    Fix: the original expectations were bare ``==`` comparisons without
    ``assert``, so the test could never fail.
    """
    # default (no name in the source definition)
    source = {}
    target = {}
    step._process_container_name(source, target)
    assert target['Name'] == 'default'
    # an explicit name is copied verbatim
    source = {'name': 'test'}
    target = {}
    step._process_container_name(source, target)
    assert target['Name'] == source['name']
def test_process_container_image_valid():
    """_process_container_image derives an ECR URI by default and honours an explicit image.

    Fix: this function was previously (mis)named ``test_process_container_name_valid``,
    shadowing the identically named test above so pytest only collected one of them.
    """
    # default: the image URI is built from the deployment infos
    source = {}
    target = {}
    step.infos.account_id = '123456789'
    step.infos.region = 'eu-west-3'
    step.infos.service_name = 'service'
    step.infos.service_version = 'latest'
    step._process_container_image(source, target)
    assert target['Image'] == '123456789.dkr.ecr.eu-west-3.amazonaws.com/service:latest'
    # an explicit image wins over the generated URI
    source = {'image': 'test'}
    target = {}
    step._process_container_image(source, target)
    assert target['Image'] == source['image']
def test_process_container_cpu_invalid():
    """A non-numeric 'cpu' value must raise a ValueError."""
    source = {'cpu': 'a'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_cpu(source, target)
def test_process_container_cpu_valid():
    """Cpu defaults to 128 and is taken from the source when provided."""
    # default value
    source = {}
    target = {}
    step._process_container_cpu(source, target)
    assert target['Cpu'] == 128
    # explicit value
    source = {'cpu': 256}
    target = {}
    step._process_container_cpu(source, target)
    assert target['Cpu'] == source['cpu']
def test_process_container_entry_point_valid():
    """A list entry point is joined with commas."""
    source = {'entry_point': ['a', 'b']}
    target = {}
    step._process_container_entry_point(source, target)
    assert target['EntryPoint'] == 'a,b'
def test_process_container_entry_point_invalid():
    """A non-list entry point must raise a ValueError."""
    source = {'entry_point': 'a'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_entry_point(source, target)
def test_process_container_command_valid():
    """A list command is copied element by element."""
    source = {'command': ['a', 'b']}
    target = {}
    step._process_container_command(source, target)
    assert len(target['Command']) == 2
    assert target['Command'][0] == 'a'
    assert target['Command'][1] == 'b'
def test_process_container_command_invalid():
    """A non-list command must raise a ValueError."""
    source = {'command': 'b'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_command(source, target)
def test_process_container_dns_search_domains_valid():
    """A list of DNS search domains is copied verbatim."""
    source = {'dns_search_domains': ['a', 'b']}
    target = {}
    step._process_container_dns_search_domains(source, target)
    assert len(target['DnsSearchDomains']) == 2
    assert target['DnsSearchDomains'][0] == 'a'
    assert target['DnsSearchDomains'][1] == 'b'
def test_process_container_dns_search_domains_invalid():
    """A non-list value must raise a ValueError.

    Fix: renamed from ``_process_container_dns_search_domains_invalid`` —
    without the ``test_`` prefix pytest never collected this test.
    """
    source = {'dns_search_domains': 'b'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_dns_search_domains(source, target)
def test_process_container_disable_networking_valid():
    """The disable_networking boolean is copied as-is for both True and False."""
    for flag in (True, False):
        source = {'disable_networking': flag}
        target = {}
        step._process_container_disable_networking(source, target)
        assert target['DisableNetworking'] == flag
def test_process_container_disable_networking_invalid():
    """A non-boolean value must raise a ValueError.

    Fix: renamed from ``_process_container_disable_networking_invalid`` —
    without the ``test_`` prefix pytest never collected this test.
    """
    source = {'disable_networking': 'b'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_disable_networking(source, target)
def test_process_container_dns_servers_valid():
    """A list of DNS servers is copied verbatim."""
    source = {'dns_servers': ['a', 'b']}
    target = {}
    step._process_container_dns_servers(source, target)
    assert len(target['DnsServers']) == 2
    assert target['DnsServers'][0] == 'a'
    assert target['DnsServers'][1] == 'b'
def test_process_container_dns_servers_invalid():
    """A non-list value must raise a ValueError.

    Fix: renamed from ``_process_container_dns_servers_invalid`` — without
    the ``test_`` prefix pytest never collected this test.
    """
    source = {'dns_servers': 'b'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_dns_servers(source, target)
def test_process_container_start_timeout_invalid():
    """A non-numeric start_timeout must raise a ValueError."""
    source = {'start_timeout': 'a'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_start_timeout(source, target)
def test_process_container_start_timeout_valid():
    """A numeric start_timeout is copied to the target definition."""
    source = {'start_timeout': 60}
    target = {}
    step._process_container_start_timeout(source, target)
    assert target['StartTimeout'] == source['start_timeout']
def test_process_container_stop_timeout_invalid():
    """A non-numeric stop_timeout must raise a ValueError."""
    source = {'stop_timeout': 'a'}
    target = {}
    with pytest.raises(ValueError):
        step._process_container_stop_timeout(source, target)
def test_process_container_stop_timeout_valid():
    """A numeric stop_timeout is copied to the target definition."""
    source = {'stop_timeout': 60}
    target = {}
    step._process_container_stop_timeout(source, target)
    assert target['StopTimeout'] == source['stop_timeout']
def test_process_container_hostname_valid():
    """The hostname is copied to the target definition."""
    source = {'hostname': 'a'}
    target = {}
    step._process_container_hostname(source, target)
    assert target['Hostname'] == source['hostname']
| [
"logging.Logger",
"ecs_crd.prepareDeploymentContainerDefinitionsStep.PrepareDeploymentContainerDefinitionsStep",
"pytest.raises",
"ecs_crd.canaryReleaseInfos.CanaryReleaseInfos"
] | [((288, 310), 'logging.Logger', 'logging.Logger', (['"""mock"""'], {}), "('mock')\n", (302, 310), False, 'import logging\n'), ((319, 352), 'ecs_crd.canaryReleaseInfos.CanaryReleaseInfos', 'CanaryReleaseInfos', ([], {'action': '"""test"""'}), "(action='test')\n", (337, 352), False, 'from ecs_crd.canaryReleaseInfos import CanaryReleaseInfos\n'), ((360, 416), 'ecs_crd.prepareDeploymentContainerDefinitionsStep.PrepareDeploymentContainerDefinitionsStep', 'PrepareDeploymentContainerDefinitionsStep', (['infos', 'logger'], {}), '(infos, logger)\n', (401, 416), False, 'from ecs_crd.prepareDeploymentContainerDefinitionsStep import PrepareDeploymentContainerDefinitionsStep\n'), ((1409, 1434), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1422, 1434), False, 'import pytest\n'), ((2236, 2261), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2249, 2261), False, 'import pytest\n'), ((2787, 2812), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2800, 2812), False, 'import pytest\n'), ((3437, 3462), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3450, 3462), False, 'import pytest\n'), ((4147, 4172), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4160, 4172), False, 'import pytest\n'), ((4738, 4763), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4751, 4763), False, 'import pytest\n'), ((4957, 4982), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4970, 4982), False, 'import pytest\n'), ((5411, 5436), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5424, 5436), False, 'import pytest\n')] |
from fastapi import HTTPException, Query, APIRouter
from starlette.requests import Request
from starlette.status import HTTP_404_NOT_FOUND
from .models import db, Metadata
# Router collecting the metadata query endpoints; attached to the app in init_app().
mod = APIRouter()
@mod.get("/metadata")
async def search_metadata(
    request: Request,
    data: bool = Query(
        False,
        description="Switch to returning a list of GUIDs (false), "
        "or GUIDs mapping to their metadata (true).",
    ),
    limit: int = Query(
        10, description="Maximum number of records returned. (max: 2000)"
    ),
    offset: int = Query(0, description="Return results at this given offset."),
):
    """Search the metadata.
    Without filters, this will return all data. Add filters as query strings like this:
    GET /metadata?a=1&b=2
    This will match all records that have metadata containing all of:
    {"a": 1, "b": 2}
    The values are always treated as strings for filtering. Nesting is supported:
    GET /metadata?a.b.c=3
    Matching records containing:
    {"a": {"b": {"c": 3}}}
    Providing the same key with more than one value filters records whose value of the
    given key matches any of the given values. But values of different keys must all
    match. For example:
    GET /metadata?a.b.c=3&a.b.c=33&a.b.d=4
    Matches these:
    {"a": {"b": {"c": 3, "d": 4}}}
    {"a": {"b": {"c": 33, "d": 4}}}
    {"a": {"b": {"c": "3", "d": 4, "e": 5}}}
    But won't match these:
    {"a": {"b": {"c": 3}}}
    {"a": {"b": {"c": 3, "d": 5}}}
    {"a": {"b": {"d": 5}}}
    {"a": {"b": {"c": "333", "d": 4}}}
    """
    # hard cap on page size to protect the database from huge scans
    limit = min(limit, 2000)
    # Collect every non-reserved query parameter.  multi_items() keeps repeated
    # keys, so each dotted path maps to the list of all values given for it
    # (OR semantics across values of the same key, AND across different keys).
    queries = {}
    for key, value in request.query_params.multi_items():
        if key not in {"data", "limit", "offset"}:
            queries.setdefault(key, []).append(value)
    # Apply the collected filters to the given query object and page it.
    # Each dotted path is split into a JSON path; the stored value is compared
    # as text (.astext), which is why all filtering is string-based.
    def add_filter(query):
        for path, values in queries.items():
            query = query.where(
                db.or_(Metadata.data[list(path.split("."))].astext == v for v in values)
            )
        return query.offset(offset).limit(limit)
    # data=true -> mapping of guid -> metadata; data=false -> bare list of GUIDs
    if data:
        return {
            metadata.guid: metadata.data
            for metadata in await add_filter(Metadata.query).gino.all()
        }
    else:
        # select only the guid column and skip model construction for speed
        return [
            row[0]
            for row in await add_filter(db.select([Metadata.guid]))
            .gino.return_model(False)
            .all()
        ]
@mod.get("/metadata/{guid:path}")
async def get_metadata(guid):
    """Return the metadata document stored under *guid*, or 404 if absent."""
    record = await Metadata.get(guid)
    if not record:
        raise HTTPException(HTTP_404_NOT_FOUND, f"Not found: {guid}")
    return record.data
def init_app(app):
    """Attach the metadata query router to the given FastAPI application."""
    app.include_router(mod, tags=["Query"])
| [
"fastapi.APIRouter",
"fastapi.HTTPException",
"fastapi.Query"
] | [((180, 191), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (189, 191), False, 'from fastapi import HTTPException, Query, APIRouter\n'), ((282, 407), 'fastapi.Query', 'Query', (['(False)'], {'description': '"""Switch to returning a list of GUIDs (false), or GUIDs mapping to their metadata (true)."""'}), "(False, description=\n 'Switch to returning a list of GUIDs (false), or GUIDs mapping to their metadata (true).'\n )\n", (287, 407), False, 'from fastapi import HTTPException, Query, APIRouter\n'), ((450, 522), 'fastapi.Query', 'Query', (['(10)'], {'description': '"""Maximum number of records returned. (max: 2000)"""'}), "(10, description='Maximum number of records returned. (max: 2000)')\n", (455, 522), False, 'from fastapi import HTTPException, Query, APIRouter\n'), ((556, 616), 'fastapi.Query', 'Query', (['(0)'], {'description': '"""Return results at this given offset."""'}), "(0, description='Return results at this given offset.')\n", (561, 616), False, 'from fastapi import HTTPException, Query, APIRouter\n'), ((2640, 2695), 'fastapi.HTTPException', 'HTTPException', (['HTTP_404_NOT_FOUND', 'f"""Not found: {guid}"""'], {}), "(HTTP_404_NOT_FOUND, f'Not found: {guid}')\n", (2653, 2695), False, 'from fastapi import HTTPException, Query, APIRouter\n')] |
#!/usr/bin/env python
# Download .mp3 podcast files of Radio Belgrade show Govori da bih te video (Speak so that I can see you)
# grab all mp3s and save them with parsed name and date to the output folder
import requests
import os
import time
import xml.dom.minidom
from urllib.parse import urlparse
# Fixes: (1) removed the misleading 'saved to ...' print that ran *before*
# the download; (2) the mp3 is now written via a `with` block so the file
# handle is always closed; (3) dropped commented-out dead code.
url = "https://www.rts.rs/page/radio/sr/podcast/5433/govori-da-bih-te-video/audio.html"
# the URL returns an RSS/XML document that is parsed below
timestamp = time.strftime("%Y%m%d-%H%M%S")
out_dir = os.path.join("govori_" + timestamp)
doc_path = "govori_" + timestamp + ".xml"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
try:
    req = requests.get(url)
    req.raise_for_status()
    doc = xml.dom.minidom.parseString(req.text)  # TODO check if it is valid XML
    items = doc.getElementsByTagName("item")
    print("found ", len(items), " items")
    for item in items:
        links = item.getElementsByTagName("link")
        if len(links) > 0:
            print(links[0].firstChild.data)  # read element data value
            # keep only the basename of the episode's .html page, without extension
            # https://bit.ly/2ZnqwK7
            a = urlparse(links[0].firstChild.data)
            out_fname_pname = os.path.basename(a.path).replace('.html', '')
        else:
            out_fname_pname = "NA"
        enclosures = item.getElementsByTagName("enclosure")
        if len(enclosures) > 0:
            url_value = enclosures[0].attributes["url"].value  # read attribute value
            print(url_value)
            if url_value.endswith('.mp3'):
                url_elements = urlparse(url_value).path.split('/')
                if len(url_elements) >= 5:
                    # broadcast date is spread over three path segments; https://bit.ly/3e6mXMk
                    out_fname_date = ''.join(url_elements[-5:-2])
                else:
                    out_fname_date = "NA"
                out_file = out_fname_date + "_" + out_fname_pname + ".mp3"
                # download the mp3 from url_value and save it to out_file
                # https://dzone.com/articles/simple-examples-of-downloading-files-using-python
                print("saving... ", end='')
                try:
                    req = requests.get(url_value)
                    req.raise_for_status()
                    with open(os.path.join(out_dir, out_file), 'wb') as mp3_f:
                        mp3_f.write(req.content)
                    print("saved to " + os.path.join(out_dir, out_file))
                except requests.exceptions.HTTPError as err:
                    print(err)
        print("")
    # save the rss xml alongside the downloaded episodes
    with open(os.path.join(out_dir, doc_path), "w", encoding="utf-8") as f:
        f.write(doc.toprettyxml())
    print(os.path.join(out_dir, doc_path))
except requests.exceptions.HTTPError as err:
    print(err)
| [
"os.path.exists",
"urllib.parse.urlparse",
"os.makedirs",
"time.strftime",
"os.path.join",
"requests.get",
"os.path.basename"
] | [((448, 478), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (461, 478), False, 'import time\n'), ((489, 524), 'os.path.join', 'os.path.join', (["('govori_' + timestamp)"], {}), "('govori_' + timestamp)\n", (501, 524), False, 'import os\n'), ((575, 598), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (589, 598), False, 'import os\n'), ((604, 624), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (615, 624), False, 'import os\n'), ((641, 658), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (653, 658), False, 'import requests\n'), ((2829, 2860), 'os.path.join', 'os.path.join', (['out_dir', 'doc_path'], {}), '(out_dir, doc_path)\n', (2841, 2860), False, 'import os\n'), ((1242, 1276), 'urllib.parse.urlparse', 'urlparse', (['links[0].firstChild.data'], {}), '(links[0].firstChild.data)\n', (1250, 1276), False, 'from urllib.parse import urlparse\n'), ((2722, 2753), 'os.path.join', 'os.path.join', (['out_dir', 'doc_path'], {}), '(out_dir, doc_path)\n', (2734, 2753), False, 'import os\n'), ((1307, 1331), 'os.path.basename', 'os.path.basename', (['a.path'], {}), '(a.path)\n', (1323, 1331), False, 'import os\n'), ((2309, 2332), 'requests.get', 'requests.get', (['url_value'], {}), '(url_value)\n', (2321, 2332), False, 'import requests\n'), ((2030, 2061), 'os.path.join', 'os.path.join', (['out_dir', 'out_file'], {}), '(out_dir, out_file)\n', (2042, 2061), False, 'import os\n'), ((1683, 1702), 'urllib.parse.urlparse', 'urlparse', (['url_value'], {}), '(url_value)\n', (1691, 1702), False, 'from urllib.parse import urlparse\n'), ((2500, 2531), 'os.path.join', 'os.path.join', (['out_dir', 'out_file'], {}), '(out_dir, out_file)\n', (2512, 2531), False, 'import os\n'), ((2402, 2433), 'os.path.join', 'os.path.join', (['out_dir', 'out_file'], {}), '(out_dir, out_file)\n', (2414, 2433), False, 'import os\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StoredInfoTypeArgs', 'StoredInfoType']
# NOTE(review): auto-generated Pulumi code — @pulumi.input_type synthesises the
# input-property plumbing from the property getters/setters below, so their
# structure must not be altered by hand.
@pulumi.input_type
class StoredInfoTypeArgs:
    def __init__(__self__, *,
                 config: pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs'],
                 location: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 stored_info_type_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a StoredInfoType resource.
        :param pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs'] config: Configuration of the storedInfoType to create.
        :param pulumi.Input[str] stored_info_type_id: The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
        """
        # config is required; the optional inputs are only stored when provided
        pulumi.set(__self__, "config", config)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if stored_info_type_id is not None:
            pulumi.set(__self__, "stored_info_type_id", stored_info_type_id)
    @property
    @pulumi.getter
    def config(self) -> pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']:
        """
        Configuration of the storedInfoType to create.
        """
        return pulumi.get(self, "config")
    @config.setter
    def config(self, value: pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']):
        pulumi.set(self, "config", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        # location of the storedInfoType (optional input)
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        # project owning the storedInfoType (optional input)
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter(name="storedInfoTypeId")
    def stored_info_type_id(self) -> Optional[pulumi.Input[str]]:
        """
        The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
        """
        return pulumi.get(self, "stored_info_type_id")
    @stored_info_type_id.setter
    def stored_info_type_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "stored_info_type_id", value)
class StoredInfoType(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
stored_info_type_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a pre-built stored infoType to be used for inspection. See https://cloud.google.com/dlp/docs/creating-stored-infotypes to learn more.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']] config: Configuration of the storedInfoType to create.
:param pulumi.Input[str] stored_info_type_id: The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StoredInfoTypeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a pre-built stored infoType to be used for inspection. See https://cloud.google.com/dlp/docs/creating-stored-infotypes to learn more.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param StoredInfoTypeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StoredInfoTypeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
stored_info_type_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StoredInfoTypeArgs.__new__(StoredInfoTypeArgs)
if config is None and not opts.urn:
raise TypeError("Missing required property 'config'")
__props__.__dict__["config"] = config
__props__.__dict__["location"] = location
__props__.__dict__["project"] = project
__props__.__dict__["stored_info_type_id"] = stored_info_type_id
__props__.__dict__["current_version"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pending_versions"] = None
super(StoredInfoType, __self__).__init__(
'google-native:dlp/v2:StoredInfoType',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'StoredInfoType':
        """
        Get an existing StoredInfoType resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Attach the provider id to the options so the engine performs a lookup
        # rather than a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Start from an empty property bag; output properties begin as None.
        __props__ = StoredInfoTypeArgs.__new__(StoredInfoTypeArgs)
        __props__.__dict__["current_version"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["pending_versions"] = None
        return StoredInfoType(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="currentVersion")
    def current_version(self) -> pulumi.Output['outputs.GooglePrivacyDlpV2StoredInfoTypeVersionResponse']:
        """
        Current version of the stored info type.
        """
        return pulumi.get(self, "current_version")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="pendingVersions")
    def pending_versions(self) -> pulumi.Output[Sequence['outputs.GooglePrivacyDlpV2StoredInfoTypeVersionResponse']]:
        """
        Pending versions of the stored info type. Empty if no versions are pending.
        """
        return pulumi.get(self, "pending_versions")
| [
"pulumi.getter",
"pulumi.set",
"pulumi.ResourceOptions",
"pulumi.get"
] | [((2560, 2598), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""storedInfoTypeId"""'}), "(name='storedInfoTypeId')\n", (2573, 2598), False, 'import pulumi\n'), ((8292, 8328), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""currentVersion"""'}), "(name='currentVersion')\n", (8305, 8328), False, 'import pulumi\n'), ((8743, 8780), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""pendingVersions"""'}), "(name='pendingVersions')\n", (8756, 8780), False, 'import pulumi\n'), ((1304, 1342), 'pulumi.set', 'pulumi.set', (['__self__', '"""config"""', 'config'], {}), "(__self__, 'config', config)\n", (1314, 1342), False, 'import pulumi\n'), ((1849, 1875), 'pulumi.get', 'pulumi.get', (['self', '"""config"""'], {}), "(self, 'config')\n", (1859, 1875), False, 'import pulumi\n'), ((1993, 2026), 'pulumi.set', 'pulumi.set', (['self', '"""config"""', 'value'], {}), "(self, 'config', value)\n", (2003, 2026), False, 'import pulumi\n'), ((2131, 2159), 'pulumi.get', 'pulumi.get', (['self', '"""location"""'], {}), "(self, 'location')\n", (2141, 2159), False, 'import pulumi\n'), ((2250, 2285), 'pulumi.set', 'pulumi.set', (['self', '"""location"""', 'value'], {}), "(self, 'location', value)\n", (2260, 2285), False, 'import pulumi\n'), ((2389, 2416), 'pulumi.get', 'pulumi.get', (['self', '"""project"""'], {}), "(self, 'project')\n", (2399, 2416), False, 'import pulumi\n'), ((2505, 2539), 'pulumi.set', 'pulumi.set', (['self', '"""project"""', 'value'], {}), "(self, 'project', value)\n", (2515, 2539), False, 'import pulumi\n'), ((2953, 2992), 'pulumi.get', 'pulumi.get', (['self', '"""stored_info_type_id"""'], {}), "(self, 'stored_info_type_id')\n", (2963, 2992), False, 'import pulumi\n'), ((3105, 3151), 'pulumi.set', 'pulumi.set', (['self', '"""stored_info_type_id"""', 'value'], {}), "(self, 'stored_info_type_id', value)\n", (3115, 3151), False, 'import pulumi\n'), ((8524, 8559), 'pulumi.get', 'pulumi.get', (['self', '"""current_version"""'], {}), "(self, 'current_version')\n", 
(8534, 8559), False, 'import pulumi\n'), ((8698, 8722), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (8708, 8722), False, 'import pulumi\n'), ((9022, 9058), 'pulumi.get', 'pulumi.get', (['self', '"""pending_versions"""'], {}), "(self, 'pending_versions')\n", (9032, 9058), False, 'import pulumi\n'), ((1388, 1430), 'pulumi.set', 'pulumi.set', (['__self__', '"""location"""', 'location'], {}), "(__self__, 'location', location)\n", (1398, 1430), False, 'import pulumi\n'), ((1475, 1515), 'pulumi.set', 'pulumi.set', (['__self__', '"""project"""', 'project'], {}), "(__self__, 'project', project)\n", (1485, 1515), False, 'import pulumi\n'), ((1572, 1636), 'pulumi.set', 'pulumi.set', (['__self__', '"""stored_info_type_id"""', 'stored_info_type_id'], {}), "(__self__, 'stored_info_type_id', stored_info_type_id)\n", (1582, 1636), False, 'import pulumi\n'), ((6114, 6138), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (6136, 6138), False, 'import pulumi\n'), ((7946, 7975), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (7968, 7975), False, 'import pulumi\n')] |
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
def appendViewContextMenuItems( viewer, view, menuDefinition ) :

	# Only scene views get the history submenu.
	if not isinstance( view, GafferSceneUI.SceneView ) :
		return None

	subMenu = functools.partial(
		__historySubMenu,
		context = view.getContext(),
		scene = view["in"],
		selectedPath = __sceneViewSelectedPath( view ),
	)
	menuDefinition.append( "/History", { "subMenu" : subMenu } )
def connectToEditor( editor ) :

	# Route key presses to the handler appropriate for the editor type. The
	# generic GafferUI.Editor test comes last, so more specific editor types
	# are matched first.
	if isinstance( editor, GafferUI.Viewer ) :
		handler = __viewerKeyPress
	elif isinstance( editor, ( GafferSceneUI.HierarchyView, GafferSceneUI.LightEditor ) ) :
		handler = __hierarchyViewKeyPress
	elif isinstance( editor, GafferUI.Editor ) :
		handler = __nodeEditorKeyPress
	else :
		return

	editor.keyPressSignal().connect( handler, scoped = False )
##########################################################################
# Internal implementation
##########################################################################
def __historySubMenu( menu, context, scene, selectedPath ) :

	# Builds the "History" submenu shown in the viewer's context menu. Both
	# items are greyed out unless exactly one path is selected.
	haveSelection = selectedPath is not None

	m = IECore.MenuDefinition()
	m.append(
		"/Edit Source...",
		{
			"active" : haveSelection,
			"command" : functools.partial( __editSourceNode, context, scene, selectedPath ),
			"shortCut" : "Alt+E",
		}
	)
	m.append(
		"/Edit Tweaks...",
		{
			"active" : haveSelection,
			"command" : functools.partial( __editTweaksNode, context, scene, selectedPath ),
			"shortCut" : "Alt+Shift+E",
		}
	)

	return m
def __sceneViewSelectedPath( sceneView ) :

	# The sole selected path in the viewport, or None unless exactly one path
	# is selected.
	selection = sceneView.viewportGadget().getPrimaryChild().getSelection()
	if selection.size() != 1 :
		return None

	return selection.paths()[0]
def __contextSelectedPath( context ) :

	# The single path selected in `context`, or None if the selection is empty
	# or contains more than one path.
	selection = GafferSceneUI.ContextAlgo.getSelectedPaths( context )
	return selection.paths()[0] if selection.size() == 1 else None
def __editSourceNode( context, scene, path, nodeEditor = None ) :

	# Finds the node that authored `path` and shows it for editing, either in
	# an existing NodeEditor or in a new floating one.
	with context :
		source = GafferScene.SceneAlgo.source( scene, path )

	if source is None :
		return

	node = __ancestorWithNonViewableChildNodes( source.node() ) or source.node()
	if nodeEditor is None :
		GafferUI.NodeEditor.acquire( node, floating = True )
	else :
		nodeEditor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
def __tweaksNode( scene, path ) :

	"""Returns the node best suited for tweaking `path` : the object tweaks
	node if one exists, otherwise the shader tweaks node for the highest
	priority shader attribute, or None if there is nothing to tweak."""

	tweaks = GafferScene.SceneAlgo.objectTweaks( scene, path )
	if tweaks is not None :
		return tweaks

	attributes = scene.fullAttributes( path )
	shaderAttributeNames = sorted(
		[ name for name, value in attributes.items() if isinstance( value, IECoreScene.ShaderNetwork ) ],
		# Reverse alphabetical order just happens to give Surface, Light,
		# Displacement, which is the priority we want.
		reverse = True
	)
	if not shaderAttributeNames :
		return None

	return GafferScene.SceneAlgo.shaderTweaks( scene, path, shaderAttributeNames[0] )
def __editTweaksNode( context, scene, path, nodeEditor = None ) :

	# Locates the tweaks node affecting `path` and presents it for editing.
	with context :
		tweaks = __tweaksNode( scene, path )

	if tweaks is None :
		return

	node = __ancestorWithNonViewableChildNodes( tweaks ) or tweaks
	if nodeEditor is None :
		GafferUI.NodeEditor.acquire( node, floating = True )
	else :
		nodeEditor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
def __ancestorWithNonViewableChildNodes( node ) :

	# Walks up the node hierarchy and returns the outermost ancestor for which
	# the "graphEditor:childrenViewable" metadata is explicitly False, or None
	# if no such ancestor exists.
	result = None
	while isinstance( node, Gaffer.Node ) :
		# NOTE : `== False` is deliberate. An unset metadata value of None
		# compares unequal to False and so means the children _are_ viewable;
		# a plain truthiness test would not be equivalent.
		if Gaffer.Metadata.value( node, "graphEditor:childrenViewable" ) == False :
			result = node
		node = node.parent()
	return result
# Keyboard shortcuts matching the "Alt+E" / "Alt+Shift+E" shortcuts advertised
# by the "Edit Source..." and "Edit Tweaks..." menu items in `__historySubMenu`.
__editSourceKeyPress = GafferUI.KeyEvent( "E", GafferUI.KeyEvent.Modifiers.Alt )
__editTweaksKeyPress = GafferUI.KeyEvent(
	"E",
	GafferUI.KeyEvent.Modifiers(
		GafferUI.KeyEvent.Modifiers.Alt | GafferUI.KeyEvent.Modifiers.Shift
	)
)
def __viewerKeyPress( viewer, event ) :

	# Handles the edit-source / edit-tweaks shortcuts in the Viewer, acting on
	# the currently selected scene path.
	view = viewer.view()
	if not isinstance( view, GafferSceneUI.SceneView ) :
		return False

	if event == __editSourceKeyPress :
		editFunction = __editSourceNode
	elif event == __editTweaksKeyPress :
		editFunction = __editTweaksNode
	else :
		return None

	selectedPath = __sceneViewSelectedPath( view )
	if selectedPath is not None :
		editFunction( view.getContext(), view["in"], selectedPath )

	return True
def __hierarchyViewKeyPress( hierarchyView, event ) :

	# Handles the edit-source / edit-tweaks shortcuts in the HierarchyView
	# (and LightEditor, which shares this handler).
	if event == __editSourceKeyPress :
		editFunction = __editSourceNode
	elif event == __editTweaksKeyPress :
		editFunction = __editTweaksNode
	else :
		return None

	selectedPath = __contextSelectedPath( hierarchyView.getContext() )
	if selectedPath is not None :
		editFunction( hierarchyView.getContext(), hierarchyView.scene(), selectedPath )

	return True
def __nodeEditorKeyPress( nodeEditor, event ) :

	# Handles the edit-source / edit-tweaks shortcuts in a NodeEditor. The
	# scene to inspect is borrowed from a sibling editor in the same layout.
	layout = nodeEditor.ancestor( GafferUI.CompoundEditor )
	if layout is None :
		return False

	## \todo In Gaffer 0.61, we should get the scene directly from the focus node.
	def findScene() :

		# Prefer a HierarchyView with a scene, falling back to a SceneView viewer.
		for hierarchyView in layout.editors( GafferSceneUI.HierarchyView ) :
			if hierarchyView.scene() is not None :
				return hierarchyView.scene()
		for viewer in layout.editors( GafferUI.Viewer ) :
			if isinstance( viewer.view(), GafferSceneUI.SceneView ) :
				return viewer.view()["in"]
		return None

	scene = findScene()
	if scene is None :
		return False

	context = layout.scriptNode().context()
	if event == __editSourceKeyPress :
		selectedPath = __contextSelectedPath( context )
		if selectedPath is not None :
			__editSourceNode( context, scene, selectedPath, nodeEditor )
		return True
	elif event == __editTweaksKeyPress :
		selectedPath = __contextSelectedPath( context )
		if selectedPath is not None :
			__editTweaksNode( context, scene, selectedPath, nodeEditor )
		return True
| [
"GafferUI.KeyEvent.Modifiers",
"Gaffer.Metadata.value",
"GafferSceneUI.ContextAlgo.getSelectedPaths",
"GafferUI.KeyEvent",
"GafferScene.SceneAlgo.shaderTweaks",
"IECore.MenuDefinition",
"GafferUI.NodeEditor.acquire",
"functools.partial",
"GafferScene.SceneAlgo.source",
"Gaffer.StandardSet",
"Gaf... | [((5473, 5528), 'GafferUI.KeyEvent', 'GafferUI.KeyEvent', (['"""E"""', 'GafferUI.KeyEvent.Modifiers.Alt'], {}), "('E', GafferUI.KeyEvent.Modifiers.Alt)\n", (5490, 5528), False, 'import GafferUI\n'), ((2986, 3009), 'IECore.MenuDefinition', 'IECore.MenuDefinition', ([], {}), '()\n', (3007, 3009), False, 'import IECore\n'), ((3725, 3776), 'GafferSceneUI.ContextAlgo.getSelectedPaths', 'GafferSceneUI.ContextAlgo.getSelectedPaths', (['context'], {}), '(context)\n', (3767, 3776), False, 'import GafferSceneUI\n'), ((4300, 4347), 'GafferScene.SceneAlgo.objectTweaks', 'GafferScene.SceneAlgo.objectTweaks', (['scene', 'path'], {}), '(scene, path)\n', (4334, 4347), False, 'import GafferScene\n'), ((4763, 4835), 'GafferScene.SceneAlgo.shaderTweaks', 'GafferScene.SceneAlgo.shaderTweaks', (['scene', 'path', 'shaderAttributeNames[0]'], {}), '(scene, path, shaderAttributeNames[0])\n', (4797, 4835), False, 'import GafferScene\n'), ((5580, 5681), 'GafferUI.KeyEvent.Modifiers', 'GafferUI.KeyEvent.Modifiers', (['(GafferUI.KeyEvent.Modifiers.Alt | GafferUI.KeyEvent.Modifiers.Shift)'], {}), '(GafferUI.KeyEvent.Modifiers.Alt | GafferUI.\n KeyEvent.Modifiers.Shift)\n', (5607, 5681), False, 'import GafferUI\n'), ((3946, 3987), 'GafferScene.SceneAlgo.source', 'GafferScene.SceneAlgo.source', (['scene', 'path'], {}), '(scene, path)\n', (3974, 3987), False, 'import GafferScene\n'), ((4201, 4249), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (['node'], {'floating': '(True)'}), '(node, floating=True)\n', (4228, 4249), False, 'import GafferUI\n'), ((5154, 5202), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (['node'], {'floating': '(True)'}), '(node, floating=True)\n', (5181, 5202), False, 'import GafferUI\n'), ((3115, 3180), 'functools.partial', 'functools.partial', (['__editSourceNode', 'context', 'scene', 'selectedPath'], {}), '(__editSourceNode, context, scene, selectedPath)\n', (3132, 3180), False, 'import functools\n'), ((3321, 3386), 
'functools.partial', 'functools.partial', (['__editTweaksNode', 'context', 'scene', 'selectedPath'], {}), '(__editTweaksNode, context, scene, selectedPath)\n', (3338, 3386), False, 'import functools\n'), ((4158, 4184), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (['[node]'], {}), '([node])\n', (4176, 4184), False, 'import Gaffer\n'), ((5111, 5137), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (['[node]'], {}), '([node])\n', (5129, 5137), False, 'import Gaffer\n'), ((5320, 5379), 'Gaffer.Metadata.value', 'Gaffer.Metadata.value', (['node', '"""graphEditor:childrenViewable"""'], {}), "(node, 'graphEditor:childrenViewable')\n", (5341, 5379), False, 'import Gaffer\n')] |
import unittest
from brightics.function.extraction.encoder import label_encoder, \
label_encoder_model
from brightics.common.datasets import load_iris
import random
def get_iris_randomgroup():
    """Return the iris dataset with two extra columns of random group labels:
    ``random_group1`` (1 or 2) and ``random_group2`` ('A' or 'B')."""
    df = load_iris()
    label_map = {1: 'A', 2: 'B'}
    group1 = [random.randint(1, 2) for _ in range(len(df))]
    group2 = [label_map[random.randint(1, 2)] for _ in range(len(df))]
    df['random_group1'] = group1
    df['random_group2'] = group2
    return df
class LabelEncoderTest(unittest.TestCase):
    """Exercises ``label_encoder`` / ``label_encoder_model`` with group-by columns."""

    def test_groupby1(self):
        # Encode the 'species' column separately within each random group, then
        # re-apply the fitted model to the same frame.
        df = get_iris_randomgroup()
        enc_out = label_encoder(df, input_col='species', group_by=['random_group1', 'random_group2'])
        print(enc_out['out_table'])
        print(enc_out['model'].keys())
        model_out = label_encoder_model(df, enc_out['model'])
        print(model_out['out_table'])
| [
"brightics.function.extraction.encoder.label_encoder",
"brightics.function.extraction.encoder.label_encoder_model",
"brightics.common.datasets.load_iris",
"random.randint"
] | [((208, 219), 'brightics.common.datasets.load_iris', 'load_iris', ([], {}), '()\n', (217, 219), False, 'from brightics.common.datasets import load_iris\n'), ((678, 765), 'brightics.function.extraction.encoder.label_encoder', 'label_encoder', (['df'], {'input_col': '"""species"""', 'group_by': "['random_group1', 'random_group2']"}), "(df, input_col='species', group_by=['random_group1',\n 'random_group2'])\n", (691, 765), False, 'from brightics.function.extraction.encoder import label_encoder, label_encoder_model\n'), ((857, 898), 'brightics.function.extraction.encoder.label_encoder_model', 'label_encoder_model', (['df', "enc_out['model']"], {}), "(df, enc_out['model'])\n", (876, 898), False, 'from brightics.function.extraction.encoder import label_encoder, label_encoder_model\n'), ((361, 381), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (375, 381), False, 'import random\n'), ((428, 448), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (442, 448), False, 'import random\n')] |
import hashlib
import json
import logging
import re
import sqlite3
from typing import List, Optional, Tuple
import pystache
from redash.models import Query, User
from redash.query_runner import TYPE_STRING, guess_type, register
from redash.query_runner.query_results import Results, _load_query, create_table
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
class ChildQueryExecutionError(Exception):
    """Raised when executing an embedded child query fails."""
    pass
class ChildQuery:
    """An embedded ``query_<id>`` reference found inside a parent query.

    Attributes:
        query_id: id of the referenced query.
        params: parameter values used to render the child query.
        table: name of the temporary table the results are loaded into.
        token: the literal token matched in the parent query text.
    """

    query_id: int
    params: dict
    table: str
    token: str

    def __init__(self, query_id: int, params: dict, table: str, token: str) -> None:
        super().__init__()
        self.query_id = query_id
        self.params = params
        self.table = table
        self.token = token
def _extract_child_queries(query: str) -> List[ChildQuery]:
    """Parse ``query`` and return a ChildQuery for each embedded
    ``query_<id>`` / ``query_<id>('{...}')`` token found in it."""
    children = []
    for token, query_id, raw_params in _collect_tokens(query):
        params = json.loads(raw_params) if raw_params else {}
        table = _tmp_table_name(query_id, token)
        children.append(ChildQuery(query_id, params, table, token))
    return children
def _collect_tokens(query: str) -> list:
pattern = re.compile(r"\s(query_(\d+)(?:\(\s*'({.+})'\s*\))?)", re.IGNORECASE)
matches = pattern.findall(query)
return [(m[0], int(m[1]), m[2]) for m in list(matches)]
def _tmp_table_name(query_id: int, child_query_token: str):
return f"tmp_query{query_id}_{hashlib.sha256(child_query_token.encode('utf-8')).hexdigest()}"
def _create_tables_from_child_queries(
    user: User,
    connection: sqlite3.Connection,
    query: str,
    child_queries: List[ChildQuery],
) -> str:
    """Execute every child query, load its results into a temporary table in
    ``connection``, and return ``query`` with each child-query token replaced
    by the corresponding table name.

    Raises:
        ChildQueryExecutionError: if any child query fails to run.
    """
    for i, child_query in enumerate(child_queries):
        loaded_child_query = _load_query(user, child_query.query_id)
        # Fall back to the child query's own default parameter values when the
        # parent didn't embed an explicit parameter blob.
        params = (
            child_query.params
            if child_query.params
            else get_default_params(loaded_child_query)
        )
        _rendered_child_query = pystache.render(loaded_child_query.query_text, params)
        logger.debug(
            f"ResultsWithParams child_queries[{i}], query_id={child_query.query_id} : {_rendered_child_query}"
        )
        results, error = loaded_child_query.data_source.query_runner.run_query(
            _rendered_child_query, user
        )
        if error:
            raise ChildQueryExecutionError(
                f"Failed loading results for query id {loaded_child_query.id}."
            )
        results = json.loads(results)
        table_name = child_query.table
        create_table(connection, table_name, results)
        # Replace only the first occurrence; each ChildQuery entry accounts for
        # one matched token in the parent query text.
        query = query.replace(child_query.token, table_name, 1)
    return query
def get_default_params(query: Query) -> dict:
    """Map each of ``query``'s declared parameters to its default value."""
    defaults = {}
    for parameter in query.options.get("parameters", {}):
        defaults[parameter["name"]] = parameter["value"]
    return defaults
class ParameterSupportedResults(Results):
    """A query-results runner that additionally supports passing parameters to
    the referenced child queries."""

    @classmethod
    def name(cls):
        # Display name shown in the data-source type list.
        return "Parameter Supported Query Results(PoC)"

    def run_query(
        self, query: Query, user: User
    ) -> Tuple[Optional[str], Optional[str]]:
        """Run ``query`` against an in-memory SQLite database after loading all
        embedded child-query results into temporary tables.

        Returns:
            A ``(json_data, error)`` tuple, with ``error`` None on success.
        """
        child_queries = _extract_child_queries(query)
        connection = None
        cursor = None
        try:
            connection = sqlite3.connect(":memory:")
            # Rewrites `query` so child tokens refer to the temp tables.
            query = _create_tables_from_child_queries(
                user, connection, query, child_queries
            )
            cursor = connection.cursor()
            cursor.execute(query)
            if cursor.description is None:
                return None, "Query completed but it returned no data."
            columns = self.fetch_columns([(d[0], None) for d in cursor.description])
            rows = []
            column_names = [c["name"] for c in columns]
            for i, row in enumerate(cursor):
                if i == 0:
                    # Infer each column's type from the first result row,
                    # demoting to string when it conflicts with a known type.
                    for j, col in enumerate(row):
                        guess = guess_type(col)
                        if columns[j]["type"] is None:
                            columns[j]["type"] = guess
                        elif columns[j]["type"] != guess:
                            columns[j]["type"] = TYPE_STRING
                rows.append(dict(zip(column_names, row)))
            return json_dumps({"columns": columns, "rows": rows}), None
        except KeyboardInterrupt:
            if connection:
                connection.interrupt()
            return None, "Query cancelled by user."
        finally:
            # Always release SQLite resources, whatever the outcome.
            if cursor:
                cursor.close()
            if connection:
                connection.close()
register(ParameterSupportedResults)
| [
"logging.getLogger",
"json.loads",
"redash.query_runner.query_results.create_table",
"sqlite3.connect",
"re.compile",
"redash.utils.json_dumps",
"redash.query_runner.guess_type",
"pystache.render",
"redash.query_runner.register",
"redash.query_runner.query_results._load_query"
] | [((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((4593, 4628), 'redash.query_runner.register', 'register', (['ParameterSupportedResults'], {}), '(ParameterSupportedResults)\n', (4601, 4628), False, 'from redash.query_runner import TYPE_STRING, guess_type, register\n'), ((1268, 1341), 're.compile', 're.compile', (['"""\\\\s(query_(\\\\d+)(?:\\\\(\\\\s*\'({.+})\'\\\\s*\\\\))?)"""', 're.IGNORECASE'], {}), '("\\\\s(query_(\\\\d+)(?:\\\\(\\\\s*\'({.+})\'\\\\s*\\\\))?)", re.IGNORECASE)\n', (1278, 1341), False, 'import re\n'), ((1831, 1870), 'redash.query_runner.query_results._load_query', '_load_query', (['user', 'child_query.query_id'], {}), '(user, child_query.query_id)\n', (1842, 1870), False, 'from redash.query_runner.query_results import Results, _load_query, create_table\n'), ((2055, 2109), 'pystache.render', 'pystache.render', (['loaded_child_query.query_text', 'params'], {}), '(loaded_child_query.query_text, params)\n', (2070, 2109), False, 'import pystache\n'), ((2558, 2577), 'json.loads', 'json.loads', (['results'], {}), '(results)\n', (2568, 2577), False, 'import json\n'), ((2625, 2670), 'redash.query_runner.query_results.create_table', 'create_table', (['connection', 'table_name', 'results'], {}), '(connection, table_name, results)\n', (2637, 2670), False, 'from redash.query_runner.query_results import Results, _load_query, create_table\n'), ((995, 1016), 'json.loads', 'json.loads', (['tokens[2]'], {}), '(tokens[2])\n', (1005, 1016), False, 'import json\n'), ((3264, 3291), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (3279, 3291), False, 'import sqlite3\n'), ((4253, 4299), 'redash.utils.json_dumps', 'json_dumps', (["{'columns': columns, 'rows': rows}"], {}), "({'columns': columns, 'rows': rows})\n", (4263, 4299), False, 'from redash.utils import json_dumps\n'), ((3928, 3943), 'redash.query_runner.guess_type', 'guess_type', (['col'], 
{}), '(col)\n', (3938, 3943), False, 'from redash.query_runner import TYPE_STRING, guess_type, register\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Adds the Notebook model (title + owning user) and links each Note to an
    # optional Notebook via a nullable foreign key.
    # NOTE(review): auto-generated migration; edit with care.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('notes', '0004_auto_20151022_1517'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notebook',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['title'],
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='note',
            name='notebook',
            # SET_NULL keeps notes around when their notebook is deleted.
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='notes.Notebook', null=True),
            preserve_default=True,
        ),
    ]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((243, 300), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (274, 300), False, 'from django.db import models, migrations\n'), ((950, 1061), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.SET_NULL', 'blank': '(True)', 'to': '"""notes.Notebook"""', 'null': '(True)'}), "(on_delete=django.db.models.deletion.SET_NULL, blank=True,\n to='notes.Notebook', null=True)\n", (967, 1061), False, 'from django.db import models, migrations\n'), ((479, 572), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (495, 572), False, 'from django.db import models, migrations\n'), ((597, 629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (613, 629), False, 'from django.db import models, migrations\n'), ((657, 703), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (674, 703), False, 'from django.db import models, migrations\n')] |
from flask import Flask
from Config import app_config, app_active
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
import Forms
import LDB
import gGraficos
config = app_config[app_active]
def create_app(config_name):
    """Application factory: builds the Flask app, binds the database and
    registers the index and chart routes.

    :param config_name: unused legacy parameter, kept for caller compatibility.
    :return: the configured Flask application.
    """
    app = Flask(__name__, template_folder='templates')
    app.secret_key = config.SECRET
    app.config.from_object(app_config[app_active])
    app.config.from_pyfile('Config.py')
    # NOTE(review): database credentials should come from configuration rather
    # than being hard-coded here.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:@localhost:3306/csv'
    # SQLAlchemy(app) already initialises the extension for `app`; the former
    # extra `db.__init__(app)` call was redundant and has been removed.
    db = SQLAlchemy(app)

    @app.route('/')
    def index():
        return render_template('index.html')

    @app.route('/grafico', methods=["POST", "GET"])
    def grafico():
        info = Forms.info()
        if info.idade.data is None and info.sexo.data is None:
            arq = 'Figura'
            if info.situacao_cidade.data is None:
                # Bug fix: the original assigned `info.situacao_cidade.date`
                # (a typo), creating a stray attribute instead of defaulting
                # the field's value.
                info.situacao_cidade.data = False
            return render_template('grafico.html',
                                   info=info,
                                   arq=arq)
        else:
            dados = LDB.leitura(info.idade.data, info.sexo.data, info.cidade.data, info.situacao_cidade.data)
            if len(dados) == 2:
                # Pad with an empty bucket so the chart always gets three series.
                dados.append((0, 0))
            arq = gGraficos.criarimg(dados[0][0], dados[0][1], dados[1][0], dados[1][1], dados[2][0], dados[2][1], tipo=info.tipo.data)
            arq = 'png-grafico/' + str(arq)
            return render_template('grafico.html',
                                   info=info,
                                   arq=arq)

    return app
"flask.render_template",
"Forms.info",
"flask.Flask",
"LDB.leitura",
"gGraficos.criarimg",
"flask_sqlalchemy.SQLAlchemy"
] | [((256, 300), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""templates"""'}), "(__name__, template_folder='templates')\n", (261, 300), False, 'from flask import Flask\n'), ((531, 546), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (541, 546), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((621, 650), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (636, 650), False, 'from flask import render_template\n'), ((738, 750), 'Forms.info', 'Forms.info', ([], {}), '()\n', (748, 750), False, 'import Forms\n'), ((961, 1012), 'flask.render_template', 'render_template', (['"""grafico.html"""'], {'info': 'info', 'arq': 'arq'}), "('grafico.html', info=info, arq=arq)\n", (976, 1012), False, 'from flask import render_template\n'), ((1109, 1203), 'LDB.leitura', 'LDB.leitura', (['info.idade.data', 'info.sexo.data', 'info.cidade.data', 'info.situacao_cidade.data'], {}), '(info.idade.data, info.sexo.data, info.cidade.data, info.\n situacao_cidade.data)\n', (1120, 1203), False, 'import LDB\n'), ((1284, 1405), 'gGraficos.criarimg', 'gGraficos.criarimg', (['dados[0][0]', 'dados[0][1]', 'dados[1][0]', 'dados[1][1]', 'dados[2][0]', 'dados[2][1]'], {'tipo': 'info.tipo.data'}), '(dados[0][0], dados[0][1], dados[1][0], dados[1][1],\n dados[2][0], dados[2][1], tipo=info.tipo.data)\n', (1302, 1405), False, 'import gGraficos\n'), ((1463, 1514), 'flask.render_template', 'render_template', (['"""grafico.html"""'], {'info': 'info', 'arq': 'arq'}), "('grafico.html', info=info, arq=arq)\n", (1478, 1514), False, 'from flask import render_template\n')] |
import numpy as np
import torch
from modules.frustum import get_box_corners_3d
from kitti_meters.util import get_box_iou_3d
__all__ = ['MeterFrustumKitti']
class MeterFrustumKitti:
    """Metric accumulator for frustum-based 3D detection on KITTI.

    Depending on ``metric``, :meth:`compute` reports mean 2D/3D box IoU,
    point-mask accuracy, or the fraction of predicted boxes whose 3D IoU
    clears a threshold (overall, or averaged per class).
    """

    def __init__(self, num_heading_angle_bins, num_size_templates, size_templates, class_name_to_class_id,
                 metric='iou_3d'):
        super().__init__()
        assert metric in ['iou_2d', 'iou_3d', 'accuracy', 'iou_3d_accuracy', 'iou_3d_class_accuracy']
        self.metric = metric
        self.num_heading_angle_bins = num_heading_angle_bins
        self.num_size_templates = num_size_templates
        # (NS, 3) template box sizes used to decode size predictions.
        self.size_templates = size_templates.view(self.num_size_templates, 3)
        # Centers of the discretised heading-angle bins over [0, 2*pi).
        self.heading_angle_bin_centers = torch.arange(0, 2 * np.pi, 2 * np.pi / self.num_heading_angle_bins)
        self.class_name_to_class_id = class_name_to_class_id
        self.reset()

    def reset(self):
        # Clear all running sums and counters, including per-class tallies.
        self.total_seen_num = 0
        self.total_correct_num = 0
        self.iou_3d_corrent_num = 0
        self.iou_2d_sum = 0
        self.iou_3d_sum = 0
        self.iou_3d_corrent_num_per_class = {cls: 0 for cls in self.class_name_to_class_id.keys()}
        self.total_seen_num_per_class = {cls: 0 for cls in self.class_name_to_class_id.keys()}

    def update(self, outputs, targets):
        """Accumulate statistics from one batch of ``outputs``/``targets``."""
        if self.metric == 'accuracy':
            # Point-wise accuracy of the segmentation mask logits.
            mask_logits = outputs['mask_logits']
            mask_logits_target = targets['mask_logits']
            self.total_seen_num += mask_logits_target.numel()
            self.total_correct_num += torch.sum(mask_logits.argmax(dim=1) == mask_logits_target).item()
        else:
            center = outputs['center']  # (B, 3)
            heading_scores = outputs['heading_scores']  # (B, NH)
            heading_residuals = outputs['heading_residuals']  # (B, NH)
            size_scores = outputs['size_scores']  # (B, NS)
            size_residuals = outputs['size_residuals']  # (B, NS, 3)
            center_target = targets['center']  # (B, 3)
            heading_bin_id_target = targets['heading_bin_id']  # (B, )
            heading_residual_target = targets['heading_residual']  # (B, )
            size_template_id_target = targets['size_template_id']  # (B, )
            size_residual_target = targets['size_residual']  # (B, 3)
            class_id_target = targets['class_id'].cpu().numpy()  # (B, )
            batch_size = center.size(0)
            batch_id = torch.arange(batch_size, device=center.device)
            self.size_templates = self.size_templates.to(center.device)
            self.heading_angle_bin_centers = self.heading_angle_bin_centers.to(center.device)
            # Decode the predicted box: heading = argmax bin's center plus that
            # bin's residual; size = argmax template plus its residual.
            heading_bin_id = torch.argmax(heading_scores, dim=1)
            heading = self.heading_angle_bin_centers[heading_bin_id] + heading_residuals[batch_id, heading_bin_id]
            size_template_id = torch.argmax(size_scores, dim=1)
            size = self.size_templates[size_template_id] + size_residuals[batch_id, size_template_id]  # (B, 3)
            corners = get_box_corners_3d(centers=center, headings=heading, sizes=size, with_flip=False)  # (B, 8, 3)
            # Decode the ground-truth box the same way from its bin/template ids.
            heading_target = self.heading_angle_bin_centers[heading_bin_id_target] + heading_residual_target  # (B, )
            size_target = self.size_templates[size_template_id_target] + size_residual_target  # (B, 3)
            corners_target = get_box_corners_3d(centers=center_target, headings=heading_target,
                                                sizes=size_target, with_flip=False)  # (B, 8, 3)
            iou_3d, iou_2d = get_box_iou_3d(corners.cpu().detach().numpy(), corners_target.cpu().detach().numpy())
            self.iou_2d_sum += iou_2d.sum()
            self.iou_3d_sum += iou_3d.sum()
            # NOTE(review): the aggregate count applies the 0.7 threshold to
            # every class, while the per-class tallies below use 0.7 for Car
            # and 0.5 otherwise.
            self.iou_3d_corrent_num += np.sum(iou_3d >= 0.7)
            self.total_seen_num += batch_size
            for cls, cls_id in self.class_name_to_class_id.items():
                mask = (class_id_target == cls_id)
                self.iou_3d_corrent_num_per_class[cls] += np.sum(iou_3d[mask] >= (0.7 if cls == 'Car' else 0.5))
                self.total_seen_num_per_class[cls] += np.sum(mask)

    def compute(self):
        """Return the accumulated metric selected by ``self.metric``."""
        if self.metric == 'iou_3d':
            return self.iou_3d_sum / self.total_seen_num
        elif self.metric == 'iou_2d':
            return self.iou_2d_sum / self.total_seen_num
        elif self.metric == 'accuracy':
            return self.total_correct_num / self.total_seen_num
        elif self.metric == 'iou_3d_accuracy':
            return self.iou_3d_corrent_num / self.total_seen_num
        elif self.metric == 'iou_3d_class_accuracy':
            # Mean over classes of the per-class thresholded-IoU accuracy;
            # max(..., 1) guards against division by zero for unseen classes.
            return sum(self.iou_3d_corrent_num_per_class[cls] / max(self.total_seen_num_per_class[cls], 1)
                       for cls in self.class_name_to_class_id.keys()) / len(self.class_name_to_class_id)
        else:
            raise KeyError
| [
"modules.frustum.get_box_corners_3d",
"numpy.sum",
"torch.arange",
"torch.argmax"
] | [((717, 784), 'torch.arange', 'torch.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / self.num_heading_angle_bins)'], {}), '(0, 2 * np.pi, 2 * np.pi / self.num_heading_angle_bins)\n', (729, 784), False, 'import torch\n'), ((2407, 2453), 'torch.arange', 'torch.arange', (['batch_size'], {'device': 'center.device'}), '(batch_size, device=center.device)\n', (2419, 2453), False, 'import torch\n'), ((2650, 2685), 'torch.argmax', 'torch.argmax', (['heading_scores'], {'dim': '(1)'}), '(heading_scores, dim=1)\n', (2662, 2685), False, 'import torch\n'), ((2832, 2864), 'torch.argmax', 'torch.argmax', (['size_scores'], {'dim': '(1)'}), '(size_scores, dim=1)\n', (2844, 2864), False, 'import torch\n'), ((2999, 3085), 'modules.frustum.get_box_corners_3d', 'get_box_corners_3d', ([], {'centers': 'center', 'headings': 'heading', 'sizes': 'size', 'with_flip': '(False)'}), '(centers=center, headings=heading, sizes=size, with_flip=\n False)\n', (3017, 3085), False, 'from modules.frustum import get_box_corners_3d\n'), ((3345, 3452), 'modules.frustum.get_box_corners_3d', 'get_box_corners_3d', ([], {'centers': 'center_target', 'headings': 'heading_target', 'sizes': 'size_target', 'with_flip': '(False)'}), '(centers=center_target, headings=heading_target, sizes=\n size_target, with_flip=False)\n', (3363, 3452), False, 'from modules.frustum import get_box_corners_3d\n'), ((3751, 3772), 'numpy.sum', 'np.sum', (['(iou_3d >= 0.7)'], {}), '(iou_3d >= 0.7)\n', (3757, 3772), True, 'import numpy as np\n'), ((3996, 4050), 'numpy.sum', 'np.sum', (["(iou_3d[mask] >= (0.7 if cls == 'Car' else 0.5))"], {}), "(iou_3d[mask] >= (0.7 if cls == 'Car' else 0.5))\n", (4002, 4050), True, 'import numpy as np\n'), ((4105, 4117), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (4111, 4117), True, 'import numpy as np\n')] |
import os
import json
import ConfigParser
import logging.config
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load the shared settings file
settings_file_path = os.path.join(base_dir, 'config', 'settings.config')
settings = ConfigParser.ConfigParser()
settings.read(settings_file_path)
# set up logging
with open(os.path.join(base_dir, 'config', 'logging.json'), 'r') as f:
logging_config = json.load(f)
logging.config.dictConfig(logging_config)
log = logging.getLogger(__name__)
log.info("---------------------------------------------------------------------------")
requests_logger = logging.getLogger('requests')
requests_logger.setLevel(logging.INFO) | [
"json.load",
"os.path.join",
"os.path.abspath",
"ConfigParser.ConfigParser"
] | [((191, 242), 'os.path.join', 'os.path.join', (['base_dir', '"""config"""', '"""settings.config"""'], {}), "(base_dir, 'config', 'settings.config')\n", (203, 242), False, 'import os\n'), ((254, 281), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (279, 281), False, 'import ConfigParser\n'), ((426, 438), 'json.load', 'json.load', (['f'], {}), '(f)\n', (435, 438), False, 'import json\n'), ((108, 133), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (123, 133), False, 'import os\n'), ((344, 392), 'os.path.join', 'os.path.join', (['base_dir', '"""config"""', '"""logging.json"""'], {}), "(base_dir, 'config', 'logging.json')\n", (356, 392), False, 'import os\n')] |
from flask import Flask
from flask_restplus import Api, Resource, fields
from services.serviceHandler import convertCurrency, getCurrencyExchangeRates
from services.countryCurrencyCodeHandler import (
getCountryAndCurrencyCode,
getCurrencyNameAndCode,
)
app = Flask(__name__)
api = Api(
app,
version="1.0.0",
title="Bee Travels Currency Data Service",
description="This is a microservice that handles currency exchange rate data for Bee Travels",
)
currencyNS = api.namespace(
"Currency",
description="Operations associated with currency exchange rate conversions",
)
currencyNameOrCurrencyCode = api.model(
"currencyNameOrCurrencyCode",
{
"currencyCode": fields.String(
required=False, description="3 letter currency code"
),
"country": fields.String(required=False, description="country name"),
},
)
@currencyNS.route("/")
class CurrencyList(Resource):
"""Shows a list of currency ex rates"""
def get(self):
return getCurrencyExchangeRates()
# /currency/{currencyFromAmount}/{currencyFromCode}/{currencyToCode}
# /currency/10/EUR/USD
@currencyNS.route("/<int:currencyFromAmount>/<currencyFromCode>/<currencyToCode>")
@currencyNS.response(404, "Currency Code not found")
@currencyNS.param("currencyFromAmount", "currency to convert from value (float)")
@currencyNS.param("currencyFromCode", "currency (3 character code) to convert from")
@currencyNS.param("currencyToCode", "currency (3 character code) to convert to")
class Currency(Resource):
def get(self, currencyFromAmount, currencyFromCode, currencyToCode):
result = convertCurrency(
float(currencyFromAmount), currencyFromCode, currencyToCode
)
return {"result": result}
@currencyNS.route("/search")
@currencyNS.response(404, "Currency Code not found")
class Search(Resource):
@currencyNS.doc("search_currency_meta")
@currencyNS.expect(currencyNameOrCurrencyCode)
@currencyNS.marshal_with(currencyNameOrCurrencyCode, code=201)
def post(self):
if "currencyCode" in api.payload:
return getCountryAndCurrencyCode(api.payload["currencyCode"])
elif "country" in api.payload:
return getCurrencyNameAndCode(api.payload["country"])
else:
api.abort(400, "Pass in either the currencyCode or country name")
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=7878)
| [
"services.countryCurrencyCodeHandler.getCountryAndCurrencyCode",
"services.serviceHandler.getCurrencyExchangeRates",
"flask.Flask",
"services.countryCurrencyCodeHandler.getCurrencyNameAndCode",
"flask_restplus.Api",
"flask_restplus.fields.String"
] | [((269, 284), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (274, 284), False, 'from flask import Flask\n'), ((291, 468), 'flask_restplus.Api', 'Api', (['app'], {'version': '"""1.0.0"""', 'title': '"""Bee Travels Currency Data Service"""', 'description': '"""This is a microservice that handles currency exchange rate data for Bee Travels"""'}), "(app, version='1.0.0', title='Bee Travels Currency Data Service',\n description=\n 'This is a microservice that handles currency exchange rate data for Bee Travels'\n )\n", (294, 468), False, 'from flask_restplus import Api, Resource, fields\n'), ((708, 775), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(False)', 'description': '"""3 letter currency code"""'}), "(required=False, description='3 letter currency code')\n", (721, 775), False, 'from flask_restplus import Api, Resource, fields\n'), ((818, 875), 'flask_restplus.fields.String', 'fields.String', ([], {'required': '(False)', 'description': '"""country name"""'}), "(required=False, description='country name')\n", (831, 875), False, 'from flask_restplus import Api, Resource, fields\n'), ((1020, 1046), 'services.serviceHandler.getCurrencyExchangeRates', 'getCurrencyExchangeRates', ([], {}), '()\n', (1044, 1046), False, 'from services.serviceHandler import convertCurrency, getCurrencyExchangeRates\n'), ((2130, 2184), 'services.countryCurrencyCodeHandler.getCountryAndCurrencyCode', 'getCountryAndCurrencyCode', (["api.payload['currencyCode']"], {}), "(api.payload['currencyCode'])\n", (2155, 2184), False, 'from services.countryCurrencyCodeHandler import getCountryAndCurrencyCode, getCurrencyNameAndCode\n'), ((2243, 2289), 'services.countryCurrencyCodeHandler.getCurrencyNameAndCode', 'getCurrencyNameAndCode', (["api.payload['country']"], {}), "(api.payload['country'])\n", (2265, 2289), False, 'from services.countryCurrencyCodeHandler import getCountryAndCurrencyCode, getCurrencyNameAndCode\n')] |
import numpy as np
import pandas as pd
from Bio import AlignIO, Seq
# parameter to determine the maximum missing proportion that we keep
missing_thresh = 0.4
# load the alignments and turn them into a numpy array
alignments = AlignIO.read(snakemake.input[0], 'fasta')
align_arr = np.array([list(rec) for rec in alignments])
# get a list of missing values per base
missing_bases = []
# iterate over the whole alignment counting missing bases
for base in range(align_arr.shape[1]):
missing = 0
for seq in range(align_arr.shape[0]):
if alignments[seq, base] not in ['A', 'T', 'G', 'C']:
missing += 1
missing_bases.append(missing)
# calculate the proportion of missing bases for each column
missing_prop = np.array([m / align_arr.shape[0] for m in missing_bases])
align_arr = align_arr[:, missing_prop < missing_thresh]
for r, rec in enumerate(alignments):
joined_seq = ''.join(align_arr[r])
print(joined_seq[:10])
rec.seq = Seq.Seq(joined_seq)
with open(snakemake.output[0], 'w') as fout:
AlignIO.write(alignments, fout, 'fasta')
| [
"numpy.array",
"Bio.Seq.Seq",
"Bio.AlignIO.read",
"Bio.AlignIO.write"
] | [((228, 269), 'Bio.AlignIO.read', 'AlignIO.read', (['snakemake.input[0]', '"""fasta"""'], {}), "(snakemake.input[0], 'fasta')\n", (240, 269), False, 'from Bio import AlignIO, Seq\n'), ((739, 798), 'numpy.array', 'np.array', (['[(m / align_arr.shape[0]) for m in missing_bases]'], {}), '([(m / align_arr.shape[0]) for m in missing_bases])\n', (747, 798), True, 'import numpy as np\n'), ((971, 990), 'Bio.Seq.Seq', 'Seq.Seq', (['joined_seq'], {}), '(joined_seq)\n', (978, 990), False, 'from Bio import AlignIO, Seq\n'), ((1041, 1081), 'Bio.AlignIO.write', 'AlignIO.write', (['alignments', 'fout', '"""fasta"""'], {}), "(alignments, fout, 'fasta')\n", (1054, 1081), False, 'from Bio import AlignIO, Seq\n')] |
import pandas as pd
import sys
def fix(lists):
df = pd.read_json(lists)
df2 = pd.DataFrame([p for p1 in df.players for p in p1])
df2['theme1'] = ''
df2['theme2'] = ''
for i, l in df2.list2.iteritems():
try:
df2.theme2.iloc[i] = l['theme']
except KeyError:
continue
except TypeError:
continue
for i, l in df2.list2.iteritems():
try:
df2.theme2.iloc[i] = l['theme']
except KeyError:
df2.theme2.iloc[i] = 'None'
except TypeError:
continue
for i, l in df2.list1.iteritems():
try:
df2.theme1.iloc[i] = l['theme']
except KeyError:
df2.theme1.iloc[i] = 'None'
except TypeError:
continue
for i, l in df2.list2.iteritems():
try:
df2.list2.iloc[i] = l['list']
except KeyError:
continue
except TypeError:
continue
for i, l in df2.list1.iteritems():
try:
df2.list1.iloc[i] = l['list']
except KeyError:
continue
except TypeError:
continue
df2.to_json('fixed.json')
if __name__ == "__main__":
fix(sys.argv[1])
| [
"pandas.DataFrame",
"pandas.read_json"
] | [((58, 77), 'pandas.read_json', 'pd.read_json', (['lists'], {}), '(lists)\n', (70, 77), True, 'import pandas as pd\n'), ((89, 139), 'pandas.DataFrame', 'pd.DataFrame', (['[p for p1 in df.players for p in p1]'], {}), '([p for p1 in df.players for p in p1])\n', (101, 139), True, 'import pandas as pd\n')] |
import multiprocessing
import os
import time
rootdir = input()
keyword = input()
batch_size = 1
def try_multiple_operations(file_path):
try:
with open(file_path, "rb") as f: # open the file for reading
for line in f: # use: for i, line in enumerate(f) if you need line numbers
try:
line = line.decode("utf-8") # try to decode the contents to utf-8
except ValueError: # decoding failed, skip the line
continue
if keyword in line: # if the keyword exists on the current line...
print(file_path) # print the file path
except (IOError, OSError): # ignore read and permission errors
pass
def walk_dirs(directory, batch_size):
walk_dirs_generator = os.walk(directory)
for dirname, subdirectories, filenames in walk_dirs_generator:
for i in range(0, len(filenames), batch_size):
the_queue.put(os.path.join(dirname, filenames[i]))
the_queue = multiprocessing.Queue()
walk_dirs(rootdir, batch_size)
def worker_main(queue):
while True:
item = queue.get(True)
try_multiple_operations(item)
the_pool = multiprocessing.Pool(3, worker_main,(the_queue,))
| [
"multiprocessing.Queue",
"os.path.join",
"multiprocessing.Pool",
"os.walk"
] | [((1019, 1042), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (1040, 1042), False, 'import multiprocessing\n'), ((1197, 1247), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(3)', 'worker_main', '(the_queue,)'], {}), '(3, worker_main, (the_queue,))\n', (1217, 1247), False, 'import multiprocessing\n'), ((802, 820), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (809, 820), False, 'import os\n'), ((969, 1004), 'os.path.join', 'os.path.join', (['dirname', 'filenames[i]'], {}), '(dirname, filenames[i])\n', (981, 1004), False, 'import os\n')] |
from flask import Blueprint, request
router = Blueprint("router", __name__)
@router.route("/check")
def check():
return "Congratulations! Your app works. :)"
@router.route("/add", methods=["POST"])
def add():
first_number = request.form['FirstNumber']
second_number = request.form['SecondNumber']
result = first_number + second_number
return result
| [
"flask.Blueprint"
] | [((47, 76), 'flask.Blueprint', 'Blueprint', (['"""router"""', '__name__'], {}), "('router', __name__)\n", (56, 76), False, 'from flask import Blueprint, request\n')] |
import io
import struct
class SteamPacketBuffer(io.BytesIO):
"""In-memory byte buffer."""
def __len__(self):
return len(self.getvalue())
def __repr__(self):
return '<PacketBuffer: {}: {}>'.format(len(self), self.getvalue())
def __str__(self):
return str(self.getvalue())
def read_byte(self):
return struct.unpack('<B', self.read(1))[0]
def write_byte(self, value):
self.write(struct.pack('<B', value))
def read_short(self):
return struct.unpack('<h', self.read(2))[0]
def write_short(self, value):
self.write(struct.pack('<h', value))
def read_float(self):
return struct.unpack('<f', self.read(4))[0]
def write_float(self, value):
self.write(struct.pack('<f', value))
def read_long(self):
return struct.unpack('<l', self.read(4))[0]
def write_long(self, value):
self.write(struct.pack('<l', value))
def read_long_long(self):
return struct.unpack('<Q', self.read(8))[0]
def write_long_long(self, value):
self.write(struct.pack('<Q', value))
def read_string(self):
# TODO: find a more pythonic way doing this
value = []
while True:
char = self.read(1)
if char == b'\x00':
break
else:
value.append(char)
return ''.join(map(lambda char: chr(ord(char)), value))
def write_string(self, value):
self.write(bytearray('{0}\x00'.format(value), 'utf-8'))
| [
"struct.pack"
] | [((447, 471), 'struct.pack', 'struct.pack', (['"""<B"""', 'value'], {}), "('<B', value)\n", (458, 471), False, 'import struct\n'), ((606, 630), 'struct.pack', 'struct.pack', (['"""<h"""', 'value'], {}), "('<h', value)\n", (617, 630), False, 'import struct\n'), ((765, 789), 'struct.pack', 'struct.pack', (['"""<f"""', 'value'], {}), "('<f', value)\n", (776, 789), False, 'import struct\n'), ((922, 946), 'struct.pack', 'struct.pack', (['"""<l"""', 'value'], {}), "('<l', value)\n", (933, 946), False, 'import struct\n'), ((1089, 1113), 'struct.pack', 'struct.pack', (['"""<Q"""', 'value'], {}), "('<Q', value)\n", (1100, 1113), False, 'import struct\n')] |
""" GLSL shader generation """
from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace
from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl
import os, sys, importlib, re
from shutil import copyfile
def pssl(fsl, dst, rootSignature=None):
return d3d(fsl, dst, pssl=True, d3d12=False, rootSignature=rootSignature)
def prospero(fsl, dst):
return d3d(fsl, dst, pssl=True, prospero=True)
def xbox(fsl, dst, rootSignature=None):
return d3d(fsl, dst, xbox=True, d3d12=True, rootSignature=rootSignature)
def d3d12(fsl, dst):
return d3d(fsl, dst, d3d12=True)
def scarlett(fsl, dst, rootSignature=None):
return xbox(fsl, dst, rootSignature)
def d3d(fsl, dst, pssl=False, prospero=False, xbox=False, rootSignature=None, d3d12=False):
shader = getShader(fsl, dst)
shader_src = getHeader(fsl)
if not (d3d12 or pssl or xbox):
shader_src += ['#define DIRECT3D11\n']
if prospero:
import prospero
pssl = prospero
shader_src += ['#define PROSPERO\n']
shader_src += prospero.preamble()
elif pssl:
import orbis
pssl = orbis
shader_src += ['#define ORBIS\n']
shader_src += orbis.preamble()
if xbox:
import xbox
shader_src += ['#define XBOX\n']
shader_src += xbox.preamble()
if d3d12:
shader_src += ['#define DIRECT3D12\n']
shader_src += ['#define STAGE_', shader.stage.name, '\n']
if shader.enable_waveops:
shader_src += ['#define ENABLE_WAVEOPS()\n']
# directly embed d3d header in shader
header_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'includes', 'd3d.h')
header_lines = open(header_path).readlines()
shader_src += header_lines + ['\n']
nonuniformresourceindex = None
# tesselation
pcf_returnType = None
# for SV_PrimitiveID usage in pixel shaders, generate a pass-through gs
passthrough_gs = False
if pssl and shader.stage == Stages.FRAG:
for dtype, dvar in shader.flat_args:
if getMacroName(dtype).upper() == 'SV_PRIMITIVEID':
passthrough_gs = True
if prospero:
prospero.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
else:
orbis.gen_passthrough_gs(shader, dst.replace('frag', 'geom'))
last_res_decl = 0
explicit_res_decl = None
srt_resources = { descriptor_set.name: [] for descriptor_set in DescriptorSets }
srt_free_resources = []
srt_references = []
defineLoc = len(shader_src)
parsing_struct = None
skip_semantics = False
struct_elements = []
srt_redirections = set()
for line in shader.lines:
def get_uid(name):
return name + '_' + str(len(shader_src))
# dont process commented lines
if line.strip().startswith('//'):
shader_src += [line]
continue
if is_groupshared_decl(line):
dtype, dname = getMacro(line)
basename = getArrayBaseName(dname)
shader_src += ['#define srt_'+basename+' '+basename+'\n']
if not pssl:
line = 'groupshared '+dtype+' '+dname+';\n'
else:
line = 'thread_group_memory '+dtype+' '+dname+';\n'
if 'DECLARE_RESOURCES' in line:
explicit_res_decl = len(shader_src) + 1
line = '//' + line
if line.strip().startswith('STRUCT(') or line.strip().startswith('CBUFFER(') or line.strip().startswith('PUSH_CONSTANT('):
parsing_struct = getMacro(line)
struct_name = parsing_struct[0]
struct_elements = []
if pssl and 'PUSH_CONSTANT' in line:
skip_semantics = True
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
srt_free_resources += [(macro, pssl.declare_rootconstant(struct_name))]
if pssl and 'CBUFFER' in line:
skip_semantics = True
res_freq = parsing_struct[1]
macro = get_uid(struct_name)
shader_src += ['#define ', macro, '\n']
if 'rootcbv' in struct_name:
srt_free_resources += [(macro, pssl.declare_cbuffer(struct_name))]
else:
srt_resources[res_freq] += [(macro, pssl.declare_cbuffer(struct_name))]
if parsing_struct and line.strip().startswith('DATA('):
data_decl = getMacro(line)
if skip_semantics or data_decl[-1] == 'None':
line = get_whitespace(line) + data_decl[0] + ' ' + data_decl[1] + ';\n'
if pssl and type(parsing_struct) is not str:
basename = getArrayBaseName(data_decl[1])
macro = 'REF_' + get_uid(basename)
shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_element_reference(shader, parsing_struct, data_decl)
shader_src += [*init, '\n']
srt_redirections.add(basename)
struct_elements += [(macro, ref)]
srt_references += [(macro, (init, ref))]
shader_src += [line]
continue
if parsing_struct and '};' in line:
# if this shader is the receiving end of a passthrough_gs, insert the necessary inputs
if passthrough_gs and shader.struct_args[0][0] == parsing_struct:
shader_src += ['\tDATA(FLAT(uint), PrimitiveID, TEXCOORD8);\n']
shader_src += [line]
skip_semantics = False
if type(parsing_struct) is not str:
last_res_decl = len(shader_src)+1
parsing_struct = None
continue
resource_decl = None
if line.strip().startswith('RES('):
resource_decl = getMacro(line)
last_res_decl = len(shader_src)+1
if pssl and resource_decl:
# shader_src += ['// ', line.strip(), '\n']
_, res_name, res_freq, _, _ = resource_decl
basename = getArrayBaseName(res_name)
macro = get_uid(basename)
# shader_src += ['#define ', macro, ' //', line.strip(), '\n']
shader_src += ['#define ', macro, '\n']
srt_resources[res_freq] += [(macro, pssl.declare_resource(resource_decl))]
# macro = 'REF_' + macro
# shader_src += ['#define ', macro, '\n']
init, ref = pssl.declare_reference(shader, resource_decl)
shader_src += [*init, '\n']
srt_references += [(macro, (init, ref))]
srt_redirections.add(basename)
last_res_decl = len(shader_src)+1
# continue
if 'TESS_VS_SHADER(' in line and prospero:
vs_filename = getMacro(line).strip('"')
vs_fsl_path = os.path.join(os.path.dirname(fsl), vs_filename)
ls_vs_filename = 'ls_'+vs_filename.replace('.fsl', '')
vs_pssl = os.path.join(os.path.dirname(dst), ls_vs_filename)
d3d(vs_fsl_path, vs_pssl, pssl=True, prospero=True)
shader_src += [
'#undef VS_MAIN\n',
'#define VS_MAIN vs_main\n',
'#include "', ls_vs_filename, '"\n'
]
continue
if '_MAIN(' in line and shader.stage == Stages.TESC and prospero:
shader_src += pssl.insert_tesc('vs_main')
if '_MAIN(' in line and shader.returnType:
if shader.returnType not in shader.structs:
if shader.stage == Stages.FRAG:
if not 'SV_DEPTH' in shader.returnType.upper():
line = line[:-1] + ': SV_TARGET\n'
else:
line = line[:-1] + ': SV_DEPTH\n'
if shader.stage == Stages.VERT:
line = line[:-1] + ': SV_POSITION\n'
# manually transform Type(var) to Type var (necessary for DX11/fxc)
if '_MAIN(' in line:
for dtype, var in shader.struct_args:
line = line.replace(dtype+'('+var+')', dtype + ' ' + var)
for dtype, dvar in shader.flat_args:
sem = getMacroName(dtype).upper()
innertype = getMacro(dtype)
ldtype = line.find(dtype)
line = line[:ldtype]+innertype+line[ldtype+len(dtype):]
l0 = line.find(' '+dvar, ldtype) + len(dvar)+1
line = line[:l0]+' : '+sem+line[l0:]
# if this shader is the receiving end of a passthrough_gs, get rid of the PrimitiveID input
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
upper_line = line.upper()
l0 = upper_line.find('SV_PRIMITIVEID')
l1 = upper_line.rfind(',', 0, l0)
line = line.replace(line[l1: l0+len('SV_PRIMITIVEID')], '')
if pssl:
for dtype, darg in shader.flat_args:
if 'SV_INSTANCEID' in dtype.upper():
shader_src += pssl.set_indirect_draw()
if '_MAIN(' in line and (pssl or xbox) and rootSignature:
l0 = rootSignature.find('SrtSignature')
l1 = rootSignature.find('{', l0)
srt_name = rootSignature[l0: l1].split()[-1]
res_sig = 'RootSignature' if xbox else 'SrtSignature'
shader_src += ['[', res_sig, '(', srt_name, ')]\n', line]
continue
# if 'INIT_MAIN' in line:
# if pssl:
# shader_src += ['\tinit_global_references();\n']
if 'INIT_MAIN' in line and shader.returnType:
# mName = getMacroName(shader.returnType)
# mArg = getMacro(shader.returnType)
# line = line.replace('INIT_MAIN', '{} {}'.format(mName, mArg))
line = get_whitespace(line)+'//'+line.strip()+'\n'
# if this shader is the receiving end of a passthrough_gs, copy the PrimitiveID from GS output
if passthrough_gs:
for dtype, dvar in shader.flat_args:
if 'SV_PRIMITIVEID' in dtype.upper():
shader_src += ['uint ', dvar, ' = ', shader.struct_args[0][1], '.PrimitiveID;\n']
if 'BeginNonUniformResourceIndex(' in line:
index, max_index = getMacro(line), None
assert index != [], 'No index provided for {}'.format(line)
if type(index) == list:
max_index = index[1]
index = index[0]
nonuniformresourceindex = index
if pssl:
shader_src += pssl.begin_nonuniformresourceindex(nonuniformresourceindex, max_index)
continue
else:
line = '#define {0} NonUniformResourceIndex({0})\n'.format(nonuniformresourceindex)
if 'EndNonUniformResourceIndex()' in line:
assert nonuniformresourceindex, 'EndNonUniformResourceIndex: BeginNonUniformResourceIndex not called/found'
if pssl:
shader_src += pssl.end_nonuniformresourceindex(nonuniformresourceindex)
continue
else:
line = '#undef {}\n'.format(nonuniformresourceindex)
nonuniformresourceindex = None
elif re.match('\s*RETURN', line):
if shader.returnType:
line = line.replace('RETURN', 'return ')
else:
line = line.replace('RETURN()', 'return')
# tesselation
if shader.pcf and shader.pcf in line and not pcf_returnType:
loc = line.find(shader.pcf)
pcf_returnType = line[:loc].strip()
# line = getMacroName(pcf_returnType) + ' ' + line[loc:]
for dtype, dvar in shader.pcf_arguments:
if not 'INPUT_PATCH' in dtype and not 'OUTPUT_PATCH' in dtype:
line = line.replace(dtype, getMacro(dtype))
line = line.replace(dvar, dvar+': '+getMacroName(dtype))
if pcf_returnType and re.match('\s*PCF_INIT', line):
# line = line.replace('PCF_INIT', getMacroName(pcf_returnType) + ' ' + getMacro(pcf_returnType))
line = line.replace('PCF_INIT', '')
if pcf_returnType and 'PCF_RETURN' in line:
line = line.replace('PCF_RETURN', 'return ')
# line = line.replace('PCF_RETURN', '{ return ' + getMacro(pcf_returnType) + ';}')
if 'INDIRECT_DRAW(' in line:
if pssl:
shader_src += pssl.set_indirect_draw()
line = '//' + line
if 'SET_OUTPUT_FORMAT(' in line:
if pssl:
shader_src += pssl.set_output_format(getMacro(line))
line = '//' + line
if 'PS_ZORDER_EARLYZ(' in line:
if xbox:
shader_src += xbox.set_ps_zorder_earlyz()
line = '//' + line
shader_src += [line]
if pssl:
if explicit_res_decl:
last_res_decl = explicit_res_decl
if last_res_decl > 0: # skip srt altogether if no declared resourced or not requested
srt = pssl.gen_srt(srt_resources, srt_free_resources, srt_references)
open(dst + '.srt.h', 'w').write(srt)
shader_src.insert(last_res_decl, '\n#include \"' + os.path.basename(dst) + '.srt.h\"\n')
# insert root signature at the end (not sure whether that will work for xbox)
if rootSignature and pssl:
shader_src += [_line+'\n' for _line in rootSignature.splitlines()]# + shader.lines
if rootSignature and xbox:
shader_src += rootSignature + ['\n']# + shader.lines
open(dst, 'w').writelines(shader_src)
return 0 | [
"utils.get_whitespace",
"utils.getMacro",
"xbox",
"utils.getShader",
"utils.is_groupshared_decl",
"utils.getHeader",
"re.match",
"orbis.preamble",
"prospero.preamble",
"os.path.dirname",
"xbox.set_ps_zorder_earlyz",
"os.path.basename",
"utils.getMacroName",
"utils.getArrayBaseName",
"xbo... | [((722, 751), 'xbox', 'xbox', (['fsl', 'dst', 'rootSignature'], {}), '(fsl, dst, rootSignature)\n', (726, 751), False, 'import xbox\n'), ((859, 878), 'utils.getShader', 'getShader', (['fsl', 'dst'], {}), '(fsl, dst)\n', (868, 878), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((897, 911), 'utils.getHeader', 'getHeader', (['fsl'], {}), '(fsl)\n', (906, 911), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((1128, 1147), 'prospero.preamble', 'prospero.preamble', ([], {}), '()\n', (1145, 1147), False, 'import prospero\n'), ((1385, 1400), 'xbox.preamble', 'xbox.preamble', ([], {}), '()\n', (1398, 1400), False, 'import xbox\n'), ((3030, 3055), 'utils.is_groupshared_decl', 'is_groupshared_decl', (['line'], {}), '(line)\n', (3049, 3055), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((1271, 1287), 'orbis.preamble', 'orbis.preamble', ([], {}), '()\n', (1285, 1287), False, 'import orbis\n'), ((1708, 1733), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1723, 1733), False, 'import os, sys, importlib, re\n'), ((3084, 3098), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (3092, 3098), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((3122, 3145), 'utils.getArrayBaseName', 'getArrayBaseName', (['dname'], {}), '(dname)\n', (3138, 3145), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((3672, 3686), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (3680, 3686), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((4606, 4620), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (4614, 4620), False, 'from utils import Stages, 
getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((5972, 5986), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (5980, 5986), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((6204, 6230), 'utils.getArrayBaseName', 'getArrayBaseName', (['res_name'], {}), '(res_name)\n', (6220, 6230), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((11536, 11564), 're.match', 're.match', (['"""\\\\s*RETURN"""', 'line'], {}), "('\\\\s*RETURN', line)\n", (11544, 11564), False, 'import os, sys, importlib, re\n'), ((12285, 12315), 're.match', 're.match', (['"""\\\\s*PCF_INIT"""', 'line'], {}), "('\\\\s*PCF_INIT', line)\n", (12293, 12315), False, 'import os, sys, importlib, re\n'), ((4852, 4882), 'utils.getArrayBaseName', 'getArrayBaseName', (['data_decl[1]'], {}), '(data_decl[1])\n', (4868, 4882), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((6995, 7015), 'os.path.dirname', 'os.path.dirname', (['fsl'], {}), '(fsl)\n', (7010, 7015), False, 'import os, sys, importlib, re\n'), ((7132, 7152), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (7147, 7152), False, 'import os, sys, importlib, re\n'), ((8394, 8409), 'utils.getMacro', 'getMacro', (['dtype'], {}), '(dtype)\n', (8402, 8409), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((10579, 10593), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (10587, 10593), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((13078, 13105), 'xbox.set_ps_zorder_earlyz', 'xbox.set_ps_zorder_earlyz', ([], {}), '()\n', (13103, 13105), False, 'import xbox\n'), ((6930, 6944), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (6938, 6944), False, 'from utils import 
Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((12939, 12953), 'utils.getMacro', 'getMacro', (['line'], {}), '(line)\n', (12947, 12953), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((2136, 2155), 'utils.getMacroName', 'getMacroName', (['dtype'], {}), '(dtype)\n', (2148, 2155), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((8338, 8357), 'utils.getMacroName', 'getMacroName', (['dtype'], {}), '(dtype)\n', (8350, 8357), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((10094, 10114), 'utils.get_whitespace', 'get_whitespace', (['line'], {}), '(line)\n', (10108, 10114), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((12160, 12175), 'utils.getMacro', 'getMacro', (['dtype'], {}), '(dtype)\n', (12168, 12175), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n'), ((13545, 13566), 'os.path.basename', 'os.path.basename', (['dst'], {}), '(dst)\n', (13561, 13566), False, 'import os, sys, importlib, re\n'), ((12233, 12252), 'utils.getMacroName', 'getMacroName', (['dtype'], {}), '(dtype)\n', (12245, 12252), False, 'from utils import isArray, getArrayLen, getArrayBaseName, getMacroName, DescriptorSets, is_groupshared_decl\n'), ((4702, 4722), 'utils.get_whitespace', 'get_whitespace', (['line'], {}), '(line)\n', (4716, 4722), False, 'from utils import Stages, getHeader, getShader, getMacro, genFnCall, fsl_assert, get_whitespace\n')] |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pydriller.repository import Repository
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
def test_between_revisions():
    """Traversing between two tags yields exactly the commits between them, in order."""
    commits = list(Repository('test-repos/tags',
                             from_tag='tag1',
                             to_tag='tag3').traverse_commits())
    expected_hashes = [
        '6bb9e2c6a8080e6b5b34e6e316c894b2ddbf7fcd',
        'f1a90b8d7b151ceefd3e3dfc0dc1d0e12b5f48d0',
        '4638730126d40716e230c2040751a13153fb1556',
        'a26f1438bd85d6b22497c0e5dae003812becd0bc',
        '627e1ad917a188a861c9fedf6e5858b79edbe439',
    ]
    assert len(commits) == 5
    assert [c.hash for c in commits] == expected_hashes
def test_multiple_repos_with_tags():
    """The same tag range over three copies of one repo yields three times the commits."""
    repos = ['test-repos/tags'] * 3
    commits = list(Repository(path_to_repo=repos,
                             from_tag='tag2',
                             to_tag='tag3').traverse_commits())
    assert len(commits) == 9
| [
"logging.basicConfig",
"pydriller.repository.Repository"
] | [((630, 725), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n", (649, 725), False, 'import logging\n'), ((811, 874), 'pydriller.repository.Repository', 'Repository', (['"""test-repos/tags"""'], {'from_tag': 'from_tag', 'to_tag': 'to_tag'}), "('test-repos/tags', from_tag=from_tag, to_tag=to_tag)\n", (821, 874), False, 'from pydriller.repository import Repository\n'), ((1505, 1569), 'pydriller.repository.Repository', 'Repository', ([], {'path_to_repo': 'repos', 'from_tag': 'from_tag', 'to_tag': 'to_tag'}), '(path_to_repo=repos, from_tag=from_tag, to_tag=to_tag)\n', (1515, 1569), False, 'from pydriller.repository import Repository\n')] |
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS
from powderday.nebular_emission.cloudy_tools import sym_to_name
"""
------------------------------------------------------------------------------------------
From cloudyfsps written by <NAME>.
(Source https://github.com/nell-byler/cloudyfsps/blob/master/cloudyfsps/nebAbundTools.py
retrieved in October 2019)
------------------------------------------------------------------------------------------
"""
def getNebAbunds(set_name, logZ, dust=True, re_z=False, **kwargs):
    """
    neb_abund.get_abunds(set_name, logZ, dust=True, re_z=False)
    set_name must be one of 'dopita', 'newdopita', 'cl01', 'yeh',
    'varyNO', 'gutkin', 'UVbyler' or 'varyCO'.

    Returns an instance of the matching abundance-set class, built with the
    given metallicity and grain/re-scaling options.

    Raises
    ------
    IOError
        If ``set_name`` is not one of the allowed set names.
    """
    allowed_names = ['dopita', 'newdopita', 'cl01', 'yeh',
                     'varyNO', 'gutkin', 'UVbyler', 'varyCO']
    if set_name not in allowed_names:
        raise IOError(allowed_names)
    # Dispatch through the module namespace instead of formatting and
    # eval()-ing a source string: same classes, no eval, and arguments are
    # passed as real objects rather than round-tripped through repr.
    return globals()[set_name](logZ, dust=dust, re_z=re_z)
class abundSet(object):
    def __init__(self, set_name, logZ):
        """
        Overarching class for abundance sets, e.g. abundSet('dopita', 0.0).

        Loads the base abundances and depletion factors for ``set_name``,
        then runs the subclass hooks to compute final values and build the
        Cloudy input strings.
        """
        self.logZ = logZ
        self.abund_0 = load_abund(set_name)
        self.depl = load_depl(set_name)
        self.calcSpecial()
        self.calcFinal()
        self.inputStrings()
    def calcSpecial(self):
        """Hook for subclasses: elements that need special functional forms."""
        return
    def calcFinal(self):
        """Hook for subclasses: scaling/depletion for the remaining elements."""
        return
    def inputStrings(self):
        """Assemble the Cloudy 'abundances' command and per-element override lines."""
        self.solarstr = 'abundances {} {}'.format(self.solar, self.grains)
        names = sym_to_name()
        lines = []
        for sym in self.abund_0.keys():
            value = self.__getattribute__(sym)
            lines.append('element abundance {0} {1:.2f} log'.format(names[sym], value))
        self.__setattr__('elem_strs', lines)
        return
class dopita(abundSet):
    # Cloudy keyword for the old solar abundance set used by Dopita+2001.
    solar = 'old solar 84'
    def __init__(self, logZ, dust=True, re_z=False):
        """
        Dopita+2001 abundance set: old solar abundances = 0.019,
        optionally with ISM grains.
        """
        self.grains = 'no grains\ngrains ISM' if dust else 'no grains'
        self.re_z = logZ if re_z else 0.0
        abundSet.__init__(self, 'dopita', logZ)
    def calcSpecial(self):
        """Set He from a functional form and N from a step (piece-wise) scaling."""
        logZ = self.logZ
        helium = np.log10(0.08096 + (0.02618 * (10.0 ** logZ)))
        if logZ <= -0.63:
            nitrogen = -4.57 + logZ
        else:
            nitrogen = -3.94 + (2.0 * logZ)
        self.__setattr__('He', helium)
        self.__setattr__('N', nitrogen + self.depl['N'])
        return
    def calcFinal(self):
        """Scale every remaining element with logZ and apply its depletion factor."""
        for key, val in self.abund_0.items():
            if not hasattr(self, key):
                self.__setattr__(key, val + self.logZ + self.depl[key])
        return
class newdopita(abundSet):
    # Cloudy keyword for the Grevesse 2010 solar abundance set.
    solar = 'GASS10'
    def __init__(self, logZ, dust=True, re_z=False):
        """
        Abundances from Dopita (2013)
        Solar Abundances from Grevasse 2010 - z= 0.013
        includes smooth polynomial for N/O, C/O relationship
        functional form for He(z)
        new depletion factors
        ISM grains
        """
        if dust:
            self.grains = 'no grains\ngrains ISM'
        else:
            self.grains = 'no grains'
        self.re_z = re_z
        abundSet.__init__(self, 'newdopita', logZ)
    def calcSpecial(self):
        # Helium from a functional form of logZ.
        def calc_He(logZ):
            return np.log10(0.0737 + (0.024 * (10.0 ** logZ)))
        # C and N follow O via 1-D linear interpolation against tabulated
        # values of 12 + log(O/H).
        def calc_CNO(logZ):
            oxy = np.array([7.39, 7.50, 7.69, 7.99, 8.17,
                            8.39, 8.69, 8.80, 8.99, 9.17, 9.39])
            nit = np.array([-6.61, -6.47, -6.23, -5.79, -5.51,
                            -5.14, -4.60, -4.40, -4.04, -3.67, -3.17])
            car = np.array([-5.58, -5.44, -5.20, -4.76, -4.48,
                            -4.11, -3.57, -3.37, -3.01, -2.64, -2.14])
            O = self.abund_0['O'] + logZ
            # k=1 gives piece-wise linear interpolation of the tables above.
            C = float(InterpUS(oxy, car, k=1)(O + 12.0))
            N = float(InterpUS(oxy, nit, k=1)(O + 12.0))
            return C, N, O
        self.__setattr__('He', calc_He(self.logZ))
        C, N, O = calc_CNO(self.logZ)
        # Apply depletion factors to the specially-computed C, N and O.
        [self.__setattr__(key, val + self.depl[key])
         for key, val in zip(['C', 'N', 'O'], [C, N, O])]
        return
    def calcFinal(self):
        # Scale and deplete every element not already set by calcSpecial.
        [self.__setattr__(key, val + self.logZ + self.depl[key])
         for key, val in self.abund_0.items() if not hasattr(self, key)]
        return
class UVbyler(abundSet):
    # Cloudy keyword for the Grevesse 2010 solar abundance set.
    solar = 'GASS10'
    def __init__(self, logZ, dust=True, re_z=False):
        """
        Abundances from Dopita (2013)
        Solar Abundances from Grevasse 2010 - z= 0.013
        New fit for N/O, C/O relationship
        functional form for He(z)
        new depletion factors
        ISM grains
        """
        if dust:
            self.grains = 'no grains\ngrains ISM'
        else:
            self.grains = 'no grains'
        self.re_z = re_z
        abundSet.__init__(self, 'UVbyler', logZ)
    def calcSpecial(self):
        # Helium from a functional form of logZ.
        def calc_He(logZ):
            return np.log10(0.0737 + (0.024 * (10.0 ** logZ)))
        # C and N from analytic fits in terms of O; the commented-out lines
        # are earlier fit versions kept for reference.
        def calc_CNO(logZ):
            O = self.abund_0['O'] + logZ
            # C = np.log10((1.0*10.**O)*(10.**-1.1 + 10.**(2.96 + O)))
            C = np.log10((10. ** O) * (10. ** -0.7 + 10. ** (4.8 + 1.45 * O)))
            # N = np.log10((1.0*10.**O)*(10.**-1.8 + 10.**(2.2 + O)))
            # N = np.log10((10.**O)*(10.**-1.5 + 10.**(2.5 + 1.2*O)))
            N = np.log10((1.0 * 10. ** O) * (10. ** -1.55 + 10. ** (2.3 + 1.1 * O)))
            # N = -4.81 + logZ if logZ <= -0.3 else -4.51 + 2.0*logZ
            return C, N, O
        self.__setattr__('He', calc_He(self.logZ))
        C, N, O = calc_CNO(self.logZ)
        # Apply depletion factors to the specially-computed C, N and O.
        [self.__setattr__(key, val + self.depl[key])
         for key, val in zip(['C', 'N', 'O'], [C, N, O])]
        return
    def calcFinal(self):
        # Scale and deplete every element not already set by calcSpecial.
        [self.__setattr__(key, val + self.logZ + self.depl[key])
         for key, val in self.abund_0.items() if not hasattr(self, key)]
        return
class gutkin(abundSet):
    # Cloudy keyword for the Grevesse 2010 solar abundance set.
    solar = 'GASS10'
    def __init__(self, logZ, dust=True, re_z=False):
        """
        Gutkin+2016 abundance set:
        PARSEC metallicity (Bressan+2012),
        based on Grevesse+Sauvel (1998) and Caffau+2011.
        """
        self.grains = 'no grains\ngrains ISM' if dust else 'no grains'
        self.re_z = re_z
        abundSet.__init__(self, 'gutkin', logZ)
    def calcSpecial(self):
        """He from the Y(Z) relation; N from an N/O fit; C and O scale with logZ."""
        logZ = self.logZ
        Z = (10. ** logZ) * 0.01524
        Y = 0.2485 + 1.7756 * Z
        X = 1. - Y - Z
        self.__setattr__('He', np.log10(Y / X / 4.))
        O = self.abund_0['O'] + logZ
        self.__setattr__('N', np.log10((0.41 * 10. ** O) * (10. ** -1.6 + 10. ** (2.33 + O))))
        self.__setattr__('C', self.abund_0['C'] + logZ)
        self.__setattr__('O', O)
        return
    def calcFinal(self):
        """Copy every element not already set; no additional scaling or depletion."""
        for key, val in self.abund_0.items():
            if not hasattr(self, key):
                self.__setattr__(key, val)
        return
def load_abund(set_name):
    """
    Return the base log10 number abundances (relative to H) for a named set.

    Parameters
    ----------
    set_name : str
        One of 'dopita', 'newdopita', 'UVbyler' or 'gutkin'.

    Returns
    -------
    dict
        Mapping of element symbol -> log10 abundance.

    Raises
    ------
    KeyError
        If ``set_name`` is not one of the known sets (the previous version
        raised an opaque UnboundLocalError instead).
    """
    # 'newdopita' and 'UVbyler' share every value except nitrogen, so the
    # table is written once and the single difference applied explicitly.
    newdopita_abund = dict(He=-1.01, C=-3.57, N=-4.60, O=-3.31, Ne=-4.07,
                           Na=-5.75, Mg=-4.40, Al=-5.55, Si=-4.49, S=-4.86,
                           Cl=-6.63, Ar=-5.60, Ca=-5.66, Fe=-4.50, Ni=-5.78,
                           F=-7.44, P=-6.59, K=-6.97, Cr=-6.36, Ti=-7.05,
                           Mn=-6.57, Co=-7.01, Cu=-7.81, Zn=-7.44)
    abund_sets = {
        'dopita': dict(He=-1.01, C=-3.44, N=-3.95, O=-3.07, Ne=-3.91,
                       Mg=-4.42, Si=-4.45, S=-4.79, Ar=-5.44, Ca=-5.64,
                       Fe=-4.33, F=-7.52, Na=-5.69, Al=-5.53, P=-6.43,
                       Cl=-6.73, K=-6.87, Ti=-6.96, Cr=-6.32, Mn=-6.47,
                       Co=-7.08, Ni=-5.75, Cu=-7.73, Zn=-7.34),
        'newdopita': newdopita_abund,
        'UVbyler': dict(newdopita_abund, N=-4.17),
        'gutkin': dict(He=-1.01, C=-3.53, N=-4.32, O=-3.17, F=-7.47,
                       Ne=-4.01, Na=-5.70, Mg=-4.45, Al=-5.56, Si=-4.48,
                       P=-6.57, S=-4.87, Cl=-6.53, Ar=-5.63, K=-6.92,
                       Ca=-5.67, Sc=-8.86, Ti=-7.01, V=-8.03, Cr=-6.36,
                       Mn=-6.64, Fe=-4.51, Co=-7.11, Ni=-5.78, Cu=-7.82,
                       Zn=-7.43),
    }
    try:
        return abund_sets[set_name]
    except KeyError:
        raise KeyError('unknown abundance set: {!r}'.format(set_name))
def load_depl(set_name):
    """
    Return the log10 dust depletion factors for a named abundance set.

    Parameters
    ----------
    set_name : str
        One of 'dopita', 'newdopita', 'UVbyler' or 'gutkin'.

    Returns
    -------
    dict
        Mapping of element symbol -> log10 depletion factor.

    Raises
    ------
    KeyError
        If ``set_name`` is not one of the known sets (the previous version
        raised an opaque UnboundLocalError instead).
    """
    # 'newdopita' and 'UVbyler' used byte-identical depletion tables, so the
    # table is written once and returned (as a copy) for both names.
    newdopita_depl = dict(He=0.00, C=-0.30, N=-0.05, O=-0.07, Ne=0.00,
                          Na=-1.00, Mg=-1.08, Al=-1.39, Si=-0.81, S=0.00,
                          Cl=-1.00, Ar=0.00, Ca=-2.52, Fe=-1.31, Ni=-2.00,
                          F=0.0, P=0.0, K=0.0, Cr=0.0, Ti=0.0, Mn=0.0,
                          Co=0.0, Cu=0.0, Zn=0.0)
    depl_sets = {
        'dopita': dict(C=-0.30, N=-0.22, O=-0.22, Ne=0.0, Mg=-0.70,
                       Si=-1.0, S=0.0, Ar=0.0, Ca=-2.52, Fe=-2.0,
                       F=0.0, Na=0.0, Al=0.0, P=0.0, Cl=0.0, K=0.0,
                       Ti=0.0, Cr=0.0, Mn=0.0, Co=0.0, Ni=0.0, Cu=0.0,
                       Zn=0.0),
        'newdopita': newdopita_depl,
        'UVbyler': dict(newdopita_depl),
        'gutkin': dict(He=0.00, Li=-0.8, C=-0.30, O=-0.15, Na=-0.60,
                       Mg=-0.70, Al=-1.70, Si=-1.00, Cl=-0.30, Ca=-2.52,
                       Fe=-2.00, Ni=-1.40),
    }
    try:
        return depl_sets[set_name]
    except KeyError:
        raise KeyError('unknown depletion set: {!r}'.format(set_name))
| [
"numpy.array",
"numpy.log10",
"scipy.interpolate.InterpolatedUnivariateSpline",
"powderday.nebular_emission.cloudy_tools.sym_to_name"
] | [((1685, 1698), 'powderday.nebular_emission.cloudy_tools.sym_to_name', 'sym_to_name', ([], {}), '()\n', (1696, 1698), False, 'from powderday.nebular_emission.cloudy_tools import sym_to_name\n'), ((2912, 2954), 'numpy.log10', 'np.log10', (['(0.08096 + 0.02618 * 10.0 ** logZ)'], {}), '(0.08096 + 0.02618 * 10.0 ** logZ)\n', (2920, 2954), True, 'import numpy as np\n'), ((3994, 4033), 'numpy.log10', 'np.log10', (['(0.0737 + 0.024 * 10.0 ** logZ)'], {}), '(0.0737 + 0.024 * 10.0 ** logZ)\n', (4002, 4033), True, 'import numpy as np\n'), ((4085, 4159), 'numpy.array', 'np.array', (['[7.39, 7.5, 7.69, 7.99, 8.17, 8.39, 8.69, 8.8, 8.99, 9.17, 9.39]'], {}), '([7.39, 7.5, 7.69, 7.99, 8.17, 8.39, 8.69, 8.8, 8.99, 9.17, 9.39])\n', (4093, 4159), True, 'import numpy as np\n'), ((4208, 4298), 'numpy.array', 'np.array', (['[-6.61, -6.47, -6.23, -5.79, -5.51, -5.14, -4.6, -4.4, -4.04, -3.67, -3.17]'], {}), '([-6.61, -6.47, -6.23, -5.79, -5.51, -5.14, -4.6, -4.4, -4.04, -\n 3.67, -3.17])\n', (4216, 4298), True, 'import numpy as np\n'), ((4342, 4433), 'numpy.array', 'np.array', (['[-5.58, -5.44, -5.2, -4.76, -4.48, -4.11, -3.57, -3.37, -3.01, -2.64, -2.14]'], {}), '([-5.58, -5.44, -5.2, -4.76, -4.48, -4.11, -3.57, -3.37, -3.01, -\n 2.64, -2.14])\n', (4350, 4433), True, 'import numpy as np\n'), ((5666, 5705), 'numpy.log10', 'np.log10', (['(0.0737 + 0.024 * 10.0 ** logZ)'], {}), '(0.0737 + 0.024 * 10.0 ** logZ)\n', (5674, 5705), True, 'import numpy as np\n'), ((5867, 5930), 'numpy.log10', 'np.log10', (['(10.0 ** O * (10.0 ** -0.7 + 10.0 ** (4.8 + 1.45 * O)))'], {}), '(10.0 ** O * (10.0 ** -0.7 + 10.0 ** (4.8 + 1.45 * O)))\n', (5875, 5930), True, 'import numpy as np\n'), ((6086, 6155), 'numpy.log10', 'np.log10', (['(1.0 * 10.0 ** O * (10.0 ** -1.55 + 10.0 ** (2.3 + 1.1 * O)))'], {}), '(1.0 * 10.0 ** O * (10.0 ** -1.55 + 10.0 ** (2.3 + 1.1 * O)))\n', (6094, 6155), True, 'import numpy as np\n'), ((7267, 7288), 'numpy.log10', 'np.log10', (['(Y / X / 4.0)'], {}), '(Y / X / 4.0)\n', (7275, 
7288), True, 'import numpy as np\n'), ((7374, 7438), 'numpy.log10', 'np.log10', (['(0.41 * 10.0 ** O * (10.0 ** -1.6 + 10.0 ** (2.33 + O)))'], {}), '(0.41 * 10.0 ** O * (10.0 ** -1.6 + 10.0 ** (2.33 + O)))\n', (7382, 7438), True, 'import numpy as np\n'), ((4521, 4544), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpUS', (['oxy', 'car'], {'k': '(1)'}), '(oxy, car, k=1)\n', (4529, 4544), True, 'from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS\n'), ((4578, 4601), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpUS', (['oxy', 'nit'], {'k': '(1)'}), '(oxy, nit, k=1)\n', (4586, 4601), True, 'from scipy.interpolate import InterpolatedUnivariateSpline as InterpUS\n')] |
# Generated by Django 3.2.5 on 2021-08-12 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Character.alignment`` into an optional two-letter choice field."""

    dependencies = [
        ('character', '0003_alter_character_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='character',
            name='alignment',
            # Two-letter codes for the nine alignment choices; blank=True and
            # null=True allow a character with no recorded alignment.
            field=models.CharField(blank=True, choices=[('LG', 'Lawful Good'), ('NG', 'Neutral Good'), ('CG', 'Chaotic Good'), ('LN', 'Lawful Neutral'), ('N', 'True Neutral'), ('CN', 'Chaotic Neutral'), ('LE', 'Lawful Evil'), ('NE', 'Neutral Evil'), ('CE', 'Chaotic Evil')], max_length=2, null=True),
        ),
    ]
| [
"django.db.models.CharField"
] | [((344, 638), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('LG', 'Lawful Good'), ('NG', 'Neutral Good'), ('CG', 'Chaotic Good'), (\n 'LN', 'Lawful Neutral'), ('N', 'True Neutral'), ('CN',\n 'Chaotic Neutral'), ('LE', 'Lawful Evil'), ('NE', 'Neutral Evil'), (\n 'CE', 'Chaotic Evil')]", 'max_length': '(2)', 'null': '(True)'}), "(blank=True, choices=[('LG', 'Lawful Good'), ('NG',\n 'Neutral Good'), ('CG', 'Chaotic Good'), ('LN', 'Lawful Neutral'), ('N',\n 'True Neutral'), ('CN', 'Chaotic Neutral'), ('LE', 'Lawful Evil'), (\n 'NE', 'Neutral Evil'), ('CE', 'Chaotic Evil')], max_length=2, null=True)\n", (360, 638), False, 'from django.db import migrations, models\n')] |
import keras
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from scipy import io
# Load the two pre-extracted test patch sets from MATLAB .mat files.
mat_contents = io.loadmat('Data/X_test_0.mat')
X_test_0 = mat_contents['X_test_0']
mat_contents = io.loadmat('Data/X_test_1.mat')
X_test_1 = mat_contents['X_test_1']
batch_size = 40
num_classes = 2
# Reshape each patch stack to (N, rows, cols, 1): a trailing single channel,
# as expected by a Keras 2-D convolutional model.
test_datasize, patch_rows, patch_cols = X_test_0.shape[0], X_test_0.shape[1], X_test_0.shape[2]
X_test_0 = X_test_0.reshape(test_datasize, patch_rows, patch_cols, 1)
test_datasize, patch_rows, patch_cols = X_test_1.shape[0], X_test_1.shape[1], X_test_1.shape[2]
X_test_1 = X_test_1.reshape(test_datasize, patch_rows, patch_cols, 1)
print('X_test_0 shape:', X_test_0.shape)
print('X_test_1 shape:', X_test_1.shape)
# load trained model
model = load_model('Data/trained_model.h5')
# prediction
Y_test_0 = model.predict(X_test_0, batch_size=batch_size, verbose=1)
Y_test_1 = model.predict(X_test_1, batch_size=batch_size, verbose=1)
# Save the predictions back to .mat files for downstream analysis.
io.savemat('Data/Y_test_0.mat', {'Y_test_0':Y_test_0})
io.savemat('Data/Y_test_1.mat', {'Y_test_1':Y_test_1})
| [
"scipy.io.loadmat",
"keras.models.load_model",
"scipy.io.savemat"
] | [((200, 231), 'scipy.io.loadmat', 'io.loadmat', (['"""Data/X_test_0.mat"""'], {}), "('Data/X_test_0.mat')\n", (210, 231), False, 'from scipy import io\n'), ((283, 314), 'scipy.io.loadmat', 'io.loadmat', (['"""Data/X_test_1.mat"""'], {}), "('Data/X_test_1.mat')\n", (293, 314), False, 'from scipy import io\n'), ((830, 865), 'keras.models.load_model', 'load_model', (['"""Data/trained_model.h5"""'], {}), "('Data/trained_model.h5')\n", (840, 865), False, 'from keras.models import Sequential, load_model, Model\n'), ((1019, 1074), 'scipy.io.savemat', 'io.savemat', (['"""Data/Y_test_0.mat"""', "{'Y_test_0': Y_test_0}"], {}), "('Data/Y_test_0.mat', {'Y_test_0': Y_test_0})\n", (1029, 1074), False, 'from scipy import io\n'), ((1074, 1129), 'scipy.io.savemat', 'io.savemat', (['"""Data/Y_test_1.mat"""', "{'Y_test_1': Y_test_1}"], {}), "('Data/Y_test_1.mat', {'Y_test_1': Y_test_1})\n", (1084, 1129), False, 'from scipy import io\n')] |
from appJar import gui
def press(btn):
    """Show the appJar dialog box whose name matches the pressed button."""
    method_names = {
        "info": "infoBox",
        "error": "errorBox",
        "warning": "warningBox",
        "yesno": "yesNoBox",
        "question": "questionBox",
        "ok": "okBox",
        "retry": "retryBox",
        "text": "textBox",
        "number": "numberBox",
    }
    name = method_names.get(btn)
    if name is not None:
        # Look the method up lazily so unknown buttons never touch `app`.
        getattr(app, name)("Title Here", "Message here...")
# Build the GUI: two rows of dialog-type buttons, all wired to press().
app=gui()
app.addButtons(["info", "error", "warning", "yesno", "question"], press)
app.addButtons(["ok", "retry", "text", "number"], press)
# Start the main event loop (blocks until the window is closed).
app.go()
| [
"appJar.gui"
] | [((668, 673), 'appJar.gui', 'gui', ([], {}), '()\n', (671, 673), False, 'from appJar import gui\n')] |
from flask import request
import pytest
import json
from app import create_app, create_rest_api
from db import get_db
from change_light import is_aquarium_id_valid
@pytest.fixture
def client():
    """Yield a Flask test client backed by a freshly created app with the REST API attached."""
    flask_app = create_app()
    create_rest_api(flask_app)
    yield flask_app.test_client()
def get_max_aquarium_id():
    """Return the highest aquarium id currently stored in the database.

    Sorts the aquarium table by id descending and takes the first row, so
    the returned value is the maximum existing id.
    """
    light_data = get_db().execute(
        'SELECT id, timestamp, default_mode, total_food_quantity'
        ' FROM aquarium'
        ' ORDER BY id DESC'
    ).fetchone()
    return light_data['id']
def test_get_aquarium_light_color_invalid_id(client):
    """GET /lightColor/<id> for a nonexistent aquarium must return 403."""
    with create_app().app_context():
        missing_id = get_max_aquarium_id() + 1
        response = client.get('/lightColor/' + str(missing_id))
        assert response.status_code == 403
def test_get_aquarium_light_color_valid_id(client):
    """GET /lightColor/<id> for an existing aquarium succeeds with 200."""
    with create_app().app_context():
        existing_id = get_max_aquarium_id()
        response = client.get('/lightColor/' + str(existing_id))
        assert response.status_code == 200
def test_change_light_color_valid_aquarium_id(client):
    """PUT /lightColor/<id> with an existing aquarium id succeeds with 200."""
    with create_app().app_context():
        existing_id = get_max_aquarium_id()
        response = client.put('/lightColor/' + str(existing_id) + '?color=' + 'test_color')
        assert response.status_code == 200
def test_change_light_color_invalid_aquarium_id(client):
    """PUT /lightColor/<id> with a nonexistent aquarium id must return 403."""
    with create_app().app_context():
        missing_id = get_max_aquarium_id() + 1
        response = client.put('/lightColor/' + str(missing_id) + '?color=' + 'test_color')
        assert response.status_code == 403
"db.get_db",
"app.create_rest_api",
"app.create_app"
] | [((212, 224), 'app.create_app', 'create_app', ([], {}), '()\n', (222, 224), False, 'from app import create_app, create_rest_api\n'), ((229, 255), 'app.create_rest_api', 'create_rest_api', (['local_app'], {}), '(local_app)\n', (244, 255), False, 'from app import create_app, create_rest_api\n'), ((607, 619), 'app.create_app', 'create_app', ([], {}), '()\n', (617, 619), False, 'from app import create_app, create_rest_api\n'), ((843, 855), 'app.create_app', 'create_app', ([], {}), '()\n', (853, 855), False, 'from app import create_app, create_rest_api\n'), ((1099, 1111), 'app.create_app', 'create_app', ([], {}), '()\n', (1109, 1111), False, 'from app import create_app, create_rest_api\n'), ((1378, 1390), 'app.create_app', 'create_app', ([], {}), '()\n', (1388, 1390), False, 'from app import create_app, create_rest_api\n'), ((356, 364), 'db.get_db', 'get_db', ([], {}), '()\n', (362, 364), False, 'from db import get_db\n')] |
#######################
# <NAME> #
# inventory.py #
# Copyright 2018-2020 #
# <NAME> #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.litnumbers import *
from lib.vigenere import *
import random
# Command metadata consumed by the command loader / help system.
NAME = "inventory"
CATEGORIES = ["items"]
ALIASES = ["inv", "i"]
USAGE = "inventory"
DESCRIPTION = "List all of the items in your inventory."
def COMMAND(console, args):
    """List everything the player is holding (equipment) and carrying (inventory).

    Held items are reported first, then each inventory item on its own line,
    and finally a worded count. If any held or carried item has an "invmess"
    curse, item names are scrambled with a Vigenere cipher keyed on the
    player's language and displayed IDs are randomized.

    :param console: The invoking player's console.
    :param args: Command arguments (none expected).
    :return: True on success, False if the initial checks fail.
    """
    # Perform initial checks.
    if not COMMON.check(NAME, console, args, argc=0, awake=True):
        return False

    # Tell the player right away if their hands are empty.
    if not console.user["equipment"]:
        console.msg("You are not holding anything.")

    # Scan every held or carried item for an inventory-scrambling curse.
    cursedinv = False
    for scanid in console.user["inventory"] + console.user["equipment"]:
        item = COMMON.check_item(NAME, console, scanid, reason=False)
        try:
            if item["cursed"]["cursetype"] == "invmess":
                cursedinv = True
                break
        except (TypeError, KeyError):
            # BUG FIX: was a bare `except:`; only swallow "item missing or has
            # no curse record", not every exception (incl. KeyboardInterrupt).
            pass

    mylang = console.database.user_by_name(console.user["name"])["lang"]

    # Report held (equipped) items, joined with " and ".
    if console.user["equipment"]:
        held = []
        for heldref in console.user["equipment"]:
            hitem = console.database.item_by_id(heldref)
            hitemname = hitem["name"]
            hitemid = hitem["id"]
            if cursedinv:
                # Curse effect: scramble the name and show a bogus ID.
                hitemname = encvigenere(hitemname, mylang)
                hitemid = random.randint(1, 100)
            if console.user["builder"]["enabled"]:
                held.append("{0} (ID: {1})".format(COMMON.format_item(NAME, hitemname), hitemid))
            else:
                held.append("{0}".format(COMMON.format_item(NAME, hitemname)))
        console.msg("You are holding {0}.".format(' and '.join(held)))

    # Check if our inventory is empty.
    if not console.user["inventory"]:
        console.msg("Your inventory is empty.")

    # Enumerate our inventory.
    itemcount = 0
    for itemid in sorted(console.user["inventory"]):
        # Lookup the target item and perform item checks.
        thisitem = COMMON.check_item(NAME, console, itemid, reason=False)
        if not thisitem:
            # Uh oh, an item in our inventory doesn't actually exist.
            console.log.error("Item referenced in user inventory does not exist: {user} :: {item}",
                              user=console.user["name"], item=itemid)
            console.msg("{0}: ERROR: Item referenced in your inventory does not exist: {1}".format(NAME, itemid))
            continue
        # Show the item's name and, for builders, its ID.
        hitemname = thisitem["name"]
        if cursedinv:
            hitemname = encvigenere(hitemname, mylang)
            itemid = random.randint(1, 100)
        if console.user["builder"]["enabled"]:
            console.msg("{0} (ID: {1})".format(hitemname, itemid))
        else:
            console.msg("{0}".format(hitemname))
        # Keep count.
        itemcount += 1

    # Finished: summarize the count in words.
    if itemcount > 1:
        console.msg("There are {0} items in your inventory.".format(int_to_en(itemcount)))
    elif itemcount == 1:
        # CLEANUP: dropped a no-op .format() call on a placeholder-less string.
        console.msg("There is one item in your inventory.")
    else:
        console.msg("There are no items in your inventory.")
    return True
| [
"random.randint"
] | [((3730, 3752), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (3744, 3752), False, 'import random\n'), ((2446, 2468), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2460, 2468), False, 'import random\n')] |
from custom_objects import FinanceCalculator
from tkinter import messagebox
class CalculationsPresenter(object):
    """Mediates between the view and FinanceCalculator price conversions."""

    def __init__(self, view):
        self.view = view

    def convert_price(self, price):
        """Convert *price* between decimal and treasury (32nds) notation.

        Tries decimal->treasury first, then treasury->decimal, displaying
        the first successful conversion. If neither converter accepts the
        input, an "Invalid Price" message box is shown.
        """
        # IMPROVEMENT: the original duplicated two near-identical try blocks
        # (with unused `err` bindings); iterate over the converters instead.
        for convert in (FinanceCalculator.decimal_to_treasury,
                        FinanceCalculator.treasury_to_decimal):
            try:
                converted_price = convert(price)
            except (ValueError, IndexError):
                continue
            self.view.display_conversion(new_price=converted_price)
            return None
        messagebox.showinfo(
            message="An example of a valid price would be 108.50 or 108-16",
            title="Invalid Price",
        )
| [
"custom_objects.FinanceCalculator.treasury_to_decimal",
"custom_objects.FinanceCalculator.decimal_to_treasury",
"tkinter.messagebox.showinfo"
] | [((250, 294), 'custom_objects.FinanceCalculator.decimal_to_treasury', 'FinanceCalculator.decimal_to_treasury', (['price'], {}), '(price)\n', (287, 294), False, 'from custom_objects import FinanceCalculator\n'), ((495, 539), 'custom_objects.FinanceCalculator.treasury_to_decimal', 'FinanceCalculator.treasury_to_decimal', (['price'], {}), '(price)\n', (532, 539), False, 'from custom_objects import FinanceCalculator\n'), ((668, 785), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'message': '"""An example of a valid price would be 108.50 or 108-16"""', 'title': '"""Invalid Price"""'}), "(message=\n 'An example of a valid price would be 108.50 or 108-16', title=\n 'Invalid Price')\n", (687, 785), False, 'from tkinter import messagebox\n')] |
from django.shortcuts import render
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from .serializers import WeatherSerializer
import requests
import json
import math
import os
import yaml
from rest_framework.decorators import action
from django.conf import settings
def api_docs(request):
    """
    Base API Docs endpoint function for the Swagger UI.

    Loads the OpenAPI spec from ``api.yaml`` under BASE_DIR and renders it
    into the Swagger template as a JSON blob.
    """
    # BUG FIX: the file handle was opened and never closed; a context
    # manager guarantees it is released even if parsing raises.
    with open(os.path.join(settings.BASE_DIR, 'api.yaml'), encoding='utf8') as spec_file:
        spec = yaml.safe_load(spec_file.read())
    return render(request, template_name="swagger_base.html", context={'data': json.dumps(spec)})
class WeatherViewSet(ViewSet):
    """
    General ViewSet for the Weather API.

    Proxies the upstream weather service and augments each forecast day
    with the median hourly temperature.
    """
    serializer_class = WeatherSerializer

    @action(methods=['get'], detail=False, url_path=r'(?P<city>[\w-]+)/', url_name='get_weather')
    def get(self, request, *args, **kwargs):
        """Return per-day max/min/avg/median temperatures for a city.

        Query params: ``days`` (default 1). Responds 400 on validation
        failure or when the upstream API reports an error.
        """
        data = {'city': kwargs.get(
            'city', None), 'days': request.GET.get('days', 1)}
        serializer = WeatherSerializer(data=data)
        # Guard clause: bail out early on invalid input.
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        response = json.loads(requests.get(
            f'{settings.BASE_WEATHER_API_URL}forecast.json?key={settings.WEATHER_API_KEY}&q={serializer.data["city"]}&days={serializer.data["days"]}&aqi=no&alerts=no').content)
        if "error" in response:
            return Response(response['error']['message'], status=400)
        weather = []
        for d in response['forecast']['forecastday']:
            day = {
                "date": d["date"],
                "maximum": d["day"]["maxtemp_c"],
                "minimum": d["day"]["mintemp_c"],
                "average": d["day"]["avgtemp_c"]
            }
            hours = sorted(hour['temp_c'] for hour in d['hour'])
            n = len(hours)
            middle = n // 2
            if n % 2 == 0:
                # BUG FIX: the median of an even-length sorted list is the
                # mean of the two CENTRAL elements (indices n//2-1 and n//2);
                # the old code averaged indices n//2 and n//2+1.
                day['median'] = round((hours[middle - 1] + hours[middle]) / 2, 2)
            else:
                # BUG FIX: the central element sits at index n//2; the old
                # code indexed math.ceil(n/2), one position past the middle.
                day['median'] = round(hours[middle], 2)
            weather.append(day)
        data = {'location': response['location']['name'], 'weather': weather}
        return Response(data)
| [
"json.dumps",
"os.path.join",
"requests.get",
"rest_framework.response.Response",
"rest_framework.decorators.action"
] | [((748, 844), 'rest_framework.decorators.action', 'action', ([], {'methods': "['get']", 'detail': '(False)', 'url_path': '"""(?P<city>[\\\\w-]+)/"""', 'url_name': '"""get_weather"""'}), "(methods=['get'], detail=False, url_path='(?P<city>[\\\\w-]+)/',\n url_name='get_weather')\n", (754, 844), False, 'from rest_framework.decorators import action\n'), ((417, 460), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""api.yaml"""'], {}), "(settings.BASE_DIR, 'api.yaml')\n", (429, 460), False, 'import os\n'), ((2388, 2427), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': '(400)'}), '(serializer.errors, status=400)\n', (2396, 2427), False, 'from rest_framework.response import Response\n'), ((2358, 2372), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (2366, 2372), False, 'from rest_framework.response import Response\n'), ((597, 613), 'json.dumps', 'json.dumps', (['spec'], {}), '(spec)\n', (607, 613), False, 'import json\n'), ((1417, 1467), 'rest_framework.response.Response', 'Response', (["response['error']['message']"], {'status': '(400)'}), "(response['error']['message'], status=400)\n", (1425, 1467), False, 'from rest_framework.response import Response\n'), ((1150, 1328), 'requests.get', 'requests.get', (['f"""{settings.BASE_WEATHER_API_URL}forecast.json?key={settings.WEATHER_API_KEY}&q={serializer.data[\'city\']}&days={serializer.data[\'days\']}&aqi=no&alerts=no"""'], {}), '(\n f"{settings.BASE_WEATHER_API_URL}forecast.json?key={settings.WEATHER_API_KEY}&q={serializer.data[\'city\']}&days={serializer.data[\'days\']}&aqi=no&alerts=no"\n )\n', (1162, 1328), False, 'import requests\n')] |
import sys
sys.path.append("/usr/lib/python2.7/site-packages")
import redis
_r = redis.Redis(host='localhost', port=6379, db=0)
import cherrypy
class Test(object):
    """CherryPy root handler that counts page hits in Redis."""
    def index(self):
        # Increment the Redis hit counter for "/" on every request.
        _r.incr("/")
        return "OK!"
    # Expose index() as a CherryPy URL handler.
    index.exposed = True
cherrypy.quickstart(Test())
| [
"sys.path.append",
"redis.Redis"
] | [((11, 62), 'sys.path.append', 'sys.path.append', (['"""/usr/lib/python2.7/site-packages"""'], {}), "('/usr/lib/python2.7/site-packages')\n", (26, 62), False, 'import sys\n'), ((82, 128), 'redis.Redis', 'redis.Redis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(0)'}), "(host='localhost', port=6379, db=0)\n", (93, 128), False, 'import redis\n')] |
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.util import web_url_for
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
from ..api import Figshare
from ..utils import options_to_hgrid
###### AJAX Config
@must_be_logged_in
@must_be_valid_project
@must_have_addon('figshare', 'node')
def figshare_config_get(node_addon, auth, **kwargs):
    """API that returns the serialized node settings.

    Wraps serialize_settings() for the current node addon and user and
    returns it alongside an HTTP 200 status.
    """
    return {
        'result': serialize_settings(node_addon, auth.user),
    }, http.OK
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('figshare', 'node')
@must_be_addon_authorizer('figshare')
def figshare_config_put(node_addon, auth, **kwargs):
    """View for changing a node's linked figshare folder."""
    selected = request.json.get('selected', {})
    node_addon.update_fields(selected, node_addon.owner, auth)
    linked = {
        'title': selected.get('title') or '',
        'id': selected.get('id') or None,
        'type': selected.get('type') or None,
    }
    payload = {
        'result': {
            'linked': linked,
            'urls': serialize_urls(node_addon),
        },
        'message': 'Successfully updated settings.',
    }
    return payload, http.OK
@must_have_permission('write')
@must_have_addon('figshare', 'node')
def figshare_import_user_auth(auth, node_addon, **kwargs):
    """Import figshare credentials from the currently logged-in user to a node.
    """
    user_addon = auth.user.get_addon('figshare')
    if user_addon is None or node_addon is None:
        raise HTTPError(http.BAD_REQUEST)
    node_addon.authorize(user_addon, save=True)
    payload = {
        'result': serialize_settings(node_addon, auth.user),
        'message': 'Successfully imported access token from profile.',
    }
    return payload, http.OK
@must_have_permission('write')
@must_have_addon('figshare', 'node')
@must_not_be_registration
def figshare_deauthorize(auth, node_addon, **kwargs):
    """Remove the figshare authorization from this node; returns an empty payload."""
    node_addon.deauthorize(auth=auth, save=True)
    return {}
def serialize_settings(node_settings, current_user, client=None):
"""View helper that returns a dictionary representation of a
FigshareNodeSettings record. Provides the return value for the
figshare config endpoints.
"""
current_user_settings = current_user.get_addon('figshare')
user_settings = node_settings.user_settings
user_has_auth = current_user_settings is not None and current_user_settings.has_auth
user_is_owner = user_settings is not None and (
user_settings.owner._primary_key == current_user._primary_key
)
valid_credentials = True
if user_settings:
client = client or Figshare.from_settings(user_settings)
articles, status = client.articles(node_settings)
if status == 401:
valid_credentials = False
result = {
'nodeHasAuth': node_settings.has_auth,
'userHasAuth': user_has_auth,
'userIsOwner': user_is_owner,
'urls': serialize_urls(node_settings),
'validCredentials': valid_credentials,
}
if node_settings.has_auth:
# Add owner's profile URL
result['urls']['owner'] = web_url_for('profile_view_id',
uid=user_settings.owner._primary_key)
result['ownerName'] = user_settings.owner.fullname
# Show available projects
linked = node_settings.linked_content or {'id': None, 'type': None, 'title': None}
result['linked'] = linked
return result
def serialize_urls(node_settings):
node = node_settings.owner
urls = {
'config': node.api_url_for('figshare_config_put'),
'deauthorize': node.api_url_for('figshare_deauthorize'),
'auth': node.api_url_for('figshare_oauth_start'),
'importAuth': node.api_url_for('figshare_import_user_auth'),
'options': node.api_url_for('figshare_get_options'),
'folders': node.api_url_for('figshare_get_options'),
'files': node.web_url_for('collect_file_trees'),
# Endpoint for fetching only folders (including root)
'contents': node.api_url_for('figshare_hgrid_data_contents'),
'settings': web_url_for('user_addons')
}
return urls
@must_be_valid_project
@must_have_addon('figshare', 'node')
def figshare_get_options(node_addon, **kwargs):
options = Figshare.from_settings(node_addon.user_settings).get_options()
# TODO: Fix error handling
if options == 401 or not isinstance(options, list):
raise HTTPError(http.BAD_REQUEST)
# self.user_settings.remove_auth()
# push_status_message(messages.OAUTH_INVALID)
else:
node = node_addon.owner
return options_to_hgrid(node, options) or []
| [
"framework.exceptions.HTTPError",
"website.project.decorators.must_have_addon",
"website.util.web_url_for",
"flask.request.json.get",
"website.project.decorators.must_have_permission",
"website.project.decorators.must_be_addon_authorizer"
] | [((510, 545), 'website.project.decorators.must_have_addon', 'must_have_addon', (['"""figshare"""', '"""node"""'], {}), "('figshare', 'node')\n", (525, 545), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((748, 777), 'website.project.decorators.must_have_permission', 'must_have_permission', (['"""write"""'], {}), "('write')\n", (768, 777), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((805, 840), 'website.project.decorators.must_have_addon', 'must_have_addon', (['"""figshare"""', '"""node"""'], {}), "('figshare', 'node')\n", (820, 840), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((842, 878), 'website.project.decorators.must_be_addon_authorizer', 'must_be_addon_authorizer', (['"""figshare"""'], {}), "('figshare')\n", (866, 878), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((1469, 1498), 'website.project.decorators.must_have_permission', 'must_have_permission', (['"""write"""'], {}), "('write')\n", (1489, 1498), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((1500, 1535), 'website.project.decorators.must_have_addon', 'must_have_addon', (['"""figshare"""', '"""node"""'], {}), "('figshare', 'node')\n", (1515, 1535), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((2045, 2074), 'website.project.decorators.must_have_permission', 'must_have_permission', 
(['"""write"""'], {}), "('write')\n", (2065, 2074), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((2076, 2111), 'website.project.decorators.must_have_addon', 'must_have_addon', (['"""figshare"""', '"""node"""'], {}), "('figshare', 'node')\n", (2091, 2111), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((4456, 4491), 'website.project.decorators.must_have_addon', 'must_have_addon', (['"""figshare"""', '"""node"""'], {}), "('figshare', 'node')\n", (4471, 4491), False, 'from website.project.decorators import must_have_addon, must_be_addon_authorizer, must_have_permission, must_not_be_registration, must_be_valid_project\n'), ((1006, 1038), 'flask.request.json.get', 'request.json.get', (['"""selected"""', '{}'], {}), "('selected', {})\n", (1022, 1038), False, 'from flask import request\n'), ((1811, 1838), 'framework.exceptions.HTTPError', 'HTTPError', (['http.BAD_REQUEST'], {}), '(http.BAD_REQUEST)\n', (1820, 1838), False, 'from framework.exceptions import HTTPError\n'), ((3401, 3469), 'website.util.web_url_for', 'web_url_for', (['"""profile_view_id"""'], {'uid': 'user_settings.owner._primary_key'}), "('profile_view_id', uid=user_settings.owner._primary_key)\n", (3412, 3469), False, 'from website.util import web_url_for\n'), ((4381, 4407), 'website.util.web_url_for', 'web_url_for', (['"""user_addons"""'], {}), "('user_addons')\n", (4392, 4407), False, 'from website.util import web_url_for\n'), ((4719, 4746), 'framework.exceptions.HTTPError', 'HTTPError', (['http.BAD_REQUEST'], {}), '(http.BAD_REQUEST)\n', (4728, 4746), False, 'from framework.exceptions import HTTPError\n')] |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class PersonaServerProtocol(WebSocketServerProtocol):
"""
WebSocket server protocol that tracks WebSocket connections using HTTP cookies,
and authenticates WebSocket connections using Mozilla Persona.
"""
def onConnect(self, request):
# This is called during the initial WebSocket opening handshake.
protocol, headers = None, {}
# our cookie tracking ID
self._cbtid = None
# see if there already is a cookie set ..
if 'cookie' in request.headers:
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if 'cbtid' in cookie:
cbtid = cookie['cbtid'].value
if cbtid in self.factory._cookies:
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
# if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
# do NOT add the "secure" cookie attribute! "secure" refers to the
# scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
log.msg("Setting new cookie: %s" % self._cbtid)
# add this WebSocket connection to the set of connections
# associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
# accept the WebSocket connection, speaking subprotocol `protocol`
# and setting HTTP headers `headers`
return (protocol, headers)
def onOpen(self):
# This is called when initial WebSocket opening handshake has
# been completed.
# see if we are authenticated ..
authenticated = self.factory._cookies[self._cbtid]['authenticated']
if not authenticated:
# .. if not, send authentication request
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_REQUIRED'}))
else:
# .. if yes, send info on authenticated user
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATED', 'email': authenticated}))
def onClose(self, wasClean, code, reason):
# This is called when WebSocket connection is gone
# remove this connection from list of connections associated with
# same cookie
self.factory._cookies[self._cbtid]['connections'].remove(self)
# if list gets empty, possibly do something ..
if not self.factory._cookies[self._cbtid]['connections']:
log.msg("All connections for {} gone".format(self._cbtid))
def onMessage(self, payload, isBinary):
# This is called when we receive a WebSocket message
if not isBinary:
msg = json.loads(payload)
if msg['cmd'] == 'AUTHENTICATE':
# The client did it's Mozilla Persona authentication thing
# and now wants to verify the authentication and login.
assertion = msg.get('assertion')
audience = msg.get('audience')
# To verify the authentication, we need to send a HTTP/POST
# to Mozilla Persona. When successful, Persona will send us
# back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "<EMAIL>",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url="https://verifier.login.persona.org/verify",
method='POST',
postdata=body,
headers=headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
if res['status'] == 'okay':
# Mozilla Persona successfully authenticated the user
# remember the user's email address. this marks the cookie as
# authenticated
self.factory._cookies[self._cbtid]['authenticated'] = res['email']
# inform _all_ WebSocket connections of the successful auth.
msg = json.dumps({'cmd': 'AUTHENTICATED', 'email': res['email']})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
log.msg("Authenticated user {}".format(res['email']))
else:
log.msg("Authentication failed: {}".format(res.get('reason')))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': res.get('reason')}))
self.sendClose()
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': str(err.value)}))
self.sendClose()
d.addCallbacks(done, error)
elif msg['cmd'] == 'LOGOUT':
# user wants to logout ..
if self.factory._cookies[self._cbtid]['authenticated']:
self.factory._cookies[self._cbtid]['authenticated'] = False
# inform _all_ WebSocket connections of the logout
msg = json.dumps({'cmd': 'LOGGED_OUT'})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
else:
log.msg("unknown command {}".format(msg))
class PersonaServerFactory(WebSocketServerFactory):
"""
WebSocket server factory with cookie/sessions map.
"""
protocol = PersonaServerProtocol
def __init__(self, url):
WebSocketServerFactory.__init__(self, url)
# map of cookies
self._cookies = {}
if __name__ == '__main__':
log.startLogging(sys.stdout)
print("Running Autobahn|Python {}".format(autobahn.version))
# our WebSocket server factory
factory = PersonaServerFactory("ws://127.0.0.1:8080")
# we serve static files under "/" ..
root = File(".")
# .. and our WebSocket server under "/ws" (note that Twisted uses
# bytes for URIs)
resource = WebSocketResource(factory)
root.putChild(b"ws", resource)
# run both under one Twisted Web Site
site = Site(root)
site.log = lambda _: None # disable any logging
reactor.listenTCP(8080, site)
reactor.run()
| [
"autobahn.twisted.resource.WebSocketResource",
"json.loads",
"autobahn.twisted.websocket.WebSocketServerFactory.__init__",
"autobahn.util.newid",
"twisted.python.log.msg",
"json.dumps",
"twisted.python.log.startLogging",
"twisted.web.client.getPage",
"twisted.web.static.File",
"autobahn.util.utcno... | [((8501, 8529), 'twisted.python.log.startLogging', 'log.startLogging', (['sys.stdout'], {}), '(sys.stdout)\n', (8517, 8529), False, 'from twisted.python import log\n'), ((8743, 8752), 'twisted.web.static.File', 'File', (['"""."""'], {}), "('.')\n", (8747, 8752), False, 'from twisted.web.static import File\n'), ((8861, 8887), 'autobahn.twisted.resource.WebSocketResource', 'WebSocketResource', (['factory'], {}), '(factory)\n', (8878, 8887), False, 'from autobahn.twisted.resource import WebSocketResource\n'), ((8977, 8987), 'twisted.web.server.Site', 'Site', (['root'], {}), '(root)\n', (8981, 8987), False, 'from twisted.web.server import Site\n'), ((9046, 9075), 'twisted.internet.reactor.listenTCP', 'reactor.listenTCP', (['(8080)', 'site'], {}), '(8080, site)\n', (9063, 9075), False, 'from twisted.internet import reactor\n'), ((9081, 9094), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (9092, 9094), False, 'from twisted.internet import reactor\n'), ((8371, 8413), 'autobahn.twisted.websocket.WebSocketServerFactory.__init__', 'WebSocketServerFactory.__init__', (['self', 'url'], {}), '(self, url)\n', (8402, 8413), False, 'from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol\n'), ((2825, 2832), 'autobahn.util.newid', 'newid', ([], {}), '()\n', (2830, 2832), False, 'from autobahn.util import newid, utcnow\n'), ((3360, 3407), 'twisted.python.log.msg', 'log.msg', (["('Setting new cookie: %s' % self._cbtid)"], {}), "('Setting new cookie: %s' % self._cbtid)\n", (3367, 3407), False, 'from twisted.python import log\n'), ((4923, 4942), 'json.loads', 'json.loads', (['payload'], {}), '(payload)\n', (4933, 4942), False, 'import json\n'), ((2295, 2316), 'Cookie.SimpleCookie', 'Cookie.SimpleCookie', ([], {}), '()\n', (2314, 2316), False, 'import Cookie\n'), ((2895, 2903), 'autobahn.util.utcnow', 'utcnow', ([], {}), '()\n', (2901, 2903), False, 'from autobahn.util import newid, utcnow\n'), ((4092, 4138), 
'json.dumps', 'json.dumps', (["{'cmd': 'AUTHENTICATION_REQUIRED'}"], {}), "({'cmd': 'AUTHENTICATION_REQUIRED'})\n", (4102, 4138), False, 'import json\n'), ((4240, 4300), 'json.dumps', 'json.dumps', (["{'cmd': 'AUTHENTICATED', 'email': authenticated}"], {}), "({'cmd': 'AUTHENTICATED', 'email': authenticated})\n", (4250, 4300), False, 'import json\n'), ((5817, 5881), 'urllib.urlencode', 'urllib.urlencode', (["{'audience': audience, 'assertion': assertion}"], {}), "({'audience': audience, 'assertion': assertion})\n", (5833, 5881), False, 'import urllib\n'), ((5958, 6065), 'twisted.web.client.getPage', 'getPage', ([], {'url': '"""https://verifier.login.persona.org/verify"""', 'method': '"""POST"""', 'postdata': 'body', 'headers': 'headers'}), "(url='https://verifier.login.persona.org/verify', method='POST',\n postdata=body, headers=headers)\n", (5965, 6065), False, 'from twisted.web.client import getPage\n'), ((6163, 6202), 'twisted.python.log.msg', 'log.msg', (['"""Authentication request sent."""'], {}), "('Authentication request sent.')\n", (6170, 6202), False, 'from twisted.python import log\n'), ((6261, 6276), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (6271, 6276), False, 'import json\n'), ((2666, 2713), 'twisted.python.log.msg', 'log.msg', (["('Cookie already set: %s' % self._cbtid)"], {}), "('Cookie already set: %s' % self._cbtid)\n", (2673, 2713), False, 'from twisted.python import log\n'), ((6737, 6796), 'json.dumps', 'json.dumps', (["{'cmd': 'AUTHENTICATED', 'email': res['email']}"], {}), "({'cmd': 'AUTHENTICATED', 'email': res['email']})\n", (6747, 6796), False, 'import json\n'), ((7927, 7960), 'json.dumps', 'json.dumps', (["{'cmd': 'LOGGED_OUT'}"], {}), "({'cmd': 'LOGGED_OUT'})\n", (7937, 7960), False, 'import json\n')] |
import subprocess, re, sys
def get_coref_score(metric, path_to_scorer, gold=None, preds=None):
output=subprocess.check_output(["perl", path_to_scorer, metric, preds, gold]).decode("utf-8")
output=output.split("\n")[-3]
matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%", output)
if matcher is not None:
recall=float(matcher.group(1))
precision=float(matcher.group(2))
f1=float(matcher.group(3))
return recall, precision, f1
def get_conll(path_to_scorer, gold=None, preds=None):
bcub_r, bcub_p, bcub_f=get_coref_score("bcub", path_to_scorer, gold, preds)
muc_r, muc_p, muc_f=get_coref_score("muc", path_to_scorer, gold, preds)
ceaf_r, ceaf_p, ceaf_f=get_coref_score("ceafe", path_to_scorer, gold, preds)
print("bcub:\t%.1f" % bcub_f)
print("muc:\t%.1f" % muc_f)
print("ceaf:\t%.1f" % ceaf_f)
avg=(bcub_f + muc_f + ceaf_f)/3.
print("Average F1: %.1f" % (avg))
# Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
return bcub_f, avg
if __name__ == "__main__":
goldFile=sys.argv[1]
predFile=sys.argv[2]
scorer=sys.argv[3]
bcub_f, avg=get_conll(scorer, gold=goldFile, preds=predFile)
| [
"subprocess.check_output",
"re.search"
] | [((232, 341), 're.search', 're.search', (['"""Coreference: Recall: \\\\(.*?\\\\) (.*?)%\tPrecision: \\\\(.*?\\\\) (.*?)%\tF1: (.*?)%"""', 'output'], {}), "(\n 'Coreference: Recall: \\\\(.*?\\\\) (.*?)%\\tPrecision: \\\\(.*?\\\\) (.*?)%\\tF1: (.*?)%'\n , output)\n", (241, 341), False, 'import subprocess, re, sys\n'), ((105, 175), 'subprocess.check_output', 'subprocess.check_output', (["['perl', path_to_scorer, metric, preds, gold]"], {}), "(['perl', path_to_scorer, metric, preds, gold])\n", (128, 175), False, 'import subprocess, re, sys\n')] |
import argparse
import json
import base64
import hashlib
import sys
import binascii
from optigatrust.util.types import *
from optigatrust.pk import *
from optigatrust.x509 import *
private_key_slot_map = {
'second': KeyId.ECC_KEY_E0F1,
'0xE0E1': KeyId.ECC_KEY_E0F1,
'0xE0F1': KeyId.ECC_KEY_E0F1,
'third': KeyId.ECC_KEY_E0F2,
'0xE0E2': KeyId.ECC_KEY_E0F2,
'0xE0F2': KeyId.ECC_KEY_E0F2,
'fourth': KeyId.ECC_KEY_E0F3,
'0xE0E3': KeyId.ECC_KEY_E0F3,
'0xE0F3': KeyId.ECC_KEY_E0F3,
'five': KeyId.RSA_KEY_E0FC,
'0xE0FC': KeyId.RSA_KEY_E0FC,
'six': KeyId.RSA_KEY_E0FD,
'0xE0FD': KeyId.RSA_KEY_E0FD
}
certificate_slot_map = {
'second': ObjectId.USER_CERT_1,
'0xE0E1': ObjectId.USER_CERT_1,
'0xE0F1': ObjectId.USER_CERT_1,
'third': ObjectId.USER_CERT_2,
'0xE0E2': ObjectId.USER_CERT_2,
'0xE0F2': ObjectId.USER_CERT_2,
'fourth': ObjectId.USER_CERT_3,
'0xE0E3': ObjectId.USER_CERT_3,
'0xE0F3': ObjectId.USER_CERT_3,
'0xE0E8': ObjectId.TRUST_ANCHOR_1,
'0xE0EF': ObjectId.TRUST_ANCHOR_2
}
object_slot_map = {
'0xf1d0': ObjectId.DATA_TYPE1_0,
'0xf1d1': ObjectId.DATA_TYPE1_1,
'0xf1d2': ObjectId.DATA_TYPE1_2,
'0xf1d3': ObjectId.DATA_TYPE1_3,
'0xf1d4': ObjectId.DATA_TYPE1_4,
'0xf1d5': ObjectId.DATA_TYPE1_5,
'0xf1d6': ObjectId.DATA_TYPE1_6,
'0xf1d7': ObjectId.DATA_TYPE1_7,
'0xf1d8': ObjectId.DATA_TYPE1_8,
'0xf1d9': ObjectId.DATA_TYPE1_9,
'0xf1da': ObjectId.DATA_TYPE1_A,
'0xf1db': ObjectId.DATA_TYPE1_B,
'0xf1dc': ObjectId.DATA_TYPE1_C,
'0xf1dd': ObjectId.DATA_TYPE1_D,
'0xf1de': ObjectId.DATA_TYPE1_E,
'0xf1e0': ObjectId.DATA_TYPE2_0,
'0xf1e1': ObjectId.DATA_TYPE2_1
}
allowed_object_ids = [
# Certificate Slots
'0xe0e0', '0xe0e1', '0xe0e2', '0xe0e3',
# Trust Anchor Slots
'0xe0e8', '0xe0ef',
# Arbitrary Data Objects
'0xf1d0', '0xf1d1', '0xf1d2', '0xf1d3', '0xf1d4', '0xf1d5', '0xf1d6', '0xf1d7',
'0xf1d8', '0xf1d9', '0xf1da', '0xf1db', '0xf1dc', '0xf1dd', '0xf1de',
'0xf1e0', '0xf1e1'
]
def _break_apart(f, sep, step):
return sep.join(f[n:n + step] for n in range(0, len(f), step))
def parse_csr(_args):
if not _args.csr:
raise IOError('--csr command is used, but no config file provided. Exit.')
with open(_args.csr, "r") as csr_config:
try:
cfg = json.load(csr_config)
cfg = cfg['csr_config']
if not _args.quiet or _args.verbose:
print("\nYour configuration is following:\n{0}".format(json.dumps(cfg, sort_keys=True, indent=4)))
if 'certificate_info' not in cfg:
raise IOError("Your CSR configuration file should have a certificate_info field. Check out the example")
if 'key_info' not in cfg:
raise IOError("Your CSR configuration file should have a key_info field. Check out the example")
if 'signature_info' not in cfg:
raise IOError("Your CSR configuration file should have a signature_info field. Check out the example")
except json.JSONDecodeError as err:
raise IOError("The config file incorrectly composed. Parser error. "
"Unformated Message from parser: {0}".format(err.msg))
if _args.slot:
if _args.slot not in private_key_slot_map:
raise ValueError("--slot has been used with wrong argument, allowed values {0}, you used {1}".
format(private_key_slot_map, _args.slot))
_key_id_slot = private_key_slot_map[_args.slot]
else:
if cfg['key_info']['parameters']['slot'] not in private_key_slot_map:
raise ValueError("--slot has been used with wrong argument, allowed values {0}, you used {1}".
format(private_key_slot_map, cfg['key_info']['parameters']['slot']))
_key_id_slot = private_key_slot_map[cfg['key_info']['parameters']['slot']]
if cfg['key_info']['algorithm_id'] == 'ec':
key = ecc.generate_keypair(cfg['key_info']['parameters']['curve'], _key_id_slot)
elif cfg['key_info']['algorithm_id'] == 'rsa':
key = rsa.generate_keypair(cfg['key_info']['parameters']['key_size'], _key_id_slot)
else:
raise ValueError("unsupported algorithm_id, allowed values 'rsa', or 'ec', you used {0}".
format(cfg['key_info']['algorithm_id']))
builder = csr.Builder(cfg['certificate_info'], key)
_csr_request = base64.b64encode(builder.build(key).dump())
csr_fingerprint_sha1 = hashlib.sha1(_csr_request).hexdigest()
csr_request = '-----BEGIN CERTIFICATE REQUEST-----\n'
csr_request += _break_apart(_csr_request.decode(), '\n', 64)
csr_request += '\n-----END CERTIFICATE REQUEST-----'
with open(csr_fingerprint_sha1 + ".csr", "w+") as csr_file:
csr_file.write(csr_request)
_return_value = {
"filename": csr_fingerprint_sha1 + ".csr",
"public_key": binascii.hexlify(bytearray(key.pkey)).decode()
}
if _args.query:
if _args.query[0] not in _return_value:
raise ValueError("The query argument is not within the available values. Available {0}, you gave {1}".
format(_return_value.keys(), _args.query))
return_value = _return_value[_args.query[0]]
else:
return_value = _return_value
sys.stdout.write(str(return_value))
sys.stdout.flush()
sys.exit(0)
def parse_write(_args):
if not _args.write:
raise IOError('--write command is used, but no data file provided. Exit.')
if not _args.slot:
_id = 'second'
else:
_id = _args.slot
if _id not in certificate_slot_map:
raise ValueError("--id has been used with wrong argument, allowed values {0}, you used {1}".
format(certificate_slot_map, _id))
_certificate_slot = certificate_slot_map[_id]
with open(_args.write, "r") as datafile:
data = datafile.read()
if not _args.quiet or _args.verbose:
print("Your are going to write the following file:\n{0}".format(data))
cert.write_new(data, _certificate_slot)
if not _args.quiet or _args.verbose:
print("Certificate has been written")
'''
#################################################################################################################
'''
parser = argparse.ArgumentParser(description="Communicate with your OPTIGA(TM) Trust sample")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("--query", nargs=1, metavar='QUERY_ARGUMENT',
help="Define the query argument you want to extract from the output")
parser.add_argument("--csr", metavar='CONFIG_FILE',
help="Instructs the script to generate a Certificate Signing Request."
"Give the script the configuration file for your CSR (fields like Common Name, "
"AWS IoT Thing Name, etc)")
parser.add_argument("--write", metavar='DATA_TO_WRITE', help="Write provided data to the chip.")
parser.add_argument("--read",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
parser.add_argument("--slot",
choices=[
# They all mean the same
'second', '0xe0e1', '0xe0f1',
'third', '0xe0e2', '0xe0f2',
'fourth', '0xe0e3', '0xe0f3',
'five', '0xe0fc', 'six', '0xe0fd',
'0xE0E8', '0xE0EF'
],
help="Use one the predefined slots; e.g. second, 0xe0e1, or 0xe0f1, they all mean the same")
parser.add_argument("--id",
metavar='OBJECT_ID',
choices=allowed_object_ids,
help="USe to define which ID to use with your write command \n"
"Certificate Slots: 0xe0e0-0xe0e3\n"
"Trust Anchor slots: 0xe0e8 and 0xe0ef\n"
"100 bytes: 0xf1d0-0xf1de\n"
"1500 bytes: 0xf1e0, 0xf1e1")
args = parser.parse_args()
if args.csr:
parse_csr(args)
sys.exit(0)
if args.write:
parse_write(args)
sys.exit(0)
else:
parser.print_help()
sys.exit(0)
| [
"argparse.ArgumentParser",
"json.dumps",
"sys.exit",
"json.load",
"sys.stdout.flush",
"hashlib.sha1"
] | [((6610, 6699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Communicate with your OPTIGA(TM) Trust sample"""'}), "(description=\n 'Communicate with your OPTIGA(TM) Trust sample')\n", (6633, 6699), False, 'import argparse\n'), ((5600, 5618), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5616, 5618), False, 'import sys\n'), ((5624, 5635), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5632, 5635), False, 'import sys\n'), ((8805, 8816), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8813, 8816), False, 'import sys\n'), ((8861, 8872), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8869, 8872), False, 'import sys\n'), ((8910, 8921), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8918, 8921), False, 'import sys\n'), ((2470, 2491), 'json.load', 'json.load', (['csr_config'], {}), '(csr_config)\n', (2479, 2491), False, 'import json\n'), ((4704, 4730), 'hashlib.sha1', 'hashlib.sha1', (['_csr_request'], {}), '(_csr_request)\n', (4716, 4730), False, 'import hashlib\n'), ((2653, 2694), 'json.dumps', 'json.dumps', (['cfg'], {'sort_keys': '(True)', 'indent': '(4)'}), '(cfg, sort_keys=True, indent=4)\n', (2663, 2694), False, 'import json\n')] |
import pytest
from tequila import numpy as np
from tequila.circuit.gradient import grad
from tequila.objective.objective import Objective, Variable
import operator
def test_nesting():
a = Variable(name='a')
variables = {a: 3.0}
b = a + 2 - 2
c = (b * 5) / 5
d = -(-c)
e = d ** 0.5
f = e ** 2
assert np.isclose(a(variables), f(variables))
def test_gradient():
a = Variable(name='a')
variables = {a: 3.0}
b = a + 2 - 2
c = (b * 5) / 5
d = -(-c)
assert grad(d, a)(variables) == 1.0
def test_equality():
a = Variable('a')
b = Variable('a.')
assert a != b
def test_transform_update():
a = Variable('a')
b = Variable('a.')
t = Objective(transformation=operator.add, args=[a, b])
variables = {a: 8, b: 1, a: 9, "c": 17}
assert np.isclose(float(t(variables)), 10.0)
@pytest.mark.parametrize('gradvar', ['a', 'b', 'c', 'd', 'e', 'f'])
def test_exotic_gradients(gradvar):
# a and b will fail for autograd not with jax
a = Variable('a')
b = Variable('b')
c = Variable('c')
d = Variable('d')
e = Variable('e')
f = Variable('f')
variables = {a: 2.0, b: 3.0, c: 4.0, d: 5.0, e: 6.0, f: 7.0}
t = c * a ** b + b / c - Objective(args=[c], transformation=np.cos) + f / (d * e) + a * Objective(args=[d],
transformation=np.exp) / (
f + b) + Objective(args=[e], transformation=np.tanh) + Objective(args=[f], transformation=np.sinc)
g = grad(t, gradvar)
if gradvar == 'a':
assert np.isclose(g(variables) , c(variables) * b(variables) * (a(variables) ** (b(variables) - 1.)) + np.exp(d(variables)) / (f(variables) + b(variables)))
if gradvar == 'b':
assert np.isclose(g(variables) , (c(variables) * a(variables) ** b(variables)) * np.log(a(variables)) + 1. / c(variables) - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2.0)
if gradvar == 'c':
assert np.isclose(g(variables) , a(variables) ** b(variables) - b(variables) / c(variables) ** 2. + np.sin(c(variables)))
if gradvar == 'd':
assert np.isclose(g(variables) , -f(variables) / (np.square(d(variables)) * e(variables)) + a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)))
if gradvar == 'e':
assert np.isclose(g(variables), 2. / (1. + np.cosh(2 * e(variables))) - f(variables) / (d(variables) * e(variables) ** 2.))
if gradvar == 'f':
assert np.isclose(g(variables) , 1. / (d(variables) * e(variables)) - a(variables) * np.exp(d(variables)) / (f(variables) + b(variables)) ** 2. + np.cos(np.pi * f(variables)) / f(variables) - np.sin(np.pi * f(variables)) / (np.pi * f(variables) ** 2.))
| [
"tequila.circuit.gradient.grad",
"pytest.mark.parametrize",
"tequila.objective.objective.Variable",
"tequila.objective.objective.Objective"
] | [((859, 925), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gradvar"""', "['a', 'b', 'c', 'd', 'e', 'f']"], {}), "('gradvar', ['a', 'b', 'c', 'd', 'e', 'f'])\n", (882, 925), False, 'import pytest\n'), ((194, 212), 'tequila.objective.objective.Variable', 'Variable', ([], {'name': '"""a"""'}), "(name='a')\n", (202, 212), False, 'from tequila.objective.objective import Objective, Variable\n'), ((404, 422), 'tequila.objective.objective.Variable', 'Variable', ([], {'name': '"""a"""'}), "(name='a')\n", (412, 422), False, 'from tequila.objective.objective import Objective, Variable\n'), ((572, 585), 'tequila.objective.objective.Variable', 'Variable', (['"""a"""'], {}), "('a')\n", (580, 585), False, 'from tequila.objective.objective import Objective, Variable\n'), ((594, 608), 'tequila.objective.objective.Variable', 'Variable', (['"""a."""'], {}), "('a.')\n", (602, 608), False, 'from tequila.objective.objective import Objective, Variable\n'), ((666, 679), 'tequila.objective.objective.Variable', 'Variable', (['"""a"""'], {}), "('a')\n", (674, 679), False, 'from tequila.objective.objective import Objective, Variable\n'), ((688, 702), 'tequila.objective.objective.Variable', 'Variable', (['"""a."""'], {}), "('a.')\n", (696, 702), False, 'from tequila.objective.objective import Objective, Variable\n'), ((711, 762), 'tequila.objective.objective.Objective', 'Objective', ([], {'transformation': 'operator.add', 'args': '[a, b]'}), '(transformation=operator.add, args=[a, b])\n', (720, 762), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1020, 1033), 'tequila.objective.objective.Variable', 'Variable', (['"""a"""'], {}), "('a')\n", (1028, 1033), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1042, 1055), 'tequila.objective.objective.Variable', 'Variable', (['"""b"""'], {}), "('b')\n", (1050, 1055), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1064, 1077), 
'tequila.objective.objective.Variable', 'Variable', (['"""c"""'], {}), "('c')\n", (1072, 1077), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1086, 1099), 'tequila.objective.objective.Variable', 'Variable', (['"""d"""'], {}), "('d')\n", (1094, 1099), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1108, 1121), 'tequila.objective.objective.Variable', 'Variable', (['"""e"""'], {}), "('e')\n", (1116, 1121), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1130, 1143), 'tequila.objective.objective.Variable', 'Variable', (['"""f"""'], {}), "('f')\n", (1138, 1143), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1578, 1594), 'tequila.circuit.gradient.grad', 'grad', (['t', 'gradvar'], {}), '(t, gradvar)\n', (1582, 1594), False, 'from tequila.circuit.gradient import grad\n'), ((1526, 1569), 'tequila.objective.objective.Objective', 'Objective', ([], {'args': '[f]', 'transformation': 'np.sinc'}), '(args=[f], transformation=np.sinc)\n', (1535, 1569), False, 'from tequila.objective.objective import Objective, Variable\n'), ((512, 522), 'tequila.circuit.gradient.grad', 'grad', (['d', 'a'], {}), '(d, a)\n', (516, 522), False, 'from tequila.circuit.gradient import grad\n'), ((1480, 1523), 'tequila.objective.objective.Objective', 'Objective', ([], {'args': '[e]', 'transformation': 'np.tanh'}), '(args=[e], transformation=np.tanh)\n', (1489, 1523), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1239, 1281), 'tequila.objective.objective.Objective', 'Objective', ([], {'args': '[c]', 'transformation': 'np.cos'}), '(args=[c], transformation=np.cos)\n', (1248, 1281), False, 'from tequila.objective.objective import Objective, Variable\n'), ((1302, 1344), 'tequila.objective.objective.Objective', 'Objective', ([], {'args': '[d]', 'transformation': 'np.exp'}), '(args=[d], transformation=np.exp)\n', (1311, 1344), False, 'from tequila.objective.objective 
import Objective, Variable\n')] |
from getnear.config import Tagged, Untagged, Ignore
from getnear.logging import info
from lxml import etree
import re
import requests
import telnetlib
def connect(hostname, *args, **kwargs):
url = f'http://{hostname}/'
html = requests.get(url).text
doc = etree.HTML(html)
for title in doc.xpath('//title'):
if re.match('NETGEAR GS\d+T', title.text):
return TSeries(hostname, *args, **kwargs)
class TSeries:
def __init__(self, hostname, password='password', old_password='password', debug=False):
info('connecting')
self.t = telnetlib.Telnet(hostname, 60000)
if debug:
self.t.set_debuglevel(2)
info('entering admin mode')
self.admin_mode()
info('logging in')
if self.login(password):
return
else:
info('trying old password')
self.admin_mode()
if self.login(old_password):
info('changing password')
self.change_password(old_password, password)
else:
raise Exception('login failed')
def admin_mode(self):
self.t.read_until(b'please wait ...')
self.t.write(b'admin\n')
def login(self, password):
self.t.read_until(b'Password:')
self.t.write(password.encode('ascii'))
self.t.write(b'\n')
_, _, match = self.t.expect([b'>', b'Applying'])
if b'Applying' in match:
return False
self.t.write(b'enable\n\n')
self.t.read_until(b'#')
return True
def exit(self):
# Leave "enable" mode
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'logout\n')
def get_current_config(self):
# (ports, pvids, {vlan_id -> {U, T, _, _...})
ports_pvids = dict(self.get_port_pvids())
ports = tuple(sorted(ports_pvids))
pvids = tuple(ports_pvids[p] for p in ports)
vlans = {}
vlan_ids = set(pvids) | set(self.get_vlan_ids())
for vlan_id in vlan_ids:
port_map = dict(self.get_vlan(vlan_id))
membership = tuple(port_map[p] for p in ports)
vlans[vlan_id] = membership
return (ports, pvids, vlans)
def get_vlan_ids(self):
self.t.write(b'show vlan brief\n')
output = self.page().decode(errors='ignore')
for line in output.splitlines():
fields = line.split()
if fields and fields[0].isnumeric():
yield int(fields[0])
def get_vlan(self, vlan_id):
self.t.write(f'show vlan {vlan_id}\n'.encode())
for line in self.paged_table_body():
fields = line.split(maxsplit=3)
interface_port, current = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
port = int(interface_port.split('/')[1])
is_included = current == 'Include'
is_tagged = 'Tagged' in line
if is_tagged:
state = Tagged
elif is_included:
state = Untagged
else:
state = Ignore
yield port, state
def get_port_pvids(self):
self.t.write(b'show vlan port all\n')
for line in self.paged_table_body():
fields = line.split()
interface_port, pvid_s = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
pvid = int(pvid_s)
yield port, pvid
def set_port_pvid(self, port, vlan_id):
self.do_configure_interface(port, f'vlan pvid {vlan_id}')
def set_port_vlan_tagging(self, port, vlan_id, is_tagged):
if is_tagged:
command = f'vlan tagging {vlan_id}'
else:
command = f'no vlan tagging {vlan_id}'
self.do_configure_interface(port, command)
def set_port_vlan_participation(self, port, vlan_id, is_included):
if is_included:
command = f'vlan participation include {vlan_id}'
else:
command = f'vlan participation exclude {vlan_id}'
self.do_configure_interface(port, command)
def add_vlan(self, vlan_id):
self.do_vlan_database(f'vlan {vlan_id}')
def delete_vlan(self, vlan_id):
self.do_vlan_database(f'no vlan {vlan_id}')
def do_configure_interface(self, port, command):
self.t.write(b'configure\n')
self.t.read_until(b'#')
self.t.write(f'interface 0/{port}\n'.encode())
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def do_vlan_database(self, command):
self.t.write(b'vlan database\n')
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def change_password(self, password_old, password_new):
# TODO For this to work, we have to leave "enable" mode. It would be
# better if all other commands entererd enable mode instead. More
# verbose, but less confusing. Maybe have a cursor to remember which
# mode we are in?
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'passwd\n')
self.t.read_until(b'Enter old password:')
self.t.write((password_old + '\n').encode())
self.t.read_until(b'Enter new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Confirm new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Password Changed!')
self.t.write(b'enable\n') # Double newline
self.t.read_until(b'#')
def paged_table_body(self):
output = self.page().decode(errors='ignore')
in_body = False
for line in output.splitlines():
if line.strip() == '':
in_body = False
if in_body:
yield line
if line and line[0:4] == '----':
in_body = True
def page(self):
result = b''
while True:
index, _, output = self.t.expect([
b'--More-- or \(q\)uit',
b'#'
])
result += output
if index == 0:
self.t.write(b'\n')
else:
break
return result
def sync(self, config):
ports, pvids, vlans = config
vlan_ids = set(pvids) | set(vlans)
for vlan_id in sorted(vlan_ids):
info(f'adding vlan {vlan_id}')
self.add_vlan(vlan_id)
for port, pvid in zip(ports, pvids):
info(f'setting port {port} to PVID {pvid}')
self.set_port_pvid(port, pvid)
for vlan_id, membership in vlans.items():
info(f'vlan {vlan_id}')
for port, status in zip(ports, membership):
if status == Ignore:
info(f' port {port} off')
self.set_port_vlan_participation(port, vlan_id, False)
else:
is_tagged = status == Tagged
symbol = 'T' if is_tagged else 'U'
info(f' port {port} {symbol}')
self.set_port_vlan_participation(port, vlan_id, True)
self.set_port_vlan_tagging(port, vlan_id, is_tagged)
| [
"getnear.logging.info",
"re.match",
"requests.get",
"lxml.etree.HTML",
"telnetlib.Telnet"
] | [((269, 285), 'lxml.etree.HTML', 'etree.HTML', (['html'], {}), '(html)\n', (279, 285), False, 'from lxml import etree\n'), ((236, 253), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (248, 253), False, 'import requests\n'), ((336, 375), 're.match', 're.match', (['"""NETGEAR GS\\\\d+T"""', 'title.text'], {}), "('NETGEAR GS\\\\d+T', title.text)\n", (344, 375), False, 'import re\n'), ((548, 566), 'getnear.logging.info', 'info', (['"""connecting"""'], {}), "('connecting')\n", (552, 566), False, 'from getnear.logging import info\n'), ((584, 617), 'telnetlib.Telnet', 'telnetlib.Telnet', (['hostname', '(60000)'], {}), '(hostname, 60000)\n', (600, 617), False, 'import telnetlib\n'), ((682, 709), 'getnear.logging.info', 'info', (['"""entering admin mode"""'], {}), "('entering admin mode')\n", (686, 709), False, 'from getnear.logging import info\n'), ((744, 762), 'getnear.logging.info', 'info', (['"""logging in"""'], {}), "('logging in')\n", (748, 762), False, 'from getnear.logging import info\n'), ((841, 868), 'getnear.logging.info', 'info', (['"""trying old password"""'], {}), "('trying old password')\n", (845, 868), False, 'from getnear.logging import info\n'), ((6811, 6841), 'getnear.logging.info', 'info', (['f"""adding vlan {vlan_id}"""'], {}), "(f'adding vlan {vlan_id}')\n", (6815, 6841), False, 'from getnear.logging import info\n'), ((6935, 6978), 'getnear.logging.info', 'info', (['f"""setting port {port} to PVID {pvid}"""'], {}), "(f'setting port {port} to PVID {pvid}')\n", (6939, 6978), False, 'from getnear.logging import info\n'), ((7085, 7108), 'getnear.logging.info', 'info', (['f"""vlan {vlan_id}"""'], {}), "(f'vlan {vlan_id}')\n", (7089, 7108), False, 'from getnear.logging import info\n'), ((956, 981), 'getnear.logging.info', 'info', (['"""changing password"""'], {}), "('changing password')\n", (960, 981), False, 'from getnear.logging import info\n'), ((7222, 7248), 'getnear.logging.info', 'info', (['f""" port {port} off"""'], {}), "(f' port 
{port} off')\n", (7226, 7248), False, 'from getnear.logging import info\n'), ((7470, 7501), 'getnear.logging.info', 'info', (['f""" port {port} {symbol}"""'], {}), "(f' port {port} {symbol}')\n", (7474, 7501), False, 'from getnear.logging import info\n')] |
from string import ascii_letters
import textwrap
from fontTools.misc.testTools import getXML
from fontTools import subset
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.subset.svg import NAMESPACES, ranges
import pytest
etree = pytest.importorskip("lxml.etree")
@pytest.fixture
def empty_svg_font():
glyph_order = [".notdef"] + list(ascii_letters)
pen = TTGlyphPen(glyphSet=None)
pen.moveTo((0, 0))
pen.lineTo((0, 500))
pen.lineTo((500, 500))
pen.lineTo((500, 0))
pen.closePath()
glyph = pen.glyph()
glyphs = {g: glyph for g in glyph_order}
fb = FontBuilder(unitsPerEm=1024, isTTF=True)
fb.setupGlyphOrder(glyph_order)
fb.setupCharacterMap({ord(c): c for c in ascii_letters})
fb.setupGlyf(glyphs)
fb.setupHorizontalMetrics({g: (500, 0) for g in glyph_order})
fb.setupHorizontalHeader()
fb.setupOS2()
fb.setupPost()
fb.setupNameTable({"familyName": "TestSVG", "styleName": "Regular"})
svg_table = newTable("SVG ")
svg_table.docList = []
fb.font["SVG "] = svg_table
return fb.font
def new_svg(**attrs):
return etree.Element("svg", {"xmlns": NAMESPACES["svg"], **attrs})
def _lines(s):
return textwrap.dedent(s).splitlines()
@pytest.mark.parametrize(
"gids, retain_gids, expected_xml",
[
# keep four glyphs in total, don't retain gids, which thus get remapped
(
"2,4-6",
False,
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph1" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="3" startGlyphID="3">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph3" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
# same four glyphs, but we now retain gids
(
"2,4-6",
True,
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="5" startGlyphID="5">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph5" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="6" startGlyphID="6">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph6" d="M6,6"/></svg>]]>
</svgDoc>
"""
),
),
],
)
def test_subset_single_glyph_per_svg(
empty_svg_font, tmp_path, gids, retain_gids, expected_xml
):
font = empty_svg_font
svg_docs = font["SVG "].docList
for i in range(1, 11):
svg = new_svg()
etree.SubElement(svg, "path", {"id": f"glyph{i}", "d": f"M{i},{i}"})
svg_docs.append((etree.tostring(svg).decode(), i, i))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={gids}",
"--retain_gids" if retain_gids else "--no-retain_gids",
]
)
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
# This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not it's visual content.
COMPLEX_SVG = """\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""
@pytest.mark.parametrize(
"subset_gids, expected_xml",
[
# we only keep gid=2, with 'glyph2' defined inside 'glyph1': 'glyph2'
# is renamed 'glyph1' to match the new subset indices, and the old 'glyph1'
# is kept (as it contains 'glyph2') but renamed '.glyph1' to avoid clash
(
"2",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id=".glyph1">
<g id="glyph1">
<path d="M0,0"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
# we keep both gid 1 and 2: the glyph elements' ids stay as they are (only the
# range endGlyphID change); a gradient is kept since it's referenced by glyph1
(
"1,2",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# both gid 3 and 6 refer (via <use xlink:href="#...") to path 'p1', which
# is thus kept in <defs>; the glyph ids and range start/end are renumbered.
"3,6",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="p1" d="M3,3"/>
</defs>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<g id="glyph2">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph4' uses the whole 'glyph1' element (translated); we keep the latter
# renamed to avoid clashes with new gids
"3-4",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<path id="p1" d="M3,3"/>
</defs>
<g id=".glyph1">
<g id=".glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<use id="glyph2" xlink:href="#.glyph1" x="10"/>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph9' uses a path 'p2' defined inside 'glyph7', the latter is excluded
# from our subset, thus gets renamed '.glyph7'; an unrelated element with
# same id=".glyph7" doesn't clash because it was dropped.
# Similarly 'glyph10' uses path 'p3' defined inside 'glyph8', also excluded
# from subset and prefixed with '.'. But since an id=".glyph8" is already
# used in the doc, we append a .{digit} suffix to disambiguate.
"9,10",
_lines(
"""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="group1">
<g id=".glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph8.1">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p2"/>
</g>
<g id="glyph2">
<use xlink:href="#p3"/>
</g>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph11' uses gradient 'rg4' which inherits from 'rg3', which inherits
# from 'rg2', etc.
"11",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
</defs>
<g id="glyph1">
<path d="M7,7" fill="url(#rg4)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
(
# 'glyph12' contains a style attribute with inline CSS declarations that
# contains references to a gradient fill and a clipPath: we keep those
"12",
_lines(
"""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
]]>
</svgDoc>
"""
),
),
],
)
def test_subset_svg_with_references(
empty_svg_font, tmp_path, subset_gids, expected_xml
):
font = empty_svg_font
font["SVG "].docList.append((COMPLEX_SVG, 1, 12))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
subset.main(
[
str(svg_font_path),
f"--output-file={subset_path}",
f"--gids={subset_gids}",
"--pretty-svg",
]
)
subset_font = TTFont(subset_path)
if expected_xml is not None:
assert getXML(subset_font["SVG "].toXML, subset_font) == expected_xml
else:
assert "SVG " not in subset_font
def test_subset_svg_empty_table(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append((etree.tostring(svg).decode(), 1, 1))
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# there's no gid=2 in SVG table, drop the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
def test_subset_svg_missing_glyph(empty_svg_font, tmp_path):
font = empty_svg_font
svg = new_svg()
etree.SubElement(svg, "rect", {"id": "glyph1", "x": "1", "y": "2"})
font["SVG "].docList.append(
(
etree.tostring(svg).decode(),
1,
# the range endGlyphID=2 declares two glyphs however our svg contains
# only one glyph element with id="glyph1", the "glyph2" one is absent.
# Techically this would be invalid according to the OT-SVG spec.
2,
)
)
svg_font_path = tmp_path / "TestSVG.ttf"
font.save(svg_font_path)
subset_path = svg_font_path.with_suffix(".subset.ttf")
# make sure we don't crash when we don't find the expected "glyph2" element
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=1"])
subset_font = TTFont(subset_path)
assert getXML(subset_font["SVG "].toXML, subset_font) == [
'<svgDoc endGlyphID="1" startGlyphID="1">',
' <![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><rect id="glyph1" x="1" y="2"/></svg>]]>',
"</svgDoc>",
]
# ignore the missing gid even if included in the subset; in this test case we
# end up with an empty svg document--which is dropped, along with the empty table
subset.main([str(svg_font_path), f"--output-file={subset_path}", f"--gids=2"])
assert "SVG " not in TTFont(subset_path)
@pytest.mark.parametrize(
"ints, expected_ranges",
[
((), []),
((0,), [(0, 0)]),
((0, 1), [(0, 1)]),
((1, 1, 1, 1), [(1, 1)]),
((1, 3), [(1, 1), (3, 3)]),
((4, 2, 1, 3), [(1, 4)]),
((1, 2, 4, 5, 6, 9, 13, 14, 15), [(1, 2), (4, 6), (9, 9), (13, 15)]),
],
)
def test_ranges(ints, expected_ranges):
assert list(ranges(ints)) == expected_ranges
| [
"textwrap.dedent",
"pytest.mark.parametrize",
"fontTools.subset.svg.ranges",
"pytest.importorskip",
"fontTools.ttLib.newTable",
"fontTools.pens.ttGlyphPen.TTGlyphPen",
"fontTools.misc.testTools.getXML",
"fontTools.ttLib.TTFont",
"fontTools.fontBuilder.FontBuilder"
] | [((339, 372), 'pytest.importorskip', 'pytest.importorskip', (['"""lxml.etree"""'], {}), "('lxml.etree')\n", (358, 372), False, 'import pytest\n'), ((16533, 16796), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ints, expected_ranges"""', '[((), []), ((0,), [(0, 0)]), ((0, 1), [(0, 1)]), ((1, 1, 1, 1), [(1, 1)]),\n ((1, 3), [(1, 1), (3, 3)]), ((4, 2, 1, 3), [(1, 4)]), ((1, 2, 4, 5, 6, \n 9, 13, 14, 15), [(1, 2), (4, 6), (9, 9), (13, 15)])]'], {}), "('ints, expected_ranges', [((), []), ((0,), [(0, 0)]\n ), ((0, 1), [(0, 1)]), ((1, 1, 1, 1), [(1, 1)]), ((1, 3), [(1, 1), (3, \n 3)]), ((4, 2, 1, 3), [(1, 4)]), ((1, 2, 4, 5, 6, 9, 13, 14, 15), [(1, 2\n ), (4, 6), (9, 9), (13, 15)])])\n", (16556, 16796), False, 'import pytest\n'), ((476, 501), 'fontTools.pens.ttGlyphPen.TTGlyphPen', 'TTGlyphPen', ([], {'glyphSet': 'None'}), '(glyphSet=None)\n', (486, 501), False, 'from fontTools.pens.ttGlyphPen import TTGlyphPen\n'), ((701, 741), 'fontTools.fontBuilder.FontBuilder', 'FontBuilder', ([], {'unitsPerEm': '(1024)', 'isTTF': '(True)'}), '(unitsPerEm=1024, isTTF=True)\n', (712, 741), False, 'from fontTools.fontBuilder import FontBuilder\n'), ((1088, 1104), 'fontTools.ttLib.newTable', 'newTable', (['"""SVG """'], {}), "('SVG ')\n", (1096, 1104), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((4056, 4075), 'fontTools.ttLib.TTFont', 'TTFont', (['subset_path'], {}), '(subset_path)\n', (4062, 4075), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((14341, 14360), 'fontTools.ttLib.TTFont', 'TTFont', (['subset_path'], {}), '(subset_path)\n', (14347, 14360), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((15967, 15986), 'fontTools.ttLib.TTFont', 'TTFont', (['subset_path'], {}), '(subset_path)\n', (15973, 15986), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((4088, 4134), 'fontTools.misc.testTools.getXML', 'getXML', (["subset_font['SVG '].toXML", 'subset_font'], {}), "(subset_font['SVG '].toXML, subset_font)\n", (4094, 
4134), False, 'from fontTools.misc.testTools import getXML\n'), ((15076, 15095), 'fontTools.ttLib.TTFont', 'TTFont', (['subset_path'], {}), '(subset_path)\n', (15082, 15095), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((15998, 16044), 'fontTools.misc.testTools.getXML', 'getXML', (["subset_font['SVG '].toXML", 'subset_font'], {}), "(subset_font['SVG '].toXML, subset_font)\n", (16004, 16044), False, 'from fontTools.misc.testTools import getXML\n'), ((16510, 16529), 'fontTools.ttLib.TTFont', 'TTFont', (['subset_path'], {}), '(subset_path)\n', (16516, 16529), False, 'from fontTools.ttLib import TTFont, newTable\n'), ((1307, 1325), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (1322, 1325), False, 'import textwrap\n'), ((14410, 14456), 'fontTools.misc.testTools.getXML', 'getXML', (["subset_font['SVG '].toXML", 'subset_font'], {}), "(subset_font['SVG '].toXML, subset_font)\n", (14416, 14456), False, 'from fontTools.misc.testTools import getXML\n'), ((16912, 16924), 'fontTools.subset.svg.ranges', 'ranges', (['ints'], {}), '(ints)\n', (16918, 16924), False, 'from fontTools.subset.svg import NAMESPACES, ranges\n')] |
"""
Subprocessor for device messages
"""
import logging
from ..utilities.tag import Tag
from ..workers import device_worker_forward
from ..workers import device_worker_startup
from ..utilities.address import Address
from ..utilities.status import Status
from ..utilities.data_indexes import SubprocessorIndex
_SubprocessorIndex = SubprocessorIndex()
logger = logging.Logger('utilities.process_device')
class ProcessDevice(object):
"""
Subprocessor for device messages
"""
def __init__(self, utim):
"""
Initialization of subprocessor for device messages
"""
self.__utim = utim
def process(self, data):
"""
Process device message
:param data: array [source, destination, status, body]
:return: same as input
"""
logger.info('Starting device processing')
# Placeholder for data being processed, that will be returned one day
res = data
while (res[_SubprocessorIndex.status] is not Status.STATUS_TO_SEND and
res[_SubprocessorIndex.status] is not Status.STATUS_FINALIZED and
res[_SubprocessorIndex.source] is Address.ADDRESS_DEVICE):
command = res[_SubprocessorIndex.body][0:1]
if command == Tag.INBOUND.DATA_TO_PLATFORM:
res = device_worker_forward.process(self.__utim, res)
elif command == Tag.INBOUND.NETWORK_READY:
res = device_worker_startup.process(self.__utim, res)
else:
res[_SubprocessorIndex.status] = Status.STATUS_FINALIZED
if (res[_SubprocessorIndex.status] is Status.STATUS_TO_SEND or
res[_SubprocessorIndex.status] is Status.STATUS_FINALIZED):
break
return res
| [
"logging.Logger"
] | [((361, 403), 'logging.Logger', 'logging.Logger', (['"""utilities.process_device"""'], {}), "('utilities.process_device')\n", (375, 403), False, 'import logging\n')] |
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
SQLITE = 'db.sqlite3'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, SQLITE) + '?check_same_thread=False'
| [
"os.path.dirname",
"os.path.join"
] | [((39, 64), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (54, 64), False, 'import os\n'), ((187, 217), 'os.path.join', 'os.path.join', (['BASE_DIR', 'SQLITE'], {}), '(BASE_DIR, SQLITE)\n', (199, 217), False, 'import os\n')] |
import pytest
from flipy.lp_problem import LpProblem
from flipy.lp_objective import LpObjective, Maximize
from flipy.lp_variable import LpVariable
from flipy.lp_expression import LpExpression
from flipy.lp_constraint import LpConstraint
from io import StringIO
@pytest.fixture
def problem():
return LpProblem('test_problem')
@pytest.fixture
def expression(x):
return LpExpression(name='test_expr', expression={x: 998}, constant=8)
@pytest.mark.usefixtures('problem', 'x')
class TestLpProblem(object):
def test_init(self):
problem = LpProblem('test_problem')
assert problem.lp_objective is None
assert len(problem.lp_constraints) == 0 and isinstance(problem.lp_constraints, dict)
assert len(problem.lp_variables) == 0 and isinstance(problem.lp_variables, dict)
def test_add_variable(self, problem, x):
problem.add_variable(x)
assert problem.lp_variables == {'x': x}
with pytest.raises(Exception) as e:
problem.add_variable('123')
assert e.value.args == ('123 is not an LpVariable',)
x2 = LpVariable('x')
with pytest.raises(Exception) as e:
problem.add_variable(x2)
assert e.value.args == ('LP variable name x conflicts with an existing LP variable',)
def test_set_objective(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8)
problem.set_objective(objective)
assert problem.lp_objective == objective
with pytest.raises(Exception) as e:
problem.set_objective(objective)
assert e.value.args == ('LP objective is already set',)
assert x.obj_coeff == 998
def test_add_constraint(self, problem, x):
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {x: 1}, 2)
constraint = LpConstraint(rhs, 'geq', lhs, 'constraint')
problem.add_constraint(constraint)
assert problem.lp_constraints[constraint.name] == constraint
assert problem.lp_variables[x.name] == x
constraint = LpConstraint(lhs, 'geq', rhs, 'constraint')
with pytest.raises(Exception) as e:
problem.add_constraint(constraint)
assert e.value.args == ('LP constraint name %s conflicts with an existing LP constraint' % constraint.name,)
with pytest.raises(Exception) as e:
problem.add_constraint(10)
assert e.value.args == ('%s is not an LpConstraint' % 10,)
def test_write(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8)
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {}, -2)
constraint = LpConstraint(rhs, 'geq', lhs, 'constraint')
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMinimize\nminimize_cpm: 998 x + 8\nSubject To\nconstraint: x >= -2\nBounds\nx <= 10\nEnd'
def test_write_slack(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8, sense=Maximize)
rhs = LpExpression('rhs', {x: 1})
lhs = LpExpression('lhs', {}, -2)
constraint = LpConstraint(rhs, 'leq', lhs, 'constraint', True, 100)
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMaximize\nminimize_cpm: 998 x - 100 constraint_slack_variable + 8\nSubject To\nconstraint: - constraint_slack_variable + x <= -2\nBounds\nx <= 10\nEnd'
def test_write_with_empty_constraint(self, problem, x):
objective = LpObjective(name='minimize_cpm', expression={x: 998}, constant=8, sense=Maximize)
constraint = LpConstraint(LpExpression('lhs', {x: 0}), 'leq', LpExpression('rhs', {}), 'constraint')
problem.add_constraint(constraint)
problem.set_objective(objective)
buffer = StringIO()
problem.write_lp(buffer)
flipy_string = buffer.getvalue()
assert flipy_string == '\\* test_problem *\\\nMaximize\nminimize_cpm: 998 x + 8\nSubject To\nBounds\nx <= 10\nEnd'
| [
"flipy.lp_expression.LpExpression",
"flipy.lp_variable.LpVariable",
"flipy.lp_problem.LpProblem",
"flipy.lp_constraint.LpConstraint",
"pytest.raises",
"pytest.mark.usefixtures",
"io.StringIO",
"flipy.lp_objective.LpObjective"
] | [((448, 487), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""problem"""', '"""x"""'], {}), "('problem', 'x')\n", (471, 487), False, 'import pytest\n'), ((307, 332), 'flipy.lp_problem.LpProblem', 'LpProblem', (['"""test_problem"""'], {}), "('test_problem')\n", (316, 332), False, 'from flipy.lp_problem import LpProblem\n'), ((381, 444), 'flipy.lp_expression.LpExpression', 'LpExpression', ([], {'name': '"""test_expr"""', 'expression': '{x: 998}', 'constant': '(8)'}), "(name='test_expr', expression={x: 998}, constant=8)\n", (393, 444), False, 'from flipy.lp_expression import LpExpression\n'), ((561, 586), 'flipy.lp_problem.LpProblem', 'LpProblem', (['"""test_problem"""'], {}), "('test_problem')\n", (570, 586), False, 'from flipy.lp_problem import LpProblem\n'), ((1099, 1114), 'flipy.lp_variable.LpVariable', 'LpVariable', (['"""x"""'], {}), "('x')\n", (1109, 1114), False, 'from flipy.lp_variable import LpVariable\n'), ((1357, 1422), 'flipy.lp_objective.LpObjective', 'LpObjective', ([], {'name': '"""minimize_cpm"""', 'expression': '{x: 998}', 'constant': '(8)'}), "(name='minimize_cpm', expression={x: 998}, constant=8)\n", (1368, 1422), False, 'from flipy.lp_objective import LpObjective, Maximize\n'), ((1763, 1790), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""rhs"""', '{x: 1}'], {}), "('rhs', {x: 1})\n", (1775, 1790), False, 'from flipy.lp_expression import LpExpression\n'), ((1805, 1835), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""lhs"""', '{x: 1}', '(2)'], {}), "('lhs', {x: 1}, 2)\n", (1817, 1835), False, 'from flipy.lp_expression import LpExpression\n'), ((1857, 1900), 'flipy.lp_constraint.LpConstraint', 'LpConstraint', (['rhs', '"""geq"""', 'lhs', '"""constraint"""'], {}), "(rhs, 'geq', lhs, 'constraint')\n", (1869, 1900), False, 'from flipy.lp_constraint import LpConstraint\n'), ((2085, 2128), 'flipy.lp_constraint.LpConstraint', 'LpConstraint', (['lhs', '"""geq"""', 'rhs', '"""constraint"""'], {}), "(lhs, 'geq', rhs, 
'constraint')\n", (2097, 2128), False, 'from flipy.lp_constraint import LpConstraint\n'), ((2547, 2612), 'flipy.lp_objective.LpObjective', 'LpObjective', ([], {'name': '"""minimize_cpm"""', 'expression': '{x: 998}', 'constant': '(8)'}), "(name='minimize_cpm', expression={x: 998}, constant=8)\n", (2558, 2612), False, 'from flipy.lp_objective import LpObjective, Maximize\n'), ((2627, 2654), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""rhs"""', '{x: 1}'], {}), "('rhs', {x: 1})\n", (2639, 2654), False, 'from flipy.lp_expression import LpExpression\n'), ((2669, 2696), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""lhs"""', '{}', '(-2)'], {}), "('lhs', {}, -2)\n", (2681, 2696), False, 'from flipy.lp_expression import LpExpression\n'), ((2718, 2761), 'flipy.lp_constraint.LpConstraint', 'LpConstraint', (['rhs', '"""geq"""', 'lhs', '"""constraint"""'], {}), "(rhs, 'geq', lhs, 'constraint')\n", (2730, 2761), False, 'from flipy.lp_constraint import LpConstraint\n'), ((2863, 2873), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2871, 2873), False, 'from io import StringIO\n'), ((3157, 3243), 'flipy.lp_objective.LpObjective', 'LpObjective', ([], {'name': '"""minimize_cpm"""', 'expression': '{x: 998}', 'constant': '(8)', 'sense': 'Maximize'}), "(name='minimize_cpm', expression={x: 998}, constant=8, sense=\n Maximize)\n", (3168, 3243), False, 'from flipy.lp_objective import LpObjective, Maximize\n'), ((3253, 3280), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""rhs"""', '{x: 1}'], {}), "('rhs', {x: 1})\n", (3265, 3280), False, 'from flipy.lp_expression import LpExpression\n'), ((3295, 3322), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""lhs"""', '{}', '(-2)'], {}), "('lhs', {}, -2)\n", (3307, 3322), False, 'from flipy.lp_expression import LpExpression\n'), ((3344, 3398), 'flipy.lp_constraint.LpConstraint', 'LpConstraint', (['rhs', '"""leq"""', 'lhs', '"""constraint"""', '(True)', '(100)'], {}), "(rhs, 'leq', lhs, 'constraint', True, 
100)\n", (3356, 3398), False, 'from flipy.lp_constraint import LpConstraint\n'), ((3500, 3510), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3508, 3510), False, 'from io import StringIO\n'), ((3872, 3958), 'flipy.lp_objective.LpObjective', 'LpObjective', ([], {'name': '"""minimize_cpm"""', 'expression': '{x: 998}', 'constant': '(8)', 'sense': 'Maximize'}), "(name='minimize_cpm', expression={x: 998}, constant=8, sense=\n Maximize)\n", (3883, 3958), False, 'from flipy.lp_objective import LpObjective, Maximize\n'), ((4164, 4174), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4172, 4174), False, 'from io import StringIO\n'), ((953, 977), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (966, 977), False, 'import pytest\n'), ((1128, 1152), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1141, 1152), False, 'import pytest\n'), ((1527, 1551), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1540, 1551), False, 'import pytest\n'), ((2142, 2166), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2155, 2166), False, 'import pytest\n'), ((2351, 2375), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2364, 2375), False, 'import pytest\n'), ((3988, 4015), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""lhs"""', '{x: 0}'], {}), "('lhs', {x: 0})\n", (4000, 4015), False, 'from flipy.lp_expression import LpExpression\n'), ((4024, 4047), 'flipy.lp_expression.LpExpression', 'LpExpression', (['"""rhs"""', '{}'], {}), "('rhs', {})\n", (4036, 4047), False, 'from flipy.lp_expression import LpExpression\n')] |
from pathlib import Path
import click
import yaml
from sklearn.model_selection import ParameterGrid
from ertk.utils import PathlibPath, get_arg_mapping
@click.command()
@click.argument("param_grid", type=PathlibPath(exists=True, dir_okay=False))
@click.argument("output", type=Path)
@click.option("--format", help="Format string.")
def main(param_grid: Path, output: Path, format: str):
    """Creates a new parameters YAML file in the OUTPUT directory for
    each combination of parameters in the PARAM_GRID file. The names of
    the files will be formatted according to the --format parameter if
    given, or else assigned a number starting from 1.
    """
    grid = get_arg_mapping(param_grid)
    output.mkdir(exist_ok=True, parents=True)
    # start=1 so the default names match the documented "starting from 1"
    # (previously enumerate started at 0, producing params_00.yaml first).
    for i, params in enumerate(ParameterGrid(grid), start=1):
        if format:
            # User-supplied pattern, filled from the parameter values.
            filename = format.format(**params)
        else:
            filename = f"params_{i:02d}"
        if not filename.endswith(".yaml"):
            filename += ".yaml"
        with open(output / filename, "w") as fid:
            yaml.dump(params, fid)
        print(f"Wrote {output / filename}.")


if __name__ == "__main__":
    main()
| [
"sklearn.model_selection.ParameterGrid",
"click.argument",
"ertk.utils.PathlibPath",
"yaml.dump",
"click.option",
"click.command",
"ertk.utils.get_arg_mapping"
] | [((157, 172), 'click.command', 'click.command', ([], {}), '()\n', (170, 172), False, 'import click\n'), ((251, 286), 'click.argument', 'click.argument', (['"""output"""'], {'type': 'Path'}), "('output', type=Path)\n", (265, 286), False, 'import click\n'), ((288, 335), 'click.option', 'click.option', (['"""--format"""'], {'help': '"""Format string."""'}), "('--format', help='Format string.')\n", (300, 335), False, 'import click\n'), ((677, 704), 'ertk.utils.get_arg_mapping', 'get_arg_mapping', (['param_grid'], {}), '(param_grid)\n', (692, 704), False, 'from ertk.utils import PathlibPath, get_arg_mapping\n'), ((782, 801), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['grid'], {}), '(grid)\n', (795, 801), False, 'from sklearn.model_selection import ParameterGrid\n'), ((208, 248), 'ertk.utils.PathlibPath', 'PathlibPath', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (219, 248), False, 'from ertk.utils import PathlibPath, get_arg_mapping\n'), ((1062, 1084), 'yaml.dump', 'yaml.dump', (['params', 'fid'], {}), '(params, fid)\n', (1071, 1084), False, 'import yaml\n')] |
# -*- coding: utf-8 -*-
"""
Tests for vector_tile/polygon.py
"""
import unittest
from mapbox_vector_tile.polygon import make_it_valid
from shapely import wkt
import os
class TestPolygonMakeValid(unittest.TestCase):
    """Tests for make_it_valid, which repairs invalid polygons
    (self-touching, self-intersecting, flipped rings) while keeping
    their area intact.

    Uses self.assertEqual throughout: assertEquals is a deprecated alias
    that was removed in Python 3.12.
    """

    def test_dev_errors(self):
        """Every geometry in errors.wkt becomes valid and keeps >=90% area."""
        test_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(test_dir, 'errors.wkt')) as fh:
            for line in fh:
                geom = wkt.loads(line)
                fixed = make_it_valid(geom)
                self.assertTrue(fixed.is_valid)
                self.assertTrue(fixed.area > 0.9 * abs(geom.area))

    def test_multipolygon_with_flipped_ring(self):
        """A multipolygon with a mis-wound ring keeps its full area."""
        geom = wkt.loads("""MULTIPOLYGON(
          ((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),
          ((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(24, fixed.area)

    def test_polygon_self_touching(self):
        """Outer shell touching itself at a point is repaired."""
        geom = wkt.loads("""POLYGON(
          (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(21, fixed.area)

    def test_polygon_self_touching_inner(self):
        """Self-touching inner ring is repaired."""
        geom = wkt.loads("""POLYGON(
          (-1 -1, -1 6, 6 6, 6 -1, -1 -1),
          (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(28, fixed.area)

    def test_polygon_inners_touching(self):
        """Two inner rings touching at a single point are repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 6 0, 6 6, 0 6, 0 0),
          (1 1, 1 3, 3 3, 3 1, 1 1),
          (3 3, 3 5, 5 5, 5 3, 3 3)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(28, fixed.area)

    def test_polygon_inner_touching_outer(self):
        """Inner ring touching the outer shell is repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 3 0, 3 3, 0 3, 0 0),
          (1 1, 2 3, 2 1, 1 1)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(8, fixed.area)

    def test_polygon_two_inners_touching_outer(self):
        """Two inner rings each touching the outer shell are repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 6 0, 6 3, 0 3, 0 0),
          (1 1, 2 3, 2 1, 1 1),
          (4 1, 5 3, 5 1, 4 1)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(16, fixed.area)

    def test_polygon_inners_touching_colinear(self):
        """Overlapping inner rings sharing a colinear edge are repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 6 0, 6 6, 0 6, 0 0),
          (1 1, 1 3, 3 4, 3 1, 1 1),
          (3 2, 3 5, 5 5, 5 3, 3 2)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(26, fixed.area)

    def test_polygon_inner_colinear_outer(self):
        """Inner ring with an edge colinear with the outer shell."""
        geom = wkt.loads("""POLYGON(
          (0 0, 3 0, 3 3, 0 3, 0 0),
          (1 1, 1 3, 2 3, 2 1, 1 1)
        )""")
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(7, fixed.area)

    def test_polygon_many_inners_touching(self):
        """A chain of several touching inner rings is repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 5 0, 5 5, 0 5, 0 0),
          (1 1, 1 2, 3 2, 1 1),
          (3 1, 3 3, 4 1, 3 1),
          (2 2, 1 4, 2 4, 2 2),
          (2 3, 4 4, 4 3, 2 3)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(21, fixed.area)

    def test_polygon_inner_spike(self):
        """Inner ring with a degenerate zero-width spike is repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 3 0, 3 4, 0 4, 0 0),
          (1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(10, fixed.area)

    def test_polygon_disconnected_inner(self):
        """A 3x3 grid of touching inner triangles is repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 5 0, 5 5, 0 5, 0 0),
          (1 1, 1 2, 2 2, 1 1),
          (2 1, 2 2, 3 2, 2 1),
          (3 1, 3 2, 4 2, 3 1),
          (1 2, 1 3, 2 3, 1 2),
          (2 2, 2 3, 3 3, 2 2),
          (3 2, 3 3, 4 3, 3 2),
          (1 3, 1 4, 2 4, 1 3),
          (2 3, 2 4, 3 4, 2 3),
          (3 3, 3 4, 4 4, 3 3)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(20.5, fixed.area)

    def test_polygon_disconnected_outer(self):
        """Inner ring touching the shell in two places, splitting it."""
        geom = wkt.loads("""POLYGON(
          (0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 1 2, 1 3, 0 3, 0 0),
          (1 1, 1 2, 3 2, 3 1, 1 1)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(9, fixed.area)

    def test_polygon_ring_of_inners(self):
        """Four inner triangles forming a closed ring are repaired."""
        geom = wkt.loads("""POLYGON(
          (0 0, 4 0, 4 4, 0 4, 0 0),
          (1 1, 1 2, 2 1, 1 1),
          (1 2, 1 3, 2 3, 1 2),
          (2 3, 3 3, 3 2, 2 3),
          (2 1, 3 2, 3 1, 2 1)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(14, fixed.area)

    def test_polygon_ring_of_inners_2(self):
        """Ring of inner triangles plus extra detached inners."""
        geom = wkt.loads("""POLYGON(
          (0 0, 5 0, 5 5, 0 5, 0 0),
          (1 3, 1 4, 2 4, 1 3),
          (3 3, 4 3, 4 2, 3 3),
          (1 1, 1 2, 2 1, 1 1),
          (1 2, 1 3, 2 3, 1 2),
          (2 3, 3 3, 3 2, 2 3),
          (2 1, 3 2, 3 1, 2 1)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        self.assertEqual(22, fixed.area)

    def test_polygon_inners_crossing_outer(self):
        """Real-world geometry whose inner rings cross the outer shell."""
        geom = wkt.loads("""POLYGON (
          (2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),
          (2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),
          (2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),
          (2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 1040),
          (2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),
          (2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),
          (2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),
          (2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)
        )""")
        self.assertFalse(geom.is_valid)
        fixed = make_it_valid(geom)
        self.assertTrue(fixed.is_valid)
        # different versions of GEOS hit this bug in slightly different ways,
        # meaning that some inners get included and some don't, depending on
        # the version. therefore, we need quite a wide range of acceptable
        # answers.
        #
        # the main part of this polygon (outer - largest inner) has area 1551,
        # and the smaller inners sum up to area 11, so we'll take +/-6 from
        # 1545.
        self.assertAlmostEqual(1545, fixed.area, delta=6)
| [
"shapely.wkt.loads",
"mapbox_vector_tile.polygon.make_it_valid",
"os.path.realpath",
"os.path.join"
] | [((669, 850), 'shapely.wkt.loads', 'wkt.loads', (['"""MULTIPOLYGON(\n ((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),\n ((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))\n )"""'], {}), '(\n """MULTIPOLYGON(\n ((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),\n ((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))\n )"""\n )\n', (678, 850), False, 'from shapely import wkt\n'), ((857, 876), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (870, 876), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((1017, 1118), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)\n )"""'], {}), '(\n """POLYGON(\n (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)\n )"""\n )\n', (1026, 1118), False, 'from shapely import wkt\n'), ((1125, 1144), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (1138, 1144), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((1291, 1435), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (-1 -1, -1 6, 6 6, 6 -1, -1 -1),\n (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)\n )"""'], {}), '(\n """POLYGON(\n (-1 -1, -1 6, 6 6, 6 -1, -1 -1),\n (1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)\n )"""\n )\n', (1300, 1435), False, 'from shapely import wkt\n'), ((1442, 1461), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (1455, 1461), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((1604, 1759), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 6 0, 6 6, 0 6, 0 0),\n (1 1, 1 3, 3 3, 3 1, 1 1),\n (3 3, 3 5, 5 5, 5 3, 3 3)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 6 0, 6 6, 0 6, 0 0),\n (1 1, 1 3, 3 3, 3 1, 1 1),\n (3 3, 3 5, 5 5, 5 3, 3 3)\n )"""\n )\n', (1613, 1759), False, 'from shapely import wkt\n'), ((1766, 1785), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (1779, 1785), False, 
'from mapbox_vector_tile.polygon import make_it_valid\n'), ((1933, 2046), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 3 0, 3 3, 0 3, 0 0),\n (1 1, 2 3, 2 1, 1 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 3 0, 3 3, 0 3, 0 0),\n (1 1, 2 3, 2 1, 1 1)\n )"""\n )\n', (1942, 2046), False, 'from shapely import wkt\n'), ((2053, 2072), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (2066, 2072), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((2224, 2369), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 6 0, 6 3, 0 3, 0 0),\n (1 1, 2 3, 2 1, 1 1),\n (4 1, 5 3, 5 1, 4 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 6 0, 6 3, 0 3, 0 0),\n (1 1, 2 3, 2 1, 1 1),\n (4 1, 5 3, 5 1, 4 1)\n )"""\n )\n', (2233, 2369), False, 'from shapely import wkt\n'), ((2376, 2395), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (2389, 2395), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((2547, 2702), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 6 0, 6 6, 0 6, 0 0),\n (1 1, 1 3, 3 4, 3 1, 1 1),\n (3 2, 3 5, 5 5, 5 3, 3 2)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 6 0, 6 6, 0 6, 0 0),\n (1 1, 1 3, 3 4, 3 1, 1 1),\n (3 2, 3 5, 5 5, 5 3, 3 2)\n )"""\n )\n', (2556, 2702), False, 'from shapely import wkt\n'), ((2749, 2768), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (2762, 2768), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((2916, 3034), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 3 0, 3 3, 0 3, 0 0),\n (1 1, 1 3, 2 3, 2 1, 1 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 3 0, 3 3, 0 3, 0 0),\n (1 1, 1 3, 2 3, 2 1, 1 1)\n )"""\n )\n', (2925, 3034), False, 'from shapely import wkt\n'), ((3041, 3060), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (3054, 3060), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((3207, 3416), 
'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 1, 1 2, 3 2, 1 1),\n (3 1, 3 3, 4 1, 3 1),\n (2 2, 1 4, 2 4, 2 2),\n (2 3, 4 4, 4 3, 2 3)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 1, 1 2, 3 2, 1 1),\n (3 1, 3 3, 4 1, 3 1),\n (2 2, 1 4, 2 4, 2 2),\n (2 3, 4 4, 4 3, 2 3)\n )"""\n )\n', (3216, 3416), False, 'from shapely import wkt\n'), ((3463, 3482), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (3476, 3482), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((3621, 3754), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 3 0, 3 4, 0 4, 0 0),\n (1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 3 0, 3 4, 0 4, 0 0),\n (1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)\n )"""\n )\n', (3630, 3754), False, 'from shapely import wkt\n'), ((3801, 3820), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (3814, 3820), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((3966, 4335), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 1, 1 2, 2 2, 1 1),\n (2 1, 2 2, 3 2, 2 1),\n (3 1, 3 2, 4 2, 3 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 2, 2 3, 3 3, 2 2),\n (3 2, 3 3, 4 3, 3 2),\n (1 3, 1 4, 2 4, 1 3),\n (2 3, 2 4, 3 4, 2 3),\n (3 3, 3 4, 4 4, 3 3)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 1, 1 2, 2 2, 1 1),\n (2 1, 2 2, 3 2, 2 1),\n (3 1, 3 2, 4 2, 3 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 2, 2 3, 3 3, 2 2),\n (3 2, 3 3, 4 3, 3 2),\n (1 3, 1 4, 2 4, 1 3),\n (2 3, 2 4, 3 4, 2 3),\n (3 3, 3 4, 4 4, 3 3)\n )"""\n )\n', (3975, 4335), False, 'from shapely import wkt\n'), ((4382, 4401), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (4395, 4401), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((4549, 4692), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 
1 2, 1 3, 0 3, 0 0),\n (1 1, 1 2, 3 2, 3 1, 1 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 1 2, 1 3, 0 3, 0 0),\n (1 1, 1 2, 3 2, 3 1, 1 1)\n )"""\n )\n', (4558, 4692), False, 'from shapely import wkt\n'), ((4739, 4758), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (4752, 4758), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((4899, 5108), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 4 0, 4 4, 0 4, 0 0),\n (1 1, 1 2, 2 1, 1 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 3, 3 3, 3 2, 2 3),\n (2 1, 3 2, 3 1, 2 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 4 0, 4 4, 0 4, 0 0),\n (1 1, 1 2, 2 1, 1 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 3, 3 3, 3 2, 2 3),\n (2 1, 3 2, 3 1, 2 1)\n )"""\n )\n', (4908, 5108), False, 'from shapely import wkt\n'), ((5155, 5174), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (5168, 5174), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((5318, 5591), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 3, 1 4, 2 4, 1 3),\n (3 3, 4 3, 4 2, 3 3),\n (1 1, 1 2, 2 1, 1 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 3, 3 3, 3 2, 2 3),\n (2 1, 3 2, 3 1, 2 1)\n )"""'], {}), '(\n """POLYGON(\n (0 0, 5 0, 5 5, 0 5, 0 0),\n (1 3, 1 4, 2 4, 1 3),\n (3 3, 4 3, 4 2, 3 3),\n (1 1, 1 2, 2 1, 1 1),\n (1 2, 1 3, 2 3, 1 2),\n (2 3, 3 3, 3 2, 2 3),\n (2 1, 3 2, 3 1, 2 1)\n )"""\n )\n', (5327, 5591), False, 'from shapely import wkt\n'), ((5638, 5657), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (5651, 5657), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((5806, 6394), 'shapely.wkt.loads', 'wkt.loads', (['"""POLYGON (\n (2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),\n (2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),\n (2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),\n (2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 
1040),\n (2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),\n (2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),\n (2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),\n (2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)\n )"""'], {}), '(\n """POLYGON (\n (2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),\n (2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),\n (2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),\n (2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 1040),\n (2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),\n (2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),\n (2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),\n (2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)\n )"""\n )\n', (5815, 6394), False, 'from shapely import wkt\n'), ((6441, 6460), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (6454, 6460), False, 'from mapbox_vector_tile.polygon import make_it_valid\n'), ((285, 311), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (301, 311), False, 'import os\n'), ((331, 367), 'os.path.join', 'os.path.join', (['test_dir', '"""errors.wkt"""'], {}), "(test_dir, 'errors.wkt')\n", (343, 367), False, 'import os\n'), ((427, 442), 'shapely.wkt.loads', 'wkt.loads', (['line'], {}), '(line)\n', (436, 442), False, 'from shapely import wkt\n'), ((467, 486), 'mapbox_vector_tile.polygon.make_it_valid', 'make_it_valid', (['geom'], {}), '(geom)\n', (480, 486), False, 'from mapbox_vector_tile.polygon import make_it_valid\n')] |
# encoding: utf-8
import os
import ssl
import sys
import csv
import json
import time
import base64
from datetime import datetime
from datetime import timedelta
try:
import pyodbc
except ImportError:
pass
# PYTHON 2 FALLBACK #
try:
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from io import StringIO
python = 3
except ImportError:
from urllib import urlencode
from urllib2 import urlopen, Request
from StringIO import StringIO
reload(sys)
sys.setdefaultencoding('utf8')
python = 2
# PYTHON 2 FALLBACK #
# Shared SSL context with certificate verification disabled.
# WARNING: this makes every HTTPS request vulnerable to man-in-the-middle
# attacks; presumably done to tolerate self-signed appliance certificates —
# confirm before reusing elsewhere.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# NOTE(review): 'global' at module level is a no-op; _debug is simply a
# module-level flag that enables verbose output in _post().
global _debug
_debug = True
def _post(url, query, options):
    """POST a DOQL query to `url` with HTTP Basic auth and return the raw
    response body (bytes).

    `options` must provide 'username' and 'password'. The request asks
    for a header row ("header": "yes"), so the body is CSV with headers.
    """
    # PYTHON 2 FALLBACK #
    if python == 3:
        # On Python 3 both the auth string and the form body must be bytes.
        base64string = base64.b64encode(bytes('%s:%s' % (options['username'], options['password']), 'utf-8'))
        post_data = bytes(urlencode({
            "query": query,
            "header": "yes"
        }), 'utf-8')
    else:
        base64string = base64.b64encode('%s:%s' % (options['username'], options['password']))
        post_data = urlencode({
            "query": query,
            "header": "yes"
        })
    # PYTHON 2 FALLBACK #
    request = Request(url, post_data)
    request.add_header("Authorization", "Basic %s" % base64string.decode("utf-8"))
    # Force the POST verb explicitly.
    request.get_method = lambda: 'POST'
    # `ctx` is the module-level SSL context (certificate checks disabled).
    r = urlopen(request, context=ctx)
    body = r.read()
    r.close()
    if _debug:
        # Verbose trace of every request; controlled by the module flag.
        msg = 'Status code: %s' % str(r.code)
        print('\n\t----------- POST FUNCTION -----------')
        print('\t' + url)
        print('\t' + msg)
        print('\tQuery: ' + query)
        print('\t------- END OF POST FUNCTION -------\n')
    return body
def get_list_from_csv(text):
    """Parse a UTF-8 encoded CSV payload (bytes) that includes a header row.

    Returns a tuple of (rows, fieldnames) where rows is a list of dicts
    keyed by the header columns and fieldnames preserves column order.
    """
    buffer = StringIO(text.decode("utf-8"))
    reader = csv.DictReader(
        buffer,
        quotechar='"',
        delimiter=',',
        quoting=csv.QUOTE_ALL,
        skipinitialspace=True,
        dialect='excel',
    )
    rows = [row for row in reader]
    # fieldnames is only reliable after the reader has consumed the header.
    return rows, list(reader.fieldnames)
def doql_call(config, query):
    """Execute the DOQL query described by `query` against the host in
    `config` and write the result out.

    `config` keys: 'host', 'username', 'password'.
    `query` keys used here: 'query' (SQL text), 'date' (optional dict with
    'column' and 'days_limit' for a date filter), 'output_format'
    ('csv' | 'json' | 'database'), 'output_filename', 'offset' (page size
    for LIMIT/OFFSET pagination), 'limit' (max rows overall), and for
    'database' output also 'connection_string' and 'table'.
    """
    # NOTE(review): `limit` is assigned but never used in this function.
    limit = 0
    # Collapse all whitespace runs in the query text to single spaces.
    query['query'] = ' '.join(query['query'].split())
    # prepare date-filtered query
    if query['date'] and query['date']['column'] and query['date']['days_limit']:
        index = None
        # Splice the date predicate either just after 'where' or, if there
        # is no WHERE clause, immediately before 'order by'.
        where_index = query['query'].find('where')
        order_index = query['query'].find('order by')
        if where_index > 0:
            index = where_index + 6
            query['query'] = query['query'][:index] + " %s > current_date - interval '%s day' and " % (query['date']['column'], query['date']['days_limit']) + query['query'][index:]
        elif order_index > 0:
            index = order_index
            query['query'] = query['query'][:index] + " where %s > current_date - interval '%s day' " % (query['date']['column'], query['date']['days_limit']) + query['query'][index:]
    if query['output_format'] == 'csv' or query['output_format'] == 'json':
        if query['offset']:
            # Paginated fetch: one output file per page of `offset` rows.
            page = 0
            _next = True
            while _next:
                doql_offset = page * query['offset']
                doql_limit = query['offset']
                # Shrink the final page so the overall 'limit' is not exceeded.
                if query['limit'] and query['limit'] > query['offset']:
                    if (doql_offset + query['offset']) > query['limit']:
                        doql_limit = query['limit'] - doql_offset
                else:
                    if query['limit']:
                        doql_limit = query['limit']
                doql_query = query['query'] + ' LIMIT %s OFFSET %s' % (doql_limit, doql_offset)
                res = _post(
                    'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
                        'username': config['username'],
                        'password': config['password']
                    }
                )
                csv_list, field_order = get_list_from_csv(res)
                if query['output_format'] == 'csv':
                    file = open('%s_%s_%s.csv' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S"), page + 1 ), 'w+')
                    file.write(res.decode("utf-8"))
                elif query['output_format'] == 'json':
                    file = open('%s_%s_%s.json' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S"), page + 1), 'w+')
                    file.write(json.dumps(csv_list, indent=4, sort_keys=True))
                # Stop when the page came back short or the overall limit is hit.
                if doql_limit != query['offset'] or len(csv_list) != query['offset'] or (doql_offset + doql_limit) == query['limit'] :
                    break
                page += 1
        else:
            # Single unpaginated fetch.
            if query['limit']:
                doql_query = query['query'] + ' LIMIT %s ' % query['limit']
            else:
                doql_query = query['query']
            res = _post(
                'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
                    'username': config['username'],
                    'password': config['password']
                }
            )
            csv_list, field_order = get_list_from_csv(res)
            if query['output_format'] == 'csv':
                file = open('%s_%s.csv' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S")), 'w+')
                # NOTE(review): unlike the paginated branch above, `res` is not
                # decoded here; on Python 3 writing bytes to a text-mode file
                # raises TypeError — confirm and decode if needed.
                file.write(res)
            elif query['output_format'] == 'json':
                csv_list, field_order = get_list_from_csv(res)
                file = open('%s_%s.json' % (query['output_filename'], time.strftime("%Y%m%d%H%M%S")), 'w+')
                file.write(json.dumps(csv_list, indent=4, sort_keys=True))
        file.close()
    elif query['output_format'] == 'database':
        if query['offset']:
            # Paginated fetch, inserting each page into the target table.
            page = 0
            _next = True
            while _next:
                doql_offset = page * query['offset']
                doql_limit = query['offset']
                if query['limit'] and query['limit'] > query['offset']:
                    if (doql_offset + query['offset']) > query['limit']:
                        doql_limit = query['limit'] - doql_offset
                else:
                    if query['limit']:
                        doql_limit = query['limit']
                doql_query = query['query'] + ' LIMIT %s OFFSET %s' % (doql_limit, doql_offset)
                res = _post(
                    'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
                        'username': config['username'],
                        'password': config['password']
                    }
                )
                csv_list, field_order = get_list_from_csv(res)
                cnxn = pyodbc.connect(query['connection_string'], autocommit=True)
                conn = cnxn.cursor()
                # WARNING: values are interpolated directly into the INSERT
                # statement; use parameterized queries if the data can contain
                # untrusted content.
                for record in csv_list:
                    # some special cases for strange DOQL responses ( that may break database such as MySQL )
                    query_str = "INSERT INTO %s (%s) VALUES (%s)" % (query['table'], ','.join(field_order), ','.join([str("'%s'" % record[x][:-1].replace("'", "\\'")) if record[x].endswith('\\') else str("'%s'" % record[x].replace("'", "\\'")) for x in record]))
                    conn.execute(query_str)
                print("Added %s records" % len(csv_list))
                if doql_limit != query['offset'] or len(csv_list) != query['offset'] or (doql_offset + doql_limit) == query['limit'] :
                    conn.close()
                    break
                page += 1
        else:
            # Single unpaginated fetch inserted into the target table.
            if query['limit']:
                doql_query = query['query'] + ' LIMIT %s ' % query['limit']
            else:
                doql_query = query['query']
            res = _post(
                'https://%s/services/data/v1.0/query/' % config['host'], doql_query, {
                    'username': config['username'],
                    'password': config['password']
                }
            )
            csv_list, field_order = get_list_from_csv(res)
            cnxn = pyodbc.connect(query['connection_string'], autocommit=True)
            conn = cnxn.cursor()
            # WARNING: values are interpolated directly into the INSERT
            # statement; use parameterized queries if the data can contain
            # untrusted content.
            for record in csv_list:
                # some special cases for strange DOQL responses ( that may break database such as MySQL )
                query_str = "INSERT INTO %s (%s) VALUES (%s)" % (query['table'], ','.join(field_order), ','.join([str("'%s'" % record[x][:-1].replace("'", "\\'")) if record[x].endswith('\\') else str("'%s'" % record[x].replace("'", "\\'")) for x in record]))
                conn.execute(query_str)
            print("Added %s records" % len(csv_list))
            conn.close()
def main():
    """Load connection settings and a query definition, then run the query.

    Reads credentials from ./settings.json and the query description from
    the JSON file named by the first command-line argument; exits with a
    message if either file is missing.
    """
    try:
        with open('settings.json') as data_file:
            config = json.load(data_file)
    except IOError:
        print('File "settings.json" doesn\'t exists.')
        sys.exit()
    try:
        with open(sys.argv[1]) as data_file:
            # NOTE(review): the second .replace() appears intended to swap a
            # non-breaking space for a regular space — confirm the literal;
            # if both arguments are plain spaces it is a no-op.
            query = json.loads(data_file.read().replace('\n', '').replace(" ", ' '))
    except IOError:
        print('File "%s" doesn\'t exists.' % sys.argv[1])
        sys.exit()
    doql_call(config, query)
if __name__ == "__main__":
    # The query-definition JSON file is required as the first argument.
    if len(sys.argv) < 2:
        print('Please use "python starter.py query.json".')
        sys.exit()
    main()
    print('Done!')
    sys.exit()
| [
"pyodbc.connect",
"urllib2.urlopen",
"csv.DictReader",
"sys.setdefaultencoding",
"ssl.create_default_context",
"base64.b64encode",
"json.dumps",
"time.strftime",
"urllib2.Request",
"urllib.urlencode",
"sys.exit",
"json.load"
] | [((598, 626), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (624, 626), False, 'import ssl\n'), ((1267, 1290), 'urllib2.Request', 'Request', (['url', 'post_data'], {}), '(url, post_data)\n', (1274, 1290), False, 'from urllib2 import urlopen, Request\n'), ((1422, 1451), 'urllib2.urlopen', 'urlopen', (['request'], {'context': 'ctx'}), '(request, context=ctx)\n', (1429, 1451), False, 'from urllib2 import urlopen, Request\n'), ((1873, 1987), 'csv.DictReader', 'csv.DictReader', (['f'], {'quotechar': '"""\\""""', 'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL', 'skipinitialspace': '(True)', 'dialect': '"""excel"""'}), '(f, quotechar=\'"\', delimiter=\',\', quoting=csv.QUOTE_ALL,\n skipinitialspace=True, dialect=\'excel\')\n', (1887, 1987), False, 'import csv\n'), ((9278, 9288), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9286, 9288), False, 'import sys\n'), ((522, 552), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (544, 552), False, 'import sys\n'), ((1055, 1125), 'base64.b64encode', 'base64.b64encode', (["('%s:%s' % (options['username'], options['password']))"], {}), "('%s:%s' % (options['username'], options['password']))\n", (1071, 1125), False, 'import base64\n'), ((1146, 1190), 'urllib.urlencode', 'urlencode', (["{'query': query, 'header': 'yes'}"], {}), "({'query': query, 'header': 'yes'})\n", (1155, 1190), False, 'from urllib import urlencode\n'), ((9232, 9242), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9240, 9242), False, 'import sys\n'), ((933, 977), 'urllib.urlencode', 'urlencode', (["{'query': query, 'header': 'yes'}"], {}), "({'query': query, 'header': 'yes'})\n", (942, 977), False, 'from urllib import urlencode\n'), ((8727, 8747), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (8736, 8747), False, 'import json\n'), ((8831, 8841), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8839, 8841), False, 'import sys\n'), ((9069, 9079), 'sys.exit', 'sys.exit', ([], {}), 
'()\n', (9077, 9079), False, 'import sys\n'), ((8018, 8077), 'pyodbc.connect', 'pyodbc.connect', (["query['connection_string']"], {'autocommit': '(True)'}), "(query['connection_string'], autocommit=True)\n", (8032, 8077), False, 'import pyodbc\n'), ((6670, 6729), 'pyodbc.connect', 'pyodbc.connect', (["query['connection_string']"], {'autocommit': '(True)'}), "(query['connection_string'], autocommit=True)\n", (6684, 6729), False, 'import pyodbc\n'), ((5572, 5618), 'json.dumps', 'json.dumps', (['csv_list'], {'indent': '(4)', 'sort_keys': '(True)'}), '(csv_list, indent=4, sort_keys=True)\n', (5582, 5618), False, 'import json\n'), ((4406, 4452), 'json.dumps', 'json.dumps', (['csv_list'], {'indent': '(4)', 'sort_keys': '(True)'}), '(csv_list, indent=4, sort_keys=True)\n', (4416, 4452), False, 'import json\n'), ((5253, 5282), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (5266, 5282), False, 'import time\n'), ((4094, 4123), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (4107, 4123), False, 'import time\n'), ((5507, 5536), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (5520, 5536), False, 'import time\n'), ((4327, 4356), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (4340, 4356), False, 'import time\n')] |
import unittest
from libsaas.executors import test_executor
from libsaas.services import googleoauth2
class GoogleOauth2TestCase(unittest.TestCase):
    """Tests for the GoogleOAuth2 service's token endpoints using the
    libsaas test executor to capture the outgoing request."""

    def setUp(self):
        executor = test_executor.use()
        executor.set_response(b'{}', 200, {})
        self.executor = executor
        self.service = googleoauth2.GoogleOAuth2('id', 'secret')

    def expect(self, method=None, uri=None, params=None):
        """Assert the captured request matches the given expectations."""
        request = self.executor.request
        if method is not None:
            self.assertEqual(method, request.method)
        if uri is not None:
            self.assertEqual(request.uri, self.service.APIROOT + uri)
        if params is not None:
            self.assertEqual(request.params, params)

    def test_access_token(self):
        expected = {
            'client_id': 'id',
            'client_secret': 'secret',
            'grant_type': 'authorization_code',
            'code': 'code',
            'redirect_uri': 'uri',
        }
        self.service.access_token('code', 'uri')
        self.expect('POST', '/token', expected)

    def test_refresh_token(self):
        expected = {
            'client_id': 'id',
            'client_secret': 'secret',
            'grant_type': 'refresh_token',
            'refresh_token': 'token',
        }
        self.service.refresh_token('token')
        self.expect('POST', '/token', expected)
| [
"libsaas.services.googleoauth2.GoogleOAuth2",
"libsaas.executors.test_executor.use"
] | [((198, 217), 'libsaas.executors.test_executor.use', 'test_executor.use', ([], {}), '()\n', (215, 217), False, 'from libsaas.executors import test_executor\n'), ((293, 334), 'libsaas.services.googleoauth2.GoogleOAuth2', 'googleoauth2.GoogleOAuth2', (['"""id"""', '"""secret"""'], {}), "('id', 'secret')\n", (318, 334), False, 'from libsaas.services import googleoauth2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from main import Config
from pyrogram import filters
from pyrogram import Client
#from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from urllib.parse import quote_plus, unquote
import math, os, time, datetime, aiohttp, asyncio, mimetypes, logging
from helpers.download_from_url import download_file, get_size
from helpers.file_handler import send_to_transfersh_async, progress
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from helpers.display_progress import progress_for_pyrogram, humanbytes
from helpers.tools import execute
from helpers.ffprobe import stream_creator
from helpers.thumbnail_video import thumb_creator
from helpers.url_uploader import leecher2
from helpers.video_renamer import rnv2
from helpers.audio_renamer import rna2
from helpers.file_renamer import rnf2
from helpers.vconverter import to_video2
from helpers.media_info import cinfo2
from helpers.link_info import linfo2
# Module-level logger (not used by the handlers below, kept for helpers).
logger = logging.getLogger(__name__)

# User-facing help text sent in reply to /help; formatted with Telegram
# Markdown (backticks and bold).
HELP_TXT = """
A Simple Telegram Bot to
Upload Files From **Direct** and **Google Drive** and **Youtube** Links,
Convert Document Media to Video,
and Rename Audio/Video/Document Files.
/upload : reply to your url .
`http://aaa.bbb.ccc/ddd.eee` | **fff.ggg**
or
`http://aaa.bbb.ccc/ddd.eee`
/c2v : reply to your document to convert it into streamable video.
/rnv : reply to your video. Example:
`/rnv | videoname`
/rna : reply to your audio. \"`-`\" : leave without change.
`/rna | audioname | title | artists`
`/rna | audioname`
`/rna | - | title`
`/rna | - | - | artists`
/rnf : reply to your document. Example:
`/rnf | filename.ext`
"""
# Handler for /start: replies with a short pointer to /help.
@Client.on_message(filters.command(["start"]))
async def start(client , m):
    """Send a message when the command /start is issued."""
    # NOTE: the f-prefix is redundant here (no placeholders in the string).
    await m.reply_text(text=f"Hi\n\nSee /help for More Info!")
# Handler for /help: replies with the module-level HELP_TXT.
@Client.on_message(filters.command(["help"]))
async def help(client , m):
    """Send a message when the command /help is issued."""
    await m.reply_text(text=f"{HELP_TXT}")
@Client.on_message(filters.private & filters.command(["rnv"]))
async def rnv1(client, u):
    """Handle /rnv: rename the replied-to video via rnv2.

    Access control: an empty Config.AUTH_USERS makes the bot public;
    otherwise only listed user ids may use it.
    """
    # Collapses the original duplicated if/elif branches into one condition.
    if not Config.AUTH_USERS or u.from_user.id in Config.AUTH_USERS:
        await rnv2(client, u)
    else:
        await u.reply_text(text=f"sorry ! you cant use this bot.\n\ndeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
        return
@Client.on_message(filters.private & filters.command(["rna"]))
async def rna1(client , u):
    """Gate the /rna (rename audio) command behind the allow-list, then
    delegate to rna2."""
    # Same access rule as the other handlers: empty AUTH_USERS = open bot.
    if u.from_user.id in Config.AUTH_USERS:
        await rna2(client , u)
    elif not Config.AUTH_USERS:
        await rna2(client , u)
    else:
        await u.reply_text(text=f"sorry ! you cant use this bot.\n\ndeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
        return
@Client.on_message(filters.private & filters.command(["rnf"]))
async def rnf1(client , u):
    """Gate the /rnf (rename file) command behind the allow-list, then
    delegate to rnf2."""
    # Same access rule as the other handlers: empty AUTH_USERS = open bot.
    if u.from_user.id in Config.AUTH_USERS:
        await rnf2(client , u)
    elif not Config.AUTH_USERS:
        await rnf2(client , u)
    else:
        await u.reply_text(text=f"sorry ! you cant use this bot.\n\ndeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
        return
@Client.on_message(filters.private & filters.command(["c2v"]))
async def to_video1(client , u):
    """Gate the /c2v (convert document to video) command behind the
    allow-list, then delegate to to_video2."""
    # Same access rule as the other handlers: empty AUTH_USERS = open bot.
    if u.from_user.id in Config.AUTH_USERS:
        await to_video2(client , u)
    elif not Config.AUTH_USERS:
        await to_video2(client , u)
    else:
        await u.reply_text(text=f"sorry ! you cant use this bot.\n\ndeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
        return
@Client.on_message(filters.private & (filters.audio | filters.document | filters.video))
async def cinfo1(client, m):
    """Show media information for any private audio/document/video message."""
    await cinfo2(client, m)
@Client.on_message(filters.private & filters.incoming & filters.text & (filters.regex('^(ht|f)tp*')))
async def linfo1(client, m):
    """Show link information for any incoming http/ftp-style URL message."""
    await linfo2(client, m)
@Client.on_message(filters.private & filters.command(["upload"]))
async def leecher1(client , u):
    """Gate the /upload (URL leech) command behind the allow-list, then
    delegate to leecher2."""
    # Same access rule as the other handlers: empty AUTH_USERS = open bot.
    if u.from_user.id in Config.AUTH_USERS:
        await leecher2(client , u)
    elif not Config.AUTH_USERS:
        await leecher2(client , u)
    else:
        await u.reply_text(text=f"sorry ! you cant use this bot.\n\ndeploy your own bot:\n[Repository_Link](https://github.com/prxpostern/URLtoTG001)", quote=True, disable_web_page_preview=True)
        return
| [
"logging.getLogger",
"pyrogram.filters.command",
"helpers.audio_renamer.rna2",
"helpers.url_uploader.leecher2",
"helpers.file_renamer.rnf2",
"helpers.link_info.linfo2",
"pyrogram.Client.on_message",
"helpers.media_info.cinfo2",
"helpers.video_renamer.rnv2",
"pyrogram.filters.regex",
"helpers.vco... | [((1037, 1064), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1054, 1064), False, 'import math, os, time, datetime, aiohttp, asyncio, mimetypes, logging\n'), ((4078, 4169), 'pyrogram.Client.on_message', 'Client.on_message', (['(filters.private & (filters.audio | filters.document | filters.video))'], {}), '(filters.private & (filters.audio | filters.document |\n filters.video))\n', (4095, 4169), False, 'from pyrogram import Client\n'), ((1823, 1849), 'pyrogram.filters.command', 'filters.command', (["['start']"], {}), "(['start'])\n", (1838, 1849), False, 'from pyrogram import filters\n'), ((2034, 2059), 'pyrogram.filters.command', 'filters.command', (["['help']"], {}), "(['help'])\n", (2049, 2059), False, 'from pyrogram import filters\n'), ((2237, 2261), 'pyrogram.filters.command', 'filters.command', (["['rnv']"], {}), "(['rnv'])\n", (2252, 2261), False, 'from pyrogram import filters\n'), ((2703, 2727), 'pyrogram.filters.command', 'filters.command', (["['rna']"], {}), "(['rna'])\n", (2718, 2727), False, 'from pyrogram import filters\n'), ((3165, 3189), 'pyrogram.filters.command', 'filters.command', (["['rnf']"], {}), "(['rnf'])\n", (3180, 3189), False, 'from pyrogram import filters\n'), ((3631, 3655), 'pyrogram.filters.command', 'filters.command', (["['c2v']"], {}), "(['c2v'])\n", (3646, 3655), False, 'from pyrogram import filters\n'), ((4208, 4225), 'helpers.media_info.cinfo2', 'cinfo2', (['client', 'm'], {}), '(client, m)\n', (4214, 4225), False, 'from helpers.media_info import cinfo2\n'), ((4376, 4393), 'helpers.link_info.linfo2', 'linfo2', (['client', 'm'], {}), '(client, m)\n', (4382, 4393), False, 'from helpers.link_info import linfo2\n'), ((4304, 4331), 'pyrogram.filters.regex', 'filters.regex', (['"""^(ht|f)tp*"""'], {}), "('^(ht|f)tp*')\n", (4317, 4331), False, 'from pyrogram import filters\n'), ((4435, 4462), 'pyrogram.filters.command', 'filters.command', (["['upload']"], {}), "(['upload'])\n", (4450, 
4462), False, 'from pyrogram import filters\n'), ((2354, 2369), 'helpers.video_renamer.rnv2', 'rnv2', (['client', 'u'], {}), '(client, u)\n', (2358, 2369), False, 'from helpers.video_renamer import rnv2\n'), ((2820, 2835), 'helpers.audio_renamer.rna2', 'rna2', (['client', 'u'], {}), '(client, u)\n', (2824, 2835), False, 'from helpers.audio_renamer import rna2\n'), ((3282, 3297), 'helpers.file_renamer.rnf2', 'rnf2', (['client', 'u'], {}), '(client, u)\n', (3286, 3297), False, 'from helpers.file_renamer import rnf2\n'), ((3753, 3773), 'helpers.vconverter.to_video2', 'to_video2', (['client', 'u'], {}), '(client, u)\n', (3762, 3773), False, 'from helpers.vconverter import to_video2\n'), ((4559, 4578), 'helpers.url_uploader.leecher2', 'leecher2', (['client', 'u'], {}), '(client, u)\n', (4567, 4578), False, 'from helpers.url_uploader import leecher2\n'), ((2419, 2434), 'helpers.video_renamer.rnv2', 'rnv2', (['client', 'u'], {}), '(client, u)\n', (2423, 2434), False, 'from helpers.video_renamer import rnv2\n'), ((2885, 2900), 'helpers.audio_renamer.rna2', 'rna2', (['client', 'u'], {}), '(client, u)\n', (2889, 2900), False, 'from helpers.audio_renamer import rna2\n'), ((3348, 3363), 'helpers.file_renamer.rnf2', 'rnf2', (['client', 'u'], {}), '(client, u)\n', (3352, 3363), False, 'from helpers.file_renamer import rnf2\n'), ((3824, 3844), 'helpers.vconverter.to_video2', 'to_video2', (['client', 'u'], {}), '(client, u)\n', (3833, 3844), False, 'from helpers.vconverter import to_video2\n'), ((4628, 4647), 'helpers.url_uploader.leecher2', 'leecher2', (['client', 'u'], {}), '(client, u)\n', (4636, 4647), False, 'from helpers.url_uploader import leecher2\n')] |
import typing
from typing import Any
import json
import os
from multiprocessing import Process, Queue
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from spacy.tokenizer import Tokenizer
import spacy
from tqdm.auto import tqdm
import time
nlp = spacy.load("en")
class TokenizingWorker(Process):
    """Worker process that reads raw lines from ``queue_in``, tokenizes them
    and pushes space-joined token strings onto ``queue_out``.

    A ``None`` sentinel on ``queue_in`` tells the worker to stop.
    """

    def __init__(
        self,
        pbar: Any,
        is_json: bool,
        queue_in: Queue,  # Queue where text comes in
        queue_out: Queue,  # Queue where tokens go
        tokenizer_type: str = "just_spaces",
    ):
        super(TokenizingWorker, self).__init__()
        self.queue_in = queue_in
        self.queue_out = queue_out
        self.is_json = is_json
        self.pbar = pbar
        if tokenizer_type == "just_spaces":
            tokenizer = SpacyWordSplitter()
            self.tokenizer = lambda text: list(map(str, tokenizer.split_words(text)))
        elif tokenizer_type == "spacy":
            tokenizer = Tokenizer(nlp.vocab)
            self.tokenizer = lambda text: list(map(str, tokenizer(text)))
        else:
            # Fail fast: the original left self.tokenizer unset for unknown
            # types, which only surfaced later as an AttributeError in run().
            raise ValueError("unknown tokenizer_type: {!r}".format(tokenizer_type))

    def run(self):
        for line in iter(self.queue_in.get, None):
            if self.is_json:
                text = json.loads(line)["text"]
            else:
                text = line
            tokens = self.tokenizer(text)
            # A blocking put() replaces the original poll-then-put pattern
            # (`while full(): sleep; put(block=False)`), which was a
            # check-then-act race: another worker could fill the queue
            # between the check and the put, raising queue.Full.
            self.queue_out.put(" ".join(tokens))
            self.pbar.update()
def multi_proc_data_loader(data_path: str, tokenizer_type: str = "just_spaces"):
    """Tokenize every line of *data_path* with a pool of TokenizingWorker
    processes and return the list of space-joined token strings.

    Treats ``.json``/``.jsonl`` files as JSON-lines (tokenizing the "text"
    field); any other file is tokenized line by line.  Result order is not
    guaranteed (workers race on the output queue).
    """
    num_processes = max(1, os.cpu_count() - 1)
    queue_in = Queue()
    queue_out = Queue(maxsize=10000)
    workers = []
    is_json = data_path.endswith(".jsonl") or data_path.endswith(".json")
    pbar = tqdm()
    for _ in range(num_processes):  # minus one so the main process keeps a core
        worker = TokenizingWorker(
            pbar=pbar,
            is_json=is_json, queue_in=queue_in, queue_out=queue_out, tokenizer_type=tokenizer_type
        )
        workers.append(worker)
        worker.start()
    with open(data_path, "r") as f:
        for line in f:
            queue_in.put(line)
    for worker in workers:
        # Each worker gets one None sentinel telling it to stop.
        queue_in.put(None)
    res = []
    alive = True
    while alive:
        while not queue_out.empty():
            res.append(queue_out.get(block=False))
        alive = any(w.is_alive() for w in workers)
        if alive:
            time.sleep(0.01)
    # Final drain: a worker may have pushed results between the last drain
    # and its exit, so the original could silently drop trailing items.
    while not queue_out.empty():
        res.append(queue_out.get(block=False))
    for worker in workers:
        worker.join()
    return res
| [
"json.loads",
"spacy.load",
"time.sleep",
"allennlp.data.tokenizers.word_splitter.SpacyWordSplitter",
"os.cpu_count",
"tqdm.auto.tqdm",
"spacy.tokenizer.Tokenizer",
"multiprocessing.Queue"
] | [((269, 285), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (279, 285), False, 'import spacy\n'), ((1608, 1615), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1613, 1615), False, 'from multiprocessing import Process, Queue\n'), ((1632, 1652), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(10000)'}), '(maxsize=10000)\n', (1637, 1652), False, 'from multiprocessing import Process, Queue\n'), ((1756, 1762), 'tqdm.auto.tqdm', 'tqdm', ([], {}), '()\n', (1760, 1762), False, 'from tqdm.auto import tqdm\n'), ((791, 810), 'allennlp.data.tokenizers.word_splitter.SpacyWordSplitter', 'SpacyWordSplitter', ([], {}), '()\n', (808, 810), False, 'from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter\n'), ((1573, 1587), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1585, 1587), False, 'import os\n'), ((2567, 2583), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2577, 2583), False, 'import time\n'), ((961, 981), 'spacy.tokenizer.Tokenizer', 'Tokenizer', (['nlp.vocab'], {}), '(nlp.vocab)\n', (970, 981), False, 'from spacy.tokenizer import Tokenizer\n'), ((1350, 1366), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1360, 1366), False, 'import time\n'), ((1180, 1196), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1190, 1196), False, 'import json\n')] |
# -*- coding:utf-8 -*-
import random
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from app.api.util.web_request import WebRequest, USER_AGENT_PC, USER_AGENT_MOBILE
class SpiderWebDriver(object):
    """Thin context-manager wrapper around a Selenium Chrome driver with a
    configurable user agent and optional HTTP proxy."""
    def __init__(self, url: str,
                 userAgent: str = None,referer: str=None, proxy: str = None):
        # Build the Chrome launch options.
        chrome_options = Options()
        # Capabilities reference: http://chromedriver.chromium.org/capabilities
        # Full switch list: https://peter.sh/experiments/chromium-command-line-switches/
        chrome_options.add_argument('lang=zh_CN.UTF-8')
        # chrome_options.add_argument('headless')
        # chrome_options.add_argument('window-size=1024,768')
        chrome_options.add_argument('no-sandbox')
        chrome_options.add_argument("disable-gpu")
        chrome_options.add_argument("ignore-certificate-errors");
        chrome_options.add_argument("disable-popup-blocking");
        chrome_options.add_argument("disable-default-apps");
        # Chrome is being controlled by automated test software
        if userAgent is None:
            # Default to a desktop Safari (on Windows) user agent.
            userAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2'
        chrome_options.add_argument('user-agent="' + userAgent + '"')
        # NOTE(review): the `referer` parameter is accepted but never used —
        # the referer below is hard-coded to google.com; confirm intent.
        chrome_options.add_argument('referer="https://www.google.com/"')
        if proxy is not None:
            proxy_str = "http://{proxy}".format(proxy=proxy)
            chrome_options.add_argument('proxy-server=' + proxy_str)
        # Driver binaries: http://chromedriver.storage.googleapis.com/index.html
        self.driver = webdriver.Chrome(options=chrome_options)
        self.driver.maximize_window()
        if url:
            self.driver.get(url=url)
    def close(self):
        """Close the window and quit the driver; idempotent."""
        driver = self.driver
        if driver is None:
            return
        try:
            driver.close()
            driver.quit()
        finally:
            # Mark as closed even if close()/quit() raised.
            self.driver = None
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        del exc_info
        self.close()
    def open(self, url):
        """Navigate the existing browser session to *url*."""
        self.driver.get(url)
    def get_cookies(self):
        """Return the current session cookies as a {name: value} dict."""
        cookies_dict = {}
        cookies = self.driver.get_cookies()
        for cookie in cookies:
            cookies_dict[cookie['name']] = cookie['value']
        return cookies_dict
    def execute_js(self, js, *args):
        # NOTE(review): `args` is passed as a single tuple argument, not
        # unpacked (`*args`) — scripts receive one arguments[0] tuple; confirm.
        return self.driver.execute_script(js, args)
def adsenseClick():
    """Repeatedly open random blog posts through rotating proxies and click
    the 'submit' element (ad interaction simulation)."""
    # Fetch the list of posts from the WordPress REST API.
    url = 'https://pingbook.top/wp-json/wp/v2/posts'
    r=WebRequest()
    post_list=r.pc().get(url=url).json()
    # links=[ item.get('link') for item in post_list]
    # print(links)
    # post_list =[{'link': 'https://pingbook.top/vue-videojs-m3u8-player-a-html5-video-player/'}]
    # Simulate opening the articles in a browser.
    proxyset = set()
    for num in range(10000):
        post=random.choice(post_list)
        post_url=post.get('link')
        print('发送请求的文章地址是: {}'.format(post_url))
        agents = USER_AGENT_PC + USER_AGENT_MOBILE
        time_count = num + 1
        driver = None
        try:
            # Obtain a fresh proxy from the proxy pool service.
            content = r.pc().get('https://open.pingbook.top/proxy/get?type=valid').json()
            proxy = content.get('data').get('proxy')
            print('发送请求的代理是: {}'.format(proxy))
            if proxy not in proxyset:
                # Skip proxies already used so the same IP is not reused.
                proxyset.add(proxy)
                agent = random.choice(agents)
                driver = SpiderWebDriver(post_url, agent, proxy)
                driver.open(post_url)
                print('已经打开博客地址: {}'.format(post_url))
                driver.driver.refresh()
                submitBtn =driver.driver.find_element_by_id('submit')
                if submitBtn:
                    # Scroll the target (ad) element into view before clicking.
                    driver.driver.execute_script('arguments[0].scrollIntoView(true);',submitBtn)
                    submitBtn.click()
                    time.sleep(3)
                    # driver.driver.refresh()
                    # wait = WebDriverWait(driver.driver, 6)
                    # element = wait.until(expected_conditions.element_to_be_clickable((By.ID, 'ads')))
                    # driver.close()
                    print('第{}次轮训成功,代理: {}。。。。'.format(time_count, proxy))
                    # actionBtn = driver.driver.find_element_by_class_name('copy-btn')
                    # if actionBtn:
                    #     driver.driver.refresh()
                    #     wait = WebDriverWait(driver.driver, 6)
                    #     element = wait.until(expected_conditions.element_to_be_clickable((By.ID, 'ads')))
                    #     actionBtn.click()
                    #     driver.close()
                    #     print('第{}次轮训成功,代理: {}。。。。'.format(time, proxy))
            else:
                print('当前代理地址: {}已经存在,不再使用该地址进行测试,代理池大小: {}!'.format(proxy,len(proxyset)))
        except Exception as e:
            print('第{}次轮训失败,失败信息: {}。。。。'.format(time_count, e))
            # raise
        finally:
            if driver is not None:
                driver.close()
def searchGoogle():
    """Search Google for a fixed keyword through rotating proxies and click
    the first organic result."""
    keyword= 'nuxt create nuxt app error :pingbook.top'
    # Simulate the search/click flow in a browser.
    proxyset = set()
    r=WebRequest()
    agents=USER_AGENT_PC
    for num in range(10000):
        driver = None
        try:
            # Obtain a fresh proxy from the proxy pool service.
            content = r.pc().get('https://open.pingbook.top/proxy/get?type=valid').json()
            proxy = content.get('data').get('proxy')
            print('发送请求的代理是: {}'.format(proxy))
            if proxy not in proxyset:
                # Skip proxies already used so the same IP is not reused.
                proxyset.add(proxy)
                agent = random.choice(agents)
                spider = SpiderWebDriver(None, agent, proxy)
                spider.open('https://google.com')
                driver =spider.driver
                # Type the keyword into the search box.
                inputbox=driver.find_element_by_name('q')
                if inputbox:
                    inputbox.send_keys(keyword)
                    inputbox.send_keys(Keys.ENTER)
                    time.sleep(3)
                    # Click the first search result.
                    first_record=driver.find_element_by_css_selector('#rso > div:nth-child(1) > div > div:nth-child(1) > div > div > div.r > a')
                    first_record.click()
                    time.sleep(5)
                    driver.refresh()
                    time.sleep(6)
        except Exception as e:
            print('第{}次轮训失败,失败信息: {}。。。。'.format(num, e))
        finally:
            if driver is not None:
                driver.quit()
if __name__ == '__main__':
adsenseClick()
| [
"selenium.webdriver.chrome.options.Options",
"random.choice",
"selenium.webdriver.Chrome",
"app.api.util.web_request.WebRequest",
"time.sleep"
] | [((2641, 2653), 'app.api.util.web_request.WebRequest', 'WebRequest', ([], {}), '()\n', (2651, 2653), False, 'from app.api.util.web_request import WebRequest, USER_AGENT_PC, USER_AGENT_MOBILE\n'), ((5239, 5251), 'app.api.util.web_request.WebRequest', 'WebRequest', ([], {}), '()\n', (5249, 5251), False, 'from app.api.util.web_request import WebRequest, USER_AGENT_PC, USER_AGENT_MOBILE\n'), ((454, 463), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (461, 463), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1709, 1749), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'chrome_options'}), '(options=chrome_options)\n', (1725, 1749), False, 'from selenium import webdriver\n'), ((2944, 2968), 'random.choice', 'random.choice', (['post_list'], {}), '(post_list)\n', (2957, 2968), False, 'import random\n'), ((3490, 3511), 'random.choice', 'random.choice', (['agents'], {}), '(agents)\n', (3503, 3511), False, 'import random\n'), ((5664, 5685), 'random.choice', 'random.choice', (['agents'], {}), '(agents)\n', (5677, 5685), False, 'import random\n'), ((3998, 4011), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4008, 4011), False, 'import time\n'), ((6065, 6078), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (6075, 6078), False, 'import time\n'), ((6315, 6328), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6325, 6328), False, 'import time\n'), ((6386, 6399), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (6396, 6399), False, 'import time\n')] |
import os
import argparse
import numpy as np
import csv
import cv2
img_w = 0
img_h = 0
def relativ2pixel(detection, frameHeight, frameWidth):
    """Convert a YOLO-style relative box (cx, cy, w, h in 0..1) into pixel
    coordinates [left, top, width, height] for the given frame size."""
    cx = int(detection[0] * frameWidth)
    cy = int(detection[1] * frameHeight)
    w = int(detection[2] * frameWidth)
    h = int(detection[3] * frameHeight)
    return [int(cx - w / 2), int(cy - h / 2), w, h]
def get_bbs_from_file(path):
    """Read bounding boxes from a text file.

    Each line holds "x1 y1 x2 y2" (corner coordinates); the result is a list
    of [x, y, width, height] boxes, one per line.
    """
    bbs = []
    # Context manager guarantees the handle is closed (the original opened
    # the file and never closed it).
    with open(path, "r") as boxes_file:
        for bb_line in boxes_file:
            x1, y1, x2, y2 = bb_line.split(' ')
            x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
            bbs.append([x1, y1, x2 - x1, y2 - y1])
    return bbs
def map_bbs_to_img(img, bbs):
    """Draw relative bounding boxes onto *img* (green, 2 px) and return it.

    *bbs* entries are [x, y, w, h] in relative (0..1) coordinates.
    """
    # The image size is loop-invariant; the original re-read img.shape on
    # every iteration.
    h_pixels, w_pixels = img.shape[:2]
    for bb in bbs:
        x1 = int(bb[0] * w_pixels)
        y1 = int(bb[1] * h_pixels)
        x2 = int((bb[0] + bb[2]) * w_pixels)
        y2 = int((bb[1] + bb[3]) * h_pixels)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return img
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the given width OR height, preserving aspect ratio.

    Returns ``(resized_image, inverse_scale)``.  When neither width nor
    height is given the image is returned unchanged with scale 1.0 — the
    original returned a bare image in that branch, breaking callers that
    unpack two values (``img, _ = ResizeWithAspectRatio(...)``).
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image, 1.0
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim), 1 / r
if __name__ == "__main__":
    """
    Command:
        python show_yolo.py -g
    """
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-g", "--gt", type=str,
        help="Path to gt bb .txt")
    args = vars(ap.parse_args())
    # NOTE(review): str.replace swaps EVERY "txt" occurrence, not just the
    # extension — a path containing "txt" in a directory name breaks; confirm.
    img_path = args["gt"].replace("txt", "png")
    img = cv2.imread(img_path,-1)
    if len(img.shape) < 3:
        # Promote grayscale to 3 channels so colored boxes/text can be drawn.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # start a new yolo txt file with name of image
    boxes = get_bbs_from_file(args["gt"])
    img = map_bbs_to_img(img, boxes)
    '''
    if img.shape[0] > img.shape[1]:
        img, _ = ResizeWithAspectRatio(img, height=1400)
    else:
        img, _ = ResizeWithAspectRatio(img, width=1400)
    '''
    '''
    print(img.shape)
    img_h, img_w = img.shape[1], img.shape[0]
    boxes = []
    lines = []
    with open(args["gt"]) as f:
        lines = f.read().splitlines()
    for line in lines:
        cl, c_x, c_y, w, h = line.split(' ')
        boxes.append(relativ2pixel([float(c_x), float(c_y), float(w), float(h)], img_w, img_h))
    for box in boxes:
        print(box)
        cv2.rectangle(img, (box[0],box[1]), (box[0]+box[2],box[1]+box[3]), (0,255,0), 1)
    '''
    # Stamp the file name onto the image and show the bottom 400 rows.
    cv2.putText(img, os.path.basename(img_path), (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow("output",img[-400:,:])
    key = cv2.waitKey()
| [
"cv2.rectangle",
"argparse.ArgumentParser",
"cv2.imshow",
"os.path.basename",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey",
"cv2.imread"
] | [((1581, 1606), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1604, 1606), False, 'import argparse\n'), ((1790, 1814), 'cv2.imread', 'cv2.imread', (['img_path', '(-1)'], {}), '(img_path, -1)\n', (1800, 1814), False, 'import cv2\n'), ((2804, 2839), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'img[-400:, :]'], {}), "('output', img[-400:, :])\n", (2814, 2839), False, 'import cv2\n'), ((2848, 2861), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2859, 2861), False, 'import cv2\n'), ((969, 1023), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (982, 1023), False, 'import cv2\n'), ((1394, 1416), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {}), '(image, dim)\n', (1404, 1416), False, 'import cv2\n'), ((1855, 1892), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (1867, 1892), False, 'import cv2\n'), ((2704, 2730), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2720, 2730), False, 'import os\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import codecs
import collections
import json
import math
import os
import sys
import time
import numpy as np
import tensorflow as tf
import pandas as pd
def hash_single_batch(batch, hparams):
    """Replace every single-valued feature in *batch* (in place) with its
    hash bucket id in [0, hparams.single_hash_num) and return *batch*."""
    for row in batch:
        for idx, value in enumerate(row):
            key = 'key_' + str(idx) + ' value_' + str(value)
            row[idx] = abs(hash(key)) % hparams.single_hash_num
    return batch
def hash_multi_batch(batch, hparams):
    """Hash multi-valued (space-separated) features into bucket ids and pack
    them into dense arrays.

    Mutates *batch* in place, replacing each field with its list of bucket
    ids (empty fields get a single '<pad>' bucket).  Returns
    ``(values, weights)`` float arrays of shape
    (batch, len(hparams.multi_features), min(hparams.max_length, longest));
    weights are 1 where a real token was written.
    """
    longest = 0
    for row in batch:
        for col in range(len(row)):
            tokens = str(row[col]).split()
            row[col] = [abs(hash('key_' + str(col) + ' value_' + str(tok))) % hparams.multi_hash_num
                        for tok in tokens]
            # Note: the pad substitution below intentionally does not count
            # toward `longest`, matching the original behavior.
            longest = max(longest, len(row[col]))
            if not row[col]:
                row[col] = [abs(hash('key_' + str(col) + ' value_' + str('<pad>'))) % hparams.multi_hash_num]
    depth = min(hparams.max_length, longest)
    shape = (len(batch), len(hparams.multi_features), depth)
    values = np.zeros(shape)
    weights = np.zeros(shape)
    for i, row in enumerate(batch):
        for j, buckets in enumerate(row):
            for k in range(min(hparams.max_length, len(buckets))):
                values[i, j, k] = buckets[k]
                weights[i, j, k] = 1
    return values, weights
def print_time(s, start_time):
    """Print *s* plus the elapsed seconds since *start_time* and the current
    wall-clock time, then return a fresh timestamp."""
    elapsed = time.time() - start_time
    print("%s, time %ds, %s." % (s, elapsed, time.ctime()))
    sys.stdout.flush()
    return time.time()
def print_out(s, f=None, new_line=True):
    """Print *s* to stdout (flushed), optionally also writing it UTF-8
    encoded to the binary file handle *f*."""
    if isinstance(s, bytes):
        s = s.decode("utf-8")
    if f:
        f.write(s.encode("utf-8"))
        if new_line:
            f.write(b"\n")
    # stdout — s is guaranteed to be str here; the original's
    # encode/decode round-trip was a no-op, so write it directly.
    sys.stdout.write(s)
    if new_line:
        sys.stdout.write("\n")
    sys.stdout.flush()
def print_step_info(prefix, epoch, global_step, info):
    """Log a one-line training-step summary via print_out."""
    message = "%sepoch %d step %d lr %g loss %.6f gN %.2f, %s" % (
        prefix, epoch, global_step, info["learning_rate"],
        info["train_ppl"], info["avg_grad_norm"], time.ctime())
    print_out(message)
def print_hparams(hparams, skip_patterns=None, header=None):
    """Print hparams sorted by key, skipping any key that contains one of
    *skip_patterns* (when given)."""
    if header:
        print_out("%s" % header)
    values = hparams.values()
    for key in sorted(values.keys()):
        skip = skip_patterns and any(pattern in key for pattern in skip_patterns)
        if not skip:
            print_out("  %s=%s" % (key, str(values[key])))
def normalize(inputs, epsilon=1e-8):
    '''
    Applies layer normalization
    Args:
      inputs: A tensor with 2 or more dimensions
      epsilon: A floating number to prevent Zero Division
    Returns:
      A tensor with the same shape and data dtype
    '''
    inputs_shape = inputs.get_shape()
    params_shape = inputs_shape[-1:]
    # Statistics over the last axis only (per-feature normalization).
    # keep_dims is the TF1 spelling (TF2 renamed it keepdims).
    mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
    # Learned per-feature shift (beta) and scale (gamma).
    beta = tf.Variable(tf.zeros(params_shape))
    gamma = tf.Variable(tf.ones(params_shape))
    normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
    outputs = gamma * normalized + beta
    return outputs
"time.ctime",
"tensorflow.ones",
"tensorflow.nn.moments",
"sys.stdout.flush",
"time.time",
"tensorflow.zeros",
"sys.stdout.write"
] | [((2133, 2151), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2149, 2151), False, 'import sys\n'), ((2161, 2172), 'time.time', 'time.time', ([], {}), '()\n', (2170, 2172), False, 'import time\n'), ((2611, 2629), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2627, 2629), False, 'import sys\n'), ((3620, 3663), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', '[-1]'], {'keep_dims': '(True)'}), '(inputs, [-1], keep_dims=True)\n', (3633, 3663), True, 'import tensorflow as tf\n'), ((2586, 2608), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2602, 2608), False, 'import sys\n'), ((3687, 3709), 'tensorflow.zeros', 'tf.zeros', (['params_shape'], {}), '(params_shape)\n', (3695, 3709), True, 'import tensorflow as tf\n'), ((3735, 3756), 'tensorflow.ones', 'tf.ones', (['params_shape'], {}), '(params_shape)\n', (3742, 3756), True, 'import tensorflow as tf\n'), ((2116, 2128), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2126, 2128), False, 'import time\n'), ((2856, 2868), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2866, 2868), False, 'import time\n'), ((2089, 2100), 'time.time', 'time.time', ([], {}), '()\n', (2098, 2100), False, 'import time\n')] |
from inqry.system_specs import win_physical_disk
UNIQUE_ID_OUTPUT = """
UniqueId
--------
{256a2559-ce63-5434-1bee-3ff629daa3a7}
{4069d186-f178-856e-cff3-ba250c28446d}
{4da19f06-2e28-2722-a0fb-33c02696abcd}
50014EE20D887D66
eui.0025384161B6798A
5000C5007A75E216
500A07510F1A545C
ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705
IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2
"""
def test_creating_list_of_unique_disk_ids():
    """The parser should return exactly the UniqueId column values."""
    expected_physical_disks = {
        '{256a2559-ce63-5434-1bee-3ff629daa3a7}',
        '{4069d186-f178-856e-cff3-ba250c28446d}',
        '{4da19f06-2e28-2722-a0fb-33c02696abcd}',
        '50014EE20D887D66',
        'eui.0025384161B6798A',
        '5000C5007A75E216',
        '500A07510F1A545C',
        'ATA LITEONIT LMT-256M6M mSATA 256GB TW0XXM305508532M0705',
        "IDE\Diskpacker-virtualbox-iso-1421140659-disk1__F.R7BNPC\5&1944dbef&0&0.0.0:vagrant-2012-r2",
    }
    actual = set(win_physical_disk.get_physical_disk_identifiers(UNIQUE_ID_OUTPUT))
    assert actual == expected_physical_disks
| [
"inqry.system_specs.win_physical_disk.get_physical_disk_identifiers"
] | [((1190, 1255), 'inqry.system_specs.win_physical_disk.get_physical_disk_identifiers', 'win_physical_disk.get_physical_disk_identifiers', (['UNIQUE_ID_OUTPUT'], {}), '(UNIQUE_ID_OUTPUT)\n', (1237, 1255), False, 'from inqry.system_specs import win_physical_disk\n')] |
import time # NOQA
from app import db
class Link(db.Model):
    """A link entry (title/url/description) stored in the database."""
    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    title = db.Column(db.String)  # display text
    url = db.Column(db.String)  # target address
    description = db.Column(db.String)
    type = db.Column(db.Integer)  # category discriminator (semantics defined by callers)
    enabled = db.Column(db.Boolean)  # soft on/off switch
    createtime = db.Column(db.DateTime)
    def __init__(self, title, url, description, type, enabled):
        self.title = title
        self.url = url
        self.description = description
        self.type = type
        self.enabled = enabled
        # NOTE(review): time.time() yields a float epoch value, but the
        # column type is db.DateTime, which normally expects a datetime
        # object — verify that inserts actually store what is intended.
        self.createtime = time.time()
| [
"app.db.Column",
"time.time"
] | [((72, 111), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (81, 111), False, 'from app import db\n'), ((124, 144), 'app.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (133, 144), False, 'from app import db\n'), ((155, 175), 'app.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (164, 175), False, 'from app import db\n'), ((194, 214), 'app.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (203, 214), False, 'from app import db\n'), ((226, 247), 'app.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (235, 247), False, 'from app import db\n'), ((262, 283), 'app.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (271, 283), False, 'from app import db\n'), ((301, 323), 'app.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (310, 323), False, 'from app import db\n'), ((560, 571), 'time.time', 'time.time', ([], {}), '()\n', (569, 571), False, 'import time\n')] |
from django.test import TestCase
from django.urls import reverse_lazy
from ..models import PHOTO_MODEL, UploadedPhotoModel, IMAGE_SIZES
from .model_factories import get_image_file, get_zip_file
import time
from uuid import uuid4
class UploadPhotoApiViewTest(TestCase):
    """End-to-end tests for the image_upload endpoint with single images
    and zip archives of images."""
    def check_photo_ok_and_delete(self, photo):
        # The original file and every configured resized variant must exist
        # in storage; the photo is removed afterwards to clean up.
        self.assertTrue(photo.image.storage.exists(photo.image.name))
        for size in IMAGE_SIZES.values():
            self.assertTrue(photo.image.storage.exists(photo.get_filepath_for_size(size)))
        photo.delete()
    def test_upload_photo(self):
        """Uploading one image creates exactly one photo + upload record."""
        self.client.post(reverse_lazy('image_upload'), {'file': get_image_file(), 'upload_id': str(uuid4())})
        time.sleep(1) # Different process implementations might need a little bit longer
        self.assertEqual(1, PHOTO_MODEL.objects.count())
        self.assertEqual(1, UploadedPhotoModel.objects.count())
        self.assertEqual(PHOTO_MODEL.objects.first(), UploadedPhotoModel.objects.first().photo)
        photo = PHOTO_MODEL.objects.first()
        self.check_photo_ok_and_delete(photo)
        UploadedPhotoModel.objects.all().delete()
    def test_upload_zip(self):
        """Uploading a zip of two images creates two photos/upload records."""
        zip_file = get_zip_file(images=[get_image_file(name='img1.png'), get_image_file(name='img2.png')])
        self.client.post(reverse_lazy('image_upload'), {'file': zip_file, 'upload_id': str(uuid4())})
        time.sleep(1) # Different process implementations might need a little bit longer
        self.assertEqual(2, PHOTO_MODEL.objects.count())
        self.assertEqual(2, UploadedPhotoModel.objects.count())
        for photo in PHOTO_MODEL.objects.all():
            self.check_photo_ok_and_delete(photo)
        UploadedPhotoModel.objects.all().delete()
| [
"uuid.uuid4",
"time.sleep",
"django.urls.reverse_lazy"
] | [((699, 712), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (709, 712), False, 'import time\n'), ((1391, 1404), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1401, 1404), False, 'import time\n'), ((605, 633), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""image_upload"""'], {}), "('image_upload')\n", (617, 633), False, 'from django.urls import reverse_lazy\n'), ((1305, 1333), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""image_upload"""'], {}), "('image_upload')\n", (1317, 1333), False, 'from django.urls import reverse_lazy\n'), ((679, 686), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (684, 686), False, 'from uuid import uuid4\n'), ((1371, 1378), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1376, 1378), False, 'from uuid import uuid4\n')] |
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed
import csv
import dearpygui.dearpygui as dpg
from os.path import isfile, isdir, join
import pyperclip
import subprocess
import sys
from tempfile import gettempdir
from traceback import print_exc
import core
import extruct
import utilio
from pytube import YouTube, Playlist
import ffmpeg
if sys.platform == 'darwin':
from tkinter import Tk
from tkinter.filedialog import askdirectory, askopenfilename
save_dir_dialog_mac = False
load_csv_dialog_mac = False
tkinter_root = Tk()
tkinter_root.withdraw()
dpg.create_context()
APPNAME = 'dataset-dl'
TEMPDIR = join(gettempdir(), APPNAME)
MAXWOREKR = 20
TAGS = []
def check_save_dir():
    """Refresh the save-directory validity flag shown in the GUI."""
    save_dir = dpg.get_value('save_dir_path')
    dpg.set_value('save_dir_check', isdir(save_dir))
if sys.platform == 'darwin':
    # macOS variants: the callbacks only raise module-level flags —
    # presumably the main loop polls them and opens the tkinter dialog
    # itself (TODO confirm against the render loop elsewhere in the file).
    def save_dir_dialog():
        global save_dir_dialog_mac
        save_dir_dialog_mac = True
    def load_csv_dialog():
        global load_csv_dialog_mac
        load_csv_dialog_mac = True
else:
    # Other platforms open the native dialogs directly and push the chosen
    # path into the GUI, re-running validation afterwards.
    def save_dir_dialog():
        save_dir = utilio.ask_directry()
        if save_dir != '':
            dpg.set_value('save_dir_path', save_dir)
            check_save_dir()
    def load_csv_dialog():
        load_csv = utilio.ask_open_file([('', '.csv')])
        if load_csv != '':
            dpg.set_value('csv_path', load_csv)
            check_csv_path()
def check_csv_path():
    """Flag whether the CSV path field points at an existing .csv file."""
    path = dpg.get_value('csv_path')
    is_valid = isfile(path) and path.lower().endswith('.csv')
    dpg.set_value('csv_path_check', is_valid)
def check_url():
    """Flag whether the URL field holds a recognizable video/playlist URL."""
    url_str = dpg.get_value('url')
    has_match = extruct.get_video_id(url_str) != ''
    if not has_match:
        has_match = extruct.get_playlist_id(url_str) != ''
    dpg.set_value('url_check', has_match)
def paste_url():
    """Paste the clipboard contents into the URL field and re-validate."""
    clipboard_text = pyperclip.paste()
    dpg.set_value('url', clipboard_text)
    check_url()
def lock_ui():
    """Disable every registered widget while a download batch is running."""
    for widget_tag in TAGS:
        dpg.configure_item(widget_tag, enabled=False)
def unlock_ui():
    """Re-enable every registered widget once a download batch finishes."""
    for widget_tag in TAGS:
        dpg.configure_item(widget_tag, enabled=True)
def run_url():
    """Download the video or playlist typed into the URL field.

    Locks the UI, validates the inputs, fans one ``download`` task per video
    out to a thread pool, and reports overall completion in the 'entire'
    progress bar. The UI is unlocked again on exit.
    """
    lock_ui()
    parent_tag = 'url_tab'
    if not (dpg.get_value('save_dir_check') and dpg.get_value('url_check')):
        unlock_ui()
        return
    generate_entire_progress(parent_tag)
    input_url = dpg.get_value('url')
    if extruct.get_playlist_id(input_url) != '':
        video_urls = Playlist(input_url).video_urls
    else:
        video_urls = ['https://www.youtube.com/watch?v=' + extruct.get_video_id(input_url)]
    with ThreadPoolExecutor(max_workers=MAXWOREKR) as executor:
        tasks = [executor.submit(
            download,
            video_url,
            core.NameMode.TITLE,
            0,
            0,
            parent_tag
        ) for video_url in video_urls]
        complete_count = 0
        max_task_count = len(tasks)
        for task in as_completed(tasks):
            try:
                # Retrieve the result so exceptions raised inside download()
                # (e.g. YouTube() failing) are surfaced instead of being
                # silently dropped by the Future.
                task.result()
            except Exception:
                print_exc()
            complete_count += 1
            dpg.set_value('entire_bar', complete_count / max_task_count)
            dpg.set_value('entire_text', f'Completed: {complete_count:>7} / {max_task_count}')
    dpg.delete_item('entire_group')
    unlock_ui()
def run_csv():
    """Download every entry of the selected CSV file.

    Each row is ``video_id,start_seconds,end_seconds``; rows whose first cell
    starts with '#' are treated as comments. One ``download`` task per row is
    submitted to a thread pool and overall completion is shown in the
    'entire' progress bar. The UI is unlocked again on exit.
    """
    lock_ui()
    parent_tag = 'csv_tab'
    if not (dpg.get_value('save_dir_check') and dpg.get_value('csv_path_check')):
        unlock_ui()
        return
    generate_entire_progress(parent_tag)
    with open(dpg.get_value('csv_path'), 'r', encoding='utf-8') as f,\
            ThreadPoolExecutor(max_workers=MAXWOREKR) as executor:
        reader = csv.reader(f)
        tasks = []
        for row in reader:
            # Skip blank lines (which parse as []) and '#' comment rows.
            if not row or row[0].startswith('#'):
                continue
            video_url = 'https://www.youtube.com/watch?v=' + row[0]
            tasks.append(executor.submit(
                download,
                video_url,
                core.NameMode.ID,
                int(float(row[1])),
                int(float(row[2])),
                parent_tag
            ))
        complete_count = 0
        max_task_count = len(tasks)
        for task in as_completed(tasks):
            try:
                # Retrieve the result so exceptions raised inside download()
                # are surfaced instead of being silently dropped.
                task.result()
            except Exception:
                print_exc()
            complete_count += 1
            dpg.set_value('entire_bar', complete_count / max_task_count)
            dpg.set_value('entire_text', f'Completed: {complete_count:>7} / {max_task_count}')
    dpg.delete_item('entire_group')
    unlock_ui()
def generate_entire_progress(parent_tag: str):
    """Create the overall progress-bar row under *parent_tag*."""
    dpg.add_group(tag='entire_group', parent=parent_tag, horizontal=True)
    dpg.add_progress_bar(tag='entire_bar', parent='entire_group')
    dpg.add_text('Downloading...', tag='entire_text', parent='entire_group')
def set_progress(stream, chunk, bytes_remaining):
    """pytube on_progress callback: update the matching per-stream bar."""
    # The progress bar's tag is the hash of title + filesize, assigned in
    # download_stream for the same stream.
    bar_tag = extruct.file_hash(f'{stream.title}_{stream.filesize}')
    fraction_done = 1 - bytes_remaining / stream.filesize
    dpg.set_value(bar_tag, fraction_done)
def download(video_url: str, naming: core.NameMode, start_time: int, end_time: int, parent_tag: str):
    """Download one YouTube video according to the selected quality mode.

    Depending on the quality mode this either downloads an audio-only
    stream (optionally transcoding via auodio_save) or downloads video and
    audio streams in parallel and muxes them with marge_save. Progress
    widgets are created under *parent_tag*; *naming* selects whether the
    output file is named after the video id or its title; *start_time* /
    *end_time* optionally trim the output.
    """
    yt = YouTube(video_url, on_progress_callback=set_progress)
    quality_mode = core.get_qualitymode(dpg.get_value('quality_radio'))
    stream_video = core.get_video_stream(yt, quality_mode)
    stream_audio = core.get_audio_stream(yt, quality_mode)
    if not quality_mode.is_audio:
        return
    stream_audio_id = extruct.file_hash(f'{stream_audio.title}_{stream_audio.filesize}')
    if not quality_mode.is_video:
        # Audio-only path.
        request_type = core.get_request_type(quality_mode.extension_audio)
        # OPUS/MP3 need a transcode step, so download into TEMPDIR first;
        # other audio formats go straight to the save directory.
        save_path = TEMPDIR if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3 else dpg.get_value('save_dir_path')
        file_name = None if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3 else extruct.file_name(stream_audio.title)
        with ThreadPoolExecutor(max_workers=MAXWOREKR*2) as executor:
            tasks = []
            tasks.append(executor.submit(
                download_stream,
                stream_audio,
                save_path,
                request_type,
                parent_tag,
                filename = file_name
            ))
            for task in as_completed(tasks):
                pass
        dpg.delete_item(f'{stream_audio_id}_group')
        if quality_mode != core.QualityMode.OPUS and quality_mode != core.QualityMode.MP3:
            return
        if naming == core.NameMode.ID:
            audio_id = extruct.get_video_id(video_url)
            save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(audio_id))}.{quality_mode.extension_audio}"
        else:
            save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_audio.title))}.{quality_mode.extension_audio}"
        audio_temp_path = f'{join(TEMPDIR, stream_audio_id)}'
        auodio_save(quality_mode, save_path, audio_temp_path, start_time, end_time)
        # Audio-only modes are finished here. Without this return, control
        # fell through into the video+audio merge code below, which assumes
        # a usable video stream.
        return
    # Video path: fetch video and audio streams in parallel, then mux.
    stream_video_id = extruct.file_hash(f'{stream_video.title}_{stream_video.filesize}')
    with ThreadPoolExecutor(max_workers=MAXWOREKR*2) as executor:
        tasks = []
        tasks.append(executor.submit(
            download_stream,
            stream_video,
            TEMPDIR,
            quality_mode.extension_video,
            parent_tag
        ))
        tasks.append(executor.submit(
            download_stream,
            stream_audio,
            TEMPDIR,
            quality_mode.extension_audio,
            parent_tag
        ))
        for task in as_completed(tasks):
            pass
    dpg.delete_item(f'{stream_video_id}_group')
    dpg.delete_item(f'{stream_audio_id}_group')
    if naming == core.NameMode.ID:
        stream_id = extruct.get_video_id(video_url)
        save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_id))}.{quality_mode.extension_video}"
    else:
        save_path = f"{join(dpg.get_value('save_dir_path'), extruct.file_name(stream_video.title))}.{quality_mode.extension_video}"
    # Temp files carry the stream hash as their base name (see download_stream).
    video_temp_path = f'{join(TEMPDIR, stream_video_id)}.{quality_mode.extension_video}'
    audio_temp_path = f'{join(TEMPDIR, stream_audio_id)}.{quality_mode.extension_audio}'
    marge_save(save_path, video_temp_path, audio_temp_path, start_time, end_time)
def auodio_save(quality_mode: core.QualityMode, save_path: str, audio_temp_path: str, start_time: int, end_time: int):
    """Finalize an audio-only download into *save_path* via ffmpeg.

    For OPUS/MP3 the raw downloaded container is first transcoded/remuxed
    into the target audio format; then the audio is copied (optionally
    trimmed to start_time..end_time) to *save_path*. Temp files are deleted
    afterwards. Errors are logged with a traceback, never raised to the
    caller. (Function name kept as-is — callers use it.)
    """
    try:
        if quality_mode == core.QualityMode.OPUS or quality_mode == core.QualityMode.MP3:
            # The stream was downloaded under its request container type;
            # convert it into the target audio extension first.
            opus_temp_path = f'{audio_temp_path}.{core.get_request_type(quality_mode.extension_audio)}'
            audio_temp_path = f'{audio_temp_path}.{quality_mode.extension_audio}'
            opus_audio = ffmpeg.input(opus_temp_path)
            if quality_mode == core.QualityMode.OPUS:
                # OPUS: the codec already matches, a stream copy suffices.
                opus_audio_stream = ffmpeg.output(opus_audio, audio_temp_path, acodec='copy').global_args('-loglevel', 'quiet')
            else:
                # MP3: re-encode into the target codec.
                opus_audio_stream = ffmpeg.output(opus_audio, audio_temp_path).global_args('-loglevel', 'quiet')
            startupinfo = None
            if sys.platform == 'win32':
                # Suppress the console window ffmpeg would otherwise open.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            process = subprocess.Popen(ffmpeg.compile(opus_audio_stream, overwrite_output=True), startupinfo=startupinfo)
            out, err = process.communicate()
            retcode = process.poll()
            if retcode:
                raise ffmpeg.Error('ffmpeg', out, err)
            utilio.delete_file(opus_temp_path)
        else:
            audio_temp_path = f'{audio_temp_path}.{quality_mode.extension_audio}'
        if start_time < end_time and not (start_time == 0 == end_time):
            audio = ffmpeg.input(audio_temp_path, ss=start_time, to=end_time)
        else:
            audio = ffmpeg.input(audio_temp_path)
        audio_stream = ffmpeg.output(audio, save_path, acodec='copy').global_args('-loglevel', 'quiet')
        startupinfo = None
        if sys.platform == 'win32':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        process = subprocess.Popen(ffmpeg.compile(audio_stream, overwrite_output=True), startupinfo=startupinfo)
        out, err = process.communicate()
        retcode = process.poll()
        if retcode:
            raise ffmpeg.Error('ffmpeg', out, err)
        utilio.delete_file(audio_temp_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; download errors are logged, not raised.
        print_exc()
def marge_save(save_path: str, video_temp_path: str, audio_temp_path: str,
               start_time: int, end_time: int):
    """Mux the downloaded video and audio temp files into *save_path*.

    When a valid trim window (start_time < end_time, not both zero) is given
    the output is cut to it; both streams are copied without re-encoding.
    Temp files are deleted afterwards. Errors are logged with a traceback,
    never raised to the caller. (Function name kept as-is — callers use it.)
    """
    try:
        if start_time < end_time and not (start_time == 0 == end_time):
            video = ffmpeg.input(video_temp_path, ss=start_time, to=end_time)
            audio = ffmpeg.input(audio_temp_path, ss=start_time, to=end_time)
        else:
            video = ffmpeg.input(video_temp_path)
            audio = ffmpeg.input(audio_temp_path)
        marge_stream = ffmpeg.output(video, audio, save_path, vcodec='copy', acodec='copy').global_args('-loglevel', 'quiet')
        startupinfo = None
        if sys.platform == 'win32':
            # Suppress the console window ffmpeg would otherwise open.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        process = subprocess.Popen(ffmpeg.compile(marge_stream, overwrite_output=True), startupinfo=startupinfo)
        out, err = process.communicate()
        retcode = process.poll()
        if retcode:
            raise ffmpeg.Error('ffmpeg', out, err)
        utilio.delete_file(video_temp_path)
        utilio.delete_file(audio_temp_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; download errors are logged, not raised.
        print_exc()
def download_stream(stream, output_path, extension, parent_tag, filename=None):
    """Download a single pytube stream, showing a per-stream progress row.

    The progress bar's tag is the hash of title + filesize so that the
    set_progress callback can find it. When *filename* is None the file is
    stored under that hash; otherwise the caller-supplied base name is used
    with *extension* appended. Download errors are logged, not raised.
    """
    stream_id = extruct.file_hash(f'{stream.title}_{stream.filesize}')
    if filename is None:
        # No explicit name: store under the stream hash (temp-file naming
        # that download() relies on when locating the file for muxing).
        filename = f'{stream_id}.{extension}'
    else:
        # Keep the caller-supplied base name; previously the argument was
        # discarded and every named file was saved as '(unknown).<ext>'.
        filename = f'{filename}.{extension}'
    dpg.add_group(tag=f'{stream_id}_group', parent=parent_tag, horizontal=True)
    dpg.add_progress_bar(tag=stream_id, parent=f'{stream_id}_group')
    dpg.add_text(stream.title, tag=f'{stream_id}_text', parent=f'{stream_id}_group')
    try:
        stream.download(output_path=output_path, filename=filename)
    except Exception:
        print_exc()
# --- Build the UI ----------------------------------------------------------
with dpg.font_registry():
    # NotoSansJP at 22px covers both the default (Latin) and Japanese ranges.
    with dpg.font(extruct.get_fullpath(join('resources', 'fonts', 'NotoSansJP-Regular.otf')), 22) as default_font:
        dpg.add_font_range_hint(dpg.mvFontRangeHint_Default)
        dpg.add_font_range_hint(dpg.mvFontRangeHint_Japanese)
# Font license text displayed in the License menu.
with open(extruct.get_fullpath(join('resources', 'fonts', 'OFL.txt')), 'r', encoding='utf-8') as f:
    font_license = f.read()
with dpg.window(tag='Primary Window'):
    dpg.bind_font(default_font)
    with dpg.menu_bar():
        with dpg.menu(label='License'):
            dpg.add_text('NotoSansJP-Regular')
            dpg.add_input_text(default_value=font_license, multiline=True, readonly=True)
    # Save-directory row: read-only validity checkbox + path + picker button.
    dpg.add_text('Save Directory')
    with dpg.group(horizontal=True):
        dpg.add_checkbox(default_value=False, enabled=False, tag='save_dir_check')
        dpg.add_input_text(callback=check_save_dir, tag='save_dir_path')
        dpg.add_button(label='Select', tag='save_dir_button', callback=save_dir_dialog)
        # Register widgets so lock_ui/unlock_ui can toggle them during jobs.
        TAGS.append('save_dir_path')
        TAGS.append('save_dir_button')
    dpg.add_spacer(height=10)
    dpg.add_text('Quality')
    dpg.add_radio_button(
        [quality_mode.text for quality_mode in core.QualityMode],
        tag = 'quality_radio',
        default_value = core.QualityMode.HIGH.text,
        horizontal = True
    )
    TAGS.append('quality_radio')
    dpg.add_spacer(height=10)
    dpg.add_text('Mode')
    with dpg.tab_bar():
        # Tab 1: download a single video or a whole playlist by URL.
        with dpg.tab(label='Video OR Playlist URL', tag='url_tab'):
            with dpg.group(horizontal=True):
                dpg.add_checkbox(default_value=False, enabled=False, tag='url_check')
                dpg.add_input_text(callback=check_url, tag='url')
                dpg.add_button(label='Paste', tag='url_paste_button', callback=paste_url)
                dpg.add_button(label='Run', tag='url_run_button', callback=run_url)
                TAGS.append('url')
                TAGS.append('url_paste_button')
                TAGS.append('url_run_button')
        # Tab 2: batch download driven by a CSV of (id, start, end) rows.
        with dpg.tab(label='CSV File', tag='csv_tab'):
            with dpg.group(horizontal=True):
                dpg.add_checkbox(default_value=False, enabled=False, tag='csv_path_check')
                dpg.add_input_text(callback=check_csv_path, tag='csv_path')
                dpg.add_button(label='Select', tag='csv_path_button', callback=load_csv_dialog)
                dpg.add_button(label='Run', tag='csv_run_button', callback=run_csv)
                TAGS.append('csv_path')
                TAGS.append('csv_path_button')
                TAGS.append('csv_run_button')
utilio.create_workdir(TEMPDIR)
# The .ico viewport icon is Windows-only.
icon = extruct.get_fullpath(join('resources', 'dataset-dl.ico')) if sys.platform == 'win32' else ''
dpg.create_viewport(title=APPNAME, width=1000, height=500, large_icon=icon)
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.set_primary_window('Primary Window', True)
if not sys.platform == 'darwin':
    dpg.start_dearpygui()
else:
    # macOS: drive the render loop manually so tkinter file dialogs can be
    # opened on the main thread when a callback sets one of the flag globals.
    while dpg.is_dearpygui_running():
        dpg.render_dearpygui_frame()
        if save_dir_dialog_mac:
            save_dir = askdirectory()
            if save_dir != '':
                dpg.set_value('save_dir_path', save_dir)
                check_save_dir()
            save_dir_dialog_mac = False
        elif load_csv_dialog_mac:
            load_csv = askopenfilename(filetypes=[('', '.csv')])
            if load_csv != '':
                dpg.set_value('csv_path', load_csv)
                check_csv_path()
            load_csv_dialog_mac = False
    tkinter_root.destroy()
dpg.destroy_context()
utilio.delete_workdir(TEMPDIR) | [
"dearpygui.dearpygui.add_input_text",
"dearpygui.dearpygui.create_viewport",
"utilio.ask_directry",
"tkinter.filedialog.askdirectory",
"ffmpeg.output",
"subprocess.STARTUPINFO",
"dearpygui.dearpygui.window",
"dearpygui.dearpygui.add_radio_button",
"dearpygui.dearpygui.set_primary_window",
"pypercl... | [((613, 633), 'dearpygui.dearpygui.create_context', 'dpg.create_context', ([], {}), '()\n', (631, 633), True, 'import dearpygui.dearpygui as dpg\n'), ((14990, 15020), 'utilio.create_workdir', 'utilio.create_workdir', (['TEMPDIR'], {}), '(TEMPDIR)\n', (15011, 15020), False, 'import utilio\n'), ((15122, 15197), 'dearpygui.dearpygui.create_viewport', 'dpg.create_viewport', ([], {'title': 'APPNAME', 'width': '(1000)', 'height': '(500)', 'large_icon': 'icon'}), '(title=APPNAME, width=1000, height=500, large_icon=icon)\n', (15141, 15197), True, 'import dearpygui.dearpygui as dpg\n'), ((15198, 15219), 'dearpygui.dearpygui.setup_dearpygui', 'dpg.setup_dearpygui', ([], {}), '()\n', (15217, 15219), True, 'import dearpygui.dearpygui as dpg\n'), ((15220, 15239), 'dearpygui.dearpygui.show_viewport', 'dpg.show_viewport', ([], {}), '()\n', (15237, 15239), True, 'import dearpygui.dearpygui as dpg\n'), ((15240, 15286), 'dearpygui.dearpygui.set_primary_window', 'dpg.set_primary_window', (['"""Primary Window"""', '(True)'], {}), "('Primary Window', True)\n", (15262, 15286), True, 'import dearpygui.dearpygui as dpg\n'), ((15956, 15977), 'dearpygui.dearpygui.destroy_context', 'dpg.destroy_context', ([], {}), '()\n', (15975, 15977), True, 'import dearpygui.dearpygui as dpg\n'), ((15979, 16009), 'utilio.delete_workdir', 'utilio.delete_workdir', (['TEMPDIR'], {}), '(TEMPDIR)\n', (16000, 16009), False, 'import utilio\n'), ((578, 582), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (580, 582), False, 'from tkinter import Tk\n'), ((673, 685), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (683, 685), False, 'from tempfile import gettempdir\n'), ((1464, 1489), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""csv_path"""'], {}), "('csv_path')\n", (1477, 1489), True, 'import dearpygui.dearpygui as dpg\n'), ((1615, 1635), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""url"""'], {}), "('url')\n", (1628, 1635), True, 'import dearpygui.dearpygui as dpg\n'), ((1731, 
1765), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""url_check"""', 'is_url'], {}), "('url_check', is_url)\n", (1744, 1765), True, 'import dearpygui.dearpygui as dpg\n'), ((2247, 2267), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""url"""'], {}), "('url')\n", (2260, 2267), True, 'import dearpygui.dearpygui as dpg\n'), ((3108, 3139), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['"""entire_group"""'], {}), "('entire_group')\n", (3123, 3139), True, 'import dearpygui.dearpygui as dpg\n'), ((4317, 4348), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['"""entire_group"""'], {}), "('entire_group')\n", (4332, 4348), True, 'import dearpygui.dearpygui as dpg\n'), ((4418, 4487), 'dearpygui.dearpygui.add_group', 'dpg.add_group', ([], {'tag': '"""entire_group"""', 'parent': 'parent_tag', 'horizontal': '(True)'}), "(tag='entire_group', parent=parent_tag, horizontal=True)\n", (4431, 4487), True, 'import dearpygui.dearpygui as dpg\n'), ((4492, 4553), 'dearpygui.dearpygui.add_progress_bar', 'dpg.add_progress_bar', ([], {'tag': '"""entire_bar"""', 'parent': '"""entire_group"""'}), "(tag='entire_bar', parent='entire_group')\n", (4512, 4553), True, 'import dearpygui.dearpygui as dpg\n'), ((4558, 4632), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Downloading..."""'], {'tag': 'f"""entire_text"""', 'parent': 'f"""entire_group"""'}), "('Downloading...', tag=f'entire_text', parent=f'entire_group')\n", (4570, 4632), True, 'import dearpygui.dearpygui as dpg\n'), ((4701, 4755), 'extruct.file_hash', 'extruct.file_hash', (['f"""{stream.title}_{stream.filesize}"""'], {}), "(f'{stream.title}_{stream.filesize}')\n", (4718, 4755), False, 'import extruct\n'), ((4760, 4823), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['stream_id', '(1 - bytes_remaining / stream.filesize)'], {}), '(stream_id, 1 - bytes_remaining / stream.filesize)\n', (4773, 4823), True, 'import dearpygui.dearpygui as dpg\n'), ((4937, 4990), 'pytube.YouTube', 'YouTube', 
(['video_url'], {'on_progress_callback': 'set_progress'}), '(video_url, on_progress_callback=set_progress)\n', (4944, 4990), False, 'from pytube import YouTube, Playlist\n'), ((5087, 5126), 'core.get_video_stream', 'core.get_video_stream', (['yt', 'quality_mode'], {}), '(yt, quality_mode)\n', (5108, 5126), False, 'import core\n'), ((5146, 5185), 'core.get_audio_stream', 'core.get_audio_stream', (['yt', 'quality_mode'], {}), '(yt, quality_mode)\n', (5167, 5185), False, 'import core\n'), ((5262, 5328), 'extruct.file_hash', 'extruct.file_hash', (['f"""{stream_audio.title}_{stream_audio.filesize}"""'], {}), "(f'{stream_audio.title}_{stream_audio.filesize}')\n", (5279, 5328), False, 'import extruct\n'), ((6915, 6981), 'extruct.file_hash', 'extruct.file_hash', (['f"""{stream_video.title}_{stream_video.filesize}"""'], {}), "(f'{stream_video.title}_{stream_video.filesize}')\n", (6932, 6981), False, 'import extruct\n'), ((7567, 7610), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['f"""{stream_video_id}_group"""'], {}), "(f'{stream_video_id}_group')\n", (7582, 7610), True, 'import dearpygui.dearpygui as dpg\n'), ((7615, 7658), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['f"""{stream_audio_id}_group"""'], {}), "(f'{stream_audio_id}_group')\n", (7630, 7658), True, 'import dearpygui.dearpygui as dpg\n'), ((11844, 11898), 'extruct.file_hash', 'extruct.file_hash', (['f"""{stream.title}_{stream.filesize}"""'], {}), "(f'{stream.title}_{stream.filesize}')\n", (11861, 11898), False, 'import extruct\n'), ((12029, 12104), 'dearpygui.dearpygui.add_group', 'dpg.add_group', ([], {'tag': 'f"""{stream_id}_group"""', 'parent': 'parent_tag', 'horizontal': '(True)'}), "(tag=f'{stream_id}_group', parent=parent_tag, horizontal=True)\n", (12042, 12104), True, 'import dearpygui.dearpygui as dpg\n'), ((12109, 12173), 'dearpygui.dearpygui.add_progress_bar', 'dpg.add_progress_bar', ([], {'tag': 'stream_id', 'parent': 'f"""{stream_id}_group"""'}), "(tag=stream_id, 
parent=f'{stream_id}_group')\n", (12129, 12173), True, 'import dearpygui.dearpygui as dpg\n'), ((12178, 12263), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['stream.title'], {'tag': 'f"""{stream_id}_text"""', 'parent': 'f"""{stream_id}_group"""'}), "(stream.title, tag=f'{stream_id}_text', parent=f'{stream_id}_group'\n )\n", (12190, 12263), True, 'import dearpygui.dearpygui as dpg\n'), ((12376, 12395), 'dearpygui.dearpygui.font_registry', 'dpg.font_registry', ([], {}), '()\n', (12393, 12395), True, 'import dearpygui.dearpygui as dpg\n'), ((12770, 12802), 'dearpygui.dearpygui.window', 'dpg.window', ([], {'tag': '"""Primary Window"""'}), "(tag='Primary Window')\n", (12780, 12802), True, 'import dearpygui.dearpygui as dpg\n'), ((12808, 12835), 'dearpygui.dearpygui.bind_font', 'dpg.bind_font', (['default_font'], {}), '(default_font)\n', (12821, 12835), True, 'import dearpygui.dearpygui as dpg\n'), ((13047, 13077), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Save Directory"""'], {}), "('Save Directory')\n", (13059, 13077), True, 'import dearpygui.dearpygui as dpg\n'), ((13439, 13464), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(10)'}), '(height=10)\n', (13453, 13464), True, 'import dearpygui.dearpygui as dpg\n'), ((13474, 13497), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Quality"""'], {}), "('Quality')\n", (13486, 13497), True, 'import dearpygui.dearpygui as dpg\n'), ((13502, 13670), 'dearpygui.dearpygui.add_radio_button', 'dpg.add_radio_button', (['[quality_mode.text for quality_mode in core.QualityMode]'], {'tag': '"""quality_radio"""', 'default_value': 'core.QualityMode.HIGH.text', 'horizontal': '(True)'}), "([quality_mode.text for quality_mode in core.\n QualityMode], tag='quality_radio', default_value=core.QualityMode.HIGH.\n text, horizontal=True)\n", (13522, 13670), True, 'import dearpygui.dearpygui as dpg\n'), ((13746, 13771), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(10)'}), 
'(height=10)\n', (13760, 13771), True, 'import dearpygui.dearpygui as dpg\n'), ((13781, 13801), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Mode"""'], {}), "('Mode')\n", (13793, 13801), True, 'import dearpygui.dearpygui as dpg\n'), ((15325, 15346), 'dearpygui.dearpygui.start_dearpygui', 'dpg.start_dearpygui', ([], {}), '()\n', (15344, 15346), True, 'import dearpygui.dearpygui as dpg\n'), ((15363, 15389), 'dearpygui.dearpygui.is_dearpygui_running', 'dpg.is_dearpygui_running', ([], {}), '()\n', (15387, 15389), True, 'import dearpygui.dearpygui as dpg\n'), ((1106, 1127), 'utilio.ask_directry', 'utilio.ask_directry', ([], {}), '()\n', (1125, 1127), False, 'import utilio\n'), ((1288, 1324), 'utilio.ask_open_file', 'utilio.ask_open_file', (["[('', '.csv')]"], {}), "([('', '.csv')])\n", (1308, 1324), False, 'import utilio\n'), ((1810, 1827), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (1825, 1827), False, 'import pyperclip\n'), ((1891, 1929), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['tag'], {'enabled': '(False)'}), '(tag, enabled=False)\n', (1909, 1929), True, 'import dearpygui.dearpygui as dpg\n'), ((1977, 2014), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['tag'], {'enabled': '(True)'}), '(tag, enabled=True)\n', (1995, 2014), True, 'import dearpygui.dearpygui as dpg\n'), ((2275, 2309), 'extruct.get_playlist_id', 'extruct.get_playlist_id', (['input_url'], {}), '(input_url)\n', (2298, 2309), False, 'import extruct\n'), ((2485, 2526), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'MAXWOREKR'}), '(max_workers=MAXWOREKR)\n', (2503, 2526), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((2883, 2902), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (2895, 2902), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((3465, 3506), 
'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'MAXWOREKR'}), '(max_workers=MAXWOREKR)\n', (3483, 3506), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((3537, 3550), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3547, 3550), False, 'import csv\n'), ((4092, 4111), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (4104, 4111), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((5031, 5061), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""quality_radio"""'], {}), "('quality_radio')\n", (5044, 5061), True, 'import dearpygui.dearpygui as dpg\n'), ((5391, 5442), 'core.get_request_type', 'core.get_request_type', (['quality_mode.extension_audio'], {}), '(quality_mode.extension_audio)\n', (5412, 5442), False, 'import core\n'), ((6186, 6229), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['f"""{stream_audio_id}_group"""'], {}), "(f'{stream_audio_id}_group')\n", (6201, 6229), True, 'import dearpygui.dearpygui as dpg\n'), ((6996, 7041), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(MAXWOREKR * 2)'}), '(max_workers=MAXWOREKR * 2)\n', (7014, 7041), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((7520, 7539), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (7532, 7539), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((7719, 7750), 'extruct.get_video_id', 'extruct.get_video_id', (['video_url'], {}), '(video_url)\n', (7739, 7750), False, 'import extruct\n'), ((10498, 10533), 'utilio.delete_file', 'utilio.delete_file', (['audio_temp_path'], {}), '(audio_temp_path)\n', (10516, 10533), False, 'import utilio\n'), ((11634, 11669), 'utilio.delete_file', 'utilio.delete_file', (['video_temp_path'], {}), '(video_temp_path)\n', (11652, 11669), False, 
'import utilio\n'), ((11678, 11713), 'utilio.delete_file', 'utilio.delete_file', (['audio_temp_path'], {}), '(audio_temp_path)\n', (11696, 11713), False, 'import utilio\n'), ((12520, 12572), 'dearpygui.dearpygui.add_font_range_hint', 'dpg.add_font_range_hint', (['dpg.mvFontRangeHint_Default'], {}), '(dpg.mvFontRangeHint_Default)\n', (12543, 12572), True, 'import dearpygui.dearpygui as dpg\n'), ((12581, 12634), 'dearpygui.dearpygui.add_font_range_hint', 'dpg.add_font_range_hint', (['dpg.mvFontRangeHint_Japanese'], {}), '(dpg.mvFontRangeHint_Japanese)\n', (12604, 12634), True, 'import dearpygui.dearpygui as dpg\n'), ((12845, 12859), 'dearpygui.dearpygui.menu_bar', 'dpg.menu_bar', ([], {}), '()\n', (12857, 12859), True, 'import dearpygui.dearpygui as dpg\n'), ((13087, 13113), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'horizontal': '(True)'}), '(horizontal=True)\n', (13096, 13113), True, 'import dearpygui.dearpygui as dpg\n'), ((13123, 13197), 'dearpygui.dearpygui.add_checkbox', 'dpg.add_checkbox', ([], {'default_value': '(False)', 'enabled': '(False)', 'tag': '"""save_dir_check"""'}), "(default_value=False, enabled=False, tag='save_dir_check')\n", (13139, 13197), True, 'import dearpygui.dearpygui as dpg\n'), ((13206, 13270), 'dearpygui.dearpygui.add_input_text', 'dpg.add_input_text', ([], {'callback': 'check_save_dir', 'tag': '"""save_dir_path"""'}), "(callback=check_save_dir, tag='save_dir_path')\n", (13224, 13270), True, 'import dearpygui.dearpygui as dpg\n'), ((13279, 13358), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Select"""', 'tag': '"""save_dir_button"""', 'callback': 'save_dir_dialog'}), "(label='Select', tag='save_dir_button', callback=save_dir_dialog)\n", (13293, 13358), True, 'import dearpygui.dearpygui as dpg\n'), ((13811, 13824), 'dearpygui.dearpygui.tab_bar', 'dpg.tab_bar', ([], {}), '()\n', (13822, 13824), True, 'import dearpygui.dearpygui as dpg\n'), ((15049, 15084), 'os.path.join', 'join', (['"""resources"""', 
'"""dataset-dl.ico"""'], {}), "('resources', 'dataset-dl.ico')\n", (15053, 15084), False, 'from os.path import isfile, isdir, join\n'), ((15399, 15427), 'dearpygui.dearpygui.render_dearpygui_frame', 'dpg.render_dearpygui_frame', ([], {}), '()\n', (15425, 15427), True, 'import dearpygui.dearpygui as dpg\n'), ((788, 818), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (801, 818), True, 'import dearpygui.dearpygui as dpg\n'), ((1167, 1207), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""save_dir_path"""', 'save_dir'], {}), "('save_dir_path', save_dir)\n", (1180, 1207), True, 'import dearpygui.dearpygui as dpg\n'), ((1364, 1399), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""csv_path"""', 'load_csv'], {}), "('csv_path', load_csv)\n", (1377, 1399), True, 'import dearpygui.dearpygui as dpg\n'), ((1526, 1542), 'os.path.isfile', 'isfile', (['csv_path'], {}), '(csv_path)\n', (1532, 1542), False, 'from os.path import isfile, isdir, join\n'), ((1649, 1678), 'extruct.get_video_id', 'extruct.get_video_id', (['url_str'], {}), '(url_str)\n', (1669, 1678), False, 'import extruct\n'), ((1688, 1720), 'extruct.get_playlist_id', 'extruct.get_playlist_id', (['url_str'], {}), '(url_str)\n', (1711, 1720), False, 'import extruct\n'), ((2085, 2116), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_check"""'], {}), "('save_dir_check')\n", (2098, 2116), True, 'import dearpygui.dearpygui as dpg\n'), ((2121, 2147), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""url_check"""'], {}), "('url_check')\n", (2134, 2147), True, 'import dearpygui.dearpygui as dpg\n'), ((2338, 2357), 'pytube.Playlist', 'Playlist', (['input_url'], {}), '(input_url)\n', (2346, 2357), False, 'from pytube import YouTube, Playlist\n'), ((2948, 3008), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""entire_bar"""', '(complete_count / max_task_count)'], {}), "('entire_bar', complete_count / max_task_count)\n", (2961, 
3008), True, 'import dearpygui.dearpygui as dpg\n'), ((3021, 3107), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""entire_text"""', 'f"""Completed: {complete_count:>7} / {max_task_count}"""'], {}), "('entire_text',\n f'Completed: {complete_count:>7} / {max_task_count}')\n", (3034, 3107), True, 'import dearpygui.dearpygui as dpg\n'), ((3226, 3257), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_check"""'], {}), "('save_dir_check')\n", (3239, 3257), True, 'import dearpygui.dearpygui as dpg\n'), ((3262, 3293), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""csv_path_check"""'], {}), "('csv_path_check')\n", (3275, 3293), True, 'import dearpygui.dearpygui as dpg\n'), ((3391, 3416), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""csv_path"""'], {}), "('csv_path')\n", (3404, 3416), True, 'import dearpygui.dearpygui as dpg\n'), ((4157, 4217), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""entire_bar"""', '(complete_count / max_task_count)'], {}), "('entire_bar', complete_count / max_task_count)\n", (4170, 4217), True, 'import dearpygui.dearpygui as dpg\n'), ((4230, 4316), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""entire_text"""', 'f"""Completed: {complete_count:>7} / {max_task_count}"""'], {}), "('entire_text',\n f'Completed: {complete_count:>7} / {max_task_count}')\n", (4243, 4316), True, 'import dearpygui.dearpygui as dpg\n'), ((5557, 5587), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (5570, 5587), True, 'import dearpygui.dearpygui as dpg\n'), ((5702, 5739), 'extruct.file_name', 'extruct.file_name', (['stream_audio.title'], {}), '(stream_audio.title)\n', (5719, 5739), False, 'import extruct\n'), ((5762, 5807), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(MAXWOREKR * 2)'}), '(max_workers=MAXWOREKR * 2)\n', (5780, 5807), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, 
as_completed\n'), ((6136, 6155), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (6148, 6155), False, 'from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed\n'), ((6420, 6451), 'extruct.get_video_id', 'extruct.get_video_id', (['video_url'], {}), '(video_url)\n', (6440, 6451), False, 'import extruct\n'), ((8046, 8076), 'os.path.join', 'join', (['TEMPDIR', 'stream_video_id'], {}), '(TEMPDIR, stream_video_id)\n', (8050, 8076), False, 'from os.path import isfile, isdir, join\n'), ((8135, 8165), 'os.path.join', 'join', (['TEMPDIR', 'stream_audio_id'], {}), '(TEMPDIR, stream_audio_id)\n', (8139, 8165), False, 'from os.path import isfile, isdir, join\n'), ((8725, 8753), 'ffmpeg.input', 'ffmpeg.input', (['opus_temp_path'], {}), '(opus_temp_path)\n', (8737, 8753), False, 'import ffmpeg\n'), ((9573, 9607), 'utilio.delete_file', 'utilio.delete_file', (['opus_temp_path'], {}), '(opus_temp_path)\n', (9591, 9607), False, 'import utilio\n'), ((9806, 9863), 'ffmpeg.input', 'ffmpeg.input', (['audio_temp_path'], {'ss': 'start_time', 'to': 'end_time'}), '(audio_temp_path, ss=start_time, to=end_time)\n', (9818, 9863), False, 'import ffmpeg\n'), ((9898, 9927), 'ffmpeg.input', 'ffmpeg.input', (['audio_temp_path'], {}), '(audio_temp_path)\n', (9910, 9927), False, 'import ffmpeg\n'), ((10139, 10163), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (10161, 10163), False, 'import subprocess\n'), ((10266, 10317), 'ffmpeg.compile', 'ffmpeg.compile', (['audio_stream'], {'overwrite_output': '(True)'}), '(audio_stream, overwrite_output=True)\n', (10280, 10317), False, 'import ffmpeg\n'), ((10456, 10488), 'ffmpeg.Error', 'ffmpeg.Error', (['"""ffmpeg"""', 'out', 'err'], {}), "('ffmpeg', out, err)\n", (10468, 10488), False, 'import ffmpeg\n'), ((10554, 10565), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (10563, 10565), False, 'from traceback import print_exc\n'), ((10793, 10850), 'ffmpeg.input', 
'ffmpeg.input', (['video_temp_path'], {'ss': 'start_time', 'to': 'end_time'}), '(video_temp_path, ss=start_time, to=end_time)\n', (10805, 10850), False, 'import ffmpeg\n'), ((10871, 10928), 'ffmpeg.input', 'ffmpeg.input', (['audio_temp_path'], {'ss': 'start_time', 'to': 'end_time'}), '(audio_temp_path, ss=start_time, to=end_time)\n', (10883, 10928), False, 'import ffmpeg\n'), ((10963, 10992), 'ffmpeg.input', 'ffmpeg.input', (['video_temp_path'], {}), '(video_temp_path)\n', (10975, 10992), False, 'import ffmpeg\n'), ((11013, 11042), 'ffmpeg.input', 'ffmpeg.input', (['audio_temp_path'], {}), '(audio_temp_path)\n', (11025, 11042), False, 'import ffmpeg\n'), ((11267, 11291), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (11289, 11291), False, 'import subprocess\n'), ((11394, 11445), 'ffmpeg.compile', 'ffmpeg.compile', (['marge_stream'], {'overwrite_output': '(True)'}), '(marge_stream, overwrite_output=True)\n', (11408, 11445), False, 'import ffmpeg\n'), ((11584, 11616), 'ffmpeg.Error', 'ffmpeg.Error', (['"""ffmpeg"""', 'out', 'err'], {}), "('ffmpeg', out, err)\n", (11596, 11616), False, 'import ffmpeg\n'), ((11734, 11745), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (11743, 11745), False, 'from traceback import print_exc\n'), ((12356, 12367), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (12365, 12367), False, 'from traceback import print_exc\n'), ((12667, 12704), 'os.path.join', 'join', (['"""resources"""', '"""fonts"""', '"""OFL.txt"""'], {}), "('resources', 'fonts', 'OFL.txt')\n", (12671, 12704), False, 'from os.path import isfile, isdir, join\n'), ((12874, 12899), 'dearpygui.dearpygui.menu', 'dpg.menu', ([], {'label': '"""License"""'}), "(label='License')\n", (12882, 12899), True, 'import dearpygui.dearpygui as dpg\n'), ((12913, 12947), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""NotoSansJP-Regular"""'], {}), "('NotoSansJP-Regular')\n", (12925, 12947), True, 'import dearpygui.dearpygui as dpg\n'), ((12960, 13037), 
'dearpygui.dearpygui.add_input_text', 'dpg.add_input_text', ([], {'default_value': 'font_license', 'multiline': '(True)', 'readonly': '(True)'}), '(default_value=font_license, multiline=True, readonly=True)\n', (12978, 13037), True, 'import dearpygui.dearpygui as dpg\n'), ((13839, 13892), 'dearpygui.dearpygui.tab', 'dpg.tab', ([], {'label': '"""Video OR Playlist URL"""', 'tag': '"""url_tab"""'}), "(label='Video OR Playlist URL', tag='url_tab')\n", (13846, 13892), True, 'import dearpygui.dearpygui as dpg\n'), ((14420, 14460), 'dearpygui.dearpygui.tab', 'dpg.tab', ([], {'label': '"""CSV File"""', 'tag': '"""csv_tab"""'}), "(label='CSV File', tag='csv_tab')\n", (14427, 14460), True, 'import dearpygui.dearpygui as dpg\n'), ((15492, 15506), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (15504, 15506), False, 'from tkinter.filedialog import askdirectory, askopenfilename\n'), ((2438, 2469), 'extruct.get_video_id', 'extruct.get_video_id', (['input_url'], {}), '(input_url)\n', (2458, 2469), False, 'import extruct\n'), ((6766, 6796), 'os.path.join', 'join', (['TEMPDIR', 'stream_audio_id'], {}), '(TEMPDIR, stream_audio_id)\n', (6770, 6796), False, 'from os.path import isfile, isdir, join\n'), ((9181, 9205), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (9203, 9205), False, 'import subprocess\n'), ((9316, 9372), 'ffmpeg.compile', 'ffmpeg.compile', (['opus_audio_stream'], {'overwrite_output': '(True)'}), '(opus_audio_stream, overwrite_output=True)\n', (9330, 9372), False, 'import ffmpeg\n'), ((9527, 9559), 'ffmpeg.Error', 'ffmpeg.Error', (['"""ffmpeg"""', 'out', 'err'], {}), "('ffmpeg', out, err)\n", (9539, 9559), False, 'import ffmpeg\n'), ((9960, 10006), 'ffmpeg.output', 'ffmpeg.output', (['audio', 'save_path'], {'acodec': '"""copy"""'}), "(audio, save_path, acodec='copy')\n", (9973, 10006), False, 'import ffmpeg\n'), ((11066, 11134), 'ffmpeg.output', 'ffmpeg.output', (['video', 'audio', 'save_path'], {'vcodec': '"""copy"""', 
'acodec': '"""copy"""'}), "(video, audio, save_path, vcodec='copy', acodec='copy')\n", (11079, 11134), False, 'import ffmpeg\n'), ((12436, 12488), 'os.path.join', 'join', (['"""resources"""', '"""fonts"""', '"""NotoSansJP-Regular.otf"""'], {}), "('resources', 'fonts', 'NotoSansJP-Regular.otf')\n", (12440, 12488), False, 'from os.path import isfile, isdir, join\n'), ((13911, 13937), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'horizontal': '(True)'}), '(horizontal=True)\n', (13920, 13937), True, 'import dearpygui.dearpygui as dpg\n'), ((13955, 14024), 'dearpygui.dearpygui.add_checkbox', 'dpg.add_checkbox', ([], {'default_value': '(False)', 'enabled': '(False)', 'tag': '"""url_check"""'}), "(default_value=False, enabled=False, tag='url_check')\n", (13971, 14024), True, 'import dearpygui.dearpygui as dpg\n'), ((14041, 14090), 'dearpygui.dearpygui.add_input_text', 'dpg.add_input_text', ([], {'callback': 'check_url', 'tag': '"""url"""'}), "(callback=check_url, tag='url')\n", (14059, 14090), True, 'import dearpygui.dearpygui as dpg\n'), ((14107, 14180), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Paste"""', 'tag': '"""url_paste_button"""', 'callback': 'paste_url'}), "(label='Paste', tag='url_paste_button', callback=paste_url)\n", (14121, 14180), True, 'import dearpygui.dearpygui as dpg\n'), ((14197, 14264), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Run"""', 'tag': '"""url_run_button"""', 'callback': 'run_url'}), "(label='Run', tag='url_run_button', callback=run_url)\n", (14211, 14264), True, 'import dearpygui.dearpygui as dpg\n'), ((14479, 14505), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'horizontal': '(True)'}), '(horizontal=True)\n', (14488, 14505), True, 'import dearpygui.dearpygui as dpg\n'), ((14523, 14597), 'dearpygui.dearpygui.add_checkbox', 'dpg.add_checkbox', ([], {'default_value': '(False)', 'enabled': '(False)', 'tag': '"""csv_path_check"""'}), "(default_value=False, enabled=False, 
tag='csv_path_check')\n", (14539, 14597), True, 'import dearpygui.dearpygui as dpg\n'), ((14614, 14673), 'dearpygui.dearpygui.add_input_text', 'dpg.add_input_text', ([], {'callback': 'check_csv_path', 'tag': '"""csv_path"""'}), "(callback=check_csv_path, tag='csv_path')\n", (14632, 14673), True, 'import dearpygui.dearpygui as dpg\n'), ((14690, 14769), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Select"""', 'tag': '"""csv_path_button"""', 'callback': 'load_csv_dialog'}), "(label='Select', tag='csv_path_button', callback=load_csv_dialog)\n", (14704, 14769), True, 'import dearpygui.dearpygui as dpg\n'), ((14786, 14853), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Run"""', 'tag': '"""csv_run_button"""', 'callback': 'run_csv'}), "(label='Run', tag='csv_run_button', callback=run_csv)\n", (14800, 14853), True, 'import dearpygui.dearpygui as dpg\n'), ((15554, 15594), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""save_dir_path"""', 'save_dir'], {}), "('save_dir_path', save_dir)\n", (15567, 15594), True, 'import dearpygui.dearpygui as dpg\n'), ((15734, 15775), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {'filetypes': "[('', '.csv')]"}), "(filetypes=[('', '.csv')])\n", (15749, 15775), False, 'from tkinter.filedialog import askdirectory, askopenfilename\n'), ((7779, 7809), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (7792, 7809), True, 'import dearpygui.dearpygui as dpg\n'), ((7811, 7839), 'extruct.file_name', 'extruct.file_name', (['stream_id'], {}), '(stream_id)\n', (7828, 7839), False, 'import extruct\n'), ((7912, 7942), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (7925, 7942), True, 'import dearpygui.dearpygui as dpg\n'), ((7944, 7981), 'extruct.file_name', 'extruct.file_name', (['stream_video.title'], {}), '(stream_video.title)\n', (7961, 7981), False, 'import extruct\n'), 
((8551, 8602), 'core.get_request_type', 'core.get_request_type', (['quality_mode.extension_audio'], {}), '(quality_mode.extension_audio)\n', (8572, 8602), False, 'import core\n'), ((15823, 15858), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['"""csv_path"""', 'load_csv'], {}), "('csv_path', load_csv)\n", (15836, 15858), True, 'import dearpygui.dearpygui as dpg\n'), ((6484, 6514), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (6497, 6514), True, 'import dearpygui.dearpygui as dpg\n'), ((6516, 6543), 'extruct.file_name', 'extruct.file_name', (['audio_id'], {}), '(audio_id)\n', (6533, 6543), False, 'import extruct\n'), ((6624, 6654), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['"""save_dir_path"""'], {}), "('save_dir_path')\n", (6637, 6654), True, 'import dearpygui.dearpygui as dpg\n'), ((6656, 6693), 'extruct.file_name', 'extruct.file_name', (['stream_audio.title'], {}), '(stream_audio.title)\n', (6673, 6693), False, 'import extruct\n'), ((8844, 8901), 'ffmpeg.output', 'ffmpeg.output', (['opus_audio', 'audio_temp_path'], {'acodec': '"""copy"""'}), "(opus_audio, audio_temp_path, acodec='copy')\n", (8857, 8901), False, 'import ffmpeg\n'), ((8990, 9032), 'ffmpeg.output', 'ffmpeg.output', (['opus_audio', 'audio_temp_path'], {}), '(opus_audio, audio_temp_path)\n', (9003, 9032), False, 'import ffmpeg\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import yamlutil
from asdf.versioning import AsdfSpec
from ..types import AstronomyDataModelType
from ..fixed_location import FixedLocation
class FixedLocationType(AstronomyDataModelType):
    """ASDF tag type handling (de)serialization of ``FixedLocation``."""

    name = 'datamodel/fixed_location'
    version = '1.0.0'
    supported_versions = ['1.0.0']
    types = ['astronomy_datamodels.fixed_location.FixedLocation']
    requires = ["astropy"]

    @classmethod
    def to_tree(cls, node, ctx):
        """Convert a FixedLocation instance into its ASDF tree form."""
        def tagged(value):
            return yamlutil.custom_tree_to_tagged_tree(value, ctx)

        tree = {
            'solar_system_body': node.solar_system_body,
            'latitude': tagged(node.latitude),
            'longitude': tagged(node.longitude),
        }
        # Optional attributes are only written when present on the node.
        if node.altitude is not None:
            tree['altitude'] = tagged(node.altitude)
        if node.meta is not None:
            tree['meta'] = tagged(node.meta)
        return tree

    @classmethod
    def from_tree(cls, node, ctx):
        """Reconstruct a FixedLocation instance from an ASDF tree."""
        def untagged(value):
            return yamlutil.tagged_tree_to_custom_tree(value, ctx)

        fixed_location = FixedLocation(latitude=untagged(node['latitude']),
                                       longitude=untagged(node['longitude']),
                                       solar_system_body=node['solar_system_body'])
        # Optional keys are applied only when they exist in the tree.
        if 'altitude' in node:
            fixed_location.altitude = untagged(node['altitude'])
        if 'meta' in node:
            fixed_location.meta = untagged(node['meta'])
        return fixed_location

    @classmethod
    def assert_equal(cls, old, new):
        # Round-trip equality checking is intentionally not implemented.
        pass
| [
"asdf.yamlutil.custom_tree_to_tagged_tree",
"asdf.yamlutil.tagged_tree_to_custom_tree"
] | [((647, 702), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.latitude', 'ctx'], {}), '(node.latitude, ctx)\n', (682, 702), False, 'from asdf import yamlutil\n'), ((728, 784), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.longitude', 'ctx'], {}), '(node.longitude, ctx)\n', (763, 784), False, 'from asdf import yamlutil\n'), ((1197, 1255), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['latitude']", 'ctx'], {}), "(node['latitude'], ctx)\n", (1232, 1255), False, 'from asdf import yamlutil\n'), ((1276, 1335), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['longitude']", 'ctx'], {}), "(node['longitude'], ctx)\n", (1311, 1335), False, 'from asdf import yamlutil\n'), ((851, 906), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.altitude', 'ctx'], {}), '(node.altitude, ctx)\n', (886, 906), False, 'from asdf import yamlutil\n'), ((965, 1016), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['node.meta', 'ctx'], {}), '(node.meta, ctx)\n', (1000, 1016), False, 'from asdf import yamlutil\n'), ((1561, 1619), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['altitude']", 'ctx'], {}), "(node['altitude'], ctx)\n", (1596, 1619), False, 'from asdf import yamlutil\n'), ((1681, 1735), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['meta']", 'ctx'], {}), "(node['meta'], ctx)\n", (1716, 1735), False, 'from asdf import yamlutil\n')] |
# Generated by Django 4.0.3 on 2022-03-21 13:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 4.0.3) that creates ``Contacts``."""

    dependencies = [
        # Must be applied after the profile-field cleanup migration.
        ('app', '0002_remove_profile_caption_alter_profile_profile_pic_and_more'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contacts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('unit', models.CharField(blank=True, max_length=100, null=True)),
                # NOTE(review): presumably a phone/mobile number — stored as a
                # plain integer with default 0; confirm intended semantics.
                ('m_number', models.IntegerField(default=0)),
                # Deleting a neighborhood cascades to its contacts.
                ('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.neighborhood')),
            ],
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BigAutoField",
"django.db.models.IntegerField"
] | [((400, 496), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (419, 496), False, 'from django.db import migrations, models\n'), ((520, 575), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (536, 575), False, 'from django.db import migrations, models\n'), ((603, 658), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (619, 658), False, 'from django.db import migrations, models\n'), ((690, 720), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (709, 720), False, 'from django.db import migrations, models\n'), ((748, 838), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""app.neighborhood"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'app.neighborhood')\n", (765, 838), False, 'from django.db import migrations, models\n')] |
from setuptools import setup
setup(
    name='pyons',
    version='1.0',
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    py_modules=['pyons'],
    install_requires=[],
    # Fixed typo: the setuptools keyword is ``tests_require`` — the original
    # ``tests_requires`` was an unknown option and was silently ignored.
    tests_require=[
        'pytest',
    ],
)
| [
"setuptools.setup"
] | [((31, 199), 'setuptools.setup', 'setup', ([], {'name': '"""pyons"""', 'version': '"""1.0"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'py_modules': "['pyons']", 'install_requires': '[]', 'tests_requires': "['pytest']"}), "(name='pyons', version='1.0', author='<NAME>', author_email='<EMAIL>',\n license='MIT', py_modules=['pyons'], install_requires=[],\n tests_requires=['pytest'])\n", (36, 199), False, 'from setuptools import setup\n')] |
#! /usr/bin/env python3.2
import re
def _subpre(text):
list=re.split('(<pre>|</pre>)',text)
for i in range(len(list)):
# begin of pre
if i%4==1:
list[i]='\n\n '
# in pre
elif i%4==2:
list[i]=re.sub('<p>|<br>|\n\n', '\n\n ',list[i])
# end of pre
elif i%4==3:
list[i]='\n\n'
return ''.join(list)
def _subblock(text):
list=re.split('(<blockquote>|</blockquote>)',text)
for i in range(len(list)):
# begin of blockquote
if i%4==1:
list[i]='\n\n> '
# in blockquote
elif i%4==2:
list[i]=re.sub('<p>|<br>|\n\n', '\n\n> ',list[i])
# end of blockquote
elif i%4==3:
list[i]='\n\n'
return ''.join(list)
def _sublinks(text):
    """Rewrite HTML anchors as Markdown ``[text](url)`` links.

    Link text is Markdown-escaped and site-relative Fefe URLs are expanded
    to absolute ones.
    """
    def to_markdown_link(match):
        label = _markdownify_linktext(match.group('linktext'))
        target = _fefe_linksintern(match.group('link'))
        return '[' + label + '](' + target + ')'

    return re.sub('<a href=\"(?P<link>.*?)\">(?P<linktext>.*?)</a>', to_markdown_link, text)
def _markdownify(text):
list=re.split('(\[.*\]\(.*\))',text)
# only change when not a link
for i in range(0,len(list),2):
list[i]=re.sub('\*','\\*',list[i])
list[i]=re.sub('_','\\_',list[i])
list[i]=re.sub('<b>','**',list[i])
list[i]=re.sub('</b>','**',list[i])
list[i]=re.sub('<i>','_',list[i])
list[i]=re.sub('</i>','_',list[i])
list[i]=re.sub('<u>','\n',list[i])
list[i]=re.sub('</u>','\n',list[i])
list[i]=re.sub('<li>','\n - ',list[i])
list[i]=re.sub('</li>','\n',list[i])
list[i]=re.sub('<p>','\n\n',list[i])
list[i]=re.sub('</p>','\n\n',list[i])
list[i]=re.sub('<br>','\n\n',list[i])
return ''.join(list)
def _markdownify_linktext(text):
list=re.split('(\[.*\]\(.*\))',text)
# only change when not a link
for i in range(0,len(list),2):
list[i]=re.sub('\*','\\*',list[i])
list[i]=re.sub('_','\\_',list[i])
list[i]=re.sub('<b>','**',list[i])
list[i]=re.sub('</b>','**',list[i])
list[i]=re.sub('<i>','_',list[i])
list[i]=re.sub('</i>','_',list[i])
return ''.join(list)
def _fefe_linksintern(text):
text=re.sub('^\/\?ts=','https://blog.fefe.de/?ts=',text)
text=re.sub('^\/\?q=','https://blog.fefe.de/?q=',text)
return text
def html2md(html):
    """Convert a fragment of Fefe-blog HTML to Markdown.

    Runs the transformation pipeline in order: <pre> blocks, <blockquote>
    blocks, anchors, then inline markup and escaping.
    """
    for transform in (_subpre, _subblock, _sublinks, _markdownify):
        html = transform(html)
    return html
| [
"re.sub",
"re.split"
] | [((64, 96), 're.split', 're.split', (['"""(<pre>|</pre>)"""', 'text'], {}), "('(<pre>|</pre>)', text)\n", (72, 96), False, 'import re\n'), ((379, 425), 're.split', 're.split', (['"""(<blockquote>|</blockquote>)"""', 'text'], {}), "('(<blockquote>|</blockquote>)', text)\n", (387, 425), False, 'import re\n'), ((934, 970), 're.split', 're.split', (['"""(\\\\[.*\\\\]\\\\(.*\\\\))"""', 'text'], {}), "('(\\\\[.*\\\\]\\\\(.*\\\\))', text)\n", (942, 970), False, 'import re\n'), ((1616, 1652), 're.split', 're.split', (['"""(\\\\[.*\\\\]\\\\(.*\\\\))"""', 'text'], {}), "('(\\\\[.*\\\\]\\\\(.*\\\\))', text)\n", (1624, 1652), False, 'import re\n'), ((2006, 2061), 're.sub', 're.sub', (['"""^\\\\/\\\\?ts="""', '"""https://blog.fefe.de/?ts="""', 'text'], {}), "('^\\\\/\\\\?ts=', 'https://blog.fefe.de/?ts=', text)\n", (2012, 2061), False, 'import re\n'), ((2065, 2118), 're.sub', 're.sub', (['"""^\\\\/\\\\?q="""', '"""https://blog.fefe.de/?q="""', 'text'], {}), "('^\\\\/\\\\?q=', 'https://blog.fefe.de/?q=', text)\n", (2071, 2118), False, 'import re\n'), ((1043, 1072), 're.sub', 're.sub', (['"""\\\\*"""', '"""\\\\*"""', 'list[i]'], {}), "('\\\\*', '\\\\*', list[i])\n", (1049, 1072), False, 'import re\n'), ((1082, 1109), 're.sub', 're.sub', (['"""_"""', '"""\\\\_"""', 'list[i]'], {}), "('_', '\\\\_', list[i])\n", (1088, 1109), False, 'import re\n'), ((1120, 1148), 're.sub', 're.sub', (['"""<b>"""', '"""**"""', 'list[i]'], {}), "('<b>', '**', list[i])\n", (1126, 1148), False, 'import re\n'), ((1159, 1188), 're.sub', 're.sub', (['"""</b>"""', '"""**"""', 'list[i]'], {}), "('</b>', '**', list[i])\n", (1165, 1188), False, 'import re\n'), ((1199, 1226), 're.sub', 're.sub', (['"""<i>"""', '"""_"""', 'list[i]'], {}), "('<i>', '_', list[i])\n", (1205, 1226), False, 'import re\n'), ((1237, 1265), 're.sub', 're.sub', (['"""</i>"""', '"""_"""', 'list[i]'], {}), "('</i>', '_', list[i])\n", (1243, 1265), False, 'import re\n'), ((1276, 1304), 're.sub', 're.sub', (['"""<u>"""', '"""\n"""', 
'list[i]'], {}), "('<u>', '\\n', list[i])\n", (1282, 1304), False, 'import re\n'), ((1315, 1344), 're.sub', 're.sub', (['"""</u>"""', '"""\n"""', 'list[i]'], {}), "('</u>', '\\n', list[i])\n", (1321, 1344), False, 'import re\n'), ((1355, 1387), 're.sub', 're.sub', (['"""<li>"""', '"""\n - """', 'list[i]'], {}), "('<li>', '\\n - ', list[i])\n", (1361, 1387), False, 'import re\n'), ((1398, 1428), 're.sub', 're.sub', (['"""</li>"""', '"""\n"""', 'list[i]'], {}), "('</li>', '\\n', list[i])\n", (1404, 1428), False, 'import re\n'), ((1439, 1469), 're.sub', 're.sub', (['"""<p>"""', '"""\n\n"""', 'list[i]'], {}), "('<p>', '\\n\\n', list[i])\n", (1445, 1469), False, 'import re\n'), ((1480, 1511), 're.sub', 're.sub', (['"""</p>"""', '"""\n\n"""', 'list[i]'], {}), "('</p>', '\\n\\n', list[i])\n", (1486, 1511), False, 'import re\n'), ((1522, 1553), 're.sub', 're.sub', (['"""<br>"""', '"""\n\n"""', 'list[i]'], {}), "('<br>', '\\n\\n', list[i])\n", (1528, 1553), False, 'import re\n'), ((1725, 1754), 're.sub', 're.sub', (['"""\\\\*"""', '"""\\\\*"""', 'list[i]'], {}), "('\\\\*', '\\\\*', list[i])\n", (1731, 1754), False, 'import re\n'), ((1764, 1791), 're.sub', 're.sub', (['"""_"""', '"""\\\\_"""', 'list[i]'], {}), "('_', '\\\\_', list[i])\n", (1770, 1791), False, 'import re\n'), ((1802, 1830), 're.sub', 're.sub', (['"""<b>"""', '"""**"""', 'list[i]'], {}), "('<b>', '**', list[i])\n", (1808, 1830), False, 'import re\n'), ((1841, 1870), 're.sub', 're.sub', (['"""</b>"""', '"""**"""', 'list[i]'], {}), "('</b>', '**', list[i])\n", (1847, 1870), False, 'import re\n'), ((1881, 1908), 're.sub', 're.sub', (['"""<i>"""', '"""_"""', 'list[i]'], {}), "('<i>', '_', list[i])\n", (1887, 1908), False, 'import re\n'), ((1919, 1947), 're.sub', 're.sub', (['"""</i>"""', '"""_"""', 'list[i]'], {}), "('</i>', '_', list[i])\n", (1925, 1947), False, 'import re\n'), ((228, 272), 're.sub', 're.sub', (['"""<p>|<br>|\n\n"""', '"""\n\n """', 'list[i]'], {}), "('<p>|<br>|\\n\\n', '\\n\\n ', list[i])\n", 
(234, 272), False, 'import re\n'), ((569, 611), 're.sub', 're.sub', (['"""<p>|<br>|\n\n"""', '"""\n\n> """', 'list[i]'], {}), "('<p>|<br>|\\n\\n', '\\n\\n> ', list[i])\n", (575, 611), False, 'import re\n')] |
from requests.adapters import HTTPAdapter, DEFAULT_POOLBLOCK
from requests.packages.urllib3.poolmanager import PoolManager
class TribAdapter(HTTPAdapter):
    """Transport adapter that pins pooled connections to ``ssl_version='TLSv1'``.

    NOTE(review): TLSv1 is deprecated and widely considered insecure — keep
    this adapter mounted only for the specific legacy host that requires it.
    """

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK,
                         **pool_kwargs):
        """Build the urllib3 ``PoolManager`` with a forced TLSv1 ssl_version.

        Fixed: accepts and forwards ``**pool_kwargs`` like the base
        ``HTTPAdapter.init_poolmanager`` does, so requests versions that pass
        extra pool keyword arguments keep working.
        """
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        # The pinned ssl_version takes precedence over any caller-supplied one,
        # matching the original behavior.
        pool_kwargs['ssl_version'] = 'TLSv1'
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, **pool_kwargs)
| [
"requests.packages.urllib3.poolmanager.PoolManager"
] | [((379, 468), 'requests.packages.urllib3.poolmanager.PoolManager', 'PoolManager', ([], {'num_pools': 'connections', 'maxsize': 'maxsize', 'block': 'block', 'ssl_version': '"""TLSv1"""'}), "(num_pools=connections, maxsize=maxsize, block=block,\n ssl_version='TLSv1')\n", (390, 468), False, 'from requests.packages.urllib3.poolmanager import PoolManager\n')] |
import spacy
from spacy.lang.ro import Romanian
from typing import Dict, List, Iterable
from nltk import sent_tokenize
import re
# JSON Example localhost:8081/spacy application/json
# {
# "lang" : "en",
# "blocks" : ["După terminarea oficială a celui de-al doilea război mondial, în conformitate cu discursul lui <NAME> (prim ministru al Regatului Unit la acea dată), de la Fulton, s-a declanșat Războiul rece și a apărut conceptul de cortină de fier. Urmare a politicii consecvente de apărare a sistemului economic și politic (implicit a intereslor economice ale marelui capital din lumea occidentală) trupele germane, în calitate de prizonieri, aflate pe teritoriul Germaniei de Vest au fost reînarmate și au constituit baza viitorului Bundeswehr - armata regulată a R.F.G."]
# }
# spaCy model name (or local model path) to load for each supported
# language code; 'ro' points at a locally trained model directory.
models = {
    'en': 'en_coref_lg',
    'nl': 'nl',
    'fr': 'fr_core_news_md',
    'es': 'es',
    'de': 'de',
    'it': 'it',
    'ro': 'models/model3'
}

# Per-language pre-tagging normalization rules: (compiled pattern, replacement).
# For Romanian, legacy cedilla letters (ş/Ş, ţ/Ţ) are replaced with the
# correct comma-below forms (ș/Ș, ț/Ț), and 'î' between word characters
# becomes 'â'.
normalization = {
    'ro': [
        (re.compile("ş"), "ș"),
        (re.compile("Ş"), "Ș"),
        (re.compile("ţ"), "ț"),
        (re.compile("Ţ"), "Ț"),
        (re.compile("(\w)î(\w)"), "\g<1>â\g<2>")
    ]
}
def convertToPenn(pos: str, lang: str) -> str:
    """Map a language-specific POS tag string onto a coarse Penn-style tag.

    For 'fr', 'nl' and 'ro' the (lower-cased) tag is matched against ordered
    prefix rules; unmatched tags map to "". For any other language the tag
    is simply truncated to its first two characters.
    """
    prefix_rules = {
        'fr': [(('noun', 'propn'), 'NN'), (('verb',), 'VB'),
               (('adj',), 'JJ'), (('adv',), 'RB'),
               (('adp',), 'IN'), (('cconj',), 'CC')],
        'nl': [(('n_', 'n|', 'propn'), 'NN'), (('v_', 'v|'), 'VB'),
               (('adj',), 'JJ'), (('adv',), 'RB'),
               (('adp',), 'IN'), (('cconj', 'conj'), 'CC')],
        # Order matters for Romanian: 's'/'cs' must be tried before plain 'c'.
        'ro': [(('n',), 'NN'), (('v',), 'VB'),
               (('a',), 'JJ'), (('r',), 'RB'),
               (('s', 'cs'), 'IN'), (('c',), 'CC')],
    }
    if lang in prefix_rules:
        lowered = pos.lower()
        for prefixes, penn_tag in prefix_rules[lang]:
            if lowered.startswith(prefixes):
                return penn_tag
        return ""
    # Fallback for all other languages: first two characters of the tag.
    return pos[:2] if len(pos) > 2 else pos
class SpacyParser:
    """Wraps spaCy models for NER, tagging and sentence-level annotation of
    multilingual documents (see the JSON request example above)."""

    def __init__(self):
        # Multilingual NER model shared across all languages.
        self.ner = spacy.load('xx_ent_wiki_sm')
        # self.romanian = Romanian()
        # Blank (tokenization-only) pipelines, one per supported language;
        # full models are loaded lazily in parse().
        self.pipelines = {
            lang: spacy.util.get_lang_class(lang)()
            for lang in models
        }
        # Cache of fully loaded spaCy models, keyed by language code.
        self.loaded_models = {}

    def preprocess(self, text: str, lang: str) -> str:
        """Apply the language's normalization rules; no-op if none exist."""
        if lang not in normalization:
            return text
        for pattern, replacement in normalization[lang]:
            text = re.sub(pattern, replacement, text)
        return text

    def get_tokens_lemmas(self, sentences: Iterable, lang: str) -> Iterable:
        """Stream sentences through the blank pipeline for `lang`.

        The first character of each sentence is lower-cased before piping.
        Returns None for unsupported languages, otherwise the Doc stream
        produced by ``pipe``.
        """
        if lang not in self.pipelines:
            return None
        pipeline = self.pipelines[lang]
        doc = pipeline.pipe((sent[:1].lower() + sent[1:] for sent in sentences), batch_size=100000, n_threads=16)
        return doc

    def tokenize_sentences(self, block: str) -> List[str]:
        """Split a text block into sentences using NLTK's tokenizer."""
        return sent_tokenize(block)

    def parse(self, sentence: str, lang: str):
        """Run the full spaCy model for `lang` on one sentence.

        The model is loaded lazily on first use and cached in
        ``self.loaded_models``.
        """
        if lang not in self.loaded_models:
            self.loaded_models[lang] = spacy.load(models[lang])
        model = self.loaded_models[lang]
        doc = model(sentence)
        return doc

    def process(self, doc):
        """Annotate every block of `doc` in place and return it.

        `doc` is a dict with 'lang' and 'blocks' (each block carrying
        'text'); each block gains a 'sentences' list of {'text', 'words'}
        entries where every word records index/lemma/pos/dep/ner/head.
        """
        lang = doc["lang"]
        for block in doc["blocks"]:
            sents = sent_tokenize(block["text"])
            block["sentences"] = list()
            for sent in sents:
                # Two passes: multilingual NER and language-specific parse.
                ne = self.ner(sent)
                tokens = self.parse(sent, lang)
                res_sent = {}
                res_sent["text"] = sent
                res_sent["words"] = []
                # get pos tags (converted to coarse Penn-style tags)
                for w in tokens:
                    wp = {"text" : w.text}
                    wp["index"] = w.i
                    wp["lemma"] = w.lemma_
                    wp["pos"] = convertToPenn(w.tag_, lang)
                    wp["dep"] = w.dep_
                    wp["ner"] = w.ent_type_
                    wp["head"] = w.head.i
                    res_sent["words"].append(wp)
                # get named entities: labels from the multilingual NER model
                # overwrite the per-language ones, matched by token index.
                for ent in [token for token in ne if token.ent_type != 0]:
                    for w in res_sent["words"]:
                        # or (' ' in ent[0] and w["word"] in ent[0])
                        if w["index"] == ent.i:
                            w["ner"] = ent.ent_type_
                block["sentences"].append(res_sent)
        return doc
return doc
if __name__ == "__main__":
    # Ad-hoc manual smoke test; not executed when imported as a module.
    spacyInstance = SpacyParser()
    # NOTE(review): `sent` is assigned but never used below — presumably kept
    # for manual experiments; confirm before removing.
    sent = """
    După terminarea oficială a celui de-al doilea război mondial, în conformitate cu discursul lui <NAME> (prim ministru al Regatului Unit la acea dată), de la Fulton, s-a declanșat Războiul rece și a apărut conceptul de cortină de fier. Urmare a politicii consecvente de apărare a sistemului economic și politic (implicit a intereslor economice ale marelui capital din lumea occidentală) trupele germane, în calitate de "prizonieri", aflate pe teritoriul Germaniei de Vest au fost reînarmate și au constituit baza viitorului "Bundeswehr" - armata regulată a R.F.G.
    Pe fondul evenimentelor din 1948 din Cehoslovacia (expulzări ale etnicilor germani, alegeri, reconstrucție economică) apare infiltrarea agenților serviciilor speciale ale S.U.A. și Marii Britanii cu rol de "agitatori". Existând cauza, trupele sovietice nu părăsesc Europa Centrală și de Est cucerită-eliberată, staționând pe teritoriul mai multor state. Aflate pe linia de demarcație dintre cele două blocuri foste aliate, armata sovietică nu a plecat din Ungaria decât după dizolvarea Tratatului de la Varșovia.
    """
    # Parse a French sentence and print the coarse Penn tag of every token.
    doc = spacyInstance.parse("Pensée des enseignants, production d’écrits, ingénierie éducative, enseignement à distance, traitement automatique de la langue, outils cognitifs, feedback automatique", 'fr')
    for token in doc:
        print(convertToPenn(token.tag_, 'fr'))
    # print(spacyInstance.preprocess("coborî", 'ro'))
| [
"spacy.util.get_lang_class",
"re.compile",
"spacy.load",
"nltk.sent_tokenize",
"re.sub"
] | [((2724, 2752), 'spacy.load', 'spacy.load', (['"""xx_ent_wiki_sm"""'], {}), "('xx_ent_wiki_sm')\n", (2734, 2752), False, 'import spacy\n'), ((4265, 4285), 'nltk.sent_tokenize', 'sent_tokenize', (['block'], {}), '(block)\n', (4278, 4285), False, 'from nltk import sent_tokenize\n'), ((989, 1004), 're.compile', 're.compile', (['"""ş"""'], {}), "('ş')\n", (999, 1004), False, 'import re\n'), ((1021, 1036), 're.compile', 're.compile', (['"""Ş"""'], {}), "('Ş')\n", (1031, 1036), False, 'import re\n'), ((1053, 1068), 're.compile', 're.compile', (['"""ţ"""'], {}), "('ţ')\n", (1063, 1068), False, 'import re\n'), ((1085, 1100), 're.compile', 're.compile', (['"""Ţ"""'], {}), "('Ţ')\n", (1095, 1100), False, 'import re\n'), ((1117, 1142), 're.compile', 're.compile', (['"""(\\\\w)î(\\\\w)"""'], {}), "('(\\\\w)î(\\\\w)')\n", (1127, 1142), False, 'import re\n'), ((3333, 3367), 're.sub', 're.sub', (['pattern', 'replacement', 'text'], {}), '(pattern, replacement, text)\n', (3339, 3367), False, 'import re\n'), ((4416, 4440), 'spacy.load', 'spacy.load', (['models[lang]'], {}), '(models[lang])\n', (4426, 4440), False, 'import spacy\n'), ((5185, 5213), 'nltk.sent_tokenize', 'sent_tokenize', (["block['text']"], {}), "(block['text'])\n", (5198, 5213), False, 'from nltk import sent_tokenize\n'), ((2835, 2866), 'spacy.util.get_lang_class', 'spacy.util.get_lang_class', (['lang'], {}), '(lang)\n', (2860, 2866), False, 'import spacy\n')] |
#! /usr/bin/env python
# command with arguments,
# plus processing of an image
# sent by the user
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from io import BytesIO
from PIL import Image
import cv2 as cv
import skimage.io as io
# NOTE(review): the bot token is a hard-coded placeholder string here —
# presumably it should come from an environment variable or a config file;
# confirm before deploying.
updater = Updater('api token del bot')
def sendImage(bot, cid, frame):
    """Encode a BGR frame as an in-memory PNG and send it to chat `cid`."""
    rgb_frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    buffer = BytesIO()
    Image.fromarray(rgb_frame, mode='RGB').save(buffer, 'PNG')
    buffer.seek(0)
    bot.sendPhoto(chat_id=cid, photo=buffer)
def hello(bot, update):
    """Reply to the /hello command, greeting the sender by first name."""
    sender = update.message.from_user
    update.message.reply_text('Hello {}'.format(sender.first_name))
def argu(bot, update, args):
    """Print each argument received with the /argu command."""
    print('arguments:')
    for argument in args:
        print(argument)
def work(bot, update):
    """Fetch the user's photo, report its size, and send back a gray copy."""
    file_id = update.message.photo[-1].file_id
    path = bot.get_file(file_id)['file_path']
    img = io.imread(path)
    print(img.shape)
    update.message.reply_text('{}x{}'.format(img.shape[1], img.shape[0]))
    # The gray -> RGB round-trip yields a 3-channel grayscale image.
    gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    result = cv.cvtColor(gray, cv.COLOR_GRAY2RGB)
    sendImage(bot, update.message.chat_id, result)
# Wire up command and message handlers, then poll Telegram for updates.
updater.dispatcher.add_handler(CommandHandler('hello', hello))
# pass_args=True delivers the command's arguments as a list to `argu`.
updater.dispatcher.add_handler(CommandHandler('argu' , argu, pass_args=True))
# Any photo message is routed to `work`.
updater.dispatcher.add_handler(MessageHandler(Filters.photo, work))
updater.start_polling()
updater.idle()
| [
"PIL.Image.fromarray",
"io.BytesIO",
"skimage.io.imread",
"cv2.cvtColor",
"telegram.ext.MessageHandler",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((278, 306), 'telegram.ext.Updater', 'Updater', (['"""api token del bot"""'], {}), "('api token del bot')\n", (285, 306), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((352, 388), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2RGB'], {}), '(frame, cv.COLOR_BGR2RGB)\n', (363, 388), True, 'import cv2 as cv\n'), ((401, 435), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {'mode': '"""RGB"""'}), "(frame, mode='RGB')\n", (416, 435), False, 'from PIL import Image\n'), ((452, 461), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (459, 461), False, 'from io import BytesIO\n'), ((890, 905), 'skimage.io.imread', 'io.imread', (['path'], {}), '(path)\n', (899, 905), True, 'import skimage.io as io\n'), ((1155, 1185), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""hello"""', 'hello'], {}), "('hello', hello)\n", (1169, 1185), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1218, 1262), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""argu"""', 'argu'], {'pass_args': '(True)'}), "('argu', argu, pass_args=True)\n", (1232, 1262), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1296, 1331), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.photo', 'work'], {}), '(Filters.photo, work)\n', (1310, 1331), False, 'from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n'), ((1020, 1055), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2GRAY'], {}), '(img, cv.COLOR_RGB2GRAY)\n', (1031, 1055), True, 'import cv2 as cv\n')] |
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler, OneHotEncoder
import numpy as np
import pandas as pd
import tqdm
"""
Class for Preprocessing CICIDS2017 Data represented as rows
"""
class CICIDSPreprocessor:
    """Preprocessor for CICIDS2017 network-flow records represented as rows.

    Drops identifier columns, filters rows to a single label, removes the
    label column and replaces missing values with 0.
    """

    def __init__(self):
        # Identifier columns removed before modelling (note the leading space
        # in ' Timestamp' / ' Label' — the CICIDS2017 CSV headers contain it).
        self.to_delete_columns = ['Flow ID', ' Timestamp']
        self.label_column = ' Label'

    def _preprocess(self, df: pd.DataFrame, label: str) -> pd.DataFrame:
        """Shared pipeline: drop ids, keep `label` rows, drop label, fill NaN with 0."""
        df = df.drop(self.to_delete_columns, axis=1)
        df = df[df[self.label_column] == label]
        df.reset_index(drop=True, inplace=True)
        df.drop(self.label_column, axis=1, inplace=True)
        return df.fillna(0)

    def preprocess_train_data(self, df: pd.DataFrame, label="BENIGN") -> pd.DataFrame:
        """Prepare training rows; keeps only rows whose label equals `label`."""
        return self._preprocess(df, label)

    def preprocess_test_data(self, df: pd.DataFrame, label="BENIGN") -> pd.DataFrame:
        """Prepare test rows; identical pipeline to the training preprocessing."""
        return self._preprocess(df, label)

    def __get_windows(self, df, window_size=20, stride=10):
        """Slice `df` into overlapping row windows and stack them as a 3-D array."""
        windows_arr = []
        for i in tqdm.tqdm(range(0, len(df) - window_size + 1, stride)):
            windows_arr.append(df.iloc[i:i + window_size, :].to_numpy())
        return np.array(windows_arr)
| [
"numpy.array"
] | [((1214, 1235), 'numpy.array', 'np.array', (['windows_arr'], {}), '(windows_arr)\n', (1222, 1235), True, 'import numpy as np\n')] |
import os
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
# Credentials: prefer the NETMIKO_PASSWORD environment variable so automated
# tests run non-interactively, otherwise fall back to an interactive prompt.
env_password = os.getenv("NETMIKO_PASSWORD")
password = env_password if env_password else getpass()

device_params = {
    "device_type": "cisco_xe",
    "host": "cisco3.lasthop.io",
    "username": "pyclass",
    "password": password,
}

with ConnectHandler(**device_params) as net_connect:
    # use_genie=True presumably requests structured (parsed) output — verify against netmiko docs.
    output = net_connect.send_command("show ip int brief", use_genie=True)
    # output = net_connect.send_command("show ip arp", use_genie=True)
    pprint(output)
| [
"netmiko.ConnectHandler",
"getpass.getpass",
"pprint.pprint",
"os.getenv"
] | [((252, 281), 'os.getenv', 'os.getenv', (['"""NETMIKO_PASSWORD"""'], {}), "('NETMIKO_PASSWORD')\n", (261, 281), False, 'import os\n'), ((219, 248), 'os.getenv', 'os.getenv', (['"""NETMIKO_PASSWORD"""'], {}), "('NETMIKO_PASSWORD')\n", (228, 248), False, 'import os\n'), ((287, 296), 'getpass.getpass', 'getpass', ([], {}), '()\n', (294, 296), False, 'from getpass import getpass\n'), ((437, 464), 'netmiko.ConnectHandler', 'ConnectHandler', ([], {}), '(**my_device)\n', (451, 464), False, 'from netmiko import ConnectHandler\n'), ((631, 645), 'pprint.pprint', 'pprint', (['output'], {}), '(output)\n', (637, 645), False, 'from pprint import pprint\n')] |
import requests
import os
from user_data import UserData
import json
class DataManager:
    """This class is responsible for talking to the Google Sheet."""

    def __init__(self) -> None:
        sheet_id = os.environ['SHEETY_SHEET_ID']
        self.SHEETY_URL = f"https://api.sheety.co/{sheet_id}/pythonFlightDeals"
        self.sheet_data = {}
        self.bearer_token = os.environ["SHEETY_TOKEN"]
        # Every request authenticates with the Sheety bearer token.
        self.headers = {
            "Authorization": f"Bearer {self.bearer_token}"
        }

    def get_cities(self):
        """Fetch all rows from the `prices` sheet and cache them on the instance."""
        response = requests.get(url=f"{self.SHEETY_URL}/prices", headers=self.headers)
        response.raise_for_status()
        self.sheet_data = response.json()
        return self.sheet_data

    def update_city(self, row_id, city_data):
        """Overwrite the `prices` row identified by `row_id` with `city_data`."""
        self.headers["Content-Type"] = "application/json"
        response = requests.put(url=f"{self.SHEETY_URL}/prices/{row_id}", json=city_data, headers=self.headers)
        response.raise_for_status()

    def get_users(self):
        """Return the list of subscriber records from the `subscribers` sheet."""
        response = requests.get(url=f"{self.SHEETY_URL}/subscribers", headers=self.headers)
        response.raise_for_status()
        return response.json()['subscribers']

    def add_user(self, user: UserData):
        """Append a subscriber row built from `user` to the `subscribers` sheet."""
        self.headers["Content-Type"] = "application/json"
        payload = {
            "subscriber": {
                "first": str(user.first_name),
                "last": str(user.last_name),
                "email": str(user.email),
            }
        }
        response = requests.post(url=f"{self.SHEETY_URL}/subscribers", json=payload, headers=self.headers)
        print(response.text)
        response.raise_for_status()
| [
"requests.put",
"requests.post",
"requests.get"
] | [((517, 584), 'requests.get', 'requests.get', ([], {'url': 'f"""{self.SHEETY_URL}/prices"""', 'headers': 'self.headers'}), "(url=f'{self.SHEETY_URL}/prices', headers=self.headers)\n", (529, 584), False, 'import requests\n'), ((818, 914), 'requests.put', 'requests.put', ([], {'url': 'f"""{self.SHEETY_URL}/prices/{row_id}"""', 'json': 'city_data', 'headers': 'self.headers'}), "(url=f'{self.SHEETY_URL}/prices/{row_id}', json=city_data,\n headers=self.headers)\n", (830, 914), False, 'import requests\n'), ((999, 1071), 'requests.get', 'requests.get', ([], {'url': 'f"""{self.SHEETY_URL}/subscribers"""', 'headers': 'self.headers'}), "(url=f'{self.SHEETY_URL}/subscribers', headers=self.headers)\n", (1011, 1071), False, 'import requests\n'), ((1481, 1575), 'requests.post', 'requests.post', ([], {'url': 'f"""{self.SHEETY_URL}/subscribers"""', 'json': 'user_data', 'headers': 'self.headers'}), "(url=f'{self.SHEETY_URL}/subscribers', json=user_data, headers\n =self.headers)\n", (1494, 1575), False, 'import requests\n')] |
import copy
from deploy_config_generator.utils import yaml_dump
from deploy_config_generator.output import kube_common
class OutputPlugin(kube_common.OutputPlugin):
    """Renders Kubernetes KongConsumer manifests."""

    NAME = 'kube_kong_consumer'
    DESCR = 'Kubernetes KongConsumer output plugin'
    FILE_EXT = '.yaml'
    DEFAULT_CONFIG = {
        'fields': {
            'kong_consumers': {
                'metadata': {
                    'type': 'dict',
                    'required': True,
                    'fields': copy.deepcopy(kube_common.METADATA_FIELD_SPEC),
                },
                'username': {
                    'type': 'str',
                },
                'custom_id': {
                    'type': 'str',
                },
                'credentials': {
                    'type': 'list',
                    'subtype': 'str',
                },
            },
        }
    }

    def generate_output(self, app_vars):
        """Build the KongConsumer document and return (yaml text, filename suffix)."""
        app = app_vars['APP']
        # Basic structure
        data = {
            'apiVersion': 'configuration.konghq.com/v1',
            'kind': 'KongConsumer',
            'metadata': self.build_metadata(app['metadata']),
        }
        # Copy the optional scalar/list fields only when the app sets them.
        for field in ('username', 'custom_id', 'credentials'):
            if app[field]:
                field_spec = {field: self._fields['kong_consumers'][field]}
                data.update(self.build_generic(app, field_spec, camel_case=False))
        data = self._template.render_template(data, app_vars)
        return (yaml_dump(data), self.get_output_filename_suffix(data))
| [
"deploy_config_generator.utils.yaml_dump",
"copy.deepcopy"
] | [((1443, 1458), 'deploy_config_generator.utils.yaml_dump', 'yaml_dump', (['data'], {}), '(data)\n', (1452, 1458), False, 'from deploy_config_generator.utils import yaml_dump\n'), ((482, 528), 'copy.deepcopy', 'copy.deepcopy', (['kube_common.METADATA_FIELD_SPEC'], {}), '(kube_common.METADATA_FIELD_SPEC)\n', (495, 528), False, 'import copy\n')] |
import pytest
from conflow.merge import merge_factory
from conflow.node import Node, NodeList, NodeMap
def test_merge_node_node(default_config):
    """Merging two scalar nodes yields the second (overriding) node."""
    lhs, rhs = Node('base', 'node_A'), Node('other', 'node_B')
    assert merge_factory(lhs, rhs, default_config) == rhs
def test_merge_node_nodelist(default_config):
    """A scalar merged with a list is replaced by the list."""
    scalar = Node('base', 'node_A')
    seq = NodeList('other', [2])
    merged = merge_factory(scalar, seq, default_config)
    assert merged == seq
def test_merge_node_nodemap(default_config):
    """A scalar merged with a map is replaced by the map."""
    scalar = Node('base', 'node_A')
    mapping = NodeMap('other', {'db': {'master': {'host': 'other'}}})
    assert merge_factory(scalar, mapping, default_config) == mapping
def test_merge_nodelist_node(default_config):
    """A list merged with a scalar is replaced by the scalar."""
    seq, scalar = NodeList('other', [2]), Node('base', 'node_A')
    assert merge_factory(seq, scalar, default_config) == scalar
def test_merge_nodelist_nodelist_override(default_config):
    """Under the default policy the second list overrides the first."""
    first = NodeList('base', [1])
    second = NodeList('other', [2])
    merged = merge_factory(first, second, default_config)
    assert merged == second
def test_merge_nodelist_nodelist_extend(extend_list_config):
    """Under the extend policy the second list is appended to the first."""
    first, second = NodeList('base', [1]), NodeList('other', [2])
    assert merge_factory(first, second, extend_list_config) == NodeList('base', [1, 2])
def test_merge_nodelist_nodemap(default_config):
    """A list merged with a map is replaced by the map."""
    seq = NodeList('base', [1])
    mapping = NodeMap('base', {'db': {'master': {'host': 'base'}}})
    assert merge_factory(seq, mapping, default_config) == mapping
def test_merge_nodemap_node(default_config):
    """A map merged with a scalar is replaced by the scalar."""
    mapping = NodeMap('base', {'db': {'master': {'host': 'base'}}})
    scalar = Node('base', 'node_A')
    assert merge_factory(mapping, scalar, default_config) == scalar
def test_merge_nodemap_nodelist(default_config):
    """A map merged with a list is replaced by the list."""
    mapping = NodeMap('base', {'db': {'master': {'host': 'base'}}})
    seq = NodeList('base', [1])
    assert merge_factory(mapping, seq, default_config) == seq
def test_merge_nodemap_nodemap_override(default_config):
    """Merging two maps overrides leaf values with those of the second map."""
    first = NodeMap('base', {'db': {'master': {'host': 'base'}}})
    second = NodeMap('other', {'db': {'master': {'host': 'other'}}})
    merged = merge_factory(first, second, default_config)
    assert merged.db.master.host == 'other'
def test_merge_nodemap_nodemap_extend(default_config):
    """Merging two maps keeps keys that appear in either map."""
    first = NodeMap('base', {'master': {'host': 'master'}})
    second = NodeMap('other', {'slave': {'host': 'slave'}})
    merged = merge_factory(first, second, default_config)
    assert 'master' in merged
    assert 'slave' in merged
def test_merge_nodemap_nodemap_empty(default_config):
    """Merging two empty maps yields an empty map."""
    merged = merge_factory(NodeMap('base', {}), NodeMap('other', {}), default_config)
    assert merged == NodeMap('expected', {})
def test_merge_different_types_strict(strict_config):
    """In strict mode, merging incompatible types raises a descriptive RuntimeError."""
    first = NodeMap('base', {'merged_key': {'a': 'b'}})
    second = NodeMap('other', {'merged_key': 1})
    with pytest.raises(RuntimeError) as error:
        merge_factory(first, second, strict_config)
    assert str(error.value) == "Cannot merge `{'a': 'b'}` and `1` with key `merged_key`"
| [
"conflow.node.NodeList",
"conflow.node.NodeMap",
"pytest.raises",
"conflow.merge.merge_factory",
"conflow.node.Node"
] | [((159, 181), 'conflow.node.Node', 'Node', (['"""base"""', '"""node_A"""'], {}), "('base', 'node_A')\n", (163, 181), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((194, 217), 'conflow.node.Node', 'Node', (['"""other"""', '"""node_B"""'], {}), "('other', 'node_B')\n", (198, 217), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((340, 362), 'conflow.node.Node', 'Node', (['"""base"""', '"""node_A"""'], {}), "('base', 'node_A')\n", (344, 362), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((375, 397), 'conflow.node.NodeList', 'NodeList', (['"""other"""', '[2]'], {}), "('other', [2])\n", (383, 397), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((519, 541), 'conflow.node.Node', 'Node', (['"""base"""', '"""node_A"""'], {}), "('base', 'node_A')\n", (523, 541), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((554, 609), 'conflow.node.NodeMap', 'NodeMap', (['"""other"""', "{'db': {'master': {'host': 'other'}}}"], {}), "('other', {'db': {'master': {'host': 'other'}}})\n", (561, 609), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((798, 820), 'conflow.node.NodeList', 'NodeList', (['"""other"""', '[2]'], {}), "('other', [2])\n", (806, 820), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((833, 855), 'conflow.node.Node', 'Node', (['"""base"""', '"""node_A"""'], {}), "('base', 'node_A')\n", (837, 855), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((991, 1012), 'conflow.node.NodeList', 'NodeList', (['"""base"""', '[1]'], {}), "('base', [1])\n", (999, 1012), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1025, 1047), 'conflow.node.NodeList', 'NodeList', (['"""other"""', '[2]'], {}), "('other', [2])\n", (1033, 1047), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1185, 1206), 'conflow.node.NodeList', 'NodeList', (['"""base"""', '[1]'], {}), "('base', [1])\n", (1193, 1206), False, 'from conflow.node import Node, NodeList, 
NodeMap\n'), ((1219, 1241), 'conflow.node.NodeList', 'NodeList', (['"""other"""', '[2]'], {}), "('other', [2])\n", (1227, 1241), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1257, 1281), 'conflow.node.NodeList', 'NodeList', (['"""base"""', '[1, 2]'], {}), "('base', [1, 2])\n", (1265, 1281), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1414, 1435), 'conflow.node.NodeList', 'NodeList', (['"""base"""', '[1]'], {}), "('base', [1])\n", (1422, 1435), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1448, 1501), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'db': {'master': {'host': 'base'}}}"], {}), "('base', {'db': {'master': {'host': 'base'}}})\n", (1455, 1501), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1689, 1742), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'db': {'master': {'host': 'base'}}}"], {}), "('base', {'db': {'master': {'host': 'base'}}})\n", (1696, 1742), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1821, 1843), 'conflow.node.Node', 'Node', (['"""base"""', '"""node_A"""'], {}), "('base', 'node_A')\n", (1825, 1843), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((1969, 2022), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'db': {'master': {'host': 'base'}}}"], {}), "('base', {'db': {'master': {'host': 'base'}}})\n", (1976, 2022), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2101, 2122), 'conflow.node.NodeList', 'NodeList', (['"""base"""', '[1]'], {}), "('base', [1])\n", (2109, 2122), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2256, 2309), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'db': {'master': {'host': 'base'}}}"], {}), "('base', {'db': {'master': {'host': 'base'}}})\n", (2263, 2309), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2388, 2443), 'conflow.node.NodeMap', 'NodeMap', (['"""other"""', "{'db': {'master': {'host': 'other'}}}"], {}), "('other', {'db': 
{'master': {'host': 'other'}}})\n", (2395, 2443), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2523, 2565), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (2536, 2565), False, 'from conflow.merge import merge_factory\n'), ((2678, 2725), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'master': {'host': 'master'}}"], {}), "('base', {'master': {'host': 'master'}})\n", (2685, 2725), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2774, 2820), 'conflow.node.NodeMap', 'NodeMap', (['"""other"""', "{'slave': {'host': 'slave'}}"], {}), "('other', {'slave': {'host': 'slave'}})\n", (2781, 2820), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((2870, 2912), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (2883, 2912), False, 'from conflow.merge import merge_factory\n'), ((3039, 3058), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', '{}'], {}), "('base', {})\n", (3046, 3058), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((3071, 3091), 'conflow.node.NodeMap', 'NodeMap', (['"""other"""', '{}'], {}), "('other', {})\n", (3078, 3091), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((3107, 3130), 'conflow.node.NodeMap', 'NodeMap', (['"""expected"""', '{}'], {}), "('expected', {})\n", (3114, 3130), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((3264, 3307), 'conflow.node.NodeMap', 'NodeMap', (['"""base"""', "{'merged_key': {'a': 'b'}}"], {}), "('base', {'merged_key': {'a': 'b'}})\n", (3271, 3307), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((3320, 3355), 'conflow.node.NodeMap', 'NodeMap', (['"""other"""', "{'merged_key': 1}"], {}), "('other', {'merged_key': 1})\n", (3327, 3355), False, 'from conflow.node import Node, NodeList, NodeMap\n'), ((229, 271), 'conflow.merge.merge_factory', 'merge_factory', 
(['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (242, 271), False, 'from conflow.merge import merge_factory\n'), ((409, 451), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (422, 451), False, 'from conflow.merge import merge_factory\n'), ((687, 729), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (700, 729), False, 'from conflow.merge import merge_factory\n'), ((867, 909), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (880, 909), False, 'from conflow.merge import merge_factory\n'), ((1059, 1101), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (1072, 1101), False, 'from conflow.merge import merge_factory\n'), ((1293, 1339), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'extend_list_config'], {}), '(base, other, extend_list_config)\n', (1306, 1339), False, 'from conflow.merge import merge_factory\n'), ((1579, 1621), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (1592, 1621), False, 'from conflow.merge import merge_factory\n'), ((1855, 1897), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (1868, 1897), False, 'from conflow.merge import merge_factory\n'), ((2134, 2176), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (2147, 2176), False, 'from conflow.merge import merge_factory\n'), ((3142, 3184), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'default_config'], {}), '(base, other, default_config)\n', (3155, 3184), False, 'from conflow.merge import 
merge_factory\n'), ((3365, 3392), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3378, 3392), False, 'import pytest\n'), ((3411, 3452), 'conflow.merge.merge_factory', 'merge_factory', (['base', 'other', 'strict_config'], {}), '(base, other, strict_config)\n', (3424, 3452), False, 'from conflow.merge import merge_factory\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 dlilien <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from .cartopy_overrides import SPS
# import shapely.geometry as sgeom
# Map extent (x_min, x_max, y_min, y_max); used below with ccrs.epsg(3031),
# so these are presumably Antarctic polar-stereographic metres.
USP_EXTENT = (31000, 35000, -37750, -33750)
# USP_EXTENT = (-100000, 100000, -100000, 100000)
# Width-to-height aspect ratio of the extent.
USP_ASP = (USP_EXTENT[1] - USP_EXTENT[0]) / (USP_EXTENT[3] - USP_EXTENT[2])
def upstream(ax=None, fig_kwargs=None):
    """Return an axes covering the upstream extent; create one if `ax` is None."""
    kwargs = {} if fig_kwargs is None else fig_kwargs
    if ax is None:
        _, ax = plt.subplots(**kwargs, subplot_kw={'projection': SPS()})
    ax.set_extent(USP_EXTENT, ccrs.epsg(3031))
    # Private attributes, presumably read by cartopy's gridliner — verify:
    # tick locations and the flags that disable inline gridline labels.
    ax._xlocs = np.arange(0, 180)
    ax._ylocs = np.arange(-90, -80, 0.1)
    ax._y_inline = False
    ax._x_inline = False
    return ax
| [
"cartopy.crs.epsg",
"numpy.arange"
] | [((733, 750), 'numpy.arange', 'np.arange', (['(0)', '(180)'], {}), '(0, 180)\n', (742, 750), True, 'import numpy as np\n'), ((767, 791), 'numpy.arange', 'np.arange', (['(-90)', '(-80)', '(0.1)'], {}), '(-90, -80, 0.1)\n', (776, 791), True, 'import numpy as np\n'), ((700, 715), 'cartopy.crs.epsg', 'ccrs.epsg', (['(3031)'], {}), '(3031)\n', (709, 715), True, 'import cartopy.crs as ccrs\n')] |
from PyQuantum.TC3.Cavity import Cavity
from PyQuantum.TC3.Hamiltonian3 import Hamiltonian3
# Per-transition photon capacities for the two cavity transitions (0<->1, 1<->2).
capacity = {'0_1': 2, '1_2': 2}

# Per-transition cavity frequencies wc and the atomic frequencies wa
# (three identical atoms) — names follow the Tavis–Cummings convention,
# presumably; confirm against the PyQuantum.TC3 documentation.
wc = {'0_1': 0.2, '1_2': 0.3}
wa = [0.2] * 3

# Per-transition coupling strengths.
g = {'0_1': 1, '1_2': 200}

cv = Cavity(wc=wc, wa=wa, g=g, n_atoms=3, n_levels=3)
# cv.wc_info()
# cv.wa_info()
# cv.g_info()
cv.info()

H = Hamiltonian3(capacity=capacity, cavity=cv, iprint=False)
H.print_states()
H.print_bin_states()
# H.iprint()
| [
"PyQuantum.TC3.Hamiltonian3.Hamiltonian3",
"PyQuantum.TC3.Cavity.Cavity"
] | [((239, 287), 'PyQuantum.TC3.Cavity.Cavity', 'Cavity', ([], {'wc': 'wc', 'wa': 'wa', 'g': 'g', 'n_atoms': '(3)', 'n_levels': '(3)'}), '(wc=wc, wa=wa, g=g, n_atoms=3, n_levels=3)\n', (245, 287), False, 'from PyQuantum.TC3.Cavity import Cavity\n'), ((348, 404), 'PyQuantum.TC3.Hamiltonian3.Hamiltonian3', 'Hamiltonian3', ([], {'capacity': 'capacity', 'cavity': 'cv', 'iprint': '(False)'}), '(capacity=capacity, cavity=cv, iprint=False)\n', (360, 404), False, 'from PyQuantum.TC3.Hamiltonian3 import Hamiltonian3\n')] |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Mapping, Dict, List
from qf_lib.backtesting.broker.broker import Broker
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.contract.contract_to_ticker_conversion.base import ContractTickerMapper
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.order import Order
from qf_lib.backtesting.order.time_in_force import TimeInForce
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.common.utils.miscellaneous.function_name import get_function_name
from qf_lib.data_providers.data_provider import DataProvider
class OrderFactory:
""" Creates Orders.
Parameters
----------
broker: Broker
broker used to access the portfolio
data_provider: DataProvider
data provider used to download prices. In case of backtesting, the DataHandler wrapper should be used.
contract_to_ticker_mapper: ContractTickerMapper
object mapping contracts to tickers
"""
def __init__(self, broker: Broker, data_provider: DataProvider, contract_to_ticker_mapper: ContractTickerMapper):
self.broker = broker
self.data_provider = data_provider
self.contract_to_ticker_mapper = contract_to_ticker_mapper
self.logger = qf_logger.getChild(self.__class__.__name__)
def orders(self, quantities: Mapping[Contract, int], execution_style: ExecutionStyle,
time_in_force: TimeInForce) -> List[Order]:
"""
Creates a list of Orders for given numbers of shares for each given asset.
Orders requiring 0 shares will be removed from resulting order list
Parameters
----------
quantities: Mapping[Contract, int]
mapping of a Contract to an amount of shares which should be bought/sold.
If number is positive then asset will be bought. Otherwise it will be sold.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
order_list = []
for contract, quantity in quantities.items():
if quantity != 0:
order_list.append(Order(contract, quantity, execution_style, time_in_force))
return order_list
def target_orders(self, target_quantities: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_quantities: Mapping[Contract, float] = None) -> List[Order]:
"""
Creates a list of Orders from a dictionary of desired target number of shares (number of shares which should be
present in the portfolio after executing the Order).
If the position doesn't already exist, the new Order is placed for the :target_quantity of shares.
If the position does exist the Order for the difference between the target number of shares
and the current number of shares is placed.
Parameters
----------
target_quantities: Mapping[Contract, int]
mapping of a Contract to a target number of shares which should be present in the portfolio after the Order
is executed. After comparing with tolerance the math.floor of the quantity will be taken.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_quantities: None, Mapping[Contract, int]
tells what is a tolerance for the target_quantities (in both directions) for each Contract.
The tolerance is expressed in shares.
For example: assume that currently the portfolio contains 100 shares of asset A.
then calling target_orders({A: 101}, ..., tolerance_quantities={A: 2}) will not generate any trades as
the tolerance of 2 allows the allocation to be 100. while target value is 101.
Another example:
assume that currently the portfolio contains 100 shares of asset A.
then calling target_value_order({A: 103}, ..., tolerance_quantities={A: 2}) will generate a BUY order
for 3 shares
if abs(target - actual) > tolerance buy or sell assets to match the target
If tolerance for a specific contract is not provided it is assumed to be 0
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
# Dict of Contract -> Quantities of shares to buy/sell
quantities = dict()
if tolerance_quantities is None:
tolerance_quantities = {}
contract_to_positions = {position.contract(): position for position in self.broker.get_positions()}
for contract, target_quantity in target_quantities.items():
position = contract_to_positions.get(contract, None)
tolerance_quantity = tolerance_quantities.get(contract, 0)
if position is not None:
current_quantity = position.quantity()
else:
current_quantity = 0
quantity = target_quantity - current_quantity
if abs(quantity) > tolerance_quantity and quantity != 0: # tolerance_quantity can be 0
quantities[contract] = math.floor(quantity) # type: int
return self.orders(quantities, execution_style, time_in_force)
def value_orders(self, values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the amount of money which should be spent on each asset rather
than the number of shares to buy/sell.
Parameters
----------
values: Mapping[Contract, int]
mapping of a Contract to the amount of money which should be spent on the asset (expressed in the currency
in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
quantities, _ = self._calculate_target_shares_and_tolerances(values, frequency=frequency)
int_quantities = {contract: math.floor(quantity) for contract, quantity in quantities.items()}
return self.orders(int_quantities, execution_style, time_in_force)
def percent_orders(self, percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, frequency: Frequency = None) -> List[Order]:
"""
Creates a list of Orders by specifying the percentage of the current portfolio value which should be spent
on each asset.
Parameters
----------
percentages: Mapping[Contract, int]
mapping of a Contract to a percentage value of the current portfolio which should be allocated in the asset.
This is specified as a decimal value (e.g. 0.5 means 50%)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
portfolio_value = self.broker.get_portfolio_value()
values = {contract: portfolio_value * fraction for contract, fraction in percentages.items()}
return self.value_orders(values, execution_style, time_in_force, frequency)
def target_value_orders(self, target_values: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None)\
-> List[Order]:
"""
Creates a list of Orders by specifying how much should be allocated in each asset after the Orders
have been executed.
For example if we've already have 10M invested in 'SPY US Equity' and you call this method with target value of 11M
then only 1M will be spent on this asset
Parameters
----------
target_values: Mapping[Contract, int]
mapping of a Contract to a value which should be allocated in the asset after the Order has been executed
(expressed in the currency in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
tells the us what is a tolerance to the target_values (in both directions).
The tolerance is expressed as percentage of target_values.
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
then calling target_value_order({A: 10 500}, ..., tolerance_percentage=0.05) will not generate any trades as
the tolerance of 0.05 allows the allocation to be 10 000$, while target value is 10 500$ (tolerance value
would be equal to 0.05 * 10 500 = 525 and the difference between current and target value would be < 525$).
Another example:
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
then calling target_value_order({A: 13 000}, ..., tolerance_percentage=0.1) will generate a BUY order
corresponding to 3000$ of shares. The tolerance of 0.1 does not allow a difference of 3000$
if abs(target - actual) > tolerance_percentage * target value
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
target_quantities, tolerance_quantities = \
self._calculate_target_shares_and_tolerances(target_values, tolerance_percentage, frequency)
return self.target_orders(target_quantities, execution_style, time_in_force, tolerance_quantities)
def target_percent_orders(self, target_percentages: Mapping[Contract, float], execution_style: ExecutionStyle,
time_in_force: TimeInForce, tolerance_percentage: float = 0.0, frequency: Frequency = None) \
-> List[Order]:
"""
Creates an Order adjusting a position to a value equal to the given percentage of the portfolio.
Parameters
----------
target_percentages: Mapping[Contract, int]
mapping of a Contract to a percentage of a current portfolio value which should be allocated in each asset
after the Order has been carried out
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
tells the us what is a tolerance to the target_percentages (in both directions). The tolerance is expressed
in percentage points (0.02 corresponds to 2pp of the target_value). For more details look at the description
of target_value_orders.
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""
self._log_function_call(vars())
assert 0.0 <= tolerance_percentage < 1.0, "The tolerance_percentage should belong to [0, 1) interval"
portfolio_value = self.broker.get_portfolio_value()
target_values = {
contract: portfolio_value * target_percent for contract, target_percent in target_percentages.items()}
return self.target_value_orders(target_values, execution_style, time_in_force, tolerance_percentage, frequency)
def _calculate_target_shares_and_tolerances(
        self, contract_to_amount_of_money: Mapping[Contract, float], tolerance_percentage: float = 0.0,
        frequency: Frequency = None) -> (Mapping[Contract, float], Mapping[Contract, float]):
    """
    Returns
    ----------
    Tuple(Mapping[Contract, float], Mapping[Contract, float])
        Tells how many shares of each asset we should have in order to match the target and what is the tolerance
        (in number of shares) for each asset
    """
    ticker_map = self._make_tickers_to_contract_and_amount_of_money(contract_to_amount_of_money)
    # In live trading get_last_available_price uses datetime.now() as the current time to obtain the
    # last price; in a backtest it uses the data handler's timer to compute the date instead.
    last_prices = self.data_provider.get_last_available_price(list(ticker_map.keys()), frequency)
    target_quantities = dict()     # type: Dict[Contract, float]  # Contract -> target number of shares
    tolerance_quantities = dict()  # type: Dict[Contract, float]  # Contract -> tolerance in shares
    for ticker, (contract, amount_of_money) in ticker_map.items():
        # Number of shares whose market value equals the requested amount of money.
        share_count = amount_of_money / (last_prices.loc[ticker] * contract.contract_size)  # type: float
        target_quantities[contract] = share_count
        tolerance_quantities[contract] = share_count * tolerance_percentage
    return target_quantities, tolerance_quantities
def _make_tickers_to_contract_and_amount_of_money(self, contract_to_amount_of_money):
    """Map each contract's ticker to a ``(contract, amount_of_money)`` pair."""
    return {
        self.contract_to_ticker_mapper.contract_to_ticker(contract): (contract, amount_of_money)
        for contract, amount_of_money in contract_to_amount_of_money.items()
    }
def _log_function_call(self, params_dict):
    """Debug-log the name of the calling function together with its parameters.

    ``params_dict`` is expected to be the caller's ``vars()``; the ``self``
    entry is dropped and non-empty dict values are expanded one item per line.
    """
    params_dict.pop('self', None)
    caller_name = get_function_name(1)
    message_parts = ["Function call: '{}' with parameters:".format(caller_name)]
    for name, value in params_dict.items():
        if isinstance(value, dict) and value:
            rendered = "".join("\n\t\t{}: {}".format(inner_k, inner_v)
                               for inner_k, inner_v in value.items())
        else:
            rendered = str(value)
        message_parts.append("\n\t{}: {}".format(name, rendered))
    self.logger.debug("".join(message_parts))
| [
"qf_lib.common.utils.logging.qf_parent_logger.qf_logger.getChild",
"qf_lib.common.utils.miscellaneous.function_name.get_function_name",
"qf_lib.backtesting.order.order.Order",
"math.floor"
] | [((2024, 2067), 'qf_lib.common.utils.logging.qf_parent_logger.qf_logger.getChild', 'qf_logger.getChild', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (2042, 2067), False, 'from qf_lib.common.utils.logging.qf_parent_logger import qf_logger\n'), ((16367, 16387), 'qf_lib.common.utils.miscellaneous.function_name.get_function_name', 'get_function_name', (['(1)'], {}), '(1)\n', (16384, 16387), False, 'from qf_lib.common.utils.miscellaneous.function_name import get_function_name\n'), ((7715, 7735), 'math.floor', 'math.floor', (['quantity'], {}), '(quantity)\n', (7725, 7735), False, 'import math\n'), ((6403, 6423), 'math.floor', 'math.floor', (['quantity'], {}), '(quantity)\n', (6413, 6423), False, 'import math\n'), ((3174, 3231), 'qf_lib.backtesting.order.order.Order', 'Order', (['contract', 'quantity', 'execution_style', 'time_in_force'], {}), '(contract, quantity, execution_style, time_in_force)\n', (3179, 3231), False, 'from qf_lib.backtesting.order.order import Order\n')] |
import random as rd
def genPass(num, length):
    """Generate ``num`` random alphanumeric passwords, each ``length`` characters long.

    Each password mixes uppercase letters, lowercase letters and digits in
    random proportions and in random order.

    Bug fixes versus the previous version:
    - generated ``length`` passwords instead of ``num`` (looped over the wrong variable);
    - re-shuffled the character pool on every index of the output string, which could
      duplicate some characters and drop others — now shuffled exactly once;
    - ``randint(2, length)`` crashed for ``length < 2`` — counts are now clamped.

    Parameters
    ----------
    num : int
        number of passwords to generate
    length : int
        number of characters in each password

    Returns
    -------
    list[str]
        ``num`` passwords of ``length`` characters each
    """
    print ("Password Generator")
    print ("===================\n")
    uppercase = [chr(c) for c in range(65, 91)]    # 'A'-'Z'
    lowercase = [chr(c) for c in range(97, 123)]   # 'a'-'z'
    digits = [chr(c) for c in range(48, 58)]       # '0'-'9'
    passwords = []
    for _ in range(num):
        chars = []
        if length > 0:
            # Pick how many letters (vs digits) and how many of those are uppercase.
            letters_total = rd.randint(min(2, length), length)
            upper_total = rd.randint(min(1, letters_total), letters_total)
            lower_total = letters_total - upper_total
            digit_total = length - letters_total
            chars = ([rd.choice(uppercase) for _ in range(upper_total)]
                     + [rd.choice(lowercase) for _ in range(lower_total)]
                     + [rd.choice(digits) for _ in range(digit_total)])
            # Shuffle once so every chosen character appears exactly once.
            rd.shuffle(chars)
        passwords.append("".join(chars))
    return passwords
| [
"random.shuffle",
"random.randint"
] | [((361, 383), 'random.randint', 'rd.randint', (['(2)', 'lenpass'], {}), '(2, lenpass)\n', (371, 383), True, 'import random as rd\n'), ((402, 429), 'random.randint', 'rd.randint', (['(1)', 'randAlphaset'], {}), '(1, randAlphaset)\n', (412, 429), True, 'import random as rd\n'), ((1051, 1071), 'random.shuffle', 'rd.shuffle', (['password'], {}), '(password)\n', (1061, 1071), True, 'import random as rd\n')] |
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt, dates as mdates
from matplotlib.ticker import MaxNLocator
from helpers import programmes_helper
filename = 'offers.png'  # graphs are always rendered into this file
class OffersService:
    """Builds offer/rank graphs and offer statistics from the ``ranks`` table."""
    def __init__(self, db_conn):
        # db_conn: asyncpg-style connection exposing an async fetch() — TODO confirm
        self.db_conn = db_conn
    async def generate_graph(self, programme: programmes_helper.Programme, step: bool, year: int):
        """Render the rank-vs-offer-date graph for ``programme`` in ``year`` to ``filename``.

        When ``step`` is False a dashed interpolation line is overlaid on top of the
        step plot. Raises ValueError if no place count is known for ``year``.
        """
        if year not in programme.places:
            raise ValueError
        rows = await self.db_conn.fetch(
            'SELECT rank, is_private, offer_date FROM ranks '
            'WHERE programme = $1 AND rank > $2 AND offer_date IS NOT NULL AND year = $3 '
            'ORDER BY offer_date, rank', programme.id, programme.places[year], year)
        # Series starts on 15 April at the number of places in the programme.
        x_values = [date(year, 4, 15)]
        y_values = [programme.places[year]]
        if rows:
            for i in range(len(rows)):
                row = rows[i]
                rank = row[0]
                is_private = row[1]
                offer_date = row[2]
                # Round rank if it's private
                if is_private:
                    rank = round_rank(rank)
                    # make sure it's not lower than the previous rank
                    if i > 0 and rank < y_values[i - 1]:
                        rank = y_values[i - 1]
                    # make sure it's not higher than the next public rank
                    for j in range(i, len(rows)):
                        if not rows[j][1]:
                            if rank > rows[j][0]:
                                rank = rows[j][0]
                            break
                x_values.append(offer_date)
                y_values.append(rank)
        # Extend the line horizontally until 15 August or today, whichever is earlier.
        end_date = date(year, 8, 15)
        curr_date = datetime.utcnow().date()
        x_values.append(min(end_date, curr_date))
        y_values.append(y_values[len(y_values) - 1])
        # Shaded-area baseline and lower y-limit, proportional to the final rank.
        fill_between_end = programme.places[year] - (y_values[len(y_values) - 1] - programme.places[year]) / 15
        bottom_limit = fill_between_end - (y_values[len(y_values) - 1] - fill_between_end) / 40
        bg_color = '#36393F'
        fg_color = programme.graph_colour
        plt.rcParams['ytick.color'] = 'w'
        plt.rcParams['xtick.color'] = 'w'
        plt.rcParams['axes.edgecolor'] = 'w'
        plt.rcParams['axes.labelcolor'] = '#767676'
        ax = plt.gca()
        formatter = mdates.DateFormatter("%d %b")
        ax.xaxis.set_major_formatter(formatter)
        # Tick weekly, anchored on the weekday of the series start date.
        locator = mdates.WeekdayLocator(byweekday=x_values[0].weekday())
        ax.xaxis.set_major_locator(locator)
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xlabel('Offer date')
        ax.set_ylabel('Ranking number')
        plt.setp(ax.spines.values(), visible=False)
        ax.set_facecolor(bg_color)
        ax.set_axisbelow(True)
        plt.grid(color='#444444', linestyle='--')
        if programme.visa_cutoff is not None:
            # visa_cutoff is stored as (day, month); only drawn when within ~20 days.
            cutoff_date = date(year, programme.visa_cutoff[1], programme.visa_cutoff[0])
            if (datetime.utcnow() + timedelta(days=20)).date() >= cutoff_date:
                plt.axvline(cutoff_date, ymin=0.02, linestyle='--', alpha=0.7, color=fg_color)
                plt.text(cutoff_date, y_values[-1], "Non-EU cutoff", rotation='vertical', color=fg_color,
                         verticalalignment='center_baseline', horizontalalignment='right', stretch='condensed',
                         fontsize='small', fontweight='ultralight', fontstyle='italic')
        if not step:
            plt.plot(x_values, y_values, linestyle='--', color=fg_color)
            plt.fill_between(x_values, y_values, y2=fill_between_end, alpha=0.15, color=fg_color)
        plt.step(x_values, y_values, where='post', alpha=(0.5 if not step else None), color=fg_color)
        plt.fill_between(x_values, y_values, y2=fill_between_end, step="post", alpha=(0.20 if not step else 0.35),
                         color=fg_color)
        plt.title(f'{programme.uni_name} {programme.display_name} ({year})', color='w')
        ax.set_ylim(bottom=bottom_limit)
        # only show every second week
        for label in ax.get_xaxis().get_ticklabels()[1::2]:
            label.set_visible(False)
        for label in ax.get_xaxis().get_major_ticks()[1::2]:
            label.set_visible(False)
        plt.savefig(filename, facecolor=bg_color, dpi=200)
        plt.close()
    async def get_highest_ranks_with_offers(self, year):
        """Return, per programme, the highest rank that received an offer in ``year``.

        Each row is (programme, rank, latest offer_date, is_private). Ranks at or
        below the programme's number of places are normalized to the places count
        with the results-day date (15 April) and marked public.
        """
        offers = await self.db_conn.fetch(
            'select r.programme, r.rank, MAX(d.offer_date), d.is_private '
            'from (select programme, max(rank) as rank from ranks '
            'where ranks.offer_date is not null and ranks.year = $1 '
            'group by programme) as r '
            'inner join ranks as d '
            'on r.programme = d.programme and r.rank = d.rank and d.year = $1 '
            'and d.offer_date is not null '
            'group by r.programme, r.rank, d.is_private '
            'order by MAX(d.offer_date) desc', year)
        for i in range(len(offers)):
            programme_id, rank = offers[i][0:2]
            places = programmes_helper.programmes[programme_id].places[year]
            if rank <= places:
                offers[i] = (programme_id, places, date(year, 4, 15), False)
        return offers
def round_rank(number, base=5):
    """Round *number* to the nearest multiple of *base* (ties follow round()'s banker's rounding)."""
    nearest_multiple = round(number / base)
    return nearest_multiple * base
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"datetime.datetime.utcnow",
"matplotlib.pyplot.gca",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.close",
"matplotlib.ticker.MaxNLocator",
"datetim... | [((2376, 2385), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2383, 2385), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((2406, 2435), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d %b"""'], {}), "('%d %b')\n", (2426, 2435), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((2867, 2908), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""#444444"""', 'linestyle': '"""--"""'}), "(color='#444444', linestyle='--')\n", (2875, 2908), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3727, 3822), 'matplotlib.pyplot.step', 'plt.step', (['x_values', 'y_values'], {'where': '"""post"""', 'alpha': '(0.5 if not step else None)', 'color': 'fg_color'}), "(x_values, y_values, where='post', alpha=0.5 if not step else None,\n color=fg_color)\n", (3735, 3822), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3829, 3952), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x_values', 'y_values'], {'y2': 'fill_between_end', 'step': '"""post"""', 'alpha': '(0.2 if not step else 0.35)', 'color': 'fg_color'}), "(x_values, y_values, y2=fill_between_end, step='post',\n alpha=0.2 if not step else 0.35, color=fg_color)\n", (3845, 3952), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3985, 4064), 'matplotlib.pyplot.title', 'plt.title', (['f"""{programme.uni_name} {programme.display_name} ({year})"""'], {'color': '"""w"""'}), "(f'{programme.uni_name} {programme.display_name} ({year})', color='w')\n", (3994, 4064), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((4349, 4399), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'facecolor': 'bg_color', 'dpi': '(200)'}), '(filename, facecolor=bg_color, dpi=200)\n', (4360, 4399), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((4408, 4419), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4417, 4419), True, 'from matplotlib import pyplot as plt, 
dates as mdates\n'), ((764, 781), 'datetime.date', 'date', (['year', '(4)', '(15)'], {}), '(year, 4, 15)\n', (768, 781), False, 'from datetime import date, datetime, timedelta\n'), ((1721, 1738), 'datetime.date', 'date', (['year', '(8)', '(15)'], {}), '(year, 8, 15)\n', (1725, 1738), False, 'from datetime import date, datetime, timedelta\n'), ((2636, 2661), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (2647, 2661), False, 'from matplotlib.ticker import MaxNLocator\n'), ((2982, 3044), 'datetime.date', 'date', (['year', 'programme.visa_cutoff[1]', 'programme.visa_cutoff[0]'], {}), '(year, programme.visa_cutoff[1], programme.visa_cutoff[0])\n', (2986, 3044), False, 'from datetime import date, datetime, timedelta\n'), ((3559, 3619), 'matplotlib.pyplot.plot', 'plt.plot', (['x_values', 'y_values'], {'linestyle': '"""--"""', 'color': 'fg_color'}), "(x_values, y_values, linestyle='--', color=fg_color)\n", (3567, 3619), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3632, 3722), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x_values', 'y_values'], {'y2': 'fill_between_end', 'alpha': '(0.15)', 'color': 'fg_color'}), '(x_values, y_values, y2=fill_between_end, alpha=0.15, color\n =fg_color)\n', (3648, 3722), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3140, 3218), 'matplotlib.pyplot.axvline', 'plt.axvline', (['cutoff_date'], {'ymin': '(0.02)', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'color': 'fg_color'}), "(cutoff_date, ymin=0.02, linestyle='--', alpha=0.7, color=fg_color)\n", (3151, 3218), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((3235, 3486), 'matplotlib.pyplot.text', 'plt.text', (['cutoff_date', 'y_values[-1]', '"""Non-EU cutoff"""'], {'rotation': '"""vertical"""', 'color': 'fg_color', 'verticalalignment': '"""center_baseline"""', 'horizontalalignment': '"""right"""', 'stretch': '"""condensed"""', 'fontsize': '"""small"""', 'fontweight': 
'"""ultralight"""', 'fontstyle': '"""italic"""'}), "(cutoff_date, y_values[-1], 'Non-EU cutoff', rotation='vertical',\n color=fg_color, verticalalignment='center_baseline',\n horizontalalignment='right', stretch='condensed', fontsize='small',\n fontweight='ultralight', fontstyle='italic')\n", (3243, 3486), True, 'from matplotlib import pyplot as plt, dates as mdates\n'), ((1763, 1780), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1778, 1780), False, 'from datetime import date, datetime, timedelta\n'), ((5291, 5308), 'datetime.date', 'date', (['year', '(4)', '(15)'], {}), '(year, 4, 15)\n', (5295, 5308), False, 'from datetime import date, datetime, timedelta\n'), ((3061, 3078), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3076, 3078), False, 'from datetime import date, datetime, timedelta\n'), ((3081, 3099), 'datetime.timedelta', 'timedelta', ([], {'days': '(20)'}), '(days=20)\n', (3090, 3099), False, 'from datetime import date, datetime, timedelta\n')] |
#!/usr/bin/env python
# pylint: disable=no-value-for-parameter,too-many-nested-blocks
import contextlib
import datetime
import functools
import re
from abc import abstractmethod
import sqlalchemy as sa
from sqlalchemy import event, exc, func, select
from sqlalchemy.ext import declarative
from sqlalchemy.ext import hybrid
from sqlalchemy import orm
import sqlalchemy_utils
from lifeloopweb import config, constants, exception, logging, renders, subscription
from lifeloopweb.db import utils as db_utils
from lifeloopweb.webpack import webpack
from lifeloopweb.helpers.base_helper import Helper
from flask_login import UserMixin
LOG = logging.get_logger(__name__)
CONF = config.CONF
helper = Helper()
# Table options applied to every model via ModelBase.__table_args__.
TABLE_KWARGS = {"mysql_engine": "InnoDB",
                "mysql_charset": "utf8",
                "mysql_collate": "utf8_general_ci"}
DB_NAME = "lifeloopweb_{}".format(CONF.get("ENVIRONMENT"))
# TODO(mdietz): when this comes from a configuration, we need to
# force the charset to utf8
ENGINE_URL = CONF.get("DB_ENGINE_URL")
if not ENGINE_URL:
    # Local-development fallback: root with no password on localhost.
    ENGINE_URL = ("mysql+pymysql://root:@127.0.0.1/"
                  "{}?charset=utf8".format(DB_NAME))
# Config values are strings; validate before converting to bool.
connection_debug = CONF.get("database.connection.debug")
if connection_debug.lower() not in ["true", "false"]:
    raise exception.InvalidConfigValue(value=connection_debug,
                                       key="database.connection.debug")
connection_debug = connection_debug.lower() == "true"
connection_pool_size = int(CONF.get("database.connection.poolsize"))
connection_overflow_pool = int(CONF.get("database.connection.overflowpool"))
# NOTE: MySQL defaults to 8 hour connection timeouts. It's possible that
# docker-compose or our hosting provider will sever connections sooner.
# if we see "MySQL has gone away" tweaking this variable is the thing
# to revisit
connection_pool_recycle = int(CONF.get("database.connection.poolrecycle"))
# SQLite has no connection pool tuning; only pass pool kwargs for real servers.
engine_kwargs = {}
if "sqlite" not in ENGINE_URL:
    engine_kwargs = {
        "pool_size": connection_pool_size,
        "max_overflow": connection_overflow_pool,
        "pool_recycle": connection_pool_recycle}
engine = sa.create_engine(ENGINE_URL, echo=connection_debug,
                        **engine_kwargs)
SessionFactory = orm.sessionmaker(bind=engine, expire_on_commit=False,
                                 autocommit=False, autoflush=True)
# TODO use of the scoped session needs to be evaluated against
# greenthreading servers like gunicorn and uwsgi. The scope
# by default is to thread local, as in threading.local
# and not the greenthread specifically. Things that use greenthreads
# have to be gt aware, so really we may just do Scoped and Unscoped
# sessions. Alternatively, we hack eventlet to attach the scope there
# http://docs.sqlalchemy.org/en/latest/orm/contextual.html#using-custom-created-scopes
ScopedSession = orm.scoped_session(SessionFactory)
Session = ScopedSession
# TODO We may only want to do this conditionally. I've used it in the past
# but I think the pool_recycling may be enough
@event.listens_for(engine, "engine_connect")
def ping_connection(connection, branch):
    """Pessimistic disconnect handling: issue SELECT 1 before a pooled connection is used.

    If the probe shows the connection was invalidated (e.g. the server closed it),
    retrying the SELECT transparently establishes a fresh connection from the pool.
    """
    if branch:
        # A "branch" shares the parent connection's DBAPI connection; the parent
        # will be pinged, so skip the probe here.
        return
    # Temporarily disable close-with-result so the probe doesn't release the connection.
    save_should_close_with_result = connection.should_close_with_result
    connection.should_close_with_result = False
    try:
        connection.scalar(select([1]))
    except exc.DBAPIError as err:
        if err.connection_invalidated:
            # Retry once: the pool has already discarded the dead connection.
            connection.scalar(select([1]))
        else:
            raise
    finally:
        connection.should_close_with_result = save_should_close_with_result
@contextlib.contextmanager
def transaction():
    """Context manager yielding a scoped session; commits on success, rolls back on error.

    The session is created *outside* the try block: if session construction itself
    fails there is nothing to roll back (the old code raised NameError on
    ``session.rollback()`` in that case). Any exception — including BaseException
    subclasses thrown into the generator — triggers a rollback and is re-raised.
    """
    session = ScopedSession()
    try:
        yield session
        session.commit()
    except BaseException:
        LOG.exception("Transaction failed! Rolling back...")
        session.rollback()
        raise
def teardown():
    """Dispose of the current thread-local session (call at the end of a request)."""
    ScopedSession.remove()
def can_connect():
    """Return True if a database connection can be established, False otherwise.

    The probe connection is closed immediately instead of being leaked
    (the old code never closed it, pinning a pool slot per call).
    """
    try:
        connection = engine.connect()
        connection.close()
        return True
    except Exception:
        return False
class MetaBase(declarative.DeclarativeMeta):
    """Declarative metaclass adding get_by_<column>/get_all_by_<column> helpers.

    For every sa.Column declared on a model, class-level query shortcuts are
    attached (unless the model already defines attributes with those names).
    """
    def __init__(cls, klsname, bases, attrs):
        # Skip the abstract declarative Base itself; only real models get helpers.
        if klsname != "Base":
            super().__init__(klsname, bases, attrs)
            for attr_name, attr in attrs.items():
                if isinstance(attr, sa.Column):
                    query_single_getter_name = "get_by_{}".format(attr_name)
                    query_all_getter_name = "get_all_by_{}".format(attr_name)
                    # Don't clobber hand-written getters of the same name.
                    if not hasattr(cls, query_single_getter_name):
                        setattr(cls, query_single_getter_name,
                                functools.partial(cls._get_by, attr))
                    if not hasattr(cls, query_all_getter_name):
                        setattr(cls, query_all_getter_name,
                                functools.partial(cls._get_all_by, attr))
                    # TODO This does not work
                    # if isinstance(attr, hybrid.hybrid_property):
                    # print(attr, type(attr))
                    # setattr(cls, "get_by_{}".format(attr_name),
                    # functools.partial(cls._get_by_property, attr))
class ModelBase(object):
    """Shared model behavior: audit timestamps, dict-style access, query helpers.

    Mixed into the declarative Base (see ``declarative_base(cls=ModelBase, ...)``),
    so every model inherits these columns and methods.
    """
    # Audit timestamps: created_at set by the database, updated_at by the ORM on update.
    created_at = sa.Column(sa.DateTime(), server_default=func.now())
    updated_at = sa.Column(sa.DateTime(), onupdate=func.now())
    __table_args__ = TABLE_KWARGS
    @declarative.declared_attr
    def __tablename__(cls):  # pylint: disable=no-self-argument
        """ Returns a snake_case form of the table name. """
        return db_utils.pluralize(db_utils.to_snake_case(cls.__name__))
    def __eq__(self, other):
        # Guard against non-model operands: the previous version raised
        # AttributeError for any `other` without an `id` attribute.
        if not isinstance(other, ModelBase):
            return False
        return self.id == other.id
    def __hash__(self):
        # Defining __eq__ implicitly sets __hash__ to None (unhashable);
        # restore an id-based hash consistent with __eq__ so instances can be
        # used in sets and as dict keys.
        return hash(self.id)
    def __getitem__(self, key):
        """Dict-style read access: model['attr']."""
        try:
            return getattr(self, key)
        except AttributeError:
            raise KeyError(key)
    def __setitem__(self, key, value):
        """Dict-style write access; only existing attributes may be set."""
        if hasattr(self, key):
            return setattr(self, key, value)
        raise AttributeError(key)
    def __contains__(self, key):
        return hasattr(self, key)
    def update(self, **fields):
        """Set multiple attributes at once, rejecting unknown names."""
        for attr, value in fields.items():
            if attr not in self:
                raise exception.ModelUnknownAttrbute(model=self, attr=attr)
            self[attr] = value
        return self
    @classmethod
    def get(cls, pk):
        """Fetch one row by primary key, or None."""
        return Session.query(cls).filter(cls.id == pk).first()
    @classmethod
    def _get_by_property(cls, prop):
        LOG.debug("Fetching '%s' by property '%s'", cls, prop)
        return Session.query(cls).filter(prop).first()
    @classmethod
    def _get_by(cls, field, value):
        # Backs the generated get_by_<column> helpers (see MetaBase).
        LOG.debug("Fetching one '%s.%s' by value '%s'", cls, field, value)
        return Session.query(cls).filter(field == value).first()
    @classmethod
    def _get_all_by(cls, field, value):
        # Backs the generated get_all_by_<column> helpers (see MetaBase).
        LOG.debug("Fetching all '%s.%s' with value '%s'", cls, field, value)
        return Session.query(cls).filter(field == value).all()
    @classmethod
    def last(cls):
        """Return the most recently inserted row (highest id)."""
        return Session.query(cls).order_by(cls.id.desc()).first()
    def save(self):
        """Persist this instance in its own transaction."""
        LOG.debug("Attempting to save '%s'", self)
        with transaction() as session:
            session.add(self)
    def delete(self):
        """Delete this instance in its own transaction."""
        LOG.debug("Attempting to delete '%s'", self)
        with transaction() as session:
            session.delete(self)
    def to_dict(self):
        """Return public, non-callable attributes as a plain dict."""
        return {key: value for key, value in self.__dict__.items()
                if not callable(value) and not key.startswith('_')}
# Declarative base combining the ModelBase helpers with the MetaBase metaclass.
Base = declarative.declarative_base(cls=ModelBase, bind=engine,
                                     metaclass=MetaBase)
# pylint: disable=abstract-method,unused-argument
# TODO This parent class may not allow NULL to go into a UUID field :-|
class GUID(sqlalchemy_utils.UUIDType):
    """
    Overload of the sqlalchemy_utils UUID class. There are issues
    with it and alembic, acknowledged by the maintainer:
    https://github.com/kvesteri/sqlalchemy-utils/issues/129
    """
    def __init__(self, length=16, binary=True, native=True):
        # pylint: disable=unused-argument
        # NOTE(mdietz): Ignoring length, see:
        # https://github.com/kvesteri/sqlalchemy-utils/issues/129
        # `length` is accepted only so alembic-generated calls don't break.
        super(GUID, self).__init__(binary, native)
class HasId(object):
    """id mixin, add to subclasses that have an id."""
    # Client-side generated UUID primary key (see db_utils.generate_guid).
    id = sa.Column(GUID,
                   primary_key=True,
                   default=db_utils.generate_guid)
class ImageMixin(object):
    """image main_image mixin, add to subclasses that have images."""
    # File extensions treated as videos and therefore excluded from main_image.
    exclude = tuple(CONF.get('allowed.video.extensions').split(','))
    @property
    @abstractmethod
    def images(self):
        """Sequence of Image records attached to the model (supplied by the subclass)."""
        raise NotImplementedError
    @property
    def main_image(self):
        """Return the most recent non-video image, or a blank Image placeholder.

        Unlike the previous version, a throwaway Image() is no longer
        constructed up front when real images exist.
        """
        if self.images:
            pictures = [image for image in self.images
                        if not image.image_url.endswith(self.exclude)]
            if pictures:
                return pictures[-1]
        return Image()
class NotificationType(Base, HasId):
    """Lookup table for kinds of notifications; `priority` orders them for display."""
    description = sa.Column(sa.String(80), nullable=False)
    priority = sa.Column(sa.Integer(), nullable=True)
    notifications = orm.relationship("Notification", backref="type")
    def __str__(self):
        return self.description
    def __repr__(self):
        return "NotificationType:{}, {}".format(self.id, self.description)
class Notification(Base, HasId):
    """A user-to-user notification, optionally scoped to a group or organization."""
    notification_type_id = sa.Column(sa.ForeignKey("notification_types.id"),
                                     nullable=False)
    user_from_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
    user_to_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"), nullable=True)
    organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"),
                                nullable=True)
    # acknowledge_only: informational notification with no accept/decline choice.
    acknowledge_only = sa.Column(sa.Boolean(), nullable=False, default=False)
    blocked_as_spam = sa.Column(sa.Boolean(), nullable=False, default=False)
    # Timestamps for each possible user response; NULL means "not yet".
    accepted = sa.Column(sa.DateTime(), nullable=True, default=None)
    declined = sa.Column(sa.DateTime(), nullable=True, default=None)
    acknowledged = sa.Column(sa.DateTime(), nullable=True, default=None)
    @property
    def needs_action(self):
        """True while an actionable notification has been neither accepted nor declined."""
        return not self.acknowledge_only and not self.accepted and not self.declined
    def prevent_duplicate(self):
        """Collapse this notification into an existing matching one, if any.

        Returns False when a matching notification is blocked as spam, the
        re-activated existing notification when a match exists, or self when
        this notification is genuinely new.
        """
        user = User.get(self.user_to_id)
        notifications = user.group_notifications(self.group_id)
        for n in notifications:
            # Same sender, type and organization => treat as a duplicate.
            if (n.user_from_id == self.user_from_id and
                    n.notification_type_id == self.notification_type_id and
                    n.organization_id == self.organization_id):
                if n.blocked_as_spam:
                    return False
                # Re-arm the existing notification instead of creating a new row.
                self.accepted = None
                self.declined = None
                self.acknowledged = None
                elements = self.to_dict()
                updated_notification = n.update(**elements)
                return updated_notification
        return self
class OrganizationRole(Base, HasId):
    """Role a user can hold within an organization (Owner, Administrator, Member, ...)."""
    description = sa.Column(sa.String(120), nullable=False)
    priority = sa.Column(sa.Integer(), nullable=True)
    users = orm.relationship(
        "User", secondary='organization_members',
        back_populates="organization_roles")
    def __str__(self):
        return self.description
    def __repr__(self):
        return "OrganizationRole:{}, {}".format(self.id, self.description)
class User(Base, HasId, UserMixin, ImageMixin, renders.UserMixin):
    """Application user with org/group memberships, roles and notifications."""
    # TODO IMO these need to be contact details and a separate table
    first_name = sa.Column(sa.String(40), nullable=False)
    last_name = sa.Column(sa.String(40), nullable=False)
    # TODO Middle name?
    # TODO Title?
    # TODO Add a wholly separate ContactInfo table instead and one to
    # many from this?
    email = sa.Column(sa.String(254), nullable=False, unique=True)
    # http://stackoverflow.com/questions/3350500/international-phone-number-max-and-min
    phone_number = sa.Column(sa.String(16), nullable=True)
    hashed_password = sa.Column(sa.String(128), nullable=False)
    # Soft delete marker; NULL means the account is active.
    deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    zoom_user_id = sa.Column(sa.String(80), nullable=True)
    city = sa.Column(sa.String(80), nullable=True)
    date_of_birth = sa.Column(sa.Date(), nullable=True)
    super_admin = sa.Column(sa.Boolean(), nullable=False, default=False)
    images = orm.relationship('Image', secondary='user_images')
    privacy_and_terms_agreed_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    # By name of zone rather than offset, which changes all the time
    timezone = sa.Column(sa.String(64), nullable=False)
    opt_in_texts = sa.Column(sa.Boolean(), nullable=False, default=False)
    opt_in_emails = sa.Column(sa.Boolean(), nullable=False, default=False)
    notifications_on = sa.Column(sa.Boolean(), nullable=False, default=True)
    # last_login = sa.Column(sa.DateTime(), server_default=func.now())
    verified_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    # Only activated organizations are visible through this relationship.
    organizations = orm.relationship(
        "Organization", secondary='organization_members',
        back_populates="users",
        primaryjoin=(
            'and_('
            'OrganizationMember.user_id==User.id, '
            'Organization.activated_at.isnot(None))'))
    # Groups the user belongs to, excluding archived groups and
    # groups in non-activated organizations.
    groups = orm.relationship(
        "Group",
        secondary='group_members',
        back_populates="users",
        primaryjoin=(
            'and_('
            'GroupMember.user_id==User.id, '
            'GroupMember.group_id==Group.id, '
            'OrganizationGroup.group_id==Group.id, '
            'OrganizationGroup.organization_id==Organization.id, '
            'Organization.activated_at.isnot(None), '
            'Group.archived_at==None)'))
    organization_roles = orm.relationship(
        "OrganizationRole", secondary='organization_members',
        back_populates="users")
    group_roles = orm.relationship(
        "GroupRole", secondary='group_members',
        back_populates="users")
    # Notifications addressed to this user.
    notifications = orm.relationship(
        "Notification",
        foreign_keys="[Notification.user_to_id]",
        backref="to_user")
    # Notifications this user has sent to others.
    sent_notifications = orm.relationship(
        "Notification",
        foreign_keys="[Notification.user_from_id]",
        backref="from_user")
    group_members = orm.relationship(
        "GroupMember",
        back_populates="users")
    organization_members = orm.relationship(
        "OrganizationMember",
        back_populates="users")
    # Groups in which this user holds the 'Group Leader' role.
    group_leaders = orm.relationship(
        'Group',
        secondary='group_members',
        back_populates='users',
        primaryjoin=(
            "and_("
            "GroupMember.user_id==User.id, "
            "GroupMember.group_id==Group.id, "
            "GroupMember.role_id==GroupRole.id, "
            "OrganizationGroup.group_id==Group.id, "
            "OrganizationGroup.organization_id==Organization.id, "
            "GroupRole.description=='Group Leader')"))
    def __str__(self):
        return self.full_name_and_email
    def __repr__(self):
        return "User: {}, {}".format(self.id, self.full_name_and_email)
    def organizations_created(self):
        """Return activated organizations whose earliest Owner membership is this user's."""
        # TODO: Refactor.
        # I think we should add Group.parent_org and Org.creator columns
        # to avoid this huge db query
        subquery = Session.query(func.min(
            OrganizationMember.created_at).label('created_at')).group_by(
                OrganizationMember.organization_id).subquery()
        query = Session.query(Organization).join(
            OrganizationMember, OrganizationRole, User).join(
                subquery,
                subquery.c.created_at == OrganizationMember.created_at).filter(
                    Organization.activated_at.isnot(None),
                    OrganizationRole.description == 'Owner',
                    User.email == self.email)
        return query.all()
    @property
    def new_notifications(self):
        """Received notifications not yet acknowledged."""
        return [n for n in self.notifications if
                not n.acknowledged]
    @property
    def non_acknowledged_notifications(self):
        """Sent notifications that got a response but were not acknowledged back."""
        return [n for n in self.sent_notifications if
                not n.acknowledged and (n.accepted or n.declined)]
    @property
    def get_notifications(self):
        """All notifications needing this user's attention (received + responded)."""
        return (self.new_notifications +
                self.non_acknowledged_notifications)
    @property
    def full_name(self):
        return "{} {}".format(self.first_name, self.last_name)
    @property
    def short_name(self):
        # e.g. "Jane D."
        return "{} {}.".format(self.first_name, self.last_name[:1])
    @property
    def full_name_and_email(self):
        return "{} ({})".format(self.full_name, self.email)
    def group_notifications(self, group_id):
        """Yield received notifications scoped to the given group."""
        return (n for n in self.notifications if
                n.group_id == group_id)
    def org_notifications(self, org_id):
        """Yield received notifications scoped to the given organization.

        NOTE(review): `n.org_id` does not match the Notification column name
        (`organization_id`), and `is` compares identity rather than equality —
        this looks broken; verify against callers.
        """
        return (n for n in self.notifications if
                n.org_id is org_id)
    # NOTE: this fails as soon as we allow a user to have more than one
    # role in an organization
    def role_for_org(self, org_id):
        """Return the user's (single) role in the organization, or None."""
        roles = [om.role for om in self.organization_members
                 if om.organization.id == org_id]
        return roles[0] if roles else None
    # NOTE: this fails as soon as we allow a user to have more than one
    # role in an group
    def role_for_group(self, group_id):
        """Return the user's (single) role in the group, or None."""
        roles = [gm.role for gm in self.group_members
                 if gm.group and gm.group.id == group_id]
        return roles[0] if roles else None
    def is_group_member(self, group_id):
        return group_id in [g.id for g in self.groups]
    def is_org_creator(self, org_id):
        return Organization.get(org_id).creator.id == self.id if False else None
    def is_org_owner(self, org_id=None):
        """With no org_id: Owner anywhere; with org_id: Owner of that organization."""
        if not org_id:
            return 'Owner' in [g.description for g in
                               self.organization_roles]
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.role.description == 'Owner'])
    def can_view_group_items(self, group_id):
        g = Group.get(group_id)
        return (self.super_admin or
                self.is_group_member(group_id) or
                self.is_group_admin(g.parent_org.id))
    def is_org_admin(self, org_id=None):
        """With no org_id: admin anywhere; with org_id: admin of that organization."""
        if not org_id:
            return 'Organization Administrator' in [g.description for g in
                                                    self.organization_roles]
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.role.description == 'Organization Administrator'])
    def is_org_member(self, org_id):
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.role.description == 'Member'])
    def is_in_org(self, org_id):
        return org_id in [g.id for g in self.organizations]
    def is_group_leader(self, group_id):
        return any([gm for gm in self.group_members if
                    gm.group_id == group_id and
                    gm.role.description == 'Group Leader'])
    def is_meeting_alternate_host(self, group_id):
        # NOTE(review): `group_id` is ignored — this checks cohost status across
        # ALL of the user's group memberships; confirm whether that's intended.
        return any([gm for gm in self.group_members if
                    gm.can_cohost_meeting == 1])
    def is_group_admin(self, org_id=None):
        """With no org_id: group admin anywhere; with org_id: in that organization."""
        if not org_id:
            return 'Group Administrator' in [g.description for g in
                                             self.organization_roles]
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.role.description == 'Group Administrator'])
    def is_group_creator(self, org_id=None):
        """With no org_id: group creator anywhere; with org_id: in that organization."""
        if not org_id:
            return 'Group Creator' in [g.description for g in
                                       self.organization_roles]
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.role.description == 'Group Creator'])
    def can_add_group(self, group_id=None, org_id=None):
        return (self.super_admin or
                self.is_org_owner(org_id) or
                self.is_org_admin(org_id) or
                self.is_group_admin(org_id) or
                self.is_group_creator(org_id))
    def can_edit_group(self, group_id=None):
        group = Group.get(group_id)
        org_id = group.parent_org.id
        return (self.super_admin or
                self.is_group_leader(group.id) or
                self.is_group_admin(org_id) or
                self.can_edit_org(org_id))
    def can_change_group_members_role(self, group):
        org_id = group.parent_org.id
        return (self.super_admin or
                self.is_group_admin(org_id) or
                self.can_edit_org(org_id))
    def can_edit_org(self, org_id):
        return (self.super_admin or
                self.is_org_owner(org_id) or
                self.is_org_admin(org_id))
    def can_manage_subscription(self, org_id):
        return any([om for om in self.organization_members if
                    om.organization_id == org_id and
                    om.can_manage_subscription])
    @classmethod
    def get_email_from_full_name_and_email(cls, full_name_and_email):
        """Extract the email address from a "Full Name (email)" string.

        Raises exception.InvalidEmail when no email-shaped substring is found.
        """
        regex = r"(\w+([-+.']\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*)"
        matches = re.findall(regex, full_name_and_email)
        if not matches:
            raise exception.InvalidEmail()
        return matches[0][0]
class LinkType(Base, HasId):
    """Lookup table categorising Link rows (one type per link)."""
    description = sa.Column(sa.String(200), nullable=False)
    priority = sa.Column(sa.Integer(), nullable=True)
    link = orm.relationship('Link', backref='link_type')

    @property
    def icon(self):
        """Icon name: the lowercased description with spaces as dashes."""
        return self.description.lower().replace(' ', '-')

    def __str__(self):
        return self.description

    def __repr__(self):
        return f"LinkType:{self.id}, {self.description}"
class Link(Base, HasId):
    """A URL attached to an organization or a group, with an optional type."""
    link_type_id = sa.Column(GUID(), sa.ForeignKey("link_types.id"))
    icon_css_class = sa.Column(sa.String(120))
    organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"), nullable=True)
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"), nullable=True)
    url = sa.Column(sa.String(250), nullable=False)

    @property
    def formatted_url(self):
        """Return the url, prefixing an http scheme when none is present.

        Bug fix: the original tested ``'http' in self.url``, which accepted
        any URL merely *containing* "http" anywhere (e.g.
        "ftp://http-mirror.example") without adding a scheme; test the
        prefix instead.
        """
        if self.url.startswith('http'):
            return self.url
        return "http://{}".format(self.url)
class Address(Base, HasId):
    """A street address; organizations reference it via ``address_id``."""
    # TODO I think this is the correct mapping
    # organization_id = sa.Column(GUID(), sa.ForeignKey("organization.id"))
    # organization = orm.relationship("Organization", backref="addresses")
    # TODO Nothing International?
    # TODO this needs to be split up into street number and street IMO
    street_address = sa.Column(sa.String(100), nullable=False)
    city = sa.Column(sa.String(100), nullable=False)
    # TODO this should be an enum
    state = sa.Column(sa.String(30), nullable=False)
    # TODO No country?
    zip_code = sa.Column(sa.String(9), nullable=True)
    organization = orm.relationship('Organization', backref='address')

    @property
    def formatted(self):
        """Single-line rendering: "street city, state zip"."""
        return f"{self.street_address} {self.city}, {self.state} {self.zip_code}"

    @property
    def line1(self):
        """First address line: the street address."""
        return f"{self.street_address}"

    @property
    def line2(self):
        """Second address line: "city, state zip"."""
        return f"{self.city}, {self.state} {self.zip_code}"

    def __str__(self):
        return self.formatted

    def __repr__(self):
        return f"Address:{self.id}, {self.formatted}"
class ZoomMeeting(Base, HasId, renders.MeetingMixin):
    """A Zoom video meeting scheduled for a group.

    Stores the Zoom identifiers and host/join URLs plus local scheduling
    data (start, duration in minutes, optional repetition).
    """
    # https://zoom.us/
    # TODO Is this the only type they want to support?
    # TODO This seems insufficient. Probably need Outlook-meeting-like
    # granularity
    SCHEDULED_MEETING = 2
    REPEATED_MEETING = 3
    DEFAULT_MEETING_LENGTH = 60
    LIST_LIMIT = int(CONF.get('zoom.meeting.list.limit', 30))
    meeting_id = sa.Column(sa.String(255), nullable=False)
    # Length of the meeting in minutes (see meeting_end below).
    duration = sa.Column(sa.Integer(), nullable=False, default=60)
    meeting_start = sa.Column(sa.DateTime(), nullable=False, default=None)
    # TODO model this as an enumerable type?
    # Compared against str(REPEATED_MEETING) in info(), so stored as text.
    repeat_type = sa.Column(sa.String(10))
    topic = sa.Column(sa.String(100), nullable=False)
    # Privileged URL the host uses to start the meeting.
    start_url = sa.Column(sa.String(500), nullable=False)
    # URL ordinary attendees use to join.
    join_url = sa.Column(sa.String(255), nullable=False)
    repeat_end_date = sa.Column(sa.Date(), nullable=True, default=None)
    # The user who owns (created) the meeting.
    user_id = sa.Column(GUID(), sa.ForeignKey("users.id"), nullable=False)
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    def url(self, user_id):
        """Return the start URL for users who may host, else the join URL."""
        if self.can_host_meeting(user_id):
            return self.start_url
        return self.join_url
    def can_host_meeting(self, user_id):
        """True for the meeting's owner or an alternate host of its group."""
        u = User.get(user_id)
        return self.user_id == user_id or u.is_meeting_alternate_host(
            self.group_id)
    def info(self, timezone):
        """Human-readable schedule summary rendered in *timezone*.

        Repeated meetings render as "Every <day> at <time>" (optionally
        with the start/end date range); one-off meetings render as a
        start/end range.
        """
        if self.repeat_type == str(self.REPEATED_MEETING):
            output = "Every {} at {}".format(
                helper.day_of_week(self.meeting_start, timezone),
                helper.time_only_offset(self.meeting_start, timezone))
            if self.repeat_end_date:
                output += "<br/>{}-{}".format(
                    self.start_date_with_timezone(timezone),
                    self.repeat_end_date.strftime(constants.DATE_FORMAT))
            return output
        if self.single_day_event:
            # Same-day meetings only need the end *time*, not the date.
            return "{} - {}".format(
                self.start_with_timezone(timezone),
                self.end_time_with_timezone(timezone))
        return "{} - {}".format(
            self.start_with_timezone(timezone),
            self.end_with_timezone(timezone))
    @property
    def single_day_event(self):
        """True when the meeting starts and ends on the same calendar day."""
        if self.start_date == self.end_date:
            return True
        return False
    @property
    def duration_time(self):
        """Duration formatted as hours and minutes."""
        return helper.seconds_to_hours_and_minutes(self.duration)
    @property
    def start_time(self):
        """Start time of day (no timezone adjustment supplied)."""
        return helper.time_only_offset(self.meeting_start)
    @property
    def start_date(self):
        """Start date (no timezone adjustment supplied)."""
        return helper.date_only_offset(self.meeting_start)
    @property
    def end_time(self):
        """End time of day (no timezone adjustment supplied)."""
        return helper.time_only_offset(self.meeting_end)
    @property
    def end_date(self):
        """End date (no timezone adjustment supplied)."""
        return helper.date_only_offset(self.meeting_end)
    @property
    def meeting_end(self):
        """Computed end: start plus ``duration`` minutes."""
        return self.meeting_start + datetime.timedelta(minutes=self.duration)
    def start_with_timezone(self, timezone):
        """Full start datetime localised to *timezone*."""
        return helper.datetime_offset(self.meeting_start, timezone)
    def end_with_timezone(self, timezone):
        """Full end datetime localised to *timezone*."""
        return helper.datetime_offset(self.meeting_end, timezone)
    def start_time_with_timezone(self, timezone):
        """Start time of day localised to *timezone*."""
        return helper.time_only_offset(self.meeting_start, timezone)
    def end_time_with_timezone(self, timezone):
        """End time of day localised to *timezone*."""
        return helper.time_only_offset(self.meeting_end, timezone)
    def start_date_with_timezone(self, timezone):
        """Start date localised to *timezone*."""
        return helper.date_only_offset(self.meeting_start, timezone)
    def end_date_with_timezone(self, timezone):
        """End date localised to *timezone*."""
        return helper.date_only_offset(self.meeting_end, timezone)
class GroupMember(Base, HasId):
    """Join table linking users to groups, carrying the member's role.

    A (group_id, user_id) pair is unique; ``role_id`` points at the
    member's GroupRole (e.g. "Group Leader").
    """
    __table_args__ = (sa.UniqueConstraint("group_id", "user_id",
                                          name="group_user_membership"),
                      TABLE_KWARGS)
    # join table for groups and users
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
    role_id = sa.Column(GUID(), sa.ForeignKey("group_roles.id"))
    # True when this member may co-host the group's Zoom meetings.
    can_cohost_meeting = sa.Column(sa.Boolean(), nullable=False, default=False)
    # TODO IMO we don't keep deleted_at OR we keep *all* of them on all models
    deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    user = orm.relationship('User')
    group = orm.relationship('Group')
    role = orm.relationship('GroupRole')
    users = orm.relationship(
        "User",
        back_populates="group_members")
# TODO If these represent permissions, we can probably do this better, globally
class GroupRole(Base, HasId):
    """Role a user holds within a group (e.g. "Group Leader")."""
    description = sa.Column(sa.String(80), nullable=False)
    priority = sa.Column(sa.Integer(), nullable=True)
    users = orm.relationship(
        "User", secondary='group_members',
        back_populates="group_roles")

    def __str__(self):
        return self.description

    def __repr__(self):
        # Bug fix: the original passed the *builtin* ``id`` function to
        # format() instead of this row's ``self.id``, so reprs printed
        # "<built-in function id>".
        return "GroupRole:{}, {}".format(self.id, self.description)
class GroupDocument(Base, HasId, renders.GroupDocumentMixin):
    """A document attached to a group."""
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    # Display name shown to users; file_url points at the stored file.
    friendly_name = sa.Column(sa.String(80), nullable=False)
    file_url = sa.Column(sa.String(250), nullable=True)
class AgeRange(Base, HasId):
    """Lookup table describing the age bracket a group targets."""
    description = sa.Column(sa.String(80))
    priority = sa.Column(sa.Integer(), nullable=True)
    groups = orm.relationship('Group', backref='age_range')

    def __str__(self):
        return self.description

    def __repr__(self):
        # Bug fix: the original passed the *builtin* ``id`` function to
        # format() instead of this row's ``self.id``, so reprs printed
        # "<built-in function id>".
        return "AgeRange:{}, {}".format(self.id, self.description)
class GroupMeetTime(Base, HasId):
    """Join table pairing a group with one of its meeting-time types."""
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    meet_time_type_id = sa.Column(GUID(), sa.ForeignKey("meet_time_types.id"))

    def __str__(self):
        return (f"GroupMeetTime group_id: {self.group_id}, "
                f"meet_time_type_id: {self.meet_time_type_id}")

    def __repr__(self):
        return (f"GroupMeetTime:{self.id}, group_id: {self.group_id}, "
                f"meet_time_type_id: {self.meet_time_type_id}")

    def __hash__(self):
        # NOTE(review): __hash__ is defined without a matching __eq__ here;
        # presumably equality comes from the Base — confirm.
        return hash(str(self))
class MeetTimeType(Base, HasId):
    """Lookup table of meeting-time descriptions selectable by groups."""
    description = sa.Column(sa.String(80), nullable=False)
    group_meet_time = orm.relationship('GroupMeetTime',
                                      backref='meet_time_type')
    priority = sa.Column(sa.Integer(), nullable=True)

    def __str__(self):
        return self.description

    def __repr__(self):
        return f"MeetTimeType:{self.id}, {self.description}"
class GroupType(Base, HasId):
    """Lookup table categorising groups."""
    description = sa.Column(sa.String(80), nullable=False)
    priority = sa.Column(sa.Integer(), nullable=True)
    # Has a one to many relationship to Groups, but Why? maybe backref?
    groups = orm.relationship('Group', backref='group_type')

    def __str__(self):
        return self.description

    def __repr__(self):
        return f"GroupType:{self.id}, {self.description}"
class GroupPrivacySetting(Base, HasId):
    """Visibility level for a group (e.g. Public, Organization Only)."""
    priority = sa.Column(sa.Integer(), nullable=True)
    description = sa.Column(sa.String(80), nullable=False)

    # Hybrid properties work both on instances and inside SQL expressions,
    # so the ``startswith`` form is kept unchanged (it translates to a
    # prefix match at the SQL level).
    @hybrid.hybrid_property
    def is_public(self):
        return self.description.startswith("Public")

    @hybrid.hybrid_property
    def is_org_only(self):
        return self.description.startswith("Organization Only")

    def __str__(self):
        return self.description

    def __repr__(self):
        return f"GroupPrivacySetting:{self.id}, {self.description}"
class OrganizationGroup(Base, HasId):
    """Join table linking organizations to their groups, with a sort order."""
    organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"))
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    # Position of the group in the organization's listing (used as the
    # order_by of Organization.groups).
    order = sa.Column(sa.Integer(), default=0)
    organization = orm.relationship('Organization')
    group = orm.relationship('Group')
class Group(Base, HasId, ImageMixin, renders.GroupMixin):
    """A small group belonging to exactly one organization.

    Carries membership, meeting times, Zoom meetings, documents, links and
    privacy settings.
    """
    name = sa.Column(sa.String(80), nullable=False)
    description = sa.Column(sa.Text(), nullable=False)
    member_limit = sa.Column(sa.Text(), nullable=True)
    archived_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    tag_line = sa.Column(sa.String(80), nullable=True)
    # TODO This is racey and requires locking
    clicks = sa.Column(sa.Integer(), nullable=False, default=0)
    age_range_id = sa.Column(GUID(), sa.ForeignKey("age_ranges.id"),
                             nullable=True)
    anonymous = sa.Column(sa.Boolean(), nullable=False, default=False)
    # NOTE For now, this will be M, F, and None, and should be an FK to
    # an enum table
    gender_focus = sa.Column(sa.String(80), nullable=True)
    images = orm.relationship('Image', secondary='group_images')
    privacy_setting_id = sa.Column(
        GUID(), sa.ForeignKey("group_privacy_settings.id"))
    privacy_settings = orm.relationship("GroupPrivacySetting",
                                        backref="group")
    group_type_id = sa.Column(GUID(), sa.ForeignKey("group_types.id"),
                              nullable=True)
    organizations = orm.relationship('Organization',
                                     secondary='organization_groups',
                                     back_populates='groups')
    documents = orm.relationship('GroupDocument',
                                 backref='group')
    meet_times = orm.relationship('GroupMeetTime', backref='group')
    meetings = orm.relationship('ZoomMeeting',
                                backref='group')
    users = orm.relationship('User',
                             secondary='group_members',
                             back_populates='groups')
    # Maps the gender_focus column value to display text.
    gender_translation = {'M': "Men's Group",
                          'F': "Women's Group",
                          None: 'Men and Women',
                          '': 'Men and Women'}
    notifications = orm.relationship("Notification", backref="group")
    # Members holding the "Group Leader" role.
    leaders = orm.relationship('User',
                               secondary='group_members',
                               back_populates='groups',
                               primaryjoin=(
                                   "and_("
                                   "GroupMember.user_id==User.id, "
                                   "GroupMember.group_id==Group.id, "
                                   "GroupMember.role_id==GroupRole.id, "
                                   "GroupRole.description=='Group Leader')"))
    links = orm.relationship('Link', backref='group')
    @property
    def parent_org(self):
        """The group's owning organization (first related organization)."""
        return self.organizations[0]
    @property
    def org_creator(self):
        """The user who created the parent organization."""
        org = Organization.get(self.parent_org.id)
        return org.creator
    @property
    def is_payed_up(self):
        """Whether the parent organization's subscription is current."""
        org = Organization.get(self.parent_org.id)
        return org.is_payed_up
    def is_joinable(self):
        """True when the group has no member limit or still has room.

        NOTE(review): member_limit is a Text column; the ``>`` comparison
        below assumes it holds a numeric value — verify.
        """
        if not self.member_limit:
            return True
        return self.member_limit > len(self.users)
    @property
    def get_meet_times(self):
        """Descriptions of this group's meeting-time types.

        Collects the related MeetTimeType ids and resolves their
        descriptions with a single query.
        """
        ids = []
        for meet_time in self.meet_times:
            if meet_time.meet_time_type_id:
                ids.append(meet_time.meet_time_type_id)
        meet_descriptions = []
        if ids:
            with transaction() as session:
                meet_types = (session.query(MeetTimeType)
                              .filter(MeetTimeType.id.in_(ids))
                              .options(orm.load_only('description'))
                              .all())
                meet_descriptions = [meet_type.description for meet_type in meet_types]
        return meet_descriptions
    @property
    def gender_focus_formatted(self):
        """Display text for gender_focus (see gender_translation)."""
        return self.gender_translation.get(self.gender_focus, None)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "Group:{}, {}".format(self.id, self.name)
class Organization(Base, HasId, ImageMixin, renders.OrganizationMixin):
    """An organization that owns groups, members and a billing subscription."""
    # TODO We should talk to Toneo about allowing people to craft this
    # model piecemeal, but only allow them to "publish" their Org
    # after all the minimum detail is met. This also could use
    # some vetting/approval process
    name = sa.Column(sa.String(80), nullable=False)
    description = sa.Column(sa.Text(), nullable=True, default=None)
    deleted_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    show_address = sa.Column(sa.Boolean(), nullable=False, default=True)
    vanity_name = sa.Column(sa.String(80), nullable=True, default=None)
    # TODO This is very clearly church focused. What should we do with this?
    # and how should we migrate it?
    service_times_description = sa.Column(sa.String(80), nullable=True,
                                          default=None)
    date_established = sa.Column(sa.DateTime(), nullable=True)
    address_id = sa.Column(GUID(), sa.ForeignKey("addresses.id"))
    users = orm.relationship('User',
                            secondary='organization_members',
                            back_populates='organizations',
                            order_by='OrganizationMember.created_at')
    # Memberships holding the "Owner" role, oldest first.
    owners = orm.relationship('OrganizationMember',
                             secondary='organization_roles',
                             primaryjoin=(
                                 'and_('
                                 'OrganizationMember.organization_id=='
                                 'Organization.id, '
                                 'OrganizationRole.description=="Owner")'),
                             order_by='OrganizationMember.created_at')
    links = orm.relationship('Link', backref='organization')
    # The primaryjoin here excludes archived groups
    groups = orm.relationship('Group',
                             secondary='organization_groups',
                             back_populates='organizations',
                             order_by='OrganizationGroup.order',
                             primaryjoin=(
                                 'and_('
                                 'OrganizationGroup.organization_id=='
                                 'Organization.id, '
                                 'OrganizationGroup.group_id==Group.id, '
                                 'Organization.activated_at.isnot(None), '
                                 'Group.archived_at==None)'))
    # Users leading any non-archived group in this organization.
    group_leaders = orm.relationship(
        'User',
        secondary='group_members',
        back_populates='organizations',
        primaryjoin=('and_('
                     'GroupMember.user_id==User.id, '
                     'GroupMember.group_id==Group.id, '
                     'GroupMember.role_id==GroupRole.id, '
                     'OrganizationGroup.group_id==Group.id, '
                     'OrganizationGroup.organization_id==Organization.id, '
                     'GroupRole.description=="Group Leader", '
                     'Group.archived_at==None)'))
    images = orm.relationship('Image', secondary='organization_images')
    notifications = orm.relationship('Notification', backref='organization')
    activated_at = sa.Column(sa.DateTime(), nullable=True, default=None)
    # Cache elements
    # NOTE(review): these are class-level defaults; the assignments in the
    # properties below shadow them with per-instance values, so the cache
    # is per-object but persists only for the object's lifetime.
    licenses = 0
    allocated_licenses = 0
    billing_date = False
    sub_data = None
    discount_data = 0
    @property
    def group_leader_count(self):
        """Number of group leaders, excluding leaders of the LifeLoop org.

        Leaders who also appear in the org identified by CONF
        ``llw.org.id`` are skipped — presumably because they are internal
        staff who should not be billed (see TODOs).
        """
        # TODO: Flag the correct organization as is_lifeloop, refer to that
        # TODO: Add 'no_charge' flag to organizations who we don't bill
        llw_org = Organization.get(CONF.get("llw.org.id"))
        llw_leaders = llw_org.group_leaders
        count = 0
        for leader in self.group_leaders:
            if leader not in llw_leaders:
                count += 1
        return count
    @property
    def purchased_licenses(self):
        """License quantity allocated on the Chargify subscription (cached)."""
        if not self.allocated_licenses and self.subscription_data:
            subscription_driver = subscription.ChargifyDriver(self.id)
            allocation = (
                subscription_driver.
                get_subscription_component_allocation(
                    self.subscription_data['id']))
            self.allocated_licenses = allocation['quantity']
        return self.allocated_licenses
    @property
    def available_licenses(self):
        """Unused licenses: purchased (+1 base) minus leaders, floored at 0."""
        if not self.licenses:
            purchased = self.purchased_licenses + 1  # base license
            used = self.group_leader_count
            total = purchased - used
            self.licenses = 0 if total < 0 else total
        return self.licenses
    def next_billing_date(self):
        """End of the current billing period in the org's timezone (cached)."""
        if not self.billing_date:
            if self.subscription_data:
                data = self.subscription_data['current_period_ends_at']
                # Keep only the 'YYYY-MM-DDTHH:MM:SS' prefix for strptime.
                date = data[0:19]
                date_time = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
                self.billing_date = helper.datetime_offset(
                    date_time, self.timezone)
        return self.billing_date
    @property
    def cancel_at_end_of_period(self):
        """Whether the subscription is flagged to cancel at period end."""
        if self.subscription_data:
            return self.subscription_data['cancel_at_end_of_period']
        return False
    def is_in_trial(self):
        """True while 'now' (org-local) is before the next billing date."""
        date = self.next_billing_date()
        if date:
            datetime_now = datetime.datetime.utcnow()
            now = helper.datetime_offset(datetime_now, self.timezone)
            if now < date:
                return True
        return False
    @property
    def subscription_data(self):
        """Raw subscription record from the billing driver (cached)."""
        if not self.sub_data:
            subscription_driver = subscription.ChargifyDriver(self.id)
            self.sub_data = subscription_driver.get_subscription(self.id)
        return self.sub_data
    @property
    def coupon(self):
        """Coupon code on the subscription, or None."""
        LOG.debug(self.subscription_data)
        if 'coupon_code' in self.subscription_data:
            return self.subscription_data['coupon_code']
        return None
    @property
    def discount(self):
        """Discount details for the coupon, if any (cached)."""
        if self.coupon and not self.discount_data:
            subscription_driver = subscription.subscription_driver()
            self.discount_data = subscription_driver.get_discount(self.coupon)
        return self.discount_data
    @property
    def is_active(self):
        """True once the organization has been activated."""
        return self.activated_at is not None
    @property
    def is_payed_up(self):
        """Whether the org may use paid features (subscribed or in trial).

        NOTE(review): available_licenses is floored at 0 above, so the
        ``>= 0`` test can never fail once it is computed — confirm intent.
        """
        if self.subscription_data and self.available_licenses >= 0:
            return True
        return self.is_in_trial()
    @property
    def creator(self):
        """The owning user: the single owner, or the first owner whose
        email is not a lifeloop.live address."""
        owners = self.owners
        return (owners[0].user if len(owners) == 1 else
                [om for om in owners if om.user.email.find("lifeloop.live") < 0][0].user)
    @property
    def timezone(self):
        """Timezone of the creating user; used for billing-date math."""
        return self.creator.timezone
    def public_groups(self):
        """Groups whose privacy description starts with 'public'."""
        return [g for g in self.groups
                if g.privacy_settings.description.lower()
                .startswith('public')]
    def private_groups(self):
        """Groups whose privacy description starts with 'private'."""
        return [g for g in self.groups
                if g.privacy_settings.description.lower()
                .startswith('private')]
    def org_only_groups(self):
        """Groups visible only within the organization."""
        return [g for g in self.groups
                if g.privacy_settings.description.lower()
                .startswith('organization only')]
    def public_and_org_only_groups(self):
        """Groups that are either public or organization-only."""
        return [g for g in self.groups
                if g.privacy_settings.description.lower()
                .startswith('organization only') or g.privacy_settings
                .description.lower().startswith('public')]
    @property
    def website(self):
        """URL of the first link whose type description ends in 'Website'."""
        for link in self.links:
            if link.link_type.description.split(' ')[-1] == 'Website':
                return link.url
        return None
    def __repr__(self):
        return "Organization: {}, name: {}".format(
            self.id, self.name)
    def __hash__(self):
        return hash(str(self))
    def __lt__(self, other):
        # Sort organizations alphabetically by name.
        return self.name < other.name
class OrganizationMember(Base, HasId):
    """Join table linking users to organizations with an org-level role."""
    __table_args__ = (sa.UniqueConstraint("organization_id", "user_id",
                                          name="org_user_membership"),
                      TABLE_KWARGS)
    user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
    organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"), index=True)
    # TODO Should be many?
    role_id = sa.Column(GUID(), sa.ForeignKey("organization_roles.id"))
    user = orm.relationship('User')
    organization = orm.relationship('Organization')
    role = orm.relationship('OrganizationRole')
    can_manage_subscription = sa.Column(sa.Boolean(), nullable=False, default=False)
    users = orm.relationship(
        "User",
        back_populates="organization_members")

    def __str__(self):
        return self.user.full_name

    def __repr__(self):
        return f"OrganizationMember:{self.id}, {self.user.full_name}"
class UserImage(Base, HasId):
    """Join table linking users to images."""
    user_id = sa.Column(GUID(), sa.ForeignKey("users.id"))
    image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
    user = orm.relationship('User')
    image = orm.relationship('Image')
class GroupImage(Base, HasId):
    """Join table linking groups to images."""
    group_id = sa.Column(GUID(), sa.ForeignKey("groups.id"))
    image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
    group = orm.relationship('Group')
    image = orm.relationship('Image')
class OrganizationImage(Base, HasId):
    """Join table linking organizations to images."""
    organization_id = sa.Column(GUID(), sa.ForeignKey("organizations.id"))
    image_id = sa.Column(GUID(), sa.ForeignKey("images.id"))
    organization = orm.relationship('Organization')
    image = orm.relationship('Image')
class Image(Base, HasId):
    """A stored image URL with an optional provider public id."""
    image_url = sa.Column(sa.String(500), nullable=False)
    public_id = sa.Column(sa.String(255), nullable=True)
    # NOTE: TEMPORARY WHILE MIGRATING TO JOIN TABLES
    organization_id = sa.Column(GUID(), nullable=True)

    @property
    def url(self):
        """The image URL, falling back to the default card asset."""
        return self.image_url or webpack.asset_url_for('images/card.default.png')
class Page(Base, HasId):
    """An editable content page (title plus body text)."""
    title = sa.Column(sa.String(60), nullable=False)
    content = sa.Column(sa.String(20000), nullable=False)
    # Integer discriminator for the kind of page; semantics defined by
    # callers — not visible here, TODO confirm.
    pagetype = sa.Column(sa.Integer(), nullable=False)
    updated_by = sa.Column(sa.String(60), nullable=False)
| [
"sqlalchemy.orm.load_only",
"sqlalchemy.func.min",
"sqlalchemy.Text",
"sqlalchemy.String",
"lifeloopweb.exception.InvalidEmail",
"sqlalchemy.select",
"datetime.timedelta",
"lifeloopweb.exception.ModelUnknownAttrbute",
"sqlalchemy.Column",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.DateTime",
"... | [((639, 667), 'lifeloopweb.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (657, 667), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((697, 705), 'lifeloopweb.helpers.base_helper.Helper', 'Helper', ([], {}), '()\n', (703, 705), False, 'from lifeloopweb.helpers.base_helper import Helper\n'), ((2162, 2230), 'sqlalchemy.create_engine', 'sa.create_engine', (['ENGINE_URL'], {'echo': 'connection_debug'}), '(ENGINE_URL, echo=connection_debug, **engine_kwargs)\n', (2178, 2230), True, 'import sqlalchemy as sa\n'), ((2274, 2365), 'sqlalchemy.orm.sessionmaker', 'orm.sessionmaker', ([], {'bind': 'engine', 'expire_on_commit': '(False)', 'autocommit': '(False)', 'autoflush': '(True)'}), '(bind=engine, expire_on_commit=False, autocommit=False,\n autoflush=True)\n', (2290, 2365), False, 'from sqlalchemy import orm\n'), ((2909, 2943), 'sqlalchemy.orm.scoped_session', 'orm.scoped_session', (['SessionFactory'], {}), '(SessionFactory)\n', (2927, 2943), False, 'from sqlalchemy import orm\n'), ((3097, 3140), 'sqlalchemy.event.listens_for', 'event.listens_for', (['engine', '"""engine_connect"""'], {}), "(engine, 'engine_connect')\n", (3114, 3140), False, 'from sqlalchemy import event, exc, func, select\n'), ((7513, 7589), 'sqlalchemy.ext.declarative.declarative_base', 'declarative.declarative_base', ([], {'cls': 'ModelBase', 'bind': 'engine', 'metaclass': 'MetaBase'}), '(cls=ModelBase, bind=engine, metaclass=MetaBase)\n', (7541, 7589), False, 'from sqlalchemy.ext import declarative\n'), ((1293, 1383), 'lifeloopweb.exception.InvalidConfigValue', 'exception.InvalidConfigValue', ([], {'value': 'connection_debug', 'key': '"""database.connection.debug"""'}), "(value=connection_debug, key=\n 'database.connection.debug')\n", (1321, 1383), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((8343, 8408), 'sqlalchemy.Column', 'sa.Column', (['GUID'], 
{'primary_key': '(True)', 'default': 'db_utils.generate_guid'}), '(GUID, primary_key=True, default=db_utils.generate_guid)\n', (8352, 8408), True, 'import sqlalchemy as sa\n'), ((9157, 9205), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Notification"""'], {'backref': '"""type"""'}), "('Notification', backref='type')\n", (9173, 9205), False, 'from sqlalchemy import orm\n'), ((11261, 11361), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'secondary': '"""organization_members"""', 'back_populates': '"""organization_roles"""'}), "('User', secondary='organization_members', back_populates=\n 'organization_roles')\n", (11277, 11361), False, 'from sqlalchemy import orm\n'), ((12518, 12568), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {'secondary': '"""user_images"""'}), "('Image', secondary='user_images')\n", (12534, 12568), False, 'from sqlalchemy import orm\n'), ((13172, 13372), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {'secondary': '"""organization_members"""', 'back_populates': '"""users"""', 'primaryjoin': '"""and_(OrganizationMember.user_id==User.id, Organization.activated_at.isnot(None))"""'}), "('Organization', secondary='organization_members',\n back_populates='users', primaryjoin=\n 'and_(OrganizationMember.user_id==User.id, Organization.activated_at.isnot(None))'\n )\n", (13188, 13372), False, 'from sqlalchemy import orm\n'), ((13443, 13769), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {'secondary': '"""group_members"""', 'back_populates': '"""users"""', 'primaryjoin': '"""and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, Organization.activated_at.isnot(None), Group.archived_at==None)"""'}), "('Group', secondary='group_members', back_populates='users',\n primaryjoin=\n 'and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, 
OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, Organization.activated_at.isnot(None), Group.archived_at==None)'\n )\n", (13459, 13769), False, 'from sqlalchemy import orm\n'), ((13920, 14018), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""OrganizationRole"""'], {'secondary': '"""organization_members"""', 'back_populates': '"""users"""'}), "('OrganizationRole', secondary='organization_members',\n back_populates='users')\n", (13936, 14018), False, 'from sqlalchemy import orm\n'), ((14051, 14136), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupRole"""'], {'secondary': '"""group_members"""', 'back_populates': '"""users"""'}), "('GroupRole', secondary='group_members', back_populates='users'\n )\n", (14067, 14136), False, 'from sqlalchemy import orm\n'), ((14170, 14267), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Notification"""'], {'foreign_keys': '"""[Notification.user_to_id]"""', 'backref': '"""to_user"""'}), "('Notification', foreign_keys='[Notification.user_to_id]',\n backref='to_user')\n", (14186, 14267), False, 'from sqlalchemy import orm\n'), ((14315, 14416), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Notification"""'], {'foreign_keys': '"""[Notification.user_from_id]"""', 'backref': '"""from_user"""'}), "('Notification', foreign_keys='[Notification.user_from_id]',\n backref='from_user')\n", (14331, 14416), False, 'from sqlalchemy import orm\n'), ((14459, 14514), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupMember"""'], {'back_populates': '"""users"""'}), "('GroupMember', back_populates='users')\n", (14475, 14514), False, 'from sqlalchemy import orm\n'), ((14560, 14622), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""OrganizationMember"""'], {'back_populates': '"""users"""'}), "('OrganizationMember', back_populates='users')\n", (14576, 14622), False, 'from sqlalchemy import orm\n'), ((14661, 14997), 'sqlalchemy.orm.relationship', 
'orm.relationship', (['"""Group"""'], {'secondary': '"""group_members"""', 'back_populates': '"""users"""', 'primaryjoin': '"""and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, GroupRole.description==\'Group Leader\')"""'}), '(\'Group\', secondary=\'group_members\', back_populates=\'users\',\n primaryjoin=\n "and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, GroupRole.description==\'Group Leader\')"\n )\n', (14677, 14997), False, 'from sqlalchemy import orm\n'), ((21922, 21967), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Link"""'], {'backref': '"""link_type"""'}), "('Link', backref='link_type')\n", (21938, 21967), False, 'from sqlalchemy import orm\n'), ((23352, 23403), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {'backref': '"""address"""'}), "('Organization', backref='address')\n", (23368, 23403), False, 'from sqlalchemy import orm\n'), ((28348, 28372), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {}), "('User')\n", (28364, 28372), False, 'from sqlalchemy import orm\n'), ((28385, 28410), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {}), "('Group')\n", (28401, 28410), False, 'from sqlalchemy import orm\n'), ((28422, 28451), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupRole"""'], {}), "('GroupRole')\n", (28438, 28451), False, 'from sqlalchemy import orm\n'), ((28464, 28520), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'back_populates': '"""group_members"""'}), "('User', back_populates='group_members')\n", (28480, 28520), False, 'from sqlalchemy import orm\n'), ((28775, 28861), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], 
{'secondary': '"""group_members"""', 'back_populates': '"""group_roles"""'}), "('User', secondary='group_members', back_populates=\n 'group_roles')\n", (28791, 28861), False, 'from sqlalchemy import orm\n'), ((29401, 29447), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {'backref': '"""age_range"""'}), "('Group', backref='age_range')\n", (29417, 29447), False, 'from sqlalchemy import orm\n'), ((30253, 30312), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupMeetTime"""'], {'backref': '"""meet_time_type"""'}), "('GroupMeetTime', backref='meet_time_type')\n", (30269, 30312), False, 'from sqlalchemy import orm\n'), ((30788, 30835), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {'backref': '"""group_type"""'}), "('Group', backref='group_type')\n", (30804, 30835), False, 'from sqlalchemy import orm\n'), ((31840, 31872), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {}), "('Organization')\n", (31856, 31872), False, 'from sqlalchemy import orm\n'), ((31885, 31910), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {}), "('Group')\n", (31901, 31910), False, 'from sqlalchemy import orm\n'), ((32718, 32769), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {'secondary': '"""group_images"""'}), "('Image', secondary='group_images')\n", (32734, 32769), False, 'from sqlalchemy import orm\n'), ((32889, 32945), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupPrivacySetting"""'], {'backref': '"""group"""'}), "('GroupPrivacySetting', backref='group')\n", (32905, 32945), False, 'from sqlalchemy import orm\n'), ((33122, 33216), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {'secondary': '"""organization_groups"""', 'back_populates': '"""groups"""'}), "('Organization', secondary='organization_groups',\n back_populates='groups')\n", (33138, 33216), False, 'from sqlalchemy import orm\n'), ((33303, 33353), 
'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupDocument"""'], {'backref': '"""group"""'}), "('GroupDocument', backref='group')\n", (33319, 33353), False, 'from sqlalchemy import orm\n'), ((33404, 33454), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""GroupMeetTime"""'], {'backref': '"""group"""'}), "('GroupMeetTime', backref='group')\n", (33420, 33454), False, 'from sqlalchemy import orm\n'), ((33470, 33518), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""ZoomMeeting"""'], {'backref': '"""group"""'}), "('ZoomMeeting', backref='group')\n", (33486, 33518), False, 'from sqlalchemy import orm\n'), ((33563, 33639), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'secondary': '"""group_members"""', 'back_populates': '"""groups"""'}), "('User', secondary='group_members', back_populates='groups')\n", (33579, 33639), False, 'from sqlalchemy import orm\n'), ((33908, 33957), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Notification"""'], {'backref': '"""group"""'}), "('Notification', backref='group')\n", (33924, 33957), False, 'from sqlalchemy import orm\n'), ((33972, 34218), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'secondary': '"""group_members"""', 'back_populates': '"""groups"""', 'primaryjoin': '"""and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, GroupRole.description==\'Group Leader\')"""'}), '(\'User\', secondary=\'group_members\', back_populates=\'groups\',\n primaryjoin=\n "and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, GroupRole.description==\'Group Leader\')"\n )\n', (33988, 34218), False, 'from sqlalchemy import orm\n'), ((34500, 34541), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Link"""'], {'backref': '"""group"""'}), "('Link', backref='group')\n", (34516, 34541), False, 'from sqlalchemy import orm\n'), ((36943, 37080), 
'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'secondary': '"""organization_members"""', 'back_populates': '"""organizations"""', 'order_by': '"""OrganizationMember.created_at"""'}), "('User', secondary='organization_members', back_populates=\n 'organizations', order_by='OrganizationMember.created_at')\n", (36959, 37080), False, 'from sqlalchemy import orm\n'), ((37177, 37415), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""OrganizationMember"""'], {'secondary': '"""organization_roles"""', 'primaryjoin': '"""and_(OrganizationMember.organization_id==Organization.id, OrganizationRole.description=="Owner")"""', 'order_by': '"""OrganizationMember.created_at"""'}), '(\'OrganizationMember\', secondary=\'organization_roles\',\n primaryjoin=\n \'and_(OrganizationMember.organization_id==Organization.id, OrganizationRole.description=="Owner")\'\n , order_by=\'OrganizationMember.created_at\')\n', (37193, 37415), False, 'from sqlalchemy import orm\n'), ((37653, 37701), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Link"""'], {'backref': '"""organization"""'}), "('Link', backref='organization')\n", (37669, 37701), False, 'from sqlalchemy import orm\n'), ((37767, 38082), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {'secondary': '"""organization_groups"""', 'back_populates': '"""organizations"""', 'order_by': '"""OrganizationGroup.order"""', 'primaryjoin': '"""and_(OrganizationGroup.organization_id==Organization.id, OrganizationGroup.group_id==Group.id, Organization.activated_at.isnot(None), Group.archived_at==None)"""'}), "('Group', secondary='organization_groups', back_populates=\n 'organizations', order_by='OrganizationGroup.order', primaryjoin=\n 'and_(OrganizationGroup.organization_id==Organization.id, OrganizationGroup.group_id==Group.id, Organization.activated_at.isnot(None), Group.archived_at==None)'\n )\n", (37783, 38082), False, 'from sqlalchemy import orm\n'), ((38430, 38799), 
'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'secondary': '"""group_members"""', 'back_populates': '"""organizations"""', 'primaryjoin': '"""and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, GroupRole.description=="Group Leader", Group.archived_at==None)"""'}), '(\'User\', secondary=\'group_members\', back_populates=\n \'organizations\', primaryjoin=\n \'and_(GroupMember.user_id==User.id, GroupMember.group_id==Group.id, GroupMember.role_id==GroupRole.id, OrganizationGroup.group_id==Group.id, OrganizationGroup.organization_id==Organization.id, GroupRole.description=="Group Leader", Group.archived_at==None)\'\n )\n', (38446, 38799), False, 'from sqlalchemy import orm\n'), ((39001, 39059), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {'secondary': '"""organization_images"""'}), "('Image', secondary='organization_images')\n", (39017, 39059), False, 'from sqlalchemy import orm\n'), ((39080, 39136), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Notification"""'], {'backref': '"""organization"""'}), "('Notification', backref='organization')\n", (39096, 39136), False, 'from sqlalchemy import orm\n'), ((44416, 44440), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {}), "('User')\n", (44432, 44440), False, 'from sqlalchemy import orm\n'), ((44460, 44492), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {}), "('Organization')\n", (44476, 44492), False, 'from sqlalchemy import orm\n'), ((44504, 44540), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""OrganizationRole"""'], {}), "('OrganizationRole')\n", (44520, 44540), False, 'from sqlalchemy import orm\n'), ((44638, 44701), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {'back_populates': '"""organization_members"""'}), "('User', 
back_populates='organization_members')\n", (44654, 44701), False, 'from sqlalchemy import orm\n'), ((45046, 45070), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""User"""'], {}), "('User')\n", (45062, 45070), False, 'from sqlalchemy import orm\n'), ((45083, 45108), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {}), "('Image')\n", (45099, 45108), False, 'from sqlalchemy import orm\n'), ((45276, 45301), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Group"""'], {}), "('Group')\n", (45292, 45301), False, 'from sqlalchemy import orm\n'), ((45314, 45339), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {}), "('Image')\n", (45330, 45339), False, 'from sqlalchemy import orm\n'), ((45535, 45567), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Organization"""'], {}), "('Organization')\n", (45551, 45567), False, 'from sqlalchemy import orm\n'), ((45580, 45605), 'sqlalchemy.orm.relationship', 'orm.relationship', (['"""Image"""'], {}), "('Image')\n", (45596, 45605), False, 'from sqlalchemy import orm\n'), ((5204, 5217), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5215, 5217), True, 'import sqlalchemy as sa\n'), ((5273, 5286), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (5284, 5286), True, 'import sqlalchemy as sa\n'), ((9052, 9065), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (9061, 9065), True, 'import sqlalchemy as sa\n'), ((9108, 9120), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (9118, 9120), True, 'import sqlalchemy as sa\n'), ((9434, 9472), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""notification_types.id"""'], {}), "('notification_types.id')\n", (9447, 9472), True, 'import sqlalchemy as sa\n'), ((9564, 9589), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (9577, 9589), True, 'import sqlalchemy as sa\n'), ((9642, 9667), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", 
(9655, 9667), True, 'import sqlalchemy as sa\n'), ((9718, 9744), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (9731, 9744), True, 'import sqlalchemy as sa\n'), ((9801, 9834), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organizations.id"""'], {}), "('organizations.id')\n", (9814, 9834), True, 'import sqlalchemy as sa\n'), ((9916, 9928), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (9926, 9928), True, 'import sqlalchemy as sa\n'), ((9993, 10005), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (10003, 10005), True, 'import sqlalchemy as sa\n'), ((10063, 10076), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (10074, 10076), True, 'import sqlalchemy as sa\n'), ((10132, 10145), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (10143, 10145), True, 'import sqlalchemy as sa\n'), ((10205, 10218), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (10216, 10218), True, 'import sqlalchemy as sa\n'), ((11163, 11177), 'sqlalchemy.String', 'sa.String', (['(120)'], {}), '(120)\n', (11172, 11177), True, 'import sqlalchemy as sa\n'), ((11220, 11232), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (11230, 11232), True, 'import sqlalchemy as sa\n'), ((11695, 11708), 'sqlalchemy.String', 'sa.String', (['(40)'], {}), '(40)\n', (11704, 11708), True, 'import sqlalchemy as sa\n'), ((11752, 11765), 'sqlalchemy.String', 'sa.String', (['(40)'], {}), '(40)\n', (11761, 11765), True, 'import sqlalchemy as sa\n'), ((11939, 11953), 'sqlalchemy.String', 'sa.String', (['(254)'], {}), '(254)\n', (11948, 11953), True, 'import sqlalchemy as sa\n'), ((12101, 12114), 'sqlalchemy.String', 'sa.String', (['(16)'], {}), '(16)\n', (12110, 12114), True, 'import sqlalchemy as sa\n'), ((12163, 12177), 'sqlalchemy.String', 'sa.String', (['(128)'], {}), '(128)\n', (12172, 12177), True, 'import sqlalchemy as sa\n'), ((12222, 12235), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (12233, 12235), True, 'import 
sqlalchemy as sa\n'), ((12295, 12308), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (12304, 12308), True, 'import sqlalchemy as sa\n'), ((12346, 12359), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (12355, 12359), True, 'import sqlalchemy as sa\n'), ((12406, 12415), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (12413, 12415), True, 'import sqlalchemy as sa\n'), ((12460, 12472), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (12470, 12472), True, 'import sqlalchemy as sa\n'), ((12613, 12626), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (12624, 12626), True, 'import sqlalchemy as sa\n'), ((12751, 12764), 'sqlalchemy.String', 'sa.String', (['(64)'], {}), '(64)\n', (12760, 12764), True, 'import sqlalchemy as sa\n'), ((12811, 12823), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (12821, 12823), True, 'import sqlalchemy as sa\n'), ((12886, 12898), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (12896, 12898), True, 'import sqlalchemy as sa\n'), ((12964, 12976), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (12974, 12976), True, 'import sqlalchemy as sa\n'), ((13107, 13120), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (13118, 13120), True, 'import sqlalchemy as sa\n'), ((21631, 21669), 're.findall', 're.findall', (['regex', 'full_name_and_email'], {}), '(regex, full_name_and_email)\n', (21641, 21669), False, 'import re\n'), ((21825, 21839), 'sqlalchemy.String', 'sa.String', (['(200)'], {}), '(200)\n', (21834, 21839), True, 'import sqlalchemy as sa\n'), ((21882, 21894), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (21892, 21894), True, 'import sqlalchemy as sa\n'), ((22276, 22306), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""link_types.id"""'], {}), "('link_types.id')\n", (22289, 22306), True, 'import sqlalchemy as sa\n'), ((22339, 22353), 'sqlalchemy.String', 'sa.String', (['(120)'], {}), '(120)\n', (22348, 22353), True, 'import sqlalchemy as sa\n'), ((22395, 22428), 
'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organizations.id"""'], {}), "('organizations.id')\n", (22408, 22428), True, 'import sqlalchemy as sa\n'), ((22478, 22504), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (22491, 22504), True, 'import sqlalchemy as sa\n'), ((22541, 22555), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (22550, 22555), True, 'import sqlalchemy as sa\n'), ((23084, 23098), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (23093, 23098), True, 'import sqlalchemy as sa\n'), ((23137, 23151), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (23146, 23151), True, 'import sqlalchemy as sa\n'), ((23225, 23238), 'sqlalchemy.String', 'sa.String', (['(30)'], {}), '(30)\n', (23234, 23238), True, 'import sqlalchemy as sa\n'), ((23304, 23316), 'sqlalchemy.String', 'sa.String', (['(9)'], {}), '(9)\n', (23313, 23316), True, 'import sqlalchemy as sa\n'), ((24453, 24467), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (24462, 24467), True, 'import sqlalchemy as sa\n'), ((24510, 24522), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (24520, 24522), True, 'import sqlalchemy as sa\n'), ((24582, 24595), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (24593, 24595), True, 'import sqlalchemy as sa\n'), ((24700, 24713), 'sqlalchemy.String', 'sa.String', (['(10)'], {}), '(10)\n', (24709, 24713), True, 'import sqlalchemy as sa\n'), ((24737, 24751), 'sqlalchemy.String', 'sa.String', (['(100)'], {}), '(100)\n', (24746, 24751), True, 'import sqlalchemy as sa\n'), ((24795, 24809), 'sqlalchemy.String', 'sa.String', (['(500)'], {}), '(500)\n', (24804, 24809), True, 'import sqlalchemy as sa\n'), ((24852, 24866), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (24861, 24866), True, 'import sqlalchemy as sa\n'), ((24916, 24925), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (24923, 24925), True, 'import sqlalchemy as sa\n'), ((24988, 25013), 
'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (25001, 25013), True, 'import sqlalchemy as sa\n'), ((25064, 25090), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (25077, 25090), True, 'import sqlalchemy as sa\n'), ((27732, 27804), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""group_id"""', '"""user_id"""'], {'name': '"""group_user_membership"""'}), "('group_id', 'user_id', name='group_user_membership')\n", (27751, 27804), True, 'import sqlalchemy as sa\n'), ((27955, 27981), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (27968, 27981), True, 'import sqlalchemy as sa\n'), ((28015, 28040), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (28028, 28040), True, 'import sqlalchemy as sa\n'), ((28074, 28105), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""group_roles.id"""'], {}), "('group_roles.id')\n", (28087, 28105), True, 'import sqlalchemy as sa\n'), ((28142, 28154), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (28152, 28154), True, 'import sqlalchemy as sa\n'), ((28293, 28306), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (28304, 28306), True, 'import sqlalchemy as sa\n'), ((28678, 28691), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (28687, 28691), True, 'import sqlalchemy as sa\n'), ((28734, 28746), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (28744, 28746), True, 'import sqlalchemy as sa\n'), ((29115, 29141), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (29128, 29141), True, 'import sqlalchemy as sa\n'), ((29173, 29186), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (29182, 29186), True, 'import sqlalchemy as sa\n'), ((29229, 29243), 'sqlalchemy.String', 'sa.String', (['(250)'], {}), '(250)\n', (29238, 29243), True, 'import sqlalchemy as sa\n'), ((29319, 29332), 'sqlalchemy.String', 
'sa.String', (['(80)'], {}), '(80)\n', (29328, 29332), True, 'import sqlalchemy as sa\n'), ((29359, 29371), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (29369, 29371), True, 'import sqlalchemy as sa\n'), ((29660, 29686), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (29673, 29686), True, 'import sqlalchemy as sa\n'), ((29730, 29765), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""meet_time_types.id"""'], {}), "('meet_time_types.id')\n", (29743, 29765), True, 'import sqlalchemy as sa\n'), ((30200, 30213), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (30209, 30213), True, 'import sqlalchemy as sa\n'), ((30377, 30389), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (30387, 30389), True, 'import sqlalchemy as sa\n'), ((30618, 30631), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (30627, 30631), True, 'import sqlalchemy as sa\n'), ((30674, 30686), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (30684, 30686), True, 'import sqlalchemy as sa\n'), ((31052, 31064), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (31062, 31064), True, 'import sqlalchemy as sa\n'), ((31109, 31122), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (31118, 31122), True, 'import sqlalchemy as sa\n'), ((31677, 31710), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organizations.id"""'], {}), "('organizations.id')\n", (31690, 31710), True, 'import sqlalchemy as sa\n'), ((31745, 31771), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (31758, 31771), True, 'import sqlalchemy as sa\n'), ((31795, 31807), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (31805, 31807), True, 'import sqlalchemy as sa\n'), ((31992, 32005), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (32001, 32005), True, 'import sqlalchemy as sa\n'), ((32051, 32060), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (32058, 32060), True, 'import sqlalchemy as sa\n'), 
((32107, 32116), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (32114, 32116), True, 'import sqlalchemy as sa\n'), ((32161, 32174), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (32172, 32174), True, 'import sqlalchemy as sa\n'), ((32230, 32243), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (32239, 32243), True, 'import sqlalchemy as sa\n'), ((32329, 32341), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (32339, 32341), True, 'import sqlalchemy as sa\n'), ((32407, 32437), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""age_ranges.id"""'], {}), "('age_ranges.id')\n", (32420, 32437), True, 'import sqlalchemy as sa\n'), ((32509, 32521), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (32519, 32521), True, 'import sqlalchemy as sa\n'), ((32675, 32688), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (32684, 32688), True, 'import sqlalchemy as sa\n'), ((32822, 32864), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""group_privacy_settings.id"""'], {}), "('group_privacy_settings.id')\n", (32835, 32864), True, 'import sqlalchemy as sa\n'), ((33024, 33055), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""group_types.id"""'], {}), "('group_types.id')\n", (33037, 33055), True, 'import sqlalchemy as sa\n'), ((36240, 36253), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (36249, 36253), True, 'import sqlalchemy as sa\n'), ((36299, 36308), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (36306, 36308), True, 'import sqlalchemy as sa\n'), ((36366, 36379), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (36377, 36379), True, 'import sqlalchemy as sa\n'), ((36439, 36451), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (36449, 36451), True, 'import sqlalchemy as sa\n'), ((36512, 36525), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (36521, 36525), True, 'import sqlalchemy as sa\n'), ((36715, 36728), 'sqlalchemy.String', 'sa.String', (['(80)'], {}), '(80)\n', (36724, 36728), True, 'import 
sqlalchemy as sa\n'), ((36834, 36847), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (36845, 36847), True, 'import sqlalchemy as sa\n'), ((36899, 36928), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""addresses.id"""'], {}), "('addresses.id')\n", (36912, 36928), True, 'import sqlalchemy as sa\n'), ((39166, 39179), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (39177, 39179), True, 'import sqlalchemy as sa\n'), ((44003, 44080), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""organization_id"""', '"""user_id"""'], {'name': '"""org_user_membership"""'}), "('organization_id', 'user_id', name='org_user_membership')\n", (44022, 44080), True, 'import sqlalchemy as sa\n'), ((44192, 44217), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (44205, 44217), True, 'import sqlalchemy as sa\n'), ((44259, 44292), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organizations.id"""'], {}), "('organizations.id')\n", (44272, 44292), True, 'import sqlalchemy as sa\n'), ((44365, 44403), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organization_roles.id"""'], {}), "('organization_roles.id')\n", (44378, 44403), True, 'import sqlalchemy as sa\n'), ((44581, 44593), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (44591, 44593), True, 'import sqlalchemy as sa\n'), ((44947, 44972), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (44960, 44972), True, 'import sqlalchemy as sa\n'), ((45007, 45033), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""images.id"""'], {}), "('images.id')\n", (45020, 45033), True, 'import sqlalchemy as sa\n'), ((45175, 45201), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""groups.id"""'], {}), "('groups.id')\n", (45188, 45201), True, 'import sqlalchemy as sa\n'), ((45236, 45262), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""images.id"""'], {}), "('images.id')\n", (45249, 45262), True, 'import sqlalchemy as sa\n'), ((45420, 45453), 
'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""organizations.id"""'], {}), "('organizations.id')\n", (45433, 45453), True, 'import sqlalchemy as sa\n'), ((45488, 45514), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""images.id"""'], {}), "('images.id')\n", (45501, 45514), True, 'import sqlalchemy as sa\n'), ((45660, 45674), 'sqlalchemy.String', 'sa.String', (['(500)'], {}), '(500)\n', (45669, 45674), True, 'import sqlalchemy as sa\n'), ((45718, 45732), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (45727, 45732), True, 'import sqlalchemy as sa\n'), ((45967, 46015), 'lifeloopweb.webpack.webpack.asset_url_for', 'webpack.asset_url_for', (['"""images/card.default.png"""'], {}), "('images/card.default.png')\n", (45988, 46015), False, 'from lifeloopweb.webpack import webpack\n'), ((46064, 46077), 'sqlalchemy.String', 'sa.String', (['(60)'], {}), '(60)\n', (46073, 46077), True, 'import sqlalchemy as sa\n'), ((46119, 46135), 'sqlalchemy.String', 'sa.String', (['(20000)'], {}), '(20000)\n', (46128, 46135), True, 'import sqlalchemy as sa\n'), ((46178, 46190), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (46188, 46190), True, 'import sqlalchemy as sa\n'), ((46235, 46248), 'sqlalchemy.String', 'sa.String', (['(60)'], {}), '(60)\n', (46244, 46248), True, 'import sqlalchemy as sa\n'), ((3369, 3380), 'sqlalchemy.select', 'select', (['[1]'], {}), '([1])\n', (3375, 3380), False, 'from sqlalchemy import event, exc, func, select\n'), ((5234, 5244), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (5242, 5244), False, 'from sqlalchemy import event, exc, func, select\n'), ((5297, 5307), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (5305, 5307), False, 'from sqlalchemy import event, exc, func, select\n'), ((5534, 5570), 'lifeloopweb.db.utils.to_snake_case', 'db_utils.to_snake_case', (['cls.__name__'], {}), '(cls.__name__)\n', (5556, 5570), True, 'from lifeloopweb.db import utils as db_utils\n'), ((21712, 21736), 
'lifeloopweb.exception.InvalidEmail', 'exception.InvalidEmail', ([], {}), '()\n', (21734, 21736), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((26938, 26979), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'self.duration'}), '(minutes=self.duration)\n', (26956, 26979), False, 'import datetime\n'), ((39943, 39979), 'lifeloopweb.subscription.ChargifyDriver', 'subscription.ChargifyDriver', (['self.id'], {}), '(self.id)\n', (39970, 39979), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((41284, 41310), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (41308, 41310), False, 'import datetime\n'), ((41569, 41605), 'lifeloopweb.subscription.ChargifyDriver', 'subscription.ChargifyDriver', (['self.id'], {}), '(self.id)\n', (41596, 41605), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((42041, 42075), 'lifeloopweb.subscription.subscription_driver', 'subscription.subscription_driver', ([], {}), '()\n', (42073, 42075), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((6180, 6233), 'lifeloopweb.exception.ModelUnknownAttrbute', 'exception.ModelUnknownAttrbute', ([], {'model': 'self', 'attr': 'attr'}), '(model=self, attr=attr)\n', (6210, 6233), False, 'from lifeloopweb import config, constants, exception, logging, renders, subscription\n'), ((40800, 40853), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(date, '%Y-%m-%dT%H:%M:%S')\n", (40826, 40853), False, 'import datetime\n'), ((3485, 3496), 'sqlalchemy.select', 'select', (['[1]'], {}), '([1])\n', (3491, 3496), False, 'from sqlalchemy import event, exc, func, select\n'), ((4624, 4660), 'functools.partial', 'functools.partial', (['cls._get_by', 'attr'], {}), '(cls._get_by, attr)\n', (4641, 4660), False, 'import functools\n'), 
((4819, 4859), 'functools.partial', 'functools.partial', (['cls._get_all_by', 'attr'], {}), '(cls._get_all_by, attr)\n', (4836, 4859), False, 'import functools\n'), ((35456, 35484), 'sqlalchemy.orm.load_only', 'orm.load_only', (['"""description"""'], {}), "('description')\n", (35469, 35484), False, 'from sqlalchemy import orm\n'), ((15491, 15530), 'sqlalchemy.func.min', 'func.min', (['OrganizationMember.created_at'], {}), '(OrganizationMember.created_at)\n', (15499, 15530), False, 'from sqlalchemy import event, exc, func, select\n')] |
import unittest
import os.path
import requests_mock
import tableauserverclient as TSC
# Absolute paths to the canned XML response files used to mock the
# Tableau Server REST API in the tests below.
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
SIGN_IN_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in.xml')
SIGN_IN_IMPERSONATE_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_impersonate.xml')
SIGN_IN_ERROR_XML = os.path.join(TEST_ASSET_DIR, 'auth_sign_in_error.xml')
class AuthTests(unittest.TestCase):
    """Tests for the ``server.auth`` endpoint wrappers (sign in/out,
    impersonation, personal access tokens, site switching, token revocation).

    Every test replays a canned XML response through ``requests_mock`` so no
    real Tableau Server is contacted.
    """

    def setUp(self):
        self.server = TSC.Server('http://test')
        self.baseurl = self.server.auth.baseurl

    @staticmethod
    def _response_xml(asset_path):
        """Return the contents of a test asset file decoded as UTF-8.

        Factored out because every test previously repeated the same
        open/read/decode boilerplate.
        """
        with open(asset_path, 'rb') as f:
            return f.read().decode('utf-8')

    def test_sign_in(self):
        response_xml = self._response_xml(SIGN_IN_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.TableauAuth('testuser', 'password', site_id='Samples')
            self.server.auth.sign_in(tableau_auth)

        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_sign_in_with_personal_access_tokens(self):
        response_xml = self._response_xml(SIGN_IN_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken',
                                                        personal_access_token='<PASSWORD>', site_id='Samples')
            self.server.auth.sign_in(tableau_auth)

        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_sign_in_impersonate(self):
        response_xml = self._response_xml(SIGN_IN_IMPERSONATE_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            tableau_auth = TSC.TableauAuth('testuser', 'password',
                                            user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')
            self.server.auth.sign_in(tableau_auth)

        self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgz<PASSWORD>', self.server.auth_token)
        self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67', self.server.site_id)
        self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794', self.server.user_id)

    def test_sign_in_error(self):
        # The server replies 401 with an error payload; sign_in must raise.
        response_xml = self._response_xml(SIGN_IN_ERROR_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.TableauAuth('testuser', '<PASSWORD>')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_in_invalid_token(self):
        response_xml = self._response_xml(SIGN_IN_ERROR_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.PersonalAccessTokenAuth(token_name='mytoken', personal_access_token='invalid')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_in_without_auth(self):
        response_xml = self._response_xml(SIGN_IN_ERROR_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml, status_code=401)
            tableau_auth = TSC.TableauAuth('', '')
            self.assertRaises(TSC.ServerResponseError, self.server.auth.sign_in, tableau_auth)

    def test_sign_out(self):
        response_xml = self._response_xml(SIGN_IN_XML)
        with requests_mock.mock() as m:
            m.post(self.baseurl + '/signin', text=response_xml)
            m.post(self.baseurl + '/signout', text='')
            tableau_auth = TSC.TableauAuth('testuser', 'password')
            self.server.auth.sign_in(tableau_auth)
            self.server.auth.sign_out()

        # Signing out must clear all cached auth state.
        self.assertIsNone(self.server._auth_token)
        self.assertIsNone(self.server._site_id)
        self.assertIsNone(self.server._user_id)

    def test_switch_site(self):
        self.server.version = '2.6'
        baseurl = self.server.auth.baseurl
        # Pre-populate auth state with placeholder values; switch_site should
        # replace them with the values from the mocked response.
        # NOTE: the original `site_id, user_id, auth_token = list('<PASSWORD>')`
        # raised ValueError (unpacking a string of length != 3 into 3 names).
        site_id, user_id, auth_token = 'old_site', 'old_user', 'old_token'
        self.server._set_auth(site_id, user_id, auth_token)
        response_xml = self._response_xml(SIGN_IN_XML)
        with requests_mock.mock() as m:
            m.post(baseurl + '/switchSite', text=response_xml)
            site = TSC.SiteItem('Samples', 'Samples')
            self.server.auth.switch_site(site)

        self.assertEqual('eIX6mvFsq<PASSWORD>4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('<PASSWORD>-8120<PASSWORD>', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)

    def test_revoke_all_server_admin_tokens(self):
        self.server.version = "3.10"
        baseurl = self.server.auth.baseurl
        response_xml = self._response_xml(SIGN_IN_XML)
        with requests_mock.mock() as m:
            m.post(baseurl + '/signin', text=response_xml)
            m.post(baseurl + '/revokeAllServerAdminTokens', text='')
            tableau_auth = TSC.TableauAuth('testuser', 'password')
            self.server.auth.sign_in(tableau_auth)
            self.server.auth.revoke_all_server_admin_tokens()

        # Revoking admin tokens must not disturb the current session.
        self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l', self.server.auth_token)
        self.assertEqual('<PASSWORD>ba-b82b-4f0f-91ed-812074ac5da6', self.server.site_id)
        self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01', self.server.user_id)
| [
"tableauserverclient.TableauAuth",
"requests_mock.mock",
"tableauserverclient.PersonalAccessTokenAuth",
"tableauserverclient.SiteItem",
"tableauserverclient.Server"
] | [((461, 486), 'tableauserverclient.Server', 'TSC.Server', (['"""http://test"""'], {}), "('http://test')\n", (471, 486), True, 'import tableauserverclient as TSC\n'), ((672, 692), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (690, 692), False, 'import requests_mock\n'), ((790, 848), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['"""testuser"""', '"""password"""'], {'site_id': '"""Samples"""'}), "('testuser', 'password', site_id='Samples')\n", (805, 848), True, 'import tableauserverclient as TSC\n'), ((1323, 1343), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (1341, 1343), False, 'import requests_mock\n'), ((1441, 1550), 'tableauserverclient.PersonalAccessTokenAuth', 'TSC.PersonalAccessTokenAuth', ([], {'token_name': '"""mytoken"""', 'personal_access_token': '"""<PASSWORD>"""', 'site_id': '"""Samples"""'}), "(token_name='mytoken', personal_access_token=\n '<PASSWORD>', site_id='Samples')\n", (1468, 1550), True, 'import tableauserverclient as TSC\n'), ((2071, 2091), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (2089, 2091), False, 'import requests_mock\n'), ((2189, 2296), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['"""testuser"""', '"""password"""'], {'user_id_to_impersonate': '"""dd2239f6-ddf1-4107-981a-4cf94e415794"""'}), "('testuser', 'password', user_id_to_impersonate=\n 'dd2239f6-ddf1-4107-981a-4cf94e415794')\n", (2204, 2296), True, 'import tableauserverclient as TSC\n'), ((2800, 2820), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (2818, 2820), False, 'import requests_mock\n'), ((2935, 2976), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['"""testuser"""', '"""<PASSWORD>"""'], {}), "('testuser', '<PASSWORD>')\n", (2950, 2976), True, 'import tableauserverclient as TSC\n'), ((3229, 3249), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (3247, 3249), False, 'import requests_mock\n'), ((3364, 3451), 'tableauserverclient.PersonalAccessTokenAuth', 
'TSC.PersonalAccessTokenAuth', ([], {'token_name': '"""mytoken"""', 'personal_access_token': '"""invalid"""'}), "(token_name='mytoken', personal_access_token=\n 'invalid')\n", (3391, 3451), True, 'import tableauserverclient as TSC\n'), ((3698, 3718), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (3716, 3718), False, 'import requests_mock\n'), ((3833, 3856), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['""""""', '""""""'], {}), "('', '')\n", (3848, 3856), True, 'import tableauserverclient as TSC\n'), ((4090, 4110), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (4108, 4110), False, 'import requests_mock\n'), ((4263, 4302), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['"""testuser"""', '"""password"""'], {}), "('testuser', 'password')\n", (4278, 4302), True, 'import tableauserverclient as TSC\n'), ((4880, 4900), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (4898, 4900), False, 'import requests_mock\n'), ((4989, 5023), 'tableauserverclient.SiteItem', 'TSC.SiteItem', (['"""Samples"""', '"""Samples"""'], {}), "('Samples', 'Samples')\n", (5001, 5023), True, 'import tableauserverclient as TSC\n'), ((5563, 5583), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (5581, 5583), False, 'import requests_mock\n'), ((5745, 5784), 'tableauserverclient.TableauAuth', 'TSC.TableauAuth', (['"""testuser"""', '"""password"""'], {}), "('testuser', 'password')\n", (5760, 5784), True, 'import tableauserverclient as TSC\n')] |
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.dynamic = False # if the agents are moving or not
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
# number states per agent
self.nx_system = 4
# numer of observations per agent
self.n_features = 6
# number of actions per agent
self.nu = 2
# problem parameters from file
self.n_agents = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.comm_radius2 = self.comm_radius * self.comm_radius
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
# intitialize state matrices
self.x = np.zeros((self.n_agents, self.nx_system))
self.u = np.zeros((self.n_agents, self.nu))
self.mean_vel = np.zeros((self.n_agents, self.nu))
self.init_vel = np.zeros((self.n_agents, self.nu))
self.a_net = np.zeros((self.n_agents, self.n_agents))
# TODO : what should the action space be? is [-1,1] OK?
self.max_accel = 1
self.gain = 10.0 # TODO - adjust if necessary - may help the NN performance
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
dtype=np.float32)
self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
dtype=np.float32)
self.fig = None
self.line1 = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
#u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
self.u = u
if self.dynamic:
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt
# x velocity
self.x[:, 2] = self.x[:, 2] + self.gain * self.u[:, 0] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
# y velocity
self.x[:, 3] = self.x[:, 3] + self.gain * self.u[:, 1] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
return self._get_obs(), self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
# TODO adjust to desired reward
# action_cost = -1.0 * np.sum(np.square(self.u))
#curr_variance = -1.0 * np.sum((np.var(self.x[:, 2:4], axis=0)))
versus_initial_vel = -1.0 * np.sum(np.sum(np.square(self.x[:, 2:4] - self.mean_vel), axis=1))
#return curr_variance + versus_initial_vel
return versus_initial_vel
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
degree = 0
min_dist = 0
min_dist_thresh = 0.1 # 0.25
# generate an initial configuration with all agents connected,
# and minimum distance between agents > min_dist_thresh
while degree < 2 or min_dist < min_dist_thresh:
# randomly initialize the location and velocity of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_agents,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[1]
# compute distances between agents
a_net = self.dist2_mat(x)
# compute minimum distance between agents and degree of network to check if good initial configuration
min_dist = np.sqrt(np.min(np.min(a_net)))
a_net = a_net < self.comm_radius2
degree = np.min(np.sum(a_net.astype(int), axis=1))
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
self.a_net = self.get_connectivity(self.x)
return self._get_obs()
def _get_obs(self):
# state_values = self.x
state_values = np.hstack((self.x, self.init_vel)) # initial velocities are part of state to make system observable
if self.dynamic:
state_network = self.get_connectivity(self.x)
else:
state_network = self.a_net
return (state_values, state_network)
def dist2_mat(self, x):
"""
Compute squared euclidean distances between agents. Diagonal elements are infinity
Args:
x (): current state of all agents
Returns: symmetric matrix of size (n_agents, n_agents) with A_ij the distance between agents i and j
"""
x_loc = np.reshape(x[:, 0:2], (self.n_agents,2,1))
a_net = np.sum(np.square(np.transpose(x_loc, (0,2,1)) - np.transpose(x_loc, (2,0,1))), axis=2)
np.fill_diagonal(a_net, np.Inf)
return a_net
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current state of all agents
Returns: adjacency matrix of network
"""
a_net = self.dist2_mat(x)
a_net = (a_net < self.comm_radius2).astype(float)
if self.mean_pooling:
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(a_net, axis=1), (self.n_agents,1)) # TODO or axis=0? Is the mean in the correct direction?
n_neighbors[n_neighbors == 0] = 1
a_net = a_net / n_neighbors
return a_net
def controller(self):
"""
Consensus-based centralized flocking with no obstacle avoidance
Returns: the optimal action
"""
# TODO implement Tanner 2003?
u = np.mean(self.x[:,2:4], axis=0) - self.x[:,2:4]
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
return u
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
| [
"numpy.clip",
"configparser.ConfigParser",
"numpy.hstack",
"numpy.sin",
"gym.utils.seeding.np_random",
"numpy.mean",
"numpy.reshape",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.gca",
"numpy.fill_diagonal",
"numpy.square",
"os.path.dirname",
"numpy.cos",
"matplotlib.pyplot.... | [((431, 458), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (456, 458), False, 'import configparser\n'), ((1387, 1428), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nx_system)'], {}), '((self.n_agents, self.nx_system))\n', (1395, 1428), True, 'import numpy as np\n'), ((1446, 1480), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nu)'], {}), '((self.n_agents, self.nu))\n', (1454, 1480), True, 'import numpy as np\n'), ((1505, 1539), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nu)'], {}), '((self.n_agents, self.nu))\n', (1513, 1539), True, 'import numpy as np\n'), ((1564, 1598), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nu)'], {}), '((self.n_agents, self.nu))\n', (1572, 1598), True, 'import numpy as np\n'), ((1620, 1660), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.n_agents)'], {}), '((self.n_agents, self.n_agents))\n', (1628, 1660), True, 'import numpy as np\n'), ((1866, 1969), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-self.max_accel)', 'high': 'self.max_accel', 'shape': '(2 * self.n_agents,)', 'dtype': 'np.float32'}), '(low=-self.max_accel, high=self.max_accel, shape=(2 * self.\n n_agents,), dtype=np.float32)\n', (1876, 1969), False, 'from gym import spaces, error, utils\n'), ((2039, 2137), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.Inf)', 'high': 'np.Inf', 'shape': '(self.n_agents, self.n_features)', 'dtype': 'np.float32'}), '(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),\n dtype=np.float32)\n', (2049, 2137), False, 'from gym import spaces, error, utils\n'), ((2313, 2336), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2330, 2336), False, 'from gym.utils import seeding\n'), ((3519, 3560), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nx_system)'], {}), '((self.n_agents, self.nx_system))\n', (3527, 3560), True, 'import numpy as np\n'), ((4882, 4908), 'numpy.mean', 'np.mean', (['x[:, 2:4]'], {'axis': '(0)'}), 
'(x[:, 2:4], axis=0)\n', (4889, 4908), True, 'import numpy as np\n'), ((5124, 5158), 'numpy.hstack', 'np.hstack', (['(self.x, self.init_vel)'], {}), '((self.x, self.init_vel))\n', (5133, 5158), True, 'import numpy as np\n'), ((5738, 5782), 'numpy.reshape', 'np.reshape', (['x[:, 0:2]', '(self.n_agents, 2, 1)'], {}), '(x[:, 0:2], (self.n_agents, 2, 1))\n', (5748, 5782), True, 'import numpy as np\n'), ((5892, 5923), 'numpy.fill_diagonal', 'np.fill_diagonal', (['a_net', 'np.Inf'], {}), '(a_net, np.Inf)\n', (5908, 5923), True, 'import numpy as np\n'), ((6988, 7043), 'numpy.clip', 'np.clip', (['u'], {'a_min': '(-self.max_accel)', 'a_max': 'self.max_accel'}), '(u, a_min=-self.max_accel, a_max=self.max_accel)\n', (6995, 7043), True, 'import numpy as np\n'), ((370, 392), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'from os import path\n'), ((4178, 4242), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_bias)', 'high': 'self.v_bias', 'size': '(2,)'}), '(low=-self.v_bias, high=self.v_bias, size=(2,))\n', (4195, 4242), True, 'import numpy as np\n'), ((6929, 6960), 'numpy.mean', 'np.mean', (['self.x[:, 2:4]'], {'axis': '(0)'}), '(self.x[:, 2:4], axis=0)\n', (6936, 6960), True, 'import numpy as np\n'), ((7229, 7238), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (7236, 7238), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7267, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7470, 7515), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (7478, 7515), True, 'import matplotlib.pyplot as plt\n'), ((7528, 7573), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (7536, 7573), True, 'import matplotlib.pyplot as plt\n'), ((7590, 7595), 'matplotlib.pyplot.gca', 'gca', ([], 
{}), '()\n', (7593, 7595), False, 'from matplotlib.pyplot import gca\n'), ((7712, 7739), 'matplotlib.pyplot.title', 'plt.title', (['"""GNN Controller"""'], {}), "('GNN Controller')\n", (7721, 7739), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3991), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.r_max'], {'size': '(self.n_agents,)'}), '(0, self.r_max, size=(self.n_agents,))\n', (3953, 3991), True, 'import numpy as np\n'), ((4021, 4067), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {'size': '(self.n_agents,)'}), '(0, 2, size=(self.n_agents,))\n', (4038, 4067), True, 'import numpy as np\n'), ((4099, 4112), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4105, 4112), True, 'import numpy as np\n'), ((4144, 4157), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4150, 4157), True, 'import numpy as np\n'), ((4265, 4339), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (4282, 4339), True, 'import numpy as np\n'), ((4372, 4446), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (4389, 4446), True, 'import numpy as np\n'), ((6512, 6533), 'numpy.sum', 'np.sum', (['a_net'], {'axis': '(1)'}), '(a_net, axis=1)\n', (6518, 6533), True, 'import numpy as np\n'), ((3345, 3386), 'numpy.square', 'np.square', (['(self.x[:, 2:4] - self.mean_vel)'], {}), '(self.x[:, 2:4] - self.mean_vel)\n', (3354, 3386), True, 'import numpy as np\n'), ((4697, 4710), 'numpy.min', 'np.min', (['a_net'], {}), '(a_net)\n', (4703, 4710), True, 'import numpy as np\n'), ((5814, 5844), 'numpy.transpose', 'np.transpose', (['x_loc', '(0, 2, 1)'], {}), '(x_loc, (0, 2, 1))\n', (5826, 5844), True, 'import numpy as np\n'), ((5845, 5875), 'numpy.transpose', 'np.transpose', (['x_loc', '(2, 0, 
1)'], {}), '(x_loc, (2, 0, 1))\n', (5857, 5875), True, 'import numpy as np\n')] |
import sys,os
from torch.autograd import Variable
import torch.optim as optim
from tensorboardX import SummaryWriter
import torch
import time
import shutil
from torch.utils.data import DataLoader
import csv
from samp_net import EMDLoss, AttributeLoss, SAMPNet
from config import Config
from cadb_dataset import CADBDataset
from test import evaluation_on_cadb
def calculate_accuracy(predict, target, threhold=2.6):
assert target.shape == predict.shape, '{} vs. {}'.format(target.shape, predict.shape)
bin_tar = target > threhold
bin_pre = predict > threhold
correct = (bin_tar == bin_pre).sum()
acc = correct.float() / target.size(0)
return correct,acc
def build_dataloader(cfg):
trainset = CADBDataset('train', cfg)
trainloader = DataLoader(trainset,
batch_size=cfg.batch_size,
shuffle=True,
num_workers=cfg.num_workers,
drop_last=False)
return trainloader
class Trainer(object):
def __init__(self, model, cfg):
self.cfg = cfg
self.model = model
self.device = torch.device('cuda:{}'.format(self.cfg.gpu_id))
self.trainloader = build_dataloader(cfg)
self.optimizer = self.create_optimizer()
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, mode='min', patience=5)
self.epoch = 0
self.iters = 0
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
self.avg_att = 0.
self.smooth_coe = 0.4
self.smooth_mse = None
self.smooth_emd = None
self.smooth_acc = None
self.smooth_att = None
self.mse_loss = torch.nn.MSELoss()
self.emd_loss = EMDLoss()
self.test_acc = []
self.test_emd1 = []
self.test_emd2 = []
self.test_mse = []
self.test_srcc = []
self.test_lcc = []
if cfg.use_attribute:
self.att_loss = AttributeLoss(cfg.attribute_weight)
self.least_metric = 1.
self.writer = self.create_writer()
def create_optimizer(self):
# for param in self.model.backbone.parameters():
# param.requires_grad = False
bb_params = list(map(id, self.model.backbone.parameters()))
lr_params = filter(lambda p:id(p) not in bb_params, self.model.parameters())
params = [
{'params': lr_params, 'lr': self.cfg.lr},
{'params': self.model.backbone.parameters(), 'lr': self.cfg.lr * 0.01}
]
if self.cfg.optimizer == 'adam':
optimizer = optim.Adam(params,
weight_decay=self.cfg.weight_decay)
elif self.cfg.optimizer == 'sgd':
optimizer = optim.SGD(params,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay)
else:
raise ValueError(f"not such optimizer {self.cfg.optimizer}")
return optimizer
def create_writer(self):
print('Create tensorboardX writer...', self.cfg.log_dir)
writer = SummaryWriter(log_dir=self.cfg.log_dir)
return writer
def run(self):
for epoch in range(self.cfg.max_epoch):
self.run_epoch()
self.epoch += 1
self.scheduler.step(metrics=self.least_metric)
self.writer.add_scalar('Train/lr', self.optimizer.param_groups[0]['lr'], self.epoch)
if self.epoch % self.cfg.save_epoch == 0:
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-{epoch}.pth')
torch.save(self.model.state_dict(), checkpoint_path.format(epoch=self.epoch))
print('Save checkpoint...')
if self.epoch % self.cfg.test_epoch == 0:
test_emd = self.eval_training()
if test_emd < self.least_metric:
self.least_metric = test_emd
checkpoint_path = os.path.join(self.cfg.checkpoint_dir, 'model-best.pth')
torch.save(self.model.state_dict(), checkpoint_path)
print('Update best checkpoint...')
self.writer.add_scalar('Test/Least EMD', self.least_metric, self.epoch)
def eval_training(self):
avg_acc, avg_r1_emd, avg_r2_emd, avg_mse, SRCC, LCC = \
evaluation_on_cadb(self.model, self.cfg)
self.writer.add_scalar('Test/Average EMD(r=2)', avg_r2_emd, self.epoch)
self.writer.add_scalar('Test/Average EMD(r=1)', avg_r1_emd, self.epoch)
self.writer.add_scalar('Test/Average MSE', avg_mse, self.epoch)
self.writer.add_scalar('Test/Accuracy', avg_acc, self.epoch)
self.writer.add_scalar('Test/SRCC', SRCC, self.epoch)
self.writer.add_scalar('Test/LCC', LCC, self.epoch)
error = avg_r1_emd
self.test_acc.append(avg_acc)
self.test_emd1.append(avg_r1_emd)
self.test_emd2.append(avg_r2_emd)
self.test_mse.append(avg_mse)
self.test_srcc.append(SRCC)
self.test_lcc.append(LCC)
self.write2csv()
return error
def write2csv(self):
csv_path = os.path.join(self.cfg.exp_path, '..', '{}.csv'.format(self.cfg.exp_name))
header = ['epoch', 'Accuracy', 'EMD r=1', 'EMD r=2', 'MSE', 'SRCC', 'LCC']
epoches = list(range(len(self.test_acc)))
metrics = [epoches, self.test_acc, self.test_emd1, self.test_emd2,
self.test_mse, self.test_srcc, self.test_lcc]
rows = [header]
for i in range(len(epoches)):
row = [m[i] for m in metrics]
rows.append(row)
for name, m in zip(header, metrics):
if name == 'epoch':
continue
index = m.index(min(m))
if name in ['Accuracy', 'SRCC', 'LCC']:
index = m.index(max(m))
title = 'best {} (epoch-{})'.format(name, index)
row = [l[index] for l in metrics]
row[0] = title
rows.append(row)
with open(csv_path, 'w') as f:
cw = csv.writer(f)
cw.writerows(rows)
print('Save result to ', csv_path)
def dist2ave(self, pred_dist):
pred_score = torch.sum(pred_dist* torch.Tensor(range(1,6)).to(pred_dist.device), dim=-1, keepdim=True)
return pred_score
def run_epoch(self):
self.model.train()
for batch, data in enumerate(self.trainloader):
self.iters += 1
image = data[0].to(self.device)
score = data[1].to(self.device)
score_dist = data[2].to(self.device)
saliency = data[3].to(self.device)
attributes = data[4].to(self.device)
weight = data[5].to(self.device)
pred_weight, pred_atts, pred_dist = self.model(image, saliency)
if self.cfg.use_weighted_loss:
dist_loss = self.emd_loss(score_dist, pred_dist, weight)
else:
dist_loss = self.emd_loss(score_dist, pred_dist)
if self.cfg.use_attribute:
att_loss = self.att_loss(attributes, pred_atts)
loss = dist_loss + att_loss
else:
loss = dist_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.avg_emd += dist_loss.item()
self.avg_att += att_loss.item()
pred_score = self.dist2ave(pred_dist)
correct, accuracy = calculate_accuracy(pred_score, score)
self.avg_acc += accuracy.item()
if (self.iters+1) % self.cfg.display_steps == 0:
print('ground truth: average={}'.format(score.view(-1)))
print('prediction: average={}'.format(pred_score.view(-1)))
self.avg_emd = self.avg_emd / self.cfg.display_steps
self.avg_acc = self.avg_acc / self.cfg.display_steps
if self.cfg.use_attribute:
self.avg_att = self.avg_att / self.cfg.display_steps
if self.smooth_emd != None:
self.avg_emd = (1-self.smooth_coe) * self.avg_emd + self.smooth_coe * self.smooth_emd
self.avg_acc = (1-self.smooth_coe) * self.avg_acc + self.smooth_coe * self.smooth_acc
if self.cfg.use_attribute:
self.avg_att = (1-self.smooth_coe) * self.avg_att + self.smooth_coe * self.smooth_att
self.writer.add_scalar('Train/AttributeLoss', self.avg_att, self.iters)
self.writer.add_scalar('Train/EMD_Loss', self.avg_emd, self.iters)
self.writer.add_scalar('Train/Accuracy', self.avg_acc, self.iters)
if self.cfg.use_attribute:
print('Traning Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} Attribute_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_att,
self.avg_acc,
self.optimizer.param_groups[0]['lr']))
else:
print(
'Traning Epoch:{}/{} Current Batch: {}/{} EMD_Loss:{:.4f} ACC:{:.2%} lr:{:.6f} '.
format(
self.epoch, self.cfg.max_epoch,
batch, len(self.trainloader),
self.avg_emd, self.avg_acc,
self.optimizer.param_groups[0]['lr']))
self.smooth_emd = self.avg_emd
self.smooth_acc = self.avg_acc
self.avg_mse = 0.
self.avg_emd = 0.
self.avg_acc = 0.
if self.cfg.use_attribute:
self.smooth_att = self.avg_att
self.avg_att = 0.
print()
if __name__ == '__main__':
cfg = Config()
cfg.create_path()
device = torch.device('cuda:{}'.format(cfg.gpu_id))
# evaluate(cfg)
for file in os.listdir('./'):
if file.endswith('.py'):
shutil.copy(file, cfg.exp_path)
print('Backup ', file)
model = SAMPNet(cfg)
model = model.train().to(device)
trainer = Trainer(model, cfg)
trainer.run() | [
"samp_net.AttributeLoss",
"torch.optim.Adam",
"os.listdir",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"tensorboardX.SummaryWriter",
"torch.optim.SGD",
"samp_net.SAMPNet",
"config.Config",
"csv.writer",
"cadb_dataset.CADBDataset",
"os.path.join",
"torch.nn.MSELoss",
"samp_net.EMDLoss",
... | [((725, 750), 'cadb_dataset.CADBDataset', 'CADBDataset', (['"""train"""', 'cfg'], {}), "('train', cfg)\n", (736, 750), False, 'from cadb_dataset import CADBDataset\n'), ((769, 881), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'cfg.batch_size', 'shuffle': '(True)', 'num_workers': 'cfg.num_workers', 'drop_last': '(False)'}), '(trainset, batch_size=cfg.batch_size, shuffle=True, num_workers=\n cfg.num_workers, drop_last=False)\n', (779, 881), False, 'from torch.utils.data import DataLoader\n'), ((10131, 10139), 'config.Config', 'Config', ([], {}), '()\n', (10137, 10139), False, 'from config import Config\n'), ((10254, 10270), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (10264, 10270), False, 'import sys, os\n'), ((10397, 10409), 'samp_net.SAMPNet', 'SAMPNet', (['cfg'], {}), '(cfg)\n', (10404, 10409), False, 'from samp_net import EMDLoss, AttributeLoss, SAMPNet\n'), ((1319, 1395), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['self.optimizer'], {'mode': '"""min"""', 'patience': '(5)'}), "(self.optimizer, mode='min', patience=5)\n", (1355, 1395), True, 'import torch.optim as optim\n'), ((1756, 1774), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1772, 1774), False, 'import torch\n'), ((1799, 1808), 'samp_net.EMDLoss', 'EMDLoss', ([], {}), '()\n', (1806, 1808), False, 'from samp_net import EMDLoss, AttributeLoss, SAMPNet\n'), ((3195, 3234), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.cfg.log_dir'}), '(log_dir=self.cfg.log_dir)\n', (3208, 3234), False, 'from tensorboardX import SummaryWriter\n'), ((4440, 4480), 'test.evaluation_on_cadb', 'evaluation_on_cadb', (['self.model', 'self.cfg'], {}), '(self.model, self.cfg)\n', (4458, 4480), False, 'from test import evaluation_on_cadb\n'), ((2034, 2069), 'samp_net.AttributeLoss', 'AttributeLoss', (['cfg.attribute_weight'], {}), '(cfg.attribute_weight)\n', (2047, 2069), False, 'from samp_net import 
EMDLoss, AttributeLoss, SAMPNet\n'), ((2665, 2719), 'torch.optim.Adam', 'optim.Adam', (['params'], {'weight_decay': 'self.cfg.weight_decay'}), '(params, weight_decay=self.cfg.weight_decay)\n', (2675, 2719), True, 'import torch.optim as optim\n'), ((6182, 6195), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6192, 6195), False, 'import csv\n'), ((10317, 10348), 'shutil.copy', 'shutil.copy', (['file', 'cfg.exp_path'], {}), '(file, cfg.exp_path)\n', (10328, 10348), False, 'import shutil\n'), ((2821, 2907), 'torch.optim.SGD', 'optim.SGD', (['params'], {'momentum': 'self.cfg.momentum', 'weight_decay': 'self.cfg.weight_decay'}), '(params, momentum=self.cfg.momentum, weight_decay=self.cfg.\n weight_decay)\n', (2830, 2907), True, 'import torch.optim as optim\n'), ((3626, 3684), 'os.path.join', 'os.path.join', (['self.cfg.checkpoint_dir', '"""model-{epoch}.pth"""'], {}), "(self.cfg.checkpoint_dir, 'model-{epoch}.pth')\n", (3638, 3684), False, 'import sys, os\n'), ((4061, 4116), 'os.path.join', 'os.path.join', (['self.cfg.checkpoint_dir', '"""model-best.pth"""'], {}), "(self.cfg.checkpoint_dir, 'model-best.pth')\n", (4073, 4116), False, 'import sys, os\n')] |
import pygame
from pygame.mixer import music
from pystage.core.assets import SoundManager
from pystage.core._base_sprite import BaseSprite
import time
class _Sound(BaseSprite):
# Like for costumes and backdrops, we need a class structure here.
# Plus a global sound manager.
def __init__(self):
super().__init__()
self.sound_manager = SoundManager(self)
self.mixer = pygame.mixer
self.mixer.init(channels=2)
self.current_pan = 0
self.current_pitch = 0
self.current_volume = 100
def pystage_addsound(self, name):
self.sound_manager.add_sound(name)
def sound_play(self, name, loop=0):
channel = self.mixer.find_channel()
sound = self.sound_manager.get_sound(name)
if sound is not None:
channel.play(sound, loop)
return channel
def sound_playuntildone(self, name):
sound = self.sound_manager.get_sound(name)
if sound is not None:
self.mixer.find_channel().play(sound, 0)
# time.sleep(sound.get_length())
# This need to be done via wait time in code block
# TODO: Add this function to yield blocks.
self.code_manager.current_block.add_to_wait_time = sound.get_length()
def sound_stopallsounds(self):
self.mixer.stop()
def sound_changeeffectby_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
# -360 to 360, 10 is a half-step, 120 an octave
# changes only the speed of the sound
pass
sound_changeeffectby_pitch.opcode = "sound_changeeffectby"
sound_changeeffectby_pitch.param = "EFFECT"
sound_changeeffectby_pitch.value = "PITCH"
sound_changeeffectby_pitch.translation = "sound_effects_pitch"
def sound_changeeffectby_pan(self, value):
# norm pan value from -100/100 to range 0/1
self.current_pan += value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_changeeffectby_pan.opcode = "sound_changeeffectby"
sound_changeeffectby_pan.param = "EFFECT"
sound_changeeffectby_pan.value = "PAN"
sound_changeeffectby_pan.translation = "sound_effects_pan"
def sound_seteffectto_pitch(self, value):
# TODO: for pitching there is no ready to use code in pygame. To do so
# we must operate on the audio array itself.
pass
sound_seteffectto_pitch.opcode = "sound_seteffectto"
sound_seteffectto_pitch.param = "EFFECT"
sound_seteffectto_pitch.value = "PITCH"
sound_seteffectto_pitch.translation = "sound_effects_pitch"
def sound_seteffectto_pan(self, value):
# Values from -100 (left) to 100 (right)
self.current_pan = value
self.current_pan = min(100, max(-100, self.current_pan))
self._apply()
sound_seteffectto_pan.opcode = "sound_seteffectto"
sound_seteffectto_pan.param = "EFFECT"
sound_seteffectto_pan.value = "PAN"
sound_seteffectto_pan.translation = "sound_effects_pan"
def sound_cleareffects(self):
self.current_pan = 0
self.current_pitch = 0
self._apply()
# apply pitch
def _apply(self):
# norm pan value from -100/100 to range 0/1
pgpan = (self.current_pan + 100) / 200
pgvolume = self.current_volume / 100
for channel_id in range(self.mixer.get_num_channels()):
if pgpan > 0.5:
self.mixer.Channel(channel_id).set_volume(1, 0)
else:
self.mixer.Channel(channel_id).set_volume(0, 1)
for channel_id in range(self.mixer.get_num_channels()):
self.mixer.Channel(channel_id).set_volume(pgvolume)
def sound_changevolumeby(self, value):
self.current_volume += value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_setvolumeto(self, value):
self.current_volume = value
self.current_volume = min(100, max(0, self.current_volume))
self._apply()
def sound_volume(self):
# as we hide the channel mechanic, we assume all channels are set to the same volume
return self.mixer.Channel(0).get_volume() * 100
| [
"pystage.core.assets.SoundManager"
] | [((366, 384), 'pystage.core.assets.SoundManager', 'SoundManager', (['self'], {}), '(self)\n', (378, 384), False, 'from pystage.core.assets import SoundManager\n')] |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Basic permissions module."""
from sqlalchemy import or_
class SystemWideRoles(object):
"""List of system wide roles."""
# pylint: disable=too-few-public-methods
SUPERUSER = u"Superuser"
ADMINISTRATOR = u"Administrator"
EDITOR = u"Editor"
READER = u"Reader"
CREATOR = u"Creator"
NO_ACCESS = u"No Access"
def context_query_filter(context_column, contexts):
'''
Intended for use by `model.query.filter(...)`
If `contexts == None`, it's Admin (no filter), so return `True`
Else, return the full query
'''
if contexts is None:
# Admin context, no filter
return True
else:
filter_expr = None
# Handle `NULL` context specially
if None in contexts:
filter_expr = context_column.is_(None)
# We're modifying `contexts`, so copy
contexts = set(contexts)
contexts.remove(None)
if contexts:
filter_in_expr = context_column.in_(contexts)
if filter_expr is not None:
filter_expr = or_(filter_expr, filter_in_expr)
else:
filter_expr = filter_in_expr
if filter_expr is None:
# No valid contexts
return False
return filter_expr
| [
"sqlalchemy.or_"
] | [((1087, 1119), 'sqlalchemy.or_', 'or_', (['filter_expr', 'filter_in_expr'], {}), '(filter_expr, filter_in_expr)\n', (1090, 1119), False, 'from sqlalchemy import or_\n')] |
from django.contrib import admin
from rango.models import Category, Page
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Category."""
    # Auto-fill the slug field from the name as the user types in the admin form.
    prepopulated_fields = {'slug': ('name',)}


class PageAdmin(admin.ModelAdmin):
    """Admin configuration for Page."""
    # Columns shown in the admin change-list view.
    list_display = ('title', 'category', 'url')


# Register the models together with their ModelAdmin classes.  The original
# code registered the bare models first and left CategoryAdmin/PageAdmin
# unused, so the slug pre-population and list columns never took effect.
admin.site.register(Page, PageAdmin)
admin.site.register(Category, CategoryAdmin)
| [
"django.contrib.admin.site.register"
] | [((79, 104), 'django.contrib.admin.site.register', 'admin.site.register', (['Page'], {}), '(Page)\n', (98, 104), False, 'from django.contrib import admin\n'), ((106, 135), 'django.contrib.admin.site.register', 'admin.site.register', (['Category'], {}), '(Category)\n', (125, 135), False, 'from django.contrib import admin\n')] |
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse, sys, os
import zarr
import tiledb
import numpy as np
from math import ceil
from promort_tools.libs.utils.logger import get_logger, LOG_LEVELS
class ZarrToTileDBConverter(object):
    """Converts a ZARR dataset into an equivalent dense TileDB array.

    Every array of the ZARR group becomes one attribute of the TileDB
    array; per-array and slide-level metadata are copied into the TileDB
    array metadata.
    """

    def __init__(self, logger):
        # Logger used for progress and error reporting.
        self.logger = logger

    def _get_array_shape(self, zarr_dataset):
        """Return the single shape shared by all arrays, or abort."""
        # All arrays must share one shape, otherwise they cannot be mapped
        # onto a single dense TileDB domain.
        unique_shapes = {arr[1].shape for arr in zarr_dataset.arrays()}
        if len(unique_shapes) != 1:
            self.logger.error('Multiple shapes in zarr dataset arrays, cannot convert to tiledb')
            sys.exit('Multiple shapes in zarr arrays')
        return unique_shapes.pop()

    def _get_array_attributes(self, zarr_dataset):
        """Return (label, dtype) pairs, one per array in the ZARR group."""
        return [(label, data.dtype) for label, data in zarr_dataset.arrays()]

    def _get_tiledb_path(self, zarr_dataset, out_folder):
        """Build the output path: the input dataset name plus a .tiledb suffix."""
        dataset_name = os.path.basename(os.path.normpath(zarr_dataset))
        return os.path.join(out_folder, '{0}.tiledb'.format(dataset_name))

    def _init_tiledb_dataset(self, dataset_path, dataset_shape, zarr_attributes):
        """Create an empty dense TileDB array matching the shape and attributes."""
        # NOTE(review): np.uint16 caps each dimension at 65535 tiles —
        # presumably sufficient for the tile grids handled here; confirm
        # before converting larger datasets.
        row_dim = tiledb.Dim(name='rows', domain=(0, dataset_shape[0]-1), tile=4, dtype=np.uint16)
        col_dim = tiledb.Dim(name='columns', domain=(0, dataset_shape[1]-1), tile=4, dtype=np.uint16)
        attrs = [tiledb.Attr(label, dtype=dt) for label, dt in zarr_attributes]
        schema = tiledb.ArraySchema(
            domain=tiledb.Domain(row_dim, col_dim),
            sparse=False,
            attrs=attrs
        )
        tiledb.DenseArray.create(dataset_path, schema)

    def _zarr_to_tiledb(self, zarr_dataset, tiledb_dataset_path, slide_resolution):
        """Copy array contents and metadata from the ZARR group into TileDB."""
        data_by_label = {}
        meta = {
            'original_width': slide_resolution[0],
            'original_height': slide_resolution[1],
            'slide_path': zarr_dataset.attrs['filename']
        }
        for label, array in zarr_dataset.arrays():
            data_by_label[label] = array[:]
            # NOTE(review): '<label>.rows' is taken from shape[1] and
            # '<label>.columns' from shape[0] — looks like a deliberate axis
            # order swap, but confirm against consumers of this metadata.
            meta['{0}.dzi_sampling_level'.format(label)] = ceil(array.attrs['dzi_sampling_level'])
            meta['{0}.tile_size'.format(label)] = array.attrs['tile_size']
            meta['{0}.rows'.format(label)] = array.shape[1]
            meta['{0}.columns'.format(label)] = array.shape[0]
        with tiledb.open(tiledb_dataset_path, 'w') as tdb_array:
            tdb_array[:] = data_by_label
            for key, value in meta.items():
                tdb_array.meta[key] = value

    def run(self, zarr_dataset, out_folder):
        """Convert *zarr_dataset* and write the TileDB result under *out_folder*."""
        group = zarr.open(zarr_dataset)
        try:
            slide_resolution = group.attrs['resolution']
        except KeyError as ke:
            self.logger.error('Missing key {0} in zarr attributes, exit'.format(ke))
            sys.exit('Missing key {0}'.format(ke))
        # Shape validation happens before any path logging, matching the
        # original failure-path ordering.
        shape = self._get_array_shape(group)
        out_path = self._get_tiledb_path(zarr_dataset, out_folder)
        self.logger.info('TileDB dataset path: {0}'.format(out_path))
        attributes = self._get_array_attributes(group)
        self._init_tiledb_dataset(out_path, shape, attributes)
        self._zarr_to_tiledb(group, out_path, slide_resolution)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--zarr-dataset', type=str, required=True,
help='path to the ZARR dataset to be converted')
parser.add_argument('--out-folder', type=str, required=True,
help='output folder for TileDB dataset')
parser.add_argument('--log-level', type=str, choices=LOG_LEVELS,
default='INFO', help='log level (default=INFO)')
parser.add_argument('--log-file', type=str, default=None, help='log file (default=stderr)')
return parser
def main(argv=None):
parser = make_parser()
args = parser.parse_args(argv)
logger = get_logger(args.log_level, args.log_file)
app = ZarrToTileDBConverter(logger)
app.run(args.zarr_dataset, args.out_folder)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"tiledb.DenseArray.create",
"math.ceil",
"argparse.ArgumentParser",
"os.path.normpath",
"tiledb.Attr",
"tiledb.open",
"zarr.open",
"sys.exit",
"promort_tools.libs.utils.logger.get_logger",
"tiledb.Domain",
"tiledb.Dim",
"tiledb.ArraySchema"
] | [((4348, 4373), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4371, 4373), False, 'import argparse, sys, os\n'), ((4998, 5039), 'promort_tools.libs.utils.logger.get_logger', 'get_logger', (['args.log_level', 'args.log_file'], {}), '(args.log_level, args.log_file)\n', (5008, 5039), False, 'from promort_tools.libs.utils.logger import get_logger, LOG_LEVELS\n'), ((2125, 2212), 'tiledb.Dim', 'tiledb.Dim', ([], {'name': '"""rows"""', 'domain': '(0, dataset_shape[0] - 1)', 'tile': '(4)', 'dtype': 'np.uint16'}), "(name='rows', domain=(0, dataset_shape[0] - 1), tile=4, dtype=np.\n uint16)\n", (2135, 2212), False, 'import tiledb\n'), ((2224, 2314), 'tiledb.Dim', 'tiledb.Dim', ([], {'name': '"""columns"""', 'domain': '(0, dataset_shape[1] - 1)', 'tile': '(4)', 'dtype': 'np.uint16'}), "(name='columns', domain=(0, dataset_shape[1] - 1), tile=4, dtype=\n np.uint16)\n", (2234, 2314), False, 'import tiledb\n'), ((2325, 2353), 'tiledb.Domain', 'tiledb.Domain', (['rows', 'columns'], {}), '(rows, columns)\n', (2338, 2353), False, 'import tiledb\n'), ((2494, 2559), 'tiledb.ArraySchema', 'tiledb.ArraySchema', ([], {'domain': 'domain', 'sparse': '(False)', 'attrs': 'attributes'}), '(domain=domain, sparse=False, attrs=attributes)\n', (2512, 2559), False, 'import tiledb\n'), ((2568, 2614), 'tiledb.DenseArray.create', 'tiledb.DenseArray.create', (['dataset_path', 'schema'], {}), '(dataset_path, schema)\n', (2592, 2614), False, 'import tiledb\n'), ((3665, 3688), 'zarr.open', 'zarr.open', (['zarr_dataset'], {}), '(zarr_dataset)\n', (3674, 3688), False, 'import zarr\n'), ((1661, 1703), 'sys.exit', 'sys.exit', (['"""Multiple shapes in zarr arrays"""'], {}), "('Multiple shapes in zarr arrays')\n", (1669, 1703), False, 'import argparse, sys, os\n'), ((3457, 3494), 'tiledb.open', 'tiledb.open', (['tiledb_dataset_path', '"""w"""'], {}), "(tiledb_dataset_path, 'w')\n", (3468, 3494), False, 'import tiledb\n'), ((2446, 2475), 'tiledb.Attr', 'tiledb.Attr', (['a[0]'], 
{'dtype': 'a[1]'}), '(a[0], dtype=a[1])\n', (2457, 2475), False, 'import tiledb\n'), ((1984, 2014), 'os.path.normpath', 'os.path.normpath', (['zarr_dataset'], {}), '(zarr_dataset)\n', (2000, 2014), False, 'import argparse, sys, os\n'), ((3144, 3186), 'math.ceil', 'ceil', (["arr_data.attrs['dzi_sampling_level']"], {}), "(arr_data.attrs['dzi_sampling_level'])\n", (3148, 3186), False, 'from math import ceil\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_webhook_notification_template_fields(apps, schema_editor):
# loop over all existing webhook notification templates and make
# sure they have the new "http_method" field filled in with "POST"
NotificationTemplate = apps.get_model('main', 'notificationtemplate')
webhooks = NotificationTemplate.objects.filter(notification_type='webhook')
for w in webhooks:
w.notification_configuration['http_method'] = 'POST'
w.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0081_v360_notify_on_start'),
]
operations = [
migrations.RunPython(add_webhook_notification_template_fields, migrations.RunPython.noop),
]
| [
"django.db.migrations.RunPython"
] | [((706, 800), 'django.db.migrations.RunPython', 'migrations.RunPython', (['add_webhook_notification_template_fields', 'migrations.RunPython.noop'], {}), '(add_webhook_notification_template_fields, migrations.\n RunPython.noop)\n', (726, 800), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
import os
import sys
import subprocess
from django.core.management import execute_from_command_line
FLAKE8_ARGS = ['django_restql', 'tests', 'setup.py', 'runtests.py']
WARNING_COLOR = '\033[93m'
END_COLOR = '\033[0m'
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
msg = (
WARNING_COLOR + 'flake8 failed\n' + END_COLOR
if ret else 'flake8 passed\n'
)
print(msg)
return ret
def runtests():
ret = flake8_main(FLAKE8_ARGS)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(argv)
sys.exit(ret) # Fail build if code linting fails
if __name__ == '__main__':
runtests()
| [
"os.environ.setdefault",
"sys.exit",
"subprocess.call",
"django.core.management.execute_from_command_line"
] | [((318, 352), 'subprocess.call', 'subprocess.call', (["(['flake8'] + args)"], {}), "(['flake8'] + args)\n", (333, 352), False, 'import subprocess\n'), ((550, 615), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""tests.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'tests.settings')\n", (571, 615), False, 'import os\n'), ((670, 701), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['argv'], {}), '(argv)\n', (695, 701), False, 'from django.core.management import execute_from_command_line\n'), ((706, 719), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (714, 719), False, 'import sys\n')] |
# Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE
"""
Parser for the Simple Data Package format. The parser consists of several iterable generator
objects.
"""
NO_TERM = '<no_term>' # No parent term -- no '.' -- in term cell
ELIDED_TERM = '<elided_term>' # A '.' in term cell, but no term before it.
class ParserError(Exception):
def __init__(self, *args, **kwargs):
super(ParserError, self).__init__(*args, **kwargs)
self.term = None
class IncludeError(ParserError):
pass
class Term(object):
"""Parses a row into the parts of a term
Public attributes. These are set externally to the constructor.
file_name Filename or URL of faile that contains term
row: Row number of term
col Column number of term
is_arg_child Term was generated from arguments of parent
child_property_type What datatype to use in dict conversion
valid Did term pass validation tests? Usually based on DeclaredTerm values.
"""
def __init__(self, term, value, term_args=[]):
"""
:param term: Simple or compoint term name
:param value: Term value, from second column of spreadsheet
:param term_args: Colums 2+ from term row
"""
self.parent_term, self.record_term = Term.split_term_lower(term)
self.value = value.strip() if value else None
self.args = [x.strip() for x in term_args]
self.section = None # Name of section the term is in.
self.file_name = None
self.row = None
self.col = None
# When converting to a dict, what dict to to use for the self.value value
self.term_value_name = '@value' # May be change in term parsing
# When converting to a dict, what datatype should be used for this term.
# Can be forced to list, scalar, dict or other types.
self.child_property_type = 'any'
self.valid = None
self.is_arg_child = None # If true, term was
self.children = [] # WHen terms are linked, hold term's children.
@classmethod
def split_term(cls, term):
"""
Split a term in to parent and record term components
:param term: combined term text
:return: Tuple of parent and record term
"""
if '.' in term:
parent_term, record_term = term.split('.')
parent_term, record_term = parent_term.strip(), record_term.strip()
if parent_term == '':
parent_term = ELIDED_TERM
else:
parent_term, record_term = NO_TERM, term.strip()
return parent_term, record_term
@classmethod
def split_term_lower(cls, term):
"""
Like split_term, but also lowercases both parent and record term
:param term: combined term text
:return: Tuple of parent and record term
"""
return tuple(e.lower() for e in Term.split_term(term))
def file_ref(self):
"""Return a string for the file, row and column of the term."""
if self.file_name is not None and self.row is not None:
return "{} {}:{} ".format(self.file_name, self.row, self.col)
elif self.row is not None:
return " {}:{} ".format(self.row, self.col)
else:
return ''
def add_child(self, child):
self.children.append(child)
def __repr__(self):
return "<Term: {}{}.{} {} {} >".format(self.file_ref(), self.parent_term,
self.record_term, self.value, self.args)
def __str__(self):
if self.parent_term == NO_TERM:
return "{}{}: {}".format(self.file_ref(), self.record_term, self.value)
elif self.parent_term == ELIDED_TERM:
return "{}.{}: {}".format(self.file_ref(), self.record_term, self.value)
else:
return "{}{}.{}: {}".format(self.file_ref(), self.parent_term, self.record_term, self.value)
class CsvPathRowGenerator(object):
"""An object that generates rows. The current implementation mostly just a wrapper around
csv.reader, but it add a path property so term interperters know where the terms are coming from
"""
def __init__(self, path):
self._path = path
self._f = None
@property
def path(self):
return self._path
def open(self):
if self._path.startswith('http'):
import urllib2
try:
f = urllib2.urlopen(self._path)
except urllib2.URLError:
raise IncludeError("Failed to find file by url: {}".format(self._path))
f.name = self._path # to be symmetric with files.
else:
from os.path import join
try:
f = open(self._path)
except IOError:
raise IncludeError("Failed to find file: {}".format(self._path) )
self._f = f
def close(self):
if self._f:
self._f.close()
self._f = None
def __iter__(self):
import csv
self.open()
# Python 3, should use yield from
for row in csv.reader(self._f):
yield row
self.close()
class CsvDataRowGenerator(object):
"""Generate rows from CSV data, as a string
"""
def __init__(self, data, path = None):
self._data = data
self._path = path or '<none>'
@property
def path(self):
return self._path
def open(self):
pass
def close(self):
pass
def __iter__(self):
import csv
from cStringIO import StringIO
f = StringIO(self._data)
# Python 3, should use yield from
for row in csv.reader(f):
yield row
class RowGenerator(object):
"""An object that generates rows. The current implementation mostly just a wrapper around
csv.reader, but it add a path property so term interperters know where the terms are coming from
"""
def __init__(self, rows, path = None):
self._rows = rows
self._path = path or '<none>'
@property
def path(self):
return self._path
def open(self):
pass
def close(self):
pass
def __iter__(self):
for row in self._rows:
yield row
class TermGenerator(object):
"""Generate terms from a row generator. It will produce a term for each row, and child
terms for any arguments to the row. """
def __init__(self, row_gen):
"""
:param row_gen: an interator that generates rows
:return:
"""
from os.path import dirname, basename
self._row_gen = row_gen
self._path = self._row_gen.path
def __iter__(self):
"""An interator that generates term objects"""
for line_n, row in enumerate(self._row_gen, 1):
if not row[0].strip() or row[0].strip().startswith('#'):
continue
t = Term(row[0].lower(),
row[1] if len(row)>1 else '',
row[2:] if len(row)>2 else [])
t.row = line_n
t.col = 1
t.file_name = self._path
rt_l = t.record_term.lower()
if rt_l == 'include':
yield t
for t in self.include_term_generator(t.value):
yield t
continue # Already yielded the include term
yield t
# Yield any child terms, from the term row arguments
if rt_l != 'section':
for col, value in enumerate(t.args, 0):
if value.strip():
t2 = Term(t.record_term.lower() + '.' + str(col), value, [])
t2.is_arg_child = True
t2.row = line_n
t2.col = col + 2 # The 0th argument starts in col 2
t2.file_name = self._path
yield t2
def include_term_generator(self, include_ref):
from os.path import dirname, join
if not self._path:
raise ParserError("Can't include because don't know current path"
.format(self._root_directory))
if include_ref.startwith('http'):
path = include_ref
else:
path = join(dirname(self._path), include_ref.strip('/'))
return TermGenerator(RowGenerator(path))
class TermInterpreter(object):
"""Takes a stream of terms and sets the parameter map, valid term names, etc """
def __init__(self, term_gen, remove_special=True):
"""
:param term_gen: an an iterator that generates terms
:param remove_special: If true ( default ) remove the special terms from the stream
:return:
"""
from collections import defaultdict
self._remove_special = remove_special
self._term_gen = term_gen
self._param_map = [] # Current parameter map, the args of the last Section term
# _sections and _terms are loaded from Declare documents, in
# handle_declare and import_declare_doc. The Declare doc information
# can also be loaded before parsing, so the Declare term can be eliminated.
self._sections = {} # Declared sections and their arguments
self._terms = {} # Pre-defined terms, plus TermValueName and ChildPropertyType
self.errors = []
@property
def sections(self):
return self._sections
@property
def synonyms(self):
return {k: v['synonym'] for k, v in self._terms.items() if 'synonym' in v}
@property
def terms(self):
return self._terms
@property
def declare_dict(self):
return {
'sections': self.sections,
'terms': self.terms,
}
def as_dict(self):
"""Iterate, link terms and convert to a dict"""
return convert_to_dict(link_terms(self))
def errors_as_dict(self):
errors = []
for e in self.errors:
errors.append({
'file': e.term.file_name,
'row': e.term.row,
'col': e.term.col,
'term': self.join(e.term.parent_term, e.term.record_term),
'error': str(e)
})
return errors
@staticmethod
def join(t1, t2):
return '.'.join((t1, t2))
def __iter__(self):
import copy
last_parent_term = 'root'
# Remapping the default record value to another property name
for t in self._term_gen:
nt = copy.copy(t)
# Substitute synonyms
try:
syn_term = self.synonyms[self.join(t.parent_term, t.record_term)]
nt.parent_term, nt.record_term = Term.split_term_lower(syn_term);
except KeyError:
pass
if nt.parent_term == ELIDED_TERM and last_parent_term:
nt.parent_term = last_parent_term
elif not nt.is_arg_child:
last_parent_term = nt.record_term
# Remap integer record terms to names from the parameter map
try:
nt.record_term = str(self._param_map[int(t.record_term)])
except ValueError:
pass # the record term wasn't an integer
except IndexError:
pass # Probably no parameter map.
# Handle other special terms
if hasattr(self, 'handle_' + t.record_term.lower()):
getattr(self, 'handle_' + t.record_term.lower())(t)
if self._remove_special:
continue
nt.child_property_type = self._terms.get(self.join(nt.parent_term, nt.record_term), {}) \
.get('childpropertytype', 'any')
nt.term_value_name = self._terms.get(self.join(nt.parent_term, nt.record_term), {}) \
.get('termvaluename', '@value')
nt.valid = self.join(nt.parent_term.lower(), nt.record_term.lower()) in self._terms
yield nt
def handle_section(self, t):
self._param_map = [p.lower() if p else i for i, p in enumerate(t.args)]
def handle_declare(self, t):
"""Load the information in the file referenced by a Delare term, but don't
insert the terms in the file into the stream"""
from os.path import dirname, join
if t.value.startswith('http'):
fn = t.value.strip('/')
else:
fn = join(dirname(t.file_name), t.value.strip('/'))
ti = DeclareTermInterpreter(TermGenerator(CsvPathRowGenerator(fn)))
try:
self.import_declare_doc(ti.as_dict())
except IncludeError as e:
e.term = t
self.errors.append(e)
def import_declare_doc(self, d):
"""Import a declare cod that has been parsed and converted to a dict"""
if 'declaresection' in d:
for e in d['declaresection']:
if e:
self._sections[e['section_name'].lower()] = {
'args': [v for k, v in sorted((k, v) for k, v in e.items() if isinstance(k, int))],
'terms': list()
}
if 'declareterm' in d:
for e in d['declareterm']:
terms = self.join(*Term.split_term_lower(e['term_name']))
self._terms[terms] = e
if 'section' in e and e['section']:
if e['section'] not in self._sections:
self._sections[e['section'].lower()] = {
'args': [],
'terms': list()
}
st = self._sections[e['section'].lower()]['terms']
if e['section'] not in st:
st.append(e['term_name'])
if 'declarevalueset' in d:
for e in d['declarevalueset']:
for k,v in self._terms.items():
if 'valueset' in v and e.get('name',None) == v['valueset']:
v['valueset'] = e['value']
class DeclareTermInterpreter(TermInterpreter):
"""
A version of the TermInterpreter specifically for parsing Declare documents. These documents
require some special handling because they declare terms that are required for propertly parsing
Metatab files. These require declarations are pre-declared in this class.
"""
def __init__(self, term_gen, remove_special=False):
super(DeclareTermInterpreter, self).__init__(term_gen, remove_special)
# Configure the parser to output a more useful structure
self._terms.update({
NO_TERM + '.section': {'termvaluename': 'name'},
NO_TERM + '.synonym': {'termvaluename': 'term_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declareterm': {'termvaluename': 'term_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declaresection': {'termvaluename': 'section_name', 'childpropertytype': 'sequence'},
NO_TERM + '.declarevalueset': {'termvaluename': 'name', 'childpropertytype': 'sequence'},
'declarevalueset.value': {'termvaluename': 'value', 'childpropertytype': 'sequence'},
})
def link_terms(term_generator):
"""Return a heirarchy of records from a stream of terms
:param term_generator:
"""
root = Term('Root', None)
last_term_map = {NO_TERM: root}
for term in term_generator:
try:
parent = last_term_map[term.parent_term]
except KeyError as e:
raise ParserError("Failed to find parent term in last term map: {} {} \nTerm: \n{}"
.format(e.__class__.__name__, e, term))
parent.add_child(term)
if not term.is_arg_child and term.parent_term != ELIDED_TERM:
# Recs created from term args don't go in the maps.
# Nor do record term records with elided parent terms
last_term_map[ELIDED_TERM] = term
last_term_map[term.record_term] = term
return root
def convert_to_dict(term):
"""Converts a record heirarchy to nested dicts.
:param term: Root term at which to start conversion
"""
if term.children:
d = {}
for c in term.children:
if c.child_property_type == 'scalar':
d[c.record_term] = convert_to_dict(c)
elif c.child_property_type == 'sequence':
try:
d[c.record_term].append(convert_to_dict(c))
except (KeyError, AttributeError):
# The c.term property doesn't exist, so add a list
d[c.record_term] = [convert_to_dict(c)]
else:
try:
d[c.record_term].append(convert_to_dict(c))
except KeyError:
# The c.term property doesn't exist, so add a scalar
d[c.record_term] = convert_to_dict(c)
except AttributeError as e:
# d[c.term] exists, but is a scalar, so convert it to a list
d[c.record_term] = [d[c.record_term]] + [convert_to_dict(c)]
if term.value:
d[term.term_value_name] = term.value
return d
else:
return term.value
| [
"urllib2.urlopen",
"cStringIO.StringIO",
"os.path.dirname",
"copy.copy",
"csv.reader"
] | [((5263, 5282), 'csv.reader', 'csv.reader', (['self._f'], {}), '(self._f)\n', (5273, 5282), False, 'import csv\n'), ((5756, 5776), 'cStringIO.StringIO', 'StringIO', (['self._data'], {}), '(self._data)\n', (5764, 5776), False, 'from cStringIO import StringIO\n'), ((5839, 5852), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5849, 5852), False, 'import csv\n'), ((10750, 10762), 'copy.copy', 'copy.copy', (['t'], {}), '(t)\n', (10759, 10762), False, 'import copy\n'), ((4585, 4612), 'urllib2.urlopen', 'urllib2.urlopen', (['self._path'], {}), '(self._path)\n', (4600, 4612), False, 'import urllib2\n'), ((8483, 8502), 'os.path.dirname', 'dirname', (['self._path'], {}), '(self._path)\n', (8490, 8502), False, 'from os.path import dirname, join\n'), ((12676, 12696), 'os.path.dirname', 'dirname', (['t.file_name'], {}), '(t.file_name)\n', (12683, 12696), False, 'from os.path import dirname, join\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specified KV model for storing monitor secrets."""
import base64
import os
import struct
import time
from decapod_common.models import kv
class MonitorSecret(kv.KV):
NAMESPACE = "monitor_secret"
@classmethod
def upsert(cls, key, value):
return super().upsert(cls.NAMESPACE, key, value)
@classmethod
def find(cls, keys):
return super().find(cls.NAMESPACE, keys)
@classmethod
def find_one(cls, key):
models = cls.find([key])
if models:
return models[0]
@classmethod
def remove(cls, keys):
return super().remove(cls.NAMESPACE, keys)
def generate_monitor_secret():
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
secret = base64.b64encode(header + key)
secret = secret.decode("utf-8")
return secret
| [
"os.urandom",
"base64.b64encode",
"time.time"
] | [((1280, 1294), 'os.urandom', 'os.urandom', (['(16)'], {}), '(16)\n', (1290, 1294), False, 'import os\n'), ((1376, 1406), 'base64.b64encode', 'base64.b64encode', (['(header + key)'], {}), '(header + key)\n', (1392, 1406), False, 'import base64\n'), ((1336, 1347), 'time.time', 'time.time', ([], {}), '()\n', (1345, 1347), False, 'import time\n')] |
import re
import pathlib
from clldutils.text import strip_chars
from cldfbench import Dataset as BaseDataset
from cldfbench import CLDFSpec
QUOTES = '“”'
class Dataset(BaseDataset):
dir = pathlib.Path(__file__).parent
id = "lapollaqiang"
def cldf_specs(self): # A dataset must declare all CLDF sets it creates.
return CLDFSpec(dir=self.cldf_dir, module='Generic', metadata_fname='cldf-metadata.json')
def cmd_download(self, args):
pass
def cmd_makecldf(self, args):
args.writer.cldf.add_component('LanguageTable')
args.writer.cldf.add_component(
'ExampleTable',
'Text_ID',
{'name': 'Sentence_Number', 'datatype': 'integer'},
{'name': 'Phrase_Number', 'datatype': 'integer'},
)
args.writer.cldf.add_table('texts.csv', 'ID', 'Title')
args.writer.cldf.add_foreign_key('ExampleTable', 'Text_ID', 'texts.csv', 'ID')
args.writer.objects['LanguageTable'].append({'ID': 'qiang', 'Name':
'Qiang', 'Glottocode': 'west2876'})
example_number = 0
for text_id, title, lines in iter_texts(self.raw_dir.read('Qiang-2.txt').split('\n')):
args.writer.objects['texts.csv'].append({'ID': text_id, 'Title': title})
text, gloss = [], []
for igt in iter_igts(lines):
text.extend(igt[1])
gloss.extend(igt[2])
for sid, sentence in enumerate(iter_sentences(zip(text, gloss)), start=1):
for pid, phrase in enumerate(iter_phrases(sentence), start=1):
example_number += 1
args.writer.objects['ExampleTable'].append({
'ID': example_number,
'Primary_Text': ' '.join(p[0] for p in phrase),
'Analyzed_Word': [p[0] for p in phrase],
'Gloss': [p[1] for p in phrase],
'Text_ID': text_id,
'Language_ID': 'qiang',
'Sentence_Number': sid,
'Phrase_Number': pid,
})
def iter_phrases(chunks):
phrase_end = ',;'
phrase = []
for text, gloss in chunks:
phrase.append((text, gloss))
if strip_chars(QUOTES, text)[-1] in phrase_end:
yield phrase[:]
phrase = []
assert phrase
yield phrase
def iter_sentences(chunks):
sentence_end = '.!?'
sentence = []
for text, gloss in chunks:
sentence.append((text, gloss))
if strip_chars(QUOTES, text)[-1] in sentence_end:
yield sentence[:]
sentence = []
assert not sentence
def iter_igts(lines):
assert len(lines) % 3 == 0
for text, gloss, sep in [lines[i:i+3] for i in range(0, len(lines), 3)]:
assert not sep
m = re.match('(?P<number>[0-9]+)\s+', text)
assert m
sid = m.group('number')
text = text[m.end():].split()
gloss = gloss.split()
assert len(text) == len(gloss)
yield sid, text, gloss
def iter_texts(all_lines):
header_pattern = re.compile('Text\s+(?P<number>[0-9]+)\s*:\s+(?P<title>.+)')
text_id, title, lines = None, None, []
for line in all_lines:
line = line.strip()
header = header_pattern.match(line)
if header:
if text_id:
yield text_id, title, lines
lines = []
text_id, title = header.group('number'), header.group('title')
continue
lines.append(line)
if lines:
yield text_id, title, lines
| [
"cldfbench.CLDFSpec",
"pathlib.Path",
"re.compile",
"clldutils.text.strip_chars",
"re.match"
] | [((3145, 3207), 're.compile', 're.compile', (['"""Text\\\\s+(?P<number>[0-9]+)\\\\s*:\\\\s+(?P<title>.+)"""'], {}), "('Text\\\\s+(?P<number>[0-9]+)\\\\s*:\\\\s+(?P<title>.+)')\n", (3155, 3207), False, 'import re\n'), ((196, 218), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (208, 218), False, 'import pathlib\n'), ((344, 431), 'cldfbench.CLDFSpec', 'CLDFSpec', ([], {'dir': 'self.cldf_dir', 'module': '"""Generic"""', 'metadata_fname': '"""cldf-metadata.json"""'}), "(dir=self.cldf_dir, module='Generic', metadata_fname=\n 'cldf-metadata.json')\n", (352, 431), False, 'from cldfbench import CLDFSpec\n'), ((2868, 2908), 're.match', 're.match', (['"""(?P<number>[0-9]+)\\\\s+"""', 'text'], {}), "('(?P<number>[0-9]+)\\\\s+', text)\n", (2876, 2908), False, 'import re\n'), ((2288, 2313), 'clldutils.text.strip_chars', 'strip_chars', (['QUOTES', 'text'], {}), '(QUOTES, text)\n', (2299, 2313), False, 'from clldutils.text import strip_chars\n'), ((2574, 2599), 'clldutils.text.strip_chars', 'strip_chars', (['QUOTES', 'text'], {}), '(QUOTES, text)\n', (2585, 2599), False, 'from clldutils.text import strip_chars\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import Any, Dict, Optional, Tuple
import gym
import torch
from nle import nethack
from minihack.agent.common.models.embed import GlyphEmbedding
from minihack.agent.common.models.transformer import TransformerEncoder
from omegaconf import DictConfig
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from torch import nn
from torch.nn import functional as F
NUM_GLYPHS = nethack.MAX_GLYPH
NUM_FEATURES = nethack.BLSTATS_SHAPE[0]
PAD_CHAR = 0
NUM_CHARS = 128
class RLLibGlyphEmbedding(GlyphEmbedding):
def glyphs_to_idgroup(self, glyphs):
B, H, W = glyphs.shape
ids_groups = self.id_pairs_table.index_select(
0, glyphs.contiguous().view(-1).long()
)
ids = ids_groups.select(1, 0).view(B, H, W).long()
groups = ids_groups.select(1, 1).view(B, H, W).long()
return (ids, groups)
def prepare_input(self, inputs):
"""Take the inputs to the network as dictionary and return a namedtuple
of the input/index tensors to be embedded (GlyphTuple)"""
embeddable_data = {}
# Only flatten the data we want
for key, value in inputs.items():
if key in self.embeddings:
# -- [ B x ...] -> [ B' x ... ]
# embeddable_data[key] = torch.flatten(value, 0, 1).long()
embeddable_data[key] = value.long()
# add our group id and subgroup id if we want them
if self.requires_id_pairs_table:
ids, groups = self.glyphs_to_idgroup(inputs["glyphs"])
embeddable_data["groups"] = groups
embeddable_data["subgroup_ids"] = ids
# convert embeddable_data to a named tuple
return self.GlyphTuple(**embeddable_data)
class NetHackNet(nn.Module):
AgentOutput = collections.namedtuple(
"AgentOutput",
"action policy_logits baseline chosen_option teacher_logits pot_sm",
)
def __init__(self):
super(NetHackNet, self).__init__()
self.register_buffer("reward_sum", torch.zeros(()))
self.register_buffer("reward_m2", torch.zeros(()))
self.register_buffer("reward_count", torch.zeros(()).fill_(1e-8))
def forward(self, inputs, core_state):
raise NotImplementedError
def initial_state(self, batch_size=1):
return ()
def prepare_input(self, inputs):
# -- [B x H x W]
glyphs = inputs["glyphs"]
# -- [B x F]
features = inputs["blstats"]
B, *_ = glyphs.shape
return glyphs, features
def embed_state(self, inputs):
raise NotImplementedError
@torch.no_grad()
def update_running_moments(self, reward_batch):
"""Maintains a running mean of reward."""
new_count = len(reward_batch)
new_sum = torch.sum(reward_batch)
new_mean = new_sum / new_count
curr_mean = self.reward_sum / self.reward_count
new_m2 = torch.sum((reward_batch - new_mean) ** 2) + (
(self.reward_count * new_count)
/ (self.reward_count + new_count)
* (new_mean - curr_mean) ** 2
)
self.reward_count += new_count
self.reward_sum += new_sum
self.reward_m2 += new_m2
@torch.no_grad()
def get_running_std(self):
"""Returns standard deviation of the running mean of the reward."""
return torch.sqrt(self.reward_m2 / self.reward_count)
class Crop(nn.Module):
    """Extract a (height_target x width_target) window centered on x,y."""

    def __init__(self, height, width, height_target, width_target):
        super().__init__()
        self.width = width
        self.height = height
        self.width_target = width_target
        self.height_target = height_target

        # Normalized sampling offsets, broadcast to the full target window.
        horiz = self._step_to_range(2 / (self.width - 1), self.width_target)
        vert = self._step_to_range(2 / (self.height - 1), height_target)
        width_grid = horiz[None, :].expand(self.height_target, -1)
        height_grid = vert[:, None].expand(-1, self.width_target)
        # "clone" necessary, https://github.com/pytorch/pytorch/issues/34880
        self.register_buffer("width_grid", width_grid.clone())
        self.register_buffer("height_grid", height_grid.clone())

    def _step_to_range(self, step, num_steps):
        # Symmetric range of offsets: step * (i - num_steps // 2).
        offsets = [step * (i - num_steps // 2) for i in range(num_steps)]
        return torch.tensor(offsets)

    def forward(self, inputs, coordinates):
        """Calculates centered crop around given x,y coordinates.

        Args:
            inputs [B x H x W] or [B x H x W x C]
            coordinates [B x 2] x,y coordinates

        Returns:
            [B x H' x W'] inputs cropped and centered around x,y coordinates.
        """
        assert inputs.shape[1] == self.height, "expected %d but found %d" % (
            self.height,
            inputs.shape[1],
        )
        assert inputs.shape[2] == self.width, "expected %d but found %d" % (
            self.width,
            inputs.shape[2],
        )

        has_channels = inputs.dim() != 3
        if has_channels:
            # NOTE(review): for a [B x H x W x C] input this permute yields
            # [B x W x C x H], not the [B x C x H x W] layout grid_sample
            # expects -- preserved as-is from the original; verify if the
            # 4-dim path is ever exercised.
            inputs = inputs.permute(0, 2, 3, 1)
        else:
            inputs = inputs.unsqueeze(1)
        inputs = inputs.float()

        x = coordinates[:, 0]
        y = coordinates[:, 1]
        x_shift = 2 / (self.width - 1) * (x.float() - self.width // 2)
        y_shift = 2 / (self.height - 1) * (y.float() - self.height // 2)

        grid = torch.stack(
            [
                self.width_grid[None, :, :] + x_shift[:, None, None],
                self.height_grid[None, :, :] + y_shift[:, None, None],
            ],
            dim=3,
        )

        sampled = F.grid_sample(inputs, grid, align_corners=True)
        crop = torch.round(sampled).squeeze(1).long()

        if has_channels:
            # [B x C x H x W] -> [B x H x W x C]
            crop = crop.permute(0, 2, 3, 1)
        return crop
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class BaseNet(NetHackNet):
    """Feature extractor over NLE observations.

    Concatenates up to four encoders and fuses them with an MLP:
    an MLP over the blstats features, a CNN over the full glyph map,
    a CNN or transformer over a crop centered on the agent, and an
    optional character-level encoder over the game message.
    """

    def __init__(self, processed_observation_shape, flags: DictConfig):
        super(BaseNet, self).__init__()
        self.observation_space = processed_observation_shape.original_space
        # Glyph map spatial extent.
        self.H = self.observation_space["glyphs"].shape[0]
        self.W = self.observation_space["glyphs"].shape[1]
        self.k_dim = flags.embedding_dim
        self.h_dim = flags.hidden_dim
        self.crop_model = flags.crop_model
        self.crop_dim = flags.crop_dim
        self.num_features = NUM_FEATURES
        # Egocentric crop of the glyph map around the agent's x,y.
        self.crop = Crop(self.H, self.W, self.crop_dim, self.crop_dim)
        self.glyph_type = flags.glyph_type
        self.glyph_embedding = RLLibGlyphEmbedding(
            flags.glyph_type,
            flags.embedding_dim,
            None,
            flags.use_index_select,
        )
        K = flags.embedding_dim  # number of input filters
        # NOTE: F here shadows the module-level torch.nn.functional alias,
        # but only inside __init__.
        F = 3  # filter dimensions
        S = 1  # stride
        P = 1  # padding
        M = 16  # number of intermediate filters
        self.Y = 8  # number of output filters
        L = flags.layers  # number of convnet layers
        in_channels = [K] + [M] * (L - 1)
        out_channels = [M] * (L - 1) + [self.Y]

        def interleave(xs, ys):
            # [x0, y0, x1, y1, ...] -- used to alternate Conv2d and ELU.
            return [val for pair in zip(xs, ys) for val in pair]

        conv_extract = [
            nn.Conv2d(
                in_channels=in_channels[i],
                out_channels=out_channels[i],
                kernel_size=(F, F),
                stride=S,
                padding=P,
            )
            for i in range(L)
        ]
        # CNN over the full glyph map: L conv layers, each followed by ELU.
        self.extract_representation = nn.Sequential(
            *interleave(conv_extract, [nn.ELU()] * len(conv_extract))
        )
        # Encoder for the egocentric crop: transformer or CNN, per config.
        if self.crop_model == "transformer":
            self.extract_crop_representation = TransformerEncoder(
                K,
                N=L,
                heads=8,
                height=self.crop_dim,
                width=self.crop_dim,
                device=None,
            )
        elif self.crop_model == "cnn":
            conv_extract_crop = [
                nn.Conv2d(
                    in_channels=in_channels[i],
                    out_channels=out_channels[i],
                    kernel_size=(F, F),
                    stride=S,
                    padding=P,
                )
                for i in range(L)
            ]
            self.extract_crop_representation = nn.Sequential(
                *interleave(conv_extract_crop, [nn.ELU()] * len(conv_extract))
            )
        # MESSAGING MODEL
        if "msg" not in flags:
            self.msg_model = "none"
        else:
            self.msg_model = flags.msg.model
            self.msg_hdim = flags.msg.hidden_dim
            self.msg_edim = flags.msg.embedding_dim
        if self.msg_model in ("gru", "lstm", "lt_cnn"):
            # character-based embeddings
            self.char_lt = nn.Embedding(
                NUM_CHARS, self.msg_edim, padding_idx=PAD_CHAR
            )
        else:
            # forward will set up one-hot inputs for the cnn, no lt needed
            pass
        if self.msg_model.endswith("cnn"):
            # from Zhang et al, 2016
            # Character-level Convolutional Networks for Text Classification
            # https://arxiv.org/abs/1509.01626
            if self.msg_model == "cnn":
                # inputs will be one-hot vectors, as done in paper
                self.conv1 = nn.Conv1d(NUM_CHARS, self.msg_hdim, kernel_size=7)
            elif self.msg_model == "lt_cnn":
                # replace one-hot inputs with learned embeddings
                self.conv1 = nn.Conv1d(
                    self.msg_edim, self.msg_hdim, kernel_size=7
                )
            else:
                raise NotImplementedError("msg.model == %s", flags.msg.model)
            # remaining convolutions, relus, pools, and a small FC network
            self.conv2_6_fc = nn.Sequential(
                nn.ReLU(),
                nn.MaxPool1d(kernel_size=3, stride=3),
                # conv2
                nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=7),
                nn.ReLU(),
                nn.MaxPool1d(kernel_size=3, stride=3),
                # conv3
                nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
                nn.ReLU(),
                # conv4
                nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
                nn.ReLU(),
                # conv5
                nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
                nn.ReLU(),
                # conv6
                nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
                nn.ReLU(),
                nn.MaxPool1d(kernel_size=3, stride=3),
                # fc receives -- [ B x h_dim x 5 ]
                Flatten(),
                nn.Linear(5 * self.msg_hdim, 2 * self.msg_hdim),
                nn.ReLU(),
                nn.Linear(2 * self.msg_hdim, self.msg_hdim),
            )  # final output -- [ B x h_dim x 5 ]
        elif self.msg_model in ("gru", "lstm"):

            def rnn(flag):
                return nn.LSTM if flag == "lstm" else nn.GRU

            # Bidirectional RNN; msg_hdim // 2 per direction so the
            # concatenated output is msg_hdim wide.
            self.char_rnn = rnn(self.msg_model)(
                self.msg_edim,
                self.msg_hdim // 2,
                batch_first=True,
                bidirectional=True,
            )
        elif self.msg_model != "none":
            raise NotImplementedError("msg.model == %s", flags.msg.model)
        # Two-layer MLP over the blstats feature vector.
        self.embed_features = nn.Sequential(
            nn.Linear(self.num_features, self.k_dim),
            nn.ReLU(),
            nn.Linear(self.k_dim, self.k_dim),
            nn.ReLU(),
        )
        self.equalize_input_dim = flags.equalize_input_dim
        if not self.equalize_input_dim:
            # just added up the output dimensions of the input featurizers
            # feature / status dim
            out_dim = self.k_dim
            # CNN over full glyph map
            out_dim += self.H * self.W * self.Y
            if self.crop_model == "transformer":
                out_dim += self.crop_dim ** 2 * K
            elif self.crop_model == "cnn":
                out_dim += self.crop_dim ** 2 * self.Y
            # messaging model
            if self.msg_model != "none":
                out_dim += self.msg_hdim
        else:
            # otherwise, project them all to h_dim
            NUM_INPUTS = 4 if self.msg_model != "none" else 3
            project_hdim = flags.equalize_factor * self.h_dim
            out_dim = project_hdim * NUM_INPUTS
            # set up linear layers for projections
            self.project_feature_dim = nn.Linear(self.k_dim, project_hdim)
            self.project_glyph_dim = nn.Linear(
                self.H * self.W * self.Y, project_hdim
            )
            c__2 = self.crop_dim ** 2
            if self.crop_model == "transformer":
                self.project_crop_dim = nn.Linear(c__2 * K, project_hdim)
            elif self.crop_model == "cnn":
                self.project_crop_dim = nn.Linear(c__2 * self.Y, project_hdim)
            if self.msg_model != "none":
                self.project_msg_dim = nn.Linear(self.msg_hdim, project_hdim)
        # Final fusion MLP over the concatenated representations.
        self.fc = nn.Sequential(
            nn.Linear(out_dim, self.h_dim),
            nn.ReLU(),
            nn.Linear(self.h_dim, self.h_dim),
            nn.ReLU(),
        )

    def prepare_input(self, inputs):
        """Split the observation dict into glyph tensors and blstats."""
        # -- [B x H x W]
        B, H, W = inputs["glyphs"].shape
        # take our chosen glyphs and merge the time and batch
        glyphs = self.glyph_embedding.prepare_input(inputs)
        # -- [B x F]
        features = inputs["blstats"]
        return glyphs, features

    def forward(self, inputs):
        """Return the fused [B x h_dim] state representation."""
        B, *_ = inputs["glyphs"].shape
        glyphs, features = self.prepare_input(inputs)
        # -- [B x 2] x,y coordinates
        coordinates = features[:, :2]
        # -- [B x K]
        features_emb = self.embed_features(features)
        if self.equalize_input_dim:
            features_emb = self.project_feature_dim(features_emb)
        assert features_emb.shape[0] == B
        reps = [features_emb]  # either k_dim or project_hdim
        # -- [B x H' x W']
        crop = self.glyph_embedding.GlyphTuple(
            *[self.crop(g, coordinates) for g in glyphs]
        )
        # -- [B x H' x W' x K]
        crop_emb = self.glyph_embedding(crop)
        if self.crop_model == "transformer":
            # -- [B x W' x H' x K]
            crop_rep = self.extract_crop_representation(crop_emb, mask=None)
        elif self.crop_model == "cnn":
            # -- [B x K x W' x H']
            crop_emb = crop_emb.transpose(1, 3)
            # -- [B x W' x H' x K]
            crop_rep = self.extract_crop_representation(crop_emb)
        # -- [B x K']
        crop_rep = crop_rep.view(B, -1)
        if self.equalize_input_dim:
            crop_rep = self.project_crop_dim(crop_rep)
        assert crop_rep.shape[0] == B
        reps.append(crop_rep)  # either k_dim or project_hdim
        # -- [B x H x W x K]
        glyphs_emb = self.glyph_embedding(glyphs)
        # glyphs_emb = self.embed(glyphs)
        # -- [B x K x W x H]
        glyphs_emb = glyphs_emb.transpose(1, 3)
        # -- [B x W x H x K]
        glyphs_rep = self.extract_representation(glyphs_emb)
        # -- [B x K']
        glyphs_rep = glyphs_rep.view(B, -1)
        # -- [B x K']
        if self.equalize_input_dim:
            glyphs_rep = self.project_glyph_dim(glyphs_rep)
        assert glyphs_rep.shape[0] == B
        # -- [B x K'']
        reps.append(glyphs_rep)
        # MESSAGING MODEL
        if self.msg_model != "none":
            messages = inputs["message"].long()
            if self.msg_model == "cnn":
                # convert messages to one-hot, [B x 96 x 256]
                one_hot = F.one_hot(messages, num_classes=NUM_CHARS).transpose(
                    1, 2
                )
                char_rep = self.conv2_6_fc(self.conv1(one_hot.float()))
            elif self.msg_model == "lt_cnn":
                # [B x E x 256 ]
                char_emb = self.char_lt(messages).transpose(1, 2)
                char_rep = self.conv2_6_fc(self.conv1(char_emb))
            else:  # lstm, gru
                char_emb = self.char_lt(messages)
                output = self.char_rnn(char_emb)[0]
                # NOTE(review): the RNN output width is msg_hdim, yet these
                # slices use self.h_dim // 2 -- only consistent when
                # h_dim == msg_hdim. Verify against the configs in use.
                fwd_rep = output[:, -1, : self.h_dim // 2]
                bwd_rep = output[:, 0, self.h_dim // 2 :]
                char_rep = torch.cat([fwd_rep, bwd_rep], dim=1)
            if self.equalize_input_dim:
                char_rep = self.project_msg_dim(char_rep)
            reps.append(char_rep)
        st = torch.cat(reps, dim=1)
        # -- [B x K]
        st = self.fc(st)
        return st
class RLLibNLENetwork(TorchModelV2, nn.Module):
    """RLlib model wrapper delegating feature extraction to BaseNet."""

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: Optional[int],
        model_config: dict,
        name: str,
        **kwargs: Any,
    ):
        # Both parents need explicit initialization: TorchModelV2 is not an
        # nn.Module itself.
        TorchModelV2.__init__(
            self, observation_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)

        flags = model_config["custom_model_config"]["flags"]
        self.num_outputs = num_outputs or flags.hidden_dim
        # Device placement is sorted later by RLlib.
        self.base = BaseNet(observation_space, flags)

    @override(TorchModelV2)
    def forward(self, x: Dict[str, Any], *_: Any) -> Tuple[torch.Tensor, list]:
        features = self.base(x["obs"])
        return features, []
# Expose the model to RLlib configs under the key "rllib_nle_model".
ModelCatalog.register_custom_model("rllib_nle_model", RLLibNLENetwork)
| [
"torch.nn.ReLU",
"torch.sqrt",
"ray.rllib.utils.annotations.override",
"torch.sum",
"torch.nn.Module.__init__",
"torch.nn.MaxPool1d",
"torch.nn.functional.grid_sample",
"ray.rllib.models.torch.torch_modelv2.TorchModelV2.__init__",
"torch.nn.Embedding",
"collections.namedtuple",
"minihack.agent.c... | [((18373, 18443), 'ray.rllib.models.ModelCatalog.register_custom_model', 'ModelCatalog.register_custom_model', (['"""rllib_nle_model"""', 'RLLibNLENetwork'], {}), "('rllib_nle_model', RLLibNLENetwork)\n", (18407, 18443), False, 'from ray.rllib.models import ModelCatalog\n'), ((2499, 2609), 'collections.namedtuple', 'collections.namedtuple', (['"""AgentOutput"""', '"""action policy_logits baseline chosen_option teacher_logits pot_sm"""'], {}), "('AgentOutput',\n 'action policy_logits baseline chosen_option teacher_logits pot_sm')\n", (2521, 2609), False, 'import collections\n'), ((3326, 3341), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3339, 3341), False, 'import torch\n'), ((3939, 3954), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3952, 3954), False, 'import torch\n'), ((18229, 18251), 'ray.rllib.utils.annotations.override', 'override', (['TorchModelV2'], {}), '(TorchModelV2)\n', (18237, 18251), False, 'from ray.rllib.utils.annotations import override\n'), ((3500, 3523), 'torch.sum', 'torch.sum', (['reward_batch'], {}), '(reward_batch)\n', (3509, 3523), False, 'import torch\n'), ((4077, 4123), 'torch.sqrt', 'torch.sqrt', (['(self.reward_m2 / self.reward_count)'], {}), '(self.reward_m2 / self.reward_count)\n', (4087, 4123), False, 'import torch\n'), ((6107, 6241), 'torch.stack', 'torch.stack', (['[self.width_grid[None, :, :] + x_shift[:, None, None], self.height_grid[\n None, :, :] + y_shift[:, None, None]]'], {'dim': '(3)'}), '([self.width_grid[None, :, :] + x_shift[:, None, None], self.\n height_grid[None, :, :] + y_shift[:, None, None]], dim=3)\n', (6118, 6241), False, 'import torch\n'), ((17434, 17456), 'torch.cat', 'torch.cat', (['reps'], {'dim': '(1)'}), '(reps, dim=1)\n', (17443, 17456), False, 'import torch\n'), ((17811, 17908), 'ray.rllib.models.torch.torch_modelv2.TorchModelV2.__init__', 'TorchModelV2.__init__', (['self', 'observation_space', 'action_space', 'num_outputs', 'model_config', 'name'], {}), '(self, 
observation_space, action_space, num_outputs,\n model_config, name)\n', (17832, 17908), False, 'from ray.rllib.models.torch.torch_modelv2 import TorchModelV2\n'), ((17996, 18020), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (18014, 18020), False, 'from torch import nn\n'), ((2741, 2756), 'torch.zeros', 'torch.zeros', (['()'], {}), '(())\n', (2752, 2756), False, 'import torch\n'), ((2800, 2815), 'torch.zeros', 'torch.zeros', (['()'], {}), '(())\n', (2811, 2815), False, 'import torch\n'), ((3637, 3678), 'torch.sum', 'torch.sum', (['((reward_batch - new_mean) ** 2)'], {}), '((reward_batch - new_mean) ** 2)\n', (3646, 3678), False, 'import torch\n'), ((8067, 8179), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels[i]', 'out_channels': 'out_channels[i]', 'kernel_size': '(F, F)', 'stride': 'S', 'padding': 'P'}), '(in_channels=in_channels[i], out_channels=out_channels[i],\n kernel_size=(F, F), stride=S, padding=P)\n', (8076, 8179), False, 'from torch import nn\n'), ((8538, 8634), 'minihack.agent.common.models.transformer.TransformerEncoder', 'TransformerEncoder', (['K'], {'N': 'L', 'heads': '(8)', 'height': 'self.crop_dim', 'width': 'self.crop_dim', 'device': 'None'}), '(K, N=L, heads=8, height=self.crop_dim, width=self.\n crop_dim, device=None)\n', (8556, 8634), False, 'from minihack.agent.common.models.transformer import TransformerEncoder\n'), ((9640, 9700), 'torch.nn.Embedding', 'nn.Embedding', (['NUM_CHARS', 'self.msg_edim'], {'padding_idx': 'PAD_CHAR'}), '(NUM_CHARS, self.msg_edim, padding_idx=PAD_CHAR)\n', (9652, 9700), False, 'from torch import nn\n'), ((12276, 12316), 'torch.nn.Linear', 'nn.Linear', (['self.num_features', 'self.k_dim'], {}), '(self.num_features, self.k_dim)\n', (12285, 12316), False, 'from torch import nn\n'), ((12330, 12339), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12337, 12339), False, 'from torch import nn\n'), ((12353, 12386), 'torch.nn.Linear', 'nn.Linear', (['self.k_dim', 
'self.k_dim'], {}), '(self.k_dim, self.k_dim)\n', (12362, 12386), False, 'from torch import nn\n'), ((12400, 12409), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12407, 12409), False, 'from torch import nn\n'), ((13387, 13422), 'torch.nn.Linear', 'nn.Linear', (['self.k_dim', 'project_hdim'], {}), '(self.k_dim, project_hdim)\n', (13396, 13422), False, 'from torch import nn\n'), ((13460, 13509), 'torch.nn.Linear', 'nn.Linear', (['(self.H * self.W * self.Y)', 'project_hdim'], {}), '(self.H * self.W * self.Y, project_hdim)\n', (13469, 13509), False, 'from torch import nn\n'), ((13988, 14018), 'torch.nn.Linear', 'nn.Linear', (['out_dim', 'self.h_dim'], {}), '(out_dim, self.h_dim)\n', (13997, 14018), False, 'from torch import nn\n'), ((14032, 14041), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14039, 14041), False, 'from torch import nn\n'), ((14055, 14088), 'torch.nn.Linear', 'nn.Linear', (['self.h_dim', 'self.h_dim'], {}), '(self.h_dim, self.h_dim)\n', (14064, 14088), False, 'from torch import nn\n'), ((14102, 14111), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14109, 14111), False, 'from torch import nn\n'), ((10178, 10228), 'torch.nn.Conv1d', 'nn.Conv1d', (['NUM_CHARS', 'self.msg_hdim'], {'kernel_size': '(7)'}), '(NUM_CHARS, self.msg_hdim, kernel_size=7)\n', (10187, 10228), False, 'from torch import nn\n'), ((10694, 10703), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10701, 10703), False, 'from torch import nn\n'), ((10721, 10758), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(3)', 'stride': '(3)'}), '(kernel_size=3, stride=3)\n', (10733, 10758), False, 'from torch import nn\n'), ((10800, 10854), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_hdim', 'self.msg_hdim'], {'kernel_size': '(7)'}), '(self.msg_hdim, self.msg_hdim, kernel_size=7)\n', (10809, 10854), False, 'from torch import nn\n'), ((10872, 10881), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10879, 10881), False, 'from torch import nn\n'), ((10899, 10936), 'torch.nn.MaxPool1d', 
'nn.MaxPool1d', ([], {'kernel_size': '(3)', 'stride': '(3)'}), '(kernel_size=3, stride=3)\n', (10911, 10936), False, 'from torch import nn\n'), ((10978, 11032), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_hdim', 'self.msg_hdim'], {'kernel_size': '(3)'}), '(self.msg_hdim, self.msg_hdim, kernel_size=3)\n', (10987, 11032), False, 'from torch import nn\n'), ((11050, 11059), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11057, 11059), False, 'from torch import nn\n'), ((11101, 11155), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_hdim', 'self.msg_hdim'], {'kernel_size': '(3)'}), '(self.msg_hdim, self.msg_hdim, kernel_size=3)\n', (11110, 11155), False, 'from torch import nn\n'), ((11173, 11182), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11180, 11182), False, 'from torch import nn\n'), ((11224, 11278), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_hdim', 'self.msg_hdim'], {'kernel_size': '(3)'}), '(self.msg_hdim, self.msg_hdim, kernel_size=3)\n', (11233, 11278), False, 'from torch import nn\n'), ((11296, 11305), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11303, 11305), False, 'from torch import nn\n'), ((11347, 11401), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_hdim', 'self.msg_hdim'], {'kernel_size': '(3)'}), '(self.msg_hdim, self.msg_hdim, kernel_size=3)\n', (11356, 11401), False, 'from torch import nn\n'), ((11419, 11428), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11426, 11428), False, 'from torch import nn\n'), ((11446, 11483), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(3)', 'stride': '(3)'}), '(kernel_size=3, stride=3)\n', (11458, 11483), False, 'from torch import nn\n'), ((11579, 11626), 'torch.nn.Linear', 'nn.Linear', (['(5 * self.msg_hdim)', '(2 * self.msg_hdim)'], {}), '(5 * self.msg_hdim, 2 * self.msg_hdim)\n', (11588, 11626), False, 'from torch import nn\n'), ((11644, 11653), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11651, 11653), False, 'from torch import nn\n'), ((11671, 11714), 'torch.nn.Linear', 'nn.Linear', (['(2 * 
self.msg_hdim)', 'self.msg_hdim'], {}), '(2 * self.msg_hdim, self.msg_hdim)\n', (11680, 11714), False, 'from torch import nn\n'), ((13667, 13700), 'torch.nn.Linear', 'nn.Linear', (['(c__2 * K)', 'project_hdim'], {}), '(c__2 * K, project_hdim)\n', (13676, 13700), False, 'from torch import nn\n'), ((13903, 13941), 'torch.nn.Linear', 'nn.Linear', (['self.msg_hdim', 'project_hdim'], {}), '(self.msg_hdim, project_hdim)\n', (13912, 13941), False, 'from torch import nn\n'), ((2862, 2877), 'torch.zeros', 'torch.zeros', (['()'], {}), '(())\n', (2873, 2877), False, 'import torch\n'), ((8830, 8942), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels[i]', 'out_channels': 'out_channels[i]', 'kernel_size': '(F, F)', 'stride': 'S', 'padding': 'P'}), '(in_channels=in_channels[i], out_channels=out_channels[i],\n kernel_size=(F, F), stride=S, padding=P)\n', (8839, 8942), False, 'from torch import nn\n'), ((10368, 10422), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.msg_edim', 'self.msg_hdim'], {'kernel_size': '(7)'}), '(self.msg_edim, self.msg_hdim, kernel_size=7)\n', (10377, 10422), False, 'from torch import nn\n'), ((13784, 13822), 'torch.nn.Linear', 'nn.Linear', (['(c__2 * self.Y)', 'project_hdim'], {}), '(c__2 * self.Y, project_hdim)\n', (13793, 13822), False, 'from torch import nn\n'), ((17250, 17286), 'torch.cat', 'torch.cat', (['[fwd_rep, bwd_rep]'], {'dim': '(1)'}), '([fwd_rep, bwd_rep], dim=1)\n', (17259, 17286), False, 'import torch\n'), ((16595, 16637), 'torch.nn.functional.one_hot', 'F.one_hot', (['messages'], {'num_classes': 'NUM_CHARS'}), '(messages, num_classes=NUM_CHARS)\n', (16604, 16637), True, 'from torch.nn import functional as F\n'), ((6361, 6408), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['inputs', 'grid'], {'align_corners': '(True)'}), '(inputs, grid, align_corners=True)\n', (6374, 6408), True, 'from torch.nn import functional as F\n'), ((8404, 8412), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (8410, 8412), False, 'from torch import 
nn\n'), ((9217, 9225), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (9223, 9225), False, 'from torch import nn\n')] |
import os.path
from depthaware.data.base_dataset import *
from PIL import Image
import time
def make_dataset_fromlst(dataroot, listfilename):
    """Parse a NYU-style list file into per-modality path lists.

    NYUlist format (one sample per line, space separated):
    imagepath seglabelpath depthpath HHApath

    Each path is joined onto *dataroot*. Blank lines (including the usual
    trailing newline) are skipped; the original version crashed on them
    with a ValueError during tuple unpacking.

    Returns a dict with keys 'images', 'segs', 'HHAs', 'depths', each a
    list of absolute-ish paths, in file order.
    """
    images = []
    segs = []
    depths = []
    HHAs = []
    with open(listfilename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            imgname, segname, depthname, HHAname = line.split(' ')
            images.append(os.path.join(dataroot, imgname))
            segs.append(os.path.join(dataroot, segname))
            depths.append(os.path.join(dataroot, depthname))
            HHAs.append(os.path.join(dataroot, HHAname))
    return {'images': images, 'segs': segs, 'HHAs': HHAs, 'depths': depths}
class SUNRGBDDataset(BaseDataset):
    """Training split of SUN RGB-D: image / depth / segmentation / HHA."""

    def __init__(self, opt):
        self.opt = opt
        # Time-based seed so random augmentation differs between runs.
        np.random.seed(int(time.time()))
        self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.list)
        self.len = len(self.paths_dict['images'])
        # self.label_weight = torch.Tensor(label_weight)
        self.datafile = 'sunrgbd_dataset.py'

    def __getitem__(self, index):
        """Load and jointly augment one (image, depth, seg, HHA) sample."""
        #self.paths['images'][index]
        # print self.opt.scale,self.opt.flip,self.opt.crop,self.opt.colorjitter
        img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
        # Reverse channel order of the HHA encoding (RGB -> BGR on disk).
        HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
        # Subtracting 1 in uint8 wraps label 0 around to 255 -- presumably
        # mapping "unlabeled" to an ignore index; verify against the labels.
        seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
        depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
        assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
        assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
        # Rotate each 16-bit depth value right by 3 bits (right_shift OR
        # left_shift by 13) -- undoes the circular shift SUN RGB-D
        # presumably applies to its depth PNGs.
        depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
        depth = depth.astype(np.float32)/120. # 1/5 * depth
        # Shared augmentation params so all modalities are cropped/flipped
        # identically; maxcrop=.7 limits the random crop.
        params = get_params_sunrgbd(self.opt, seg.shape, maxcrop=.7)
        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
        # Nearest-neighbor resampling keeps label values discrete.
        seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
        if self.opt.inputmode == 'bgr-mean':
            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
        else:
            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
        # print img_tensor_tranformed
        # print(np.unique(depth_tensor_tranformed.numpy()).shape)
        # print img_tensor_tranformed.size()
        return {'image':img_tensor_tranformed,
                'depth':depth_tensor_tranformed,
                'seg': seg_tensor_tranformed,
                'HHA': HHA_tensor_tranformed,
                'imgpath': self.paths_dict['segs'][index]}

    def __len__(self):
        return self.len

    def name(self):
        return 'sunrgbd_dataset'
class SUNRGBDDataset_val(BaseDataset):
    """Validation split of SUN RGB-D; deterministic (fixed seed, test params)."""

    def __init__(self, opt):
        self.opt = opt
        # Fixed seed for reproducible validation.
        np.random.seed(8964)
        self.paths_dict = make_dataset_fromlst(opt.dataroot, opt.vallist)
        self.len = len(self.paths_dict['images'])

    def __getitem__(self, index):
        """Load one (image, depth, seg, HHA) sample without random augmentation."""
        #self.paths['images'][index]
        img = np.asarray(Image.open(self.paths_dict['images'][index]))#.astype(np.uint8)
        # Reverse channel order of the HHA encoding (RGB -> BGR on disk).
        HHA = np.asarray(Image.open(self.paths_dict['HHAs'][index]))[:,:,::-1]
        # Subtracting 1 in uint8 wraps label 0 around to 255 -- presumably
        # mapping "unlabeled" to an ignore index; verify against the labels.
        seg = np.asarray(Image.open(self.paths_dict['segs'][index])).astype(np.uint8)-1
        depth = np.asarray(Image.open(self.paths_dict['depths'][index])).astype(np.uint16)
        # Rotate each 16-bit depth value right by 3 bits (same decoding as
        # the training dataset above).
        depth = np.bitwise_or(np.right_shift(depth,3),np.left_shift(depth,16-3))
        depth = depth.astype(np.float32)/120. # 1/5 * depth
        assert (img.shape[0]==HHA.shape[0]==seg.shape[0]==depth.shape[0])
        assert (img.shape[1]==HHA.shape[1]==seg.shape[1]==depth.shape[1])
        # test=True requests deterministic (non-augmenting) parameters.
        params = get_params_sunrgbd(self.opt, seg.shape, test=True)
        depth_tensor_tranformed = transform(depth, params, normalize=False,istrain=self.opt.isTrain)
        # Nearest-neighbor resampling keeps label values discrete.
        seg_tensor_tranformed = transform(seg, params, normalize=False,method='nearest',istrain=self.opt.isTrain)
        # HHA_tensor_tranformed = transform(HHA, params,istrain=self.opt.isTrain)
        if self.opt.inputmode == 'bgr-mean':
            img_tensor_tranformed = transform(img, params, normalize=False, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, normalize=False, istrain=self.opt.isTrain, option=2)
        else:
            img_tensor_tranformed = transform(img, params, istrain=self.opt.isTrain, option=1)
            HHA_tensor_tranformed = transform(HHA, params, istrain=self.opt.isTrain, option=2)
        return {'image':img_tensor_tranformed,
                'depth':depth_tensor_tranformed,
                'seg': seg_tensor_tranformed,
                'HHA': HHA_tensor_tranformed,
                'imgpath': self.paths_dict['segs'][index]}

    def __len__(self):
        return self.len

    def name(self):
        return 'sunrgbd_dataset_Val'
| [
"PIL.Image.open",
"time.time"
] | [((1271, 1315), 'PIL.Image.open', 'Image.open', (["self.paths_dict['images'][index]"], {}), "(self.paths_dict['images'][index])\n", (1281, 1315), False, 'from PIL import Image\n'), ((3487, 3531), 'PIL.Image.open', 'Image.open', (["self.paths_dict['images'][index]"], {}), "(self.paths_dict['images'][index])\n", (3497, 3531), False, 'from PIL import Image\n'), ((857, 868), 'time.time', 'time.time', ([], {}), '()\n', (866, 868), False, 'import time\n'), ((1360, 1402), 'PIL.Image.open', 'Image.open', (["self.paths_dict['HHAs'][index]"], {}), "(self.paths_dict['HHAs'][index])\n", (1370, 1402), False, 'from PIL import Image\n'), ((3576, 3618), 'PIL.Image.open', 'Image.open', (["self.paths_dict['HHAs'][index]"], {}), "(self.paths_dict['HHAs'][index])\n", (3586, 3618), False, 'from PIL import Image\n'), ((1529, 1573), 'PIL.Image.open', 'Image.open', (["self.paths_dict['depths'][index]"], {}), "(self.paths_dict['depths'][index])\n", (1539, 1573), False, 'from PIL import Image\n'), ((3745, 3789), 'PIL.Image.open', 'Image.open', (["self.paths_dict['depths'][index]"], {}), "(self.paths_dict['depths'][index])\n", (3755, 3789), False, 'from PIL import Image\n'), ((1439, 1481), 'PIL.Image.open', 'Image.open', (["self.paths_dict['segs'][index]"], {}), "(self.paths_dict['segs'][index])\n", (1449, 1481), False, 'from PIL import Image\n'), ((3655, 3697), 'PIL.Image.open', 'Image.open', (["self.paths_dict['segs'][index]"], {}), "(self.paths_dict['segs'][index])\n", (3665, 3697), False, 'from PIL import Image\n')] |
from Arquivo1 import Produto
# READ
# Query the database.
# 1. Retrieve every record: Produto.objects() with no filter arguments
#    presumably returns a queryset of all Produto documents -- verify
#    against the Produto model in Arquivo1.
produtos = Produto.objects()
print(produtos)
# Print the name and price of each product.
for produto in produtos:
    print(produto.Nome, produto.Valor)
"Arquivo1.Produto.objects"
] | [((128, 145), 'Arquivo1.Produto.objects', 'Produto.objects', ([], {}), '()\n', (143, 145), False, 'from Arquivo1 import Produto\n')] |
from flask_script import Manager
from src import app
# Flask-Script CLI manager bound to the application instance.
# NOTE(review): Flask-Script is unmaintained; Flask's built-in CLI
# ("flask" command) is the modern replacement -- consider migrating.
manager = Manager(app)
if __name__ == "__main__":
    # Dispatch command-line commands (e.g. "python manage.py runserver").
    manager.run()
| [
"flask_script.Manager"
] | [((66, 78), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (73, 78), False, 'from flask_script import Manager\n')] |
from django.test import TestCase, Client
from django.urls import reverse
from shop.products.models import Product
from tests.base.mixins import ProductTestUtils
class ProductDetailsTest(ProductTestUtils, TestCase):
    """Tests for the product_details view."""

    def setUp(self):
        self.client = Client()
        self.product = self.create_product(
            title="Barry",
            price=555.55,
            image='media/products/Dart.png',
            description="dasd",
            is_sold=False,
        )

    def test_getProductDetails(self):
        """An existing product's detail page responds with HTTP 200."""
        response = self.client.get(
            reverse('product_details', kwargs={'pk': self.product.id})
        )
        self.assertEqual(200, response.status_code)

    def test_showErrorIfProductDoesNotExist(self):
        """Requesting a deleted product must not succeed.

        The original test fetched an existing product, so the except
        branch never ran, and `self.assertRaises(Exception)` without a
        callable asserts nothing -- the test passed vacuously. Delete the
        product first so the missing-product path is actually exercised.
        """
        missing_pk = self.product.id  # delete() resets self.product.pk to None
        self.product.delete()
        try:
            response = self.client.get(
                reverse('product_details', kwargs={'pk': missing_pk})
            )
        except Product.DoesNotExist:
            pass  # the view propagated the lookup failure, as expected
        else:
            # If the view handles the missing product itself (e.g. 404),
            # it must at least not report success.
            self.assertNotEqual(200, response.status_code)
| [
"django.urls.reverse",
"django.test.Client"
] | [((261, 269), 'django.test.Client', 'Client', ([], {}), '()\n', (267, 269), False, 'from django.test import TestCase, Client\n'), ((556, 614), 'django.urls.reverse', 'reverse', (['"""product_details"""'], {'kwargs': "{'pk': self.product.id}"}), "('product_details', kwargs={'pk': self.product.id})\n", (563, 614), False, 'from django.urls import reverse\n'), ((762, 820), 'django.urls.reverse', 'reverse', (['"""product_details"""'], {'kwargs': "{'pk': self.product.id}"}), "('product_details', kwargs={'pk': self.product.id})\n", (769, 820), False, 'from django.urls import reverse\n')] |
import socket
import threading
import os
import sys
from pathlib import Path
#---------------------------------------------------
def ReadLine(conn):
    """Read one CR/LF-terminated line from *conn* and return it as str.

    Carriage returns are dropped; the line feed terminates the line and is
    not included. Returns the sentinel 0 (kept for compatibility with the
    callers' ``data == 0`` check) when the peer closes the connection or a
    socket error occurs.

    Bug fixed: the original decoded each byte individually, which raised
    UnicodeDecodeError on any multi-byte UTF-8 character; bytes are now
    accumulated and decoded once per line.
    """
    raw = bytearray()
    while True:
        try:
            byte = conn.recv(1)
        except OSError:  # socket errors only; was a bare except
            print('O cliente encerrou')
            return 0
        if not byte:
            # Peer closed the connection.
            return 0
        if byte == b'\r':
            continue
        if byte == b'\n':
            break
        raw += byte
    # errors='replace' keeps a malformed sequence from killing the thread.
    return raw.decode('utf-8', errors='replace')
#------------------------------------------------
def Upload(conn, ip, file, port=9998):
    """Receive *file* from the client over a call-back data connection.

    Connects back to the client at (*ip*, *port*) -- default 9998, the
    protocol's data port -- reads the stream until EOF and writes it
    UTF-8 decoded into *file*. Status is reported on the control
    connection *conn*.

    Bugs fixed: if open() failed, the original fell through and hit a
    NameError on the undefined ``f`` (in both the loop and the except
    handler); it also wrote the final empty chunk before checking for EOF,
    and used bare ``except:``.
    """
    try:
        f = open(file, 'w+')
    except OSError:
        print('erro abertura arquivo')
        return
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((ip, port))
            while True:
                data = s.recv(1024)
                if not data:
                    break
                # NOTE(review): decoding per chunk can split a multi-byte
                # UTF-8 sequence at a 1024-byte boundary -- acceptable for
                # ASCII transfers, revisit for arbitrary content.
                f.write(data.decode('utf-8'))
        conn.send(bytes('TRANSMISSAO ENCERRADA\n', 'utf-8'))
    except (OSError, UnicodeDecodeError):
        conn.send(bytes('A PORTA DE DADOS NÃO ESTA ABERTA\n', 'utf-8'))
    finally:
        f.close()
#-----------------------------------------------
def Download(conn, ip, file, port=9998):
    """Send *file* to the client over a call-back data connection.

    Connects to the client at (*ip*, *port*) -- default 9998, the
    protocol's data port -- and transmits the whole file. Status is
    reported on the control connection *conn*.

    Bugs fixed: if open() failed, the original fell through and hit a
    NameError on the undefined ``f``; it also used ``send`` (which may
    transmit only part of the buffer) instead of ``sendall``, and bare
    ``except:``.
    """
    try:
        f = open(Path(file), 'rb')
    except OSError:
        print('erro abertura arquivo')
        return
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((ip, port))
            s.sendall(f.read())
        conn.send(bytes('TRANSMISSAO ENCERRADA\n', 'utf-8'))
    except OSError:
        print('ERRO DE DOWNLOAD')
        conn.send(bytes('A PORTA DE DADOS NÃO ESTA ABERTA\n', 'utf-8'))
    finally:
        f.close()
#------------------------------------------------
def TrataCliente(conn, addr):
    """Serve one client on the control connection until it disconnects.

    Protocol: the server sends a CRLF prompt, reads one command line, and
    replies with a text response. Supported commands: os.getcwd(),
    os.listdir(path), os.makedirs(path), upload(path), download(path).

    SECURITY NOTE(review): the path between the parentheses comes straight
    from the client and is passed to os.listdir/os.makedirs and to the
    file-transfer helpers unchecked -- a client can read or create
    anything the server process can (directory traversal).
    """
    while True:
        # Prompt the client, then read its next command line.
        conn.send(bytes('\r\n','utf-8'))
        data = ReadLine(conn)
        print('{} enviou {}'.format(addr,data))
        if data == 0:
            # ReadLine's sentinel: connection closed or socket error.
            break
        try:
            if data == 'os.getcwd()':
                res=os.getcwd()
                conn.send(bytes(res,'utf-8'))
            elif data.startswith('os.listdir'):
                # Argument is whatever sits between the parentheses.
                file = data.split('(')[1].split(')')[0]
                if file == '':
                    file = '.'
                res=os.listdir(file)
                conn.send(bytes(str(res),'utf-8'))
            elif data.startswith('os.makedirs'):
                file = data.split('(')[1].split(')')[0]
                print(file)
                if file != '':
                    os.makedirs(file)
                    conn.send(bytes('OK','utf-8'))
                else:
                    conn.send(bytes('NOK','utf-8'))
            elif data.startswith('upload'):
                try:
                    file = data.split('(')[1].split(')')[0]
                    # Call-back transfer: server connects to client's 9998.
                    Upload(conn, addr[0], file)
                except:
                    conn.send(bytes('COMANDO INVALIDO','utf-8'))
            elif data.startswith('download'):
                try:
                    file = data.split('(')[1].split(')')[0]
                    Download(conn, addr[0], file)
                except:
                    conn.send(bytes('COMANDO INVALIDO','utf-8'))
            else:
                print('teste:',data,'teste',len(data))
                conn.send(bytes('COMANDO DESCONHECIDO','utf-8'))
        except:
            # Catch-all keeps one bad command from killing the thread.
            conn.send(bytes('ERRO DESCONHECIDO\n','utf-8'))
    print('{} encerrou'.format(addr))
#---------------------------------------------------------
# MAIN PROGRAM
#---------------------------------------------------------
# Work from the script's own directory so relative file paths resolve there.
pydir= os.path.dirname(os.path.realpath(__file__))
print('Diretorio do script: ', pydir)
os.chdir(pydir)
print('Simple File Transfer Protocol Server\n')
# Control socket: clients connect here on port 9999; the data channel is a
# call-back from the server to the client's port 9998 (see Upload/Download).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind(('', 9999))
except:
    print('# erro de bind')
    sys.exit()
s.listen(5)
print('aguardando conexões na porta ', 9999)
print('Canal de controle: cliente ----> [9999] servidor')
print('Canal de dados (call back): servidor ----> [9998] cliente')
# Accept loop: each client is served on its own thread.
while True:
    conn, addr = s.accept()
    print('recebi uma conexao do cliente ', addr)
    t = threading.Thread( target=TrataCliente, args=(conn,addr,))
    t.start()
| [
"os.listdir",
"socket.socket",
"pathlib.Path",
"os.makedirs",
"os.getcwd",
"os.chdir",
"os.path.realpath",
"sys.exit",
"threading.Thread"
] | [((3962, 3977), 'os.chdir', 'os.chdir', (['pydir'], {}), '(pydir)\n', (3970, 3977), False, 'import os\n'), ((4036, 4085), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4049, 4085), False, 'import socket\n'), ((3894, 3920), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3910, 3920), False, 'import os\n'), ((4484, 4540), 'threading.Thread', 'threading.Thread', ([], {'target': 'TrataCliente', 'args': '(conn, addr)'}), '(target=TrataCliente, args=(conn, addr))\n', (4500, 4540), False, 'import threading\n'), ((737, 786), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (750, 786), False, 'import socket\n'), ((1459, 1508), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1472, 1508), False, 'import socket\n'), ((4157, 4167), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4165, 4167), False, 'import sys\n'), ((1361, 1371), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (1365, 1371), False, 'from pathlib import Path\n'), ((2198, 2209), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2207, 2209), False, 'import os\n'), ((2466, 2482), 'os.listdir', 'os.listdir', (['file'], {}), '(file)\n', (2476, 2482), False, 'import os\n'), ((2742, 2759), 'os.makedirs', 'os.makedirs', (['file'], {}), '(file)\n', (2753, 2759), False, 'import os\n')] |