| hexsha: stringlengths 40–40 | size: int64 1–1.03M | ext: stringclasses 10 values | lang: stringclasses 1 value | max_stars_repo_path: stringlengths 3–239 | max_stars_repo_name: stringlengths 5–130 | max_stars_repo_head_hexsha: stringlengths 40–78 | max_stars_repo_licenses: listlengths 1–10 | max_stars_count: int64 1–191k ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀ | max_issues_repo_path: stringlengths 3–239 | max_issues_repo_name: stringlengths 5–130 | max_issues_repo_head_hexsha: stringlengths 40–78 | max_issues_repo_licenses: listlengths 1–10 | max_issues_count: int64 1–67k ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀ | max_forks_repo_path: stringlengths 3–239 | max_forks_repo_name: stringlengths 5–130 | max_forks_repo_head_hexsha: stringlengths 40–78 | max_forks_repo_licenses: listlengths 1–10 | max_forks_count: int64 1–105k ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀ | content: stringlengths 1–1.03M | avg_line_length: float64 1–958k | max_line_length: int64 1–1.03M | alphanum_fraction: float64 0–1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7949b46abf93a8c89542a41e9995cdcc77854a17
| 2,053
|
py
|
Python
|
binding-python/runtime/src/main/python/etch/binding/support/Validator_long.py
|
apache/etch
|
5a875755019a7f342a07c8c368a50e3efb6ae68c
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2015-02-14T15:09:54.000Z
|
2021-11-10T15:09:45.000Z
|
binding-python/runtime/src/main/python/etch/binding/support/Validator_long.py
|
apache/etch
|
5a875755019a7f342a07c8c368a50e3efb6ae68c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
binding-python/runtime/src/main/python/etch/binding/support/Validator_long.py
|
apache/etch
|
5a875755019a7f342a07c8c368a50e3efb6ae68c
|
[
"ECL-2.0",
"Apache-2.0"
] | 14
|
2015-04-20T10:35:00.000Z
|
2021-11-10T15:09:35.000Z
|
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
import types
from ...python.Exceptions import *
from ...python.Types import *
from .TypeValidator import *
from ..transport.fmt.TypeCode import *
class Validator_long(NumberTypeValidator):
"""
Validator for long
"""
def __init__(self, nDims):
"""
Construct the validator.
        @param nDims the number of dimensions; 0 for a scalar.
"""
typs = [Long]
super(Validator_long,self).__init__(typs, None, nDims, "long[%d]" % nDims, Long, [TypeCode.LONGS, TypeCode.LONG])
def checkValue(self, value):
tc = super(Validator_long,self).checkValue(value)
if tc == TypeCode.LONG:
if value >= Byte.MIN_VALUE and value <= Byte.MAX_VALUE:
return TypeCode.BYTE
if value >= Short.MIN_VALUE and value <= Short.MAX_VALUE:
return TypeCode.SHORT
if value >= Integer.MIN_VALUE and value <= Integer.MAX_VALUE:
return TypeCode.INT
return tc
| 39.480769
| 121
| 0.608865
|
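The checkValue override in the Validator_long row above narrows an in-range long to the smallest integer type code before transmission. Here is a minimal sketch of that narrowing rule in plain Python, assuming the usual two's-complement bounds for Byte/Short/Integer (the Etch constants themselves are not shown in this file):

```python
# Sketch of the LONG narrowing rule; the bounds are the standard
# two's-complement ranges (an assumption, not imported from Etch).
BOUNDS = [
    ("BYTE", -2**7, 2**7 - 1),
    ("SHORT", -2**15, 2**15 - 1),
    ("INT", -2**31, 2**31 - 1),
]

def narrow_long(value):
    """Return the smallest type-code name whose range holds `value`."""
    for name, lo, hi in BOUNDS:
        if lo <= value <= hi:
            return name
    return "LONG"

assert narrow_long(100) == "BYTE"
assert narrow_long(40_000) == "INT"    # too big for SHORT
assert narrow_long(2**40) == "LONG"    # too big for INT
```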
7949b651a739be2fbff05378bef3ae2f5f118b99
| 613
|
py
|
Python
|
scaffold/wsgi.py
|
wooojtek/una
|
d06a15f763aebe25a1111ff69ffcd2a84b707df4
|
[
"Apache-2.0"
] | null | null | null |
scaffold/wsgi.py
|
wooojtek/una
|
d06a15f763aebe25a1111ff69ffcd2a84b707df4
|
[
"Apache-2.0"
] | null | null | null |
scaffold/wsgi.py
|
wooojtek/una
|
d06a15f763aebe25a1111ff69ffcd2a84b707df4
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for scaffold project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
from scaffold.boot import fix_path
fix_path()
import os
from django.core.wsgi import get_wsgi_application
from djangae.wsgi import DjangaeApplication
from djangae.utils import on_production
settings = "scaffold.settings_live" if on_production() else "scaffold.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings)
application = DjangaeApplication(get_wsgi_application())
| 26.652174
| 79
| 0.807504
|
7949b6d520b99186591675ae19340aa7c18660d6
| 3,609
|
py
|
Python
|
pid_control/src/pid_control/PID.py
|
andrewtron3000/hacdc-ros-pkg
|
f3178ebc5a71889066db102e3b57beb0dcc1a204
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pid_control/src/pid_control/PID.py
|
andrewtron3000/hacdc-ros-pkg
|
f3178ebc5a71889066db102e3b57beb0dcc1a204
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pid_control/src/pid_control/PID.py
|
andrewtron3000/hacdc-ros-pkg
|
f3178ebc5a71889066db102e3b57beb0dcc1a204
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2011 andrewtron3000
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
#
# PID controller adapted from "PID without a PhD" written by
# Tim Wescott
#
import roslib; roslib.load_manifest('pid_control')
import rospy
from pid_control.msg import PIDDiagnostics
class PIDController:
def __init__(self, namespace, commandMin, commandMax, iMin, iMax):
        self._namespace = namespace
        self._commandMin = commandMin
        self._commandMax = commandMax
        self._iMin = iMin
        self._iMax = iMax
self._iState = 0.0
self._dState = None
self._Publisher = rospy.Publisher(self._namespace + '/diagnostics', PIDDiagnostics)
def update(self, error, position):
# get the gains from the parameter server
try:
gains = rospy.get_param(self._namespace)
p, i, d = gains['p'], gains['i'], gains['d']
# proportional term
pTerm = p * error
# integral term
self._iState += error
if self._iState > self._iMax:
self._iState = self._iMax
elif self._iState < self._iMin:
self._iState = self._iMin
iTerm = i * self._iState
            # derivative term
            if self._dState is None:
self._dState = position
dTerm = d * (self._dState - position)
self._dState = position
# compute command
command = pTerm + iTerm + dTerm
if command > self._commandMax:
command = self._commandMax
elif command < self._commandMin:
command = self._commandMin
# publish diagnostics
self._Publisher.publish(PIDDiagnostics(error = error, command = command))
        except KeyError:
            # gains for this namespace are missing from the parameter server
            rospy.logwarn('PID gains not set')
            command = 0.0
return command
| 39.659341
| 91
| 0.636741
|
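The update method above is a textbook positional PID with an integrator clamp and a derivative taken on the measurement rather than on the error. Below is a ROS-free sketch of the same computation, with gains passed in directly instead of fetched from the parameter server (all names and default limits here are illustrative):

```python
# ROS-free sketch of the controller above (illustrative names): positional
# PID with an integrator clamp and derivative-on-measurement.
class SimplePID:
    def __init__(self, kp, ki, kd, i_min=-1.0, i_max=1.0,
                 out_min=-1.0, out_max=1.0):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.i_min, self.i_max = i_min, i_max
        self.out_min, self.out_max = out_min, out_max
        self.i_state = 0.0   # accumulated error
        self.d_state = None  # last measured position

    def update(self, error, position):
        p_term = self.kp * error
        # clamp the integrator to limit windup, as in the ROS version
        self.i_state = min(max(self.i_state + error, self.i_min), self.i_max)
        i_term = self.ki * self.i_state
        # derivative on the measurement avoids spikes when the setpoint jumps
        if self.d_state is None:
            self.d_state = position
        d_term = self.kd * (self.d_state - position)
        self.d_state = position
        command = p_term + i_term + d_term
        # clamp the output to the actuator's command range
        return min(max(command, self.out_min), self.out_max)

pid = SimplePID(kp=1.0, ki=0.1, kd=0.05)
print(pid.update(error=0.5, position=0.0))  # 0.55 on the first call
```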
7949b6f36f9d405d6bf081ad66bdf7a1db847cc3
| 1,410
|
py
|
Python
|
core/arxiv/submission/domain/event/versioning/version_0_0_0_example.py
|
woutersj/arxiv-submission-core
|
6077ce4e0685d67ce7010800083a898857158112
|
[
"MIT"
] | 14
|
2019-05-26T22:52:17.000Z
|
2021-11-05T12:26:46.000Z
|
core/arxiv/submission/domain/event/versioning/version_0_0_0_example.py
|
woutersj/arxiv-submission-core
|
6077ce4e0685d67ce7010800083a898857158112
|
[
"MIT"
] | 30
|
2018-01-31T19:16:08.000Z
|
2018-12-08T08:41:04.000Z
|
core/arxiv/submission/domain/event/versioning/version_0_0_0_example.py
|
cul-it/arxiv-submission-core
|
6077ce4e0685d67ce7010800083a898857158112
|
[
"MIT"
] | 8
|
2019-01-10T22:01:39.000Z
|
2021-11-20T21:44:51.000Z
|
"""
An example version mapping module.
This module gathers together all event mappings for version 0.0.0.
The mappings in this module will never be used, since there are no
data prior to version 0.0.0.
"""
from typing import Tuple
from ._base import BaseVersionMapping, EventData
VERSION = '0.0.0'
class SetTitleExample(BaseVersionMapping):
"""Perform no changes whatsoever to the `title` field."""
class Meta:
"""Metadata about this mapping."""
event_version = VERSION
"""All of the mappings in this module are for the same version."""
event_type = 'SetTitle'
"""This mapping applies to :class:`.domain.event.SetTitle`."""
tests = [
({'event_version': '0.0.0', 'title': 'The title'},
{'event_version': '0.0.0', 'title': 'The best title!!'})
]
"""Expected changes to the ``title`` field."""
def transform_title(self, orig: EventData, key: str, val: str) \
-> Tuple[str, str]:
"""Make the title the best."""
parts = val.split()
return key, " ".join([parts[0], "best"] + parts[1:])
def transform(self, orig: EventData, xf: EventData) -> EventData:
"""Add some emphasis."""
ed = EventData()
for k, v in xf.items():
if isinstance(v, str):
v = f"{v}!!"
ed[k] = v # type: ignore
return ed
| 30
| 74
| 0.580142
|
7949b72832334e04c07b9843f516d96b6d8a57f1
| 2,837
|
py
|
Python
|
code/sa_experiment1b.py
|
mliang1987/Traveling-Salesman-Project
|
ed3789e37aafb5d254ce7fdbc512711db3d8351c
|
[
"MIT"
] | 1
|
2020-01-12T02:05:31.000Z
|
2020-01-12T02:05:31.000Z
|
code/sa_experiment1b.py
|
mliang1987/Traveling-Salesman-Project
|
ed3789e37aafb5d254ce7fdbc512711db3d8351c
|
[
"MIT"
] | null | null | null |
code/sa_experiment1b.py
|
mliang1987/Traveling-Salesman-Project
|
ed3789e37aafb5d254ce7fdbc512711db3d8351c
|
[
"MIT"
] | null | null | null |
####################################################################################################
# CSE 6140 - Fall 2019
# Rodrigo Alves Lima
# Shichao Liang
# Jeevanjot Singh
# Kevin Tynes
####################################################################################################
"""
This file contains the SQD experiment for Simulated Annealing.
For a set of fixed times, the algorithm iterates through specified qualities for 50 iterations,
calculating the probability of a solution of at least the specified quality being found.
Then, plots the result.
"""
import simulated_annealing as sa
import random
import time
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline, BSpline
from scipy.ndimage.filters import gaussian_filter1d
def run_sqd_experiment(city, optimal):
'''
Plots the SQD for a given city instance and the optimal path for that city.
'''
file_path = "Data/{}.tsp".format(city)
times = [1, 5, 10, 20]
qualities = [0, 0.05, 0.1, 0.2, 0.3]
df2 = pd.DataFrame(index = qualities, columns = times)
for max_time in times:
print("Running time",max_time)
p_values = []
for quality in qualities:
print("\tRunning quality",quality)
test_quality = math.floor((quality+1)*optimal)
experiment = []
for i in range(10):
sol, _, _ = sa.simulated_annealing_single(file_path, random.randint(1,100), time.time(), max_time, test_quality = test_quality)
experiment.append(sol<=test_quality)
t_count = experiment.count(True)
p = t_count / len(experiment)
p_values.append(p)
df2[max_time] = p_values
print("Smoothing out splines...")
for t in times:
df2[t] = gaussian_filter1d(df2[t].values.tolist(), sigma = 1)
print("Plotting...")
plt.figure()
plt.gcf().subplots_adjust(bottom=0.2)
plt.axis([min(qualities),max(qualities),-0.1,1.1])
plt.plot(df2[1], color = 'b', linewidth = 1.0)
plt.plot(df2[5], color = 'g', linewidth = 1.0)
plt.plot(df2[10], color = 'r', linewidth = 1.0)
plt.plot(df2[20], color = 'b', linewidth = 1.0, linestyle = "--")
#plt.plot(df2[1], color = 'g', linewidth = 1.0, linestyle = "--")
#plt.plot(df2[5], color = 'r', linewidth = 1.0, linestyle = "--")
plt.legend(["{}s".format(item) for item in times])
plt.title("Solution Quality Distributions for {}".format(city), fontsize = 10)
plt.ylabel("Probability(Solve)", fontsize = 8)
plt.xlabel("Relative Solution Quality [ratio]", fontsize = 8)
plt.savefig("sqd_ls1_{}.png".format(city))
if __name__ == "__main__":
run_sqd_experiment("Champaign", 52643)
#run_sqd_experiment("Atlanta", 2003763)
| 37.328947
| 143
| 0.602749
|
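Each plotted point in the script above is just an empirical probability: the fraction of the 10 trials whose tour cost met the quality target. A toy illustration of that estimate, with made-up numbers:

```python
# Illustrative only: ten hypothetical tour costs for Champaign (optimal 52643).
import math

optimal = 52643
quality = 0.005  # a hypothetical 0.5% relative-quality bound
test_quality = math.floor((quality + 1) * optimal)  # 52906
costs = [53100, 52790, 52650, 54020, 52700,
         52643, 52901, 52688, 53500, 52644]
p = sum(c <= test_quality for c in costs) / len(costs)
print(test_quality, p)  # 52906 0.7
```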
7949b73271ed69e12688ec79bc36cf4e7bfc27d7
| 5,038
|
py
|
Python
|
pyflux/gas/tests/gas_llt_tests_skewt.py
|
ThomasHoppe/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 2,091
|
2016-04-01T02:52:10.000Z
|
2022-03-29T11:38:15.000Z
|
pyflux/gas/tests/gas_llt_tests_skewt.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 160
|
2016-04-26T14:52:18.000Z
|
2022-03-15T02:09:07.000Z
|
pyflux/gas/tests/gas_llt_tests_skewt.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 264
|
2016-05-02T14:03:31.000Z
|
2022-03-29T07:48:20.000Z
|
import numpy as np
import pyflux as pf
noise = np.random.normal(0,1,200)
data = np.zeros(200)
for i in range(1,len(data)):
data[i] = 1.0*data[i-1] + noise[i]
countdata = np.random.poisson(3,200)
def test_skewt_couple_terms():
"""
Tests latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit()
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_couple_terms_integ():
"""
Tests latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GASLLT(data=data, integ=1, family=pf.Skewt())
x = model.fit()
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_bbvi():
"""
    Tests a GAS model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
def test_skewt_bbvi_mini_batch():
"""
    Tests a GAS model estimated with BBVI with mini-batches, and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
def test_skewt_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_skewt_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_skewt_mh():
"""
    Tests a GAS model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
""" Uncomment in future if Skewt becomes more robust
def test_skewt_laplace():
    Tests a GAS model estimated with Laplace approximation and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
def test_skewt_pml():
"""
Tests a PML model estimated with Laplace approximation and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_skewt_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_skewt_predict_nans():
"""
Tests that the predictions are not nans
model = pf.GASLLT(data=data, family=pf.Skewt())
"""
"""
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
"""
"""
def test_skewt_predict_is_nans():
Tests that the in-sample predictions are not nans
model = pf.GASLLT(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
"""
| 33.364238
| 90
| 0.665939
|
7949b8a55d67807673992517aa6c7d22f6466fba
| 4,293
|
py
|
Python
|
torch_glow/tests/nodes/quantized_linear_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | 2
|
2020-03-23T21:04:00.000Z
|
2020-04-02T22:49:49.000Z
|
torch_glow/tests/nodes/quantized_linear_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/quantized_linear_test.py
|
YonginKwon/glow
|
7d316d028e1792534416755bf80af422adccdaa9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests.utils import jitVsGlow
import unittest
class TestQuantizedLinear(unittest.TestCase):
def test_quantized_linear_packed(self):
"""Basic test of the PyTorch quantized::linear Node on Glow."""
q = torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8)
dq = torch.nn.quantized.DeQuantize()
linear = torch.nn.Linear(5, 5)
linear.weight.data.fill_(1.2)
linear.bias.data.fill_(3.0)
model = torch.nn.Sequential(q, linear, dq)
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.reshape(x, [5, 5])
jitVsGlow(
model,
x,
expected_fused_ops={
"aten::quantize_per_tensor",
"quantized::linear",
"aten::dequantize",
},
)
def test_quantized_linear_packed_dq_cut(self):
"""Basic test of the PyTorch quantized::linear Node on Glow, with dequantize excluded. """
q = torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8)
dq = torch.nn.quantized.DeQuantize()
linear = torch.nn.Linear(5, 5)
linear.weight.data.fill_(1.2)
linear.bias.data.fill_(3.0)
model = torch.nn.Sequential(q, linear, dq)
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.reshape(x, [5, 5])
jitVsGlow(
model,
x,
expected_fused_ops={
"aten::quantize_per_tensor",
"quantized::linear",
},
black_list=[
"aten::dequantize",
]
)
    @unittest.skip(reason="random input could cause flaky results")
def test_quantized_linear_random_input(self):
"""Basic test of the PyTorch quantized::linear Node on Glow."""
def test_f(inputs, weights, bias=None):
q_int = torch.nn.quantized.Quantize(
scale=1 / 13, zero_point=0, dtype=torch.qint8
)
q_uint = torch.nn.quantized.Quantize(
scale=1 / 13, zero_point=10, dtype=torch.quint8
)
dq = torch.nn.quantized.DeQuantize()
q_inputs = q_uint(inputs)
q_weights = q_int(weights)
return dq(torch.nn.quantized.functional.linear(q_inputs, q_weights, bias))
for _ in range(100):
inputs = torch.randn(7, 7)
weights = torch.randn(7, 7)
bias = torch.tensor([1, 1, 1, 1, 1, 1, 1], dtype=torch.float) * 0.1
jitVsGlow(
test_f,
inputs,
weights,
bias,
expected_fused_ops={
"glow::unpacked_quantized_linear",
"aten::quantize_per_tensor",
"aten::dequantize",
},
)
def test_quantized_linear_packed_rowwise(self):
"""Basic test of the PyTorch quantized::linear Node with rowwise quantized
packed weights on Glow."""
linear = torch.nn.Linear(6, 5)
linear.weight.data.random_(0, 100)
linear.bias.data.random_(0, 10)
x = torch.tensor(range(30), dtype=torch.float)
x = torch.reshape(x, [5, 6])
model = torch.quantization.QuantWrapper(linear)
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
jitVsGlow(model, x, expected_fused_ops={"aten::quantize_per_tensor",
"quantized::linear",
"aten::dequantize"})
| 33.023077
| 98
| 0.566271
|
7949bb2e19e10dd58a78aecae386dd1caffc6181
| 768
|
py
|
Python
|
tests/demo/demoproject/urls.py
|
GigiusB/djaxei
|
2fd181acd8c10c259d4ffdfbcef03ea959222d6f
|
[
"MIT"
] | null | null | null |
tests/demo/demoproject/urls.py
|
GigiusB/djaxei
|
2fd181acd8c10c259d4ffdfbcef03ea959222d6f
|
[
"MIT"
] | null | null | null |
tests/demo/demoproject/urls.py
|
GigiusB/djaxei
|
2fd181acd8c10c259d4ffdfbcef03ea959222d6f
|
[
"MIT"
] | null | null | null |
"""demoproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| 34.909091
| 79
| 0.705729
|
7949bb95a685cce66419d9bac0f8a6396dd72ecb
| 1,071
|
py
|
Python
|
ait/dsn/sle/__init__.py
|
kmarwah/AIT-DSN
|
338614dfef6713431f79d6daaffc0e4303be0ced
|
[
"MIT"
] | 12
|
2019-01-30T17:43:51.000Z
|
2022-02-23T03:36:57.000Z
|
ait/dsn/sle/__init__.py
|
kmarwah/AIT-DSN
|
338614dfef6713431f79d6daaffc0e4303be0ced
|
[
"MIT"
] | 117
|
2018-04-16T16:11:48.000Z
|
2022-03-31T18:21:24.000Z
|
ait/dsn/sle/__init__.py
|
kmarwah/AIT-DSN
|
338614dfef6713431f79d6daaffc0e4303be0ced
|
[
"MIT"
] | 12
|
2018-08-30T15:52:56.000Z
|
2022-01-12T19:52:04.000Z
|
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2017, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
import sys
from .raf import RAF
from .rcf import RCF
from .cltu import CLTU
# Default port to which Frames will be emitted by SLE RAF/RCF ('FRAM')
sys.modules['ait'].DEFAULT_FRAME_PORT = 3726
# Default frame type for incoming frame bytearrays
sys.modules['ait'].DEFAULT_FRAME_TYPE = 'TMTransFrame'
| 41.192308
| 78
| 0.79085
|
7949bbec5becbee936601c098d76b594bc90d038
| 453
|
py
|
Python
|
conanfile.py
|
rossb34/wildcat-ws
|
f1636982fcbac21ce6231858555f4544641c88b8
|
[
"MIT"
] | null | null | null |
conanfile.py
|
rossb34/wildcat-ws
|
f1636982fcbac21ce6231858555f4544641c88b8
|
[
"MIT"
] | null | null | null |
conanfile.py
|
rossb34/wildcat-ws
|
f1636982fcbac21ce6231858555f4544641c88b8
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
class NetConan(ConanFile):
name = "wildcat-ws"
version = "0.2.2"
license = "MIT"
author = "<Ross Bennett> <rossbennett34@gmail.com>"
url = "https://github.com/rossb34/wildcat-ws"
description = "Web socket library"
exports_sources = "include/*"
no_copy_source = True
def package(self):
self.copy("*.hpp")
def package_id(self):
self.info.header_only()
| 23.842105
| 55
| 0.637969
|
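Because package_id() marks the package header-only, a consumer never needs a binary configuration for it; it only declares a requirement. A hypothetical consumer recipe under the same Conan 1.x API used above (the project name and generator are assumptions):

```python
# Hypothetical consumer recipe (Conan 1.x, matching `from conans import ...`).
from conans import ConanFile

class AppConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    requires = "wildcat-ws/0.2.2"  # header-only dependency from the row above
    generators = "cmake"
```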
7949bbfd4758f87e5c6fed7fe3d7bd5c2b97c2e2
| 778
|
py
|
Python
|
anstoss3k/engine/definitions.py
|
ericziethen/anstoss3k-poc
|
e28424556258b24ad4e513011b7597f150ad9719
|
[
"MIT"
] | null | null | null |
anstoss3k/engine/definitions.py
|
ericziethen/anstoss3k-poc
|
e28424556258b24ad4e513011b7597f150ad9719
|
[
"MIT"
] | 36
|
2020-02-13T09:23:36.000Z
|
2022-03-12T00:16:20.000Z
|
anstoss3k/engine/definitions.py
|
ericziethen/anstoss3k-poc
|
e28424556258b24ad4e513011b7597f150ad9719
|
[
"MIT"
] | null | null | null |
import enum
@enum.unique
class GameAction(enum.Enum):
# pylint: disable=invalid-name
FINISH_MOVE = 'Finish Move'
@enum.unique
class GameState(enum.Enum):
# pylint: disable=invalid-name
UNKNOWN = 'Unknown State'
TEAM_SELECTION = 'Team Selection'
PROGRESS_WEEK = 'Week Progress'
MATCH_DAY_PREVIEW = 'Match Day Preview'
MATCH_DAY_RESULTS = 'Match Day Results'
MATCH_DAY_TABLES = 'Tables'
SEASON_END = 'Season End'
class State():
def __init__(self, data):
self.data = data
def handle_input(self, action):
'''Process the input and update the current state.'''
raise NotImplementedError
def next_state(self, action):
'''Decide which state to go next to.'''
raise NotImplementedError
| 22.882353
| 61
| 0.670951
|
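State in the file above is an abstract base: subclasses supply handle_input and next_state. A minimal sketch of a concrete state wired to the enums above (the transition and data key chosen here are illustrative, not taken from the game engine):

```python
# Illustrative subclass only; the engine's real states are not in this file.
class TeamSelectionState(State):
    def handle_input(self, action):
        '''Process the input and update the current state.'''
        if action == GameAction.FINISH_MOVE:
            self.data['team_locked'] = True  # hypothetical data key

    def next_state(self, action):
        '''Decide which state to go next to.'''
        if action == GameAction.FINISH_MOVE:
            return GameState.PROGRESS_WEEK
        return GameState.TEAM_SELECTION
```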
7949bc1912f9eb09f3739364371cf20d489c2051
| 6,502
|
py
|
Python
|
pyEX/client.py
|
jay-johnson/pyEX
|
b326cfeffa06c3699bc15168b539323f4fd9dec8
|
[
"Apache-2.0"
] | null | null | null |
pyEX/client.py
|
jay-johnson/pyEX
|
b326cfeffa06c3699bc15168b539323f4fd9dec8
|
[
"Apache-2.0"
] | null | null | null |
pyEX/client.py
|
jay-johnson/pyEX
|
b326cfeffa06c3699bc15168b539323f4fd9dec8
|
[
"Apache-2.0"
] | null | null | null |
import os
from functools import partial
from .common import PyEXception, _getJson, _USAGE_TYPES
from .refdata import symbols, iexSymbols, symbolsDF, iexSymbolsDF, \
symbolsList, iexSymbolsList, corporateActions, corporateActionsDF, dividends as refDividends, dividendsDF as refDividendsDF, nextDayExtDate, nextDayExtDateDF, directory, directoryDF
from .markets import markets, marketsDF
from .stats import stats, statsDF, recent, recentDF, records, recordsDF, summary, summaryDF, daily, dailyDF
from .stocks import balanceSheet, balanceSheetDF, batch, batchDF, bulkBatch, bulkBatchDF, book, bookDF, cashFlow, cashFlowDF, chart, chartDF, \
bulkMinuteBars, bulkMinuteBarsDF, company, companyDF, collections, collectionsDF, crypto, cryptoDF, delayedQuote, delayedQuoteDF, dividends, dividendsDF, \
earnings, earningsDF, earningsToday, earningsTodayDF, estimates, estimatesDF, spread, spreadDF, financials, financialsDF, incomeStatement, incomeStatementDF, ipoToday, ipoTodayDF, \
ipoUpcoming, ipoUpcomingDF, threshold, thresholdDF, shortInterest, shortInterestDF, marketShortInterest, marketShortInterestDF, keyStats, keyStatsDF, \
largestTrades, largestTradesDF, list, listDF, logo, logoPNG, logoNotebook, news, newsDF, marketNews, marketNewsDF, ohlc, ohlcDF, marketOhlc, marketOhlcDF, \
peers, peersDF, yesterday, yesterdayDF, marketYesterday, marketYesterdayDF, price, priceDF, quote, quoteDF, relevant, relevantDF, sectorPerformance, \
sectorPerformanceDF, splits, splitsDF, volumeByVenue, volumeByVenueDF
from .marketdata.sse import topsSSE, lastSSE, deepSSE, tradesSSE
_INCLUDE_FUNCTIONS = [
# Refdata
('symbols', symbols),
('iexSymbols', iexSymbols),
('symbolsDF', symbolsDF),
('iexSymbolsDF', iexSymbolsDF),
('symbolsList', symbolsList),
('iexSymbolsList', iexSymbolsList),
('corporateActions', corporateActions),
('corporateActionsDF', corporateActionsDF),
('refDividends', refDividends),
('refDividendsDF', refDividendsDF),
('nextDayExtDate', nextDayExtDate),
('nextDayExtDateDF', nextDayExtDateDF),
('directory', directory),
('directoryDF', directoryDF),
# Markets
('markets', markets),
('marketsDF', marketsDF),
# Stats
('stats', stats),
('statsDF', statsDF),
('recent', recent),
('recentDF', recentDF),
('records', records),
('recordsDF', recordsDF),
('summary', summary),
('summaryDF', summaryDF),
('daily', daily),
('dailyDF', dailyDF),
# Stocks
('balanceSheet', balanceSheet),
('balanceSheetDF', balanceSheetDF),
('batch', batch),
('batchDF', batchDF),
('bulkBatch', bulkBatch),
('bulkBatchDF', bulkBatchDF),
('book', book),
('bookDF', bookDF),
('cashFlow', cashFlow),
('cashFlowDF', cashFlowDF),
('chart', chart),
('chartDF', chartDF),
('bulkMinuteBars', bulkMinuteBars),
('bulkMinuteBarsDF', bulkMinuteBarsDF),
('company', company),
('companyDF', companyDF),
('collections', collections),
('collectionsDF', collectionsDF),
('crypto', crypto),
('cryptoDF', cryptoDF),
('delayedQuote', delayedQuote),
('delayedQuoteDF', delayedQuoteDF),
('dividends', dividends),
('dividendsDF', dividendsDF),
('earnings', earnings),
('earningsDF', earningsDF),
('earningsToday', earningsToday),
('earningsTodayDF', earningsTodayDF),
('spread', spread),
('spreadDF', spreadDF),
('financials', financials),
('financialsDF', financialsDF),
('incomeStatement', incomeStatement),
('incomeStatementDF', incomeStatementDF),
('ipoToday', ipoToday),
('ipoTodayDF', ipoTodayDF),
('ipoUpcoming', ipoUpcoming),
('ipoUpcomingDF', ipoUpcomingDF),
('threshold', threshold),
('thresholdDF', thresholdDF),
('shortInterest', shortInterest),
('shortInterestDF', shortInterestDF),
('marketShortInterest', marketShortInterest),
('marketShortInterestDF', marketShortInterestDF),
('estimates', estimates),
('estimatesDF', estimatesDF),
('keyStats', keyStats),
('keyStatsDF', keyStatsDF),
('largestTrades', largestTrades),
('largestTradesDF', largestTradesDF),
('list', list),
('listDF', listDF),
('logo', logo),
('logoPNG', logoPNG),
('logoNotebook', logoNotebook),
('news', news),
('newsDF', newsDF),
('marketNews', marketNews),
('marketNewsDF', marketNewsDF),
('ohlc', ohlc),
('ohlcDF', ohlcDF),
('marketOhlc', marketOhlc),
('marketOhlcDF', marketOhlcDF),
('peers', peers),
('peersDF', peersDF),
('yesterday', yesterday),
('yesterdayDF', yesterdayDF),
('marketYesterday', marketYesterday),
('marketYesterdayDF', marketYesterdayDF),
('price', price),
('priceDF', priceDF),
('quote', quote),
('quoteDF', quoteDF),
('relevant', relevant),
('relevantDF', relevantDF),
('sectorPerformance', sectorPerformance),
('sectorPerformanceDF', sectorPerformanceDF),
('splits', splits),
('splitsDF', splitsDF),
('volumeByVenue', volumeByVenue),
('volumeByVenueDF', volumeByVenueDF),
# SSE Streaming
('topsSSE', topsSSE),
('lastSSE', lastSSE),
('deepSSE', deepSSE),
('tradesSSE', tradesSSE),
]
class Client(object):
'''IEX Cloud Client
Client has access to all methods provided as standalone, but in an authenticated way
Args:
        api_token (string): api token (can be picked up from the IEX_TOKEN environment variable)
        version (string): api version to use (defaults to beta)
'''
def __init__(self, api_token=None, version='beta'):
self._token = api_token or os.environ.get('IEX_TOKEN', '')
if not self._token:
raise PyEXception('API Token missing or not in environment (IEX_TOKEN)')
self._version = version
for name, method in _INCLUDE_FUNCTIONS:
setattr(self, name, partial(self.bind, meth=method))
def bind(self, *args, meth=None, **kwargs):
return meth(token=self._token, version=self._version, *args, **kwargs)
def account(self):
return _getJson('account/metadata', self._token, self._version)
def usage(self, type=None):
if type:
if type not in _USAGE_TYPES:
raise PyEXception('type not recognized: {}'.format(type))
return _getJson('account/usage/{type}'.format(type=type), self._token, self._version)
return _getJson('account/usage/messages', self._token, self._version)
| 38.702381
| 185
| 0.675177
|
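At construction time, every entry in _INCLUDE_FUNCTIONS above becomes a bound instance method via functools.partial, with bind() injecting the stored token and version into each call. A hypothetical session (the token string is a placeholder):

```python
# Hypothetical usage; 'quote' is one of the standalone functions attached
# in __init__, so the token/version are supplied automatically by bind().
from pyEX.client import Client

c = Client(api_token='YOUR_IEX_TOKEN', version='beta')
q = c.quote('AAPL')  # equivalent to quote('AAPL', token=..., version='beta')
print(c.usage())     # account message usage via account/usage/messages
```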
7949bc23e335994e38769e22908011bccccd621f
| 27,183
|
py
|
Python
|
conga/tcrdist/make_10x_clones_file.py
|
scfurl/conga
|
eaf7b0a583180a72f7c81f981b29b53781507a12
|
[
"MIT"
] | null | null | null |
conga/tcrdist/make_10x_clones_file.py
|
scfurl/conga
|
eaf7b0a583180a72f7c81f981b29b53781507a12
|
[
"MIT"
] | null | null | null |
conga/tcrdist/make_10x_clones_file.py
|
scfurl/conga
|
eaf7b0a583180a72f7c81f981b29b53781507a12
|
[
"MIT"
] | null | null | null |
from .basic import *
from .all_genes import all_genes
#import parse_tsv
from collections import Counter
from itertools import chain
import sys
import pandas as pd
from ..util import organism2vdj_type, IG_VDJ_TYPE
MIN_CDR3_LEN = 6 # otherwise tcrdist barfs; actually we could also set this to 5 and be OK
def show(tcr):
"For debugging"
if type( tcr[0] ) is str:
return ' '.join(tcr[:3])
else:
return ' '.join( show(x) for x in tcr )
def fixup_gene_name( gene, gene_suffix, expected_gene_names ):
if gene in expected_gene_names:
return gene # ALL DONE
if '*' not in gene:
gene += gene_suffix
if gene in expected_gene_names:
return gene # ALL DONE
vj = gene[3]
assert vj in 'VJ'
if vj=='V' and 'DV' in gene:
# sometimes they just delete the '/'
# sometimes they convert it into a '-'
new_gene = gene[:gene.index('DV')]+'/'+gene[gene.index('DV'):]
if new_gene in expected_gene_names:
gene = new_gene
else:
new_gene = gene[:gene.index('DV')-1]+'/'+gene[gene.index('DV'):]
if new_gene in expected_gene_names:
gene = new_gene
else:
print('trouble parsing V gene with DV in it:', gene)
return gene # may still not be in expected_gene_names, will check for that later
def get_ab_from_10x_chain(chain, organism):
''' Returns None if the chain is not valid for this 'organism'
'''
if organism in ['human', 'mouse']:
if chain in ['TRA','TRB']:
return chain[2]
else:
return None
elif organism in ['human_gd','mouse_gd']:
if chain in ['TRA','TRG','TRD']:
return 'A' if chain=='TRG' else 'B'
else:
return None
elif organism in ['human_ig','mouse_ig']:
if chain in ['IGH', 'IGK', 'IGL']:
return 'B' if chain=='IGH' else 'A'
else:
return None
else:
print('unrecognized organism in get_ab_from_10x_chain:', organism)
sys.exit()
def read_tcr_data(
organism,
contig_annotations_csvfile,
consensus_annotations_csvfile,
allow_unknown_genes = False,
verbose = False,
prefix_clone_ids_with_tcr_type = False,
):
""" Parse tcr data, only taking 'productive' tcrs
Returns:
clonotype2tcrs, clonotype2barcodes
"""
expected_gene_names = set(all_genes[organism].keys())
#from cdr3s_human import all_align_fasta
gene_suffix = '*01' # may not be used
if prefix_clone_ids_with_tcr_type:
if organism2vdj_type[organism] == IG_VDJ_TYPE:
clone_id_prefix = 'bcr_'
else:
clone_id_prefix = 'tcr_'
else:
clone_id_prefix = ''
# read the contig annotations-- map from clonotypes to barcodes
# barcode,is_cell,contig_id,high_confidence,length,chain,v_gene,d_gene,j_gene,c_gene,full_length,productive,cdr3,cdr3_nt,reads,umis,raw_clonotype_id,raw_consensus_id
# AAAGATGGTCTTCTCG-1,True,AAAGATGGTCTTCTCG-1_contig_1,True,695,TRB,TRBV5-1*01,TRBD2*02,TRBJ2-3*01,TRBC2*01,True,True,CASSPLAGYAADTQYF,TGCGCCAGCAGCCCCCTAGCGGGATACGCAGCAGATACGCAGTATTTT,9427,9,clonotype14,clonotype14_consensus_1
assert exists( contig_annotations_csvfile )
#_, lines = parse_csv_file(contig_annotations_csvfile)
df = pd.read_csv(contig_annotations_csvfile)
df['productive'] = df['productive'].astype(str) #sometimes it already is if there are 'Nones' in there...
clonotype2barcodes = {}
clonotype2tcrs_backup = {} ## in case we dont have a consensus_annotations_csvfile
for l in df.itertuples():
bc = l.barcode
clonotype = clone_id_prefix + l.raw_clonotype_id
# annoying: pandas sometimes converts to True/False booleans and sometimes not.
assert l.productive in [ 'None', 'False', 'True']
if clonotype =='None':
continue
if clonotype not in clonotype2barcodes:
clonotype2barcodes[clonotype] = []
if bc in clonotype2barcodes[clonotype]:
pass
#print 'repeat barcode'
else:
clonotype2barcodes[clonotype].append( bc )
        ## experimenting here ########################################
if l.productive != 'True':
continue
if l.cdr3.lower() == 'none' or l.cdr3_nt.lower() == 'none':
continue
chain = l.chain
ab = get_ab_from_10x_chain(chain, organism)
if ab is None:
continue
if clonotype not in clonotype2tcrs_backup:
clonotype2tcrs_backup[ clonotype ] = {'A':Counter(), 'B':Counter() }
# stolen from below
vg = fixup_gene_name(l.v_gene, gene_suffix, expected_gene_names)
jg = fixup_gene_name(l.j_gene, gene_suffix, expected_gene_names)
if vg not in expected_gene_names:
print('unrecognized V gene:', organism, vg)
if not allow_unknown_genes:
continue
if jg not in expected_gene_names:
print('unrecognized J gene:', organism, jg)
if not allow_unknown_genes:
continue
#assert vg in all_align_fasta[organism]
#assert jg in all_align_fasta[organism]
tcr_chain = ( vg, jg, l.cdr3, l.cdr3_nt.lower() )
clonotype2tcrs_backup[clonotype][ab][tcr_chain] += int(l.umis)
for id in clonotype2tcrs_backup:
for ab in 'AB':
for t1,count1 in clonotype2tcrs_backup[id][ab].items():
for t2, count2 in clonotype2tcrs_backup[id][ab].items():
if t2<=t1:continue
if t1[3] == t2[3]:
print('repeat??', count1, count2, t1, t2)
if consensus_annotations_csvfile is None:
clonotype2tcrs = clonotype2tcrs_backup
else:
## now read details on the individual chains for each clonotype
# ==> tcr/human/JCC176_TX2_TCR_consensus_annotations.csv <==
# clonotype_id,consensus_id,length,chain,v_gene,d_gene,j_gene,c_gene,full_length,productive,cdr3,cdr3_nt,reads,umis
# clonotype100,clonotype100_consensus_1,550,TRB,TRBV24-1*01,TRBD1*01,TRBJ2-7*01,TRBC2*01,True,True,CATSDPGQGGYEQYF,TGTGCCACCAGTGACCCCGGACAGGGAGGATACGAGCAGTACTTC,8957,9
assert exists(consensus_annotations_csvfile)
df = pd.read_csv( consensus_annotations_csvfile )
df['productive'] = df['productive'].astype(str) #sometimes it already is if there are 'Nones' in there...
## first get clonotypes with one alpha and one beta
clonotype2tcrs = {}
for l in df.itertuples():
assert l.productive in [ 'None', 'False', 'True']
if l.productive == 'True':
id = l.clonotype_id
if id not in clonotype2tcrs:
# dictionaries mapping from tcr to umi-count
clonotype2tcrs[id] = { 'A':Counter(), 'B':Counter() } #, 'G':[], 'D': [] }
assert id in clonotype2barcodes
ch = l.chain
ab = get_ab_from_10x_chain(ch, organism)
if ab is None:
print('skipline:', consensus_annotations_csvfile, ch, l.v_gene, l.j_gene)
continue
vg = fixup_gene_name(l.v_gene, gene_suffix, expected_gene_names)
jg = fixup_gene_name(l.j_gene, gene_suffix, expected_gene_names)
if vg not in expected_gene_names:
print('unrecognized V gene:', organism, vg)
if not allow_unknown_genes:
continue
if jg not in expected_gene_names:
print('unrecognized J gene:', organism, jg)
if not allow_unknown_genes:
continue
#assert vg in all_align_fasta[organism]
#assert jg in all_align_fasta[organism]
tcr_chain = ( vg, jg, l.cdr3, l.cdr3_nt.lower() )
if tcr_chain not in clonotype2tcrs[id][ab]:
umis = int( l.umis )
clonotype2tcrs[id][ab][ tcr_chain ] = umis
old_umis = clonotype2tcrs_backup[id][ab][tcr_chain]
if umis != old_umis:
print('diff_umis:',umis, old_umis, id,ab,tcr_chain)
else:
print('repeat?',id,ab,tcr_chain)
# else:
# if l.productive not in [ 'None','False' ]:
# print('unproductive?',l.productive)
if verbose:
idl1 = sorted( clonotype2tcrs_backup.keys())
idl2 = sorted( clonotype2tcrs.keys())
print('same ids:', len(idl1), len(idl2), idl1==idl2)
for id in clonotype2tcrs_backup:
if id in clonotype2tcrs:
for ab in 'AB':
tl1 = sorted(clonotype2tcrs_backup[id][ab].keys())
tl2 = sorted(clonotype2tcrs[id][ab].keys())
if tl1 != tl2:
print('diffids:',id,ab,tl1,tl2)
return clonotype2tcrs, clonotype2barcodes
def read_tcr_data_batch(
organism,
metadata_file,
allow_unknown_genes = False,
verbose = False,
prefix_clone_ids_with_tcr_type = False,
):
""" Parse tcr data, only taking 'productive' tcrs
Returns:
clonotype2tcrs, clonotype2barcodes
"""
assert exists( metadata_file )
md = pd.read_csv(metadata_file, dtype=str)#"string")
if prefix_clone_ids_with_tcr_type:
if organism2vdj_type[organism] == IG_VDJ_TYPE:
clone_id_prefix = 'bcr_'
else:
clone_id_prefix = 'tcr_'
else:
clone_id_prefix = ''
# read in contig files and update suffix to match GEX matrix
contig_list = []
for x in range(len(md['file'])):
dfx = pd.read_csv( md.loc[ x , 'file'] ) #read contig_df
suffix = md.loc[ x , 'suffix'] # new suffix
barcodes = dfx['barcode'].str.split('-').str.get(0)
#hacky
dfx['barcode'] = barcodes + '-' + suffix
dfx['contig_id'] = barcodes + '-' + suffix + '_' + \
dfx['contig_id'].str.split('_').str.get(1) + \
'_' + dfx['contig_id'].str.split('_').str.get(2) # currently unused, but can't hurt
# giving each library a tag here really boosted the number of clones I got back
dfx['raw_clonotype_id'] = clone_id_prefix + dfx['raw_clonotype_id'] + '_' + suffix
dfx['raw_consensus_id'] = clone_id_prefix + dfx['raw_consensus_id'] + '_' + suffix # currently unused, can't hurt
contig_list.append(dfx)
df = pd.concat(contig_list)
expected_gene_names = set(all_genes[organism].keys())
#from cdr3s_human import all_align_fasta
gene_suffix = '*01' # may not be used
# read the contig annotations-- map from clonotypes to barcodes
# barcode,is_cell,contig_id,high_confidence,length,chain,v_gene,d_gene,j_gene,c_gene,full_length,productive,cdr3,cdr3_nt,reads,umis,raw_clonotype_id,raw_consensus_id
# AAAGATGGTCTTCTCG-1,True,AAAGATGGTCTTCTCG-1_contig_1,True,695,TRB,TRBV5-1*01,TRBD2*02,TRBJ2-3*01,TRBC2*01,True,True,CASSPLAGYAADTQYF,TGCGCCAGCAGCCCCCTAGCGGGATACGCAGCAGATACGCAGTATTTT,9427,9,clonotype14,clonotype14_consensus_1
#_, lines = parse_csv_file(contig_annotations_csvfile)
#df = pd.read_csv(contig_annotations_csvfile)
df['productive'] = df['productive'].astype(str) #sometimes it already is if there are 'Nones' in there...
clonotype2tcrs = {}
clonotype2barcodes = {}
for l in df.itertuples():
# the fields we use: barcode raw_clonotype_id productive cdr3 cdr3_nt chain v_gene j_gene umis
bc = l.barcode
clonotype = l.raw_clonotype_id
# annoying: pandas sometimes converts to True/False booleans and sometimes not.
assert l.productive in [ 'None', 'False', 'True']
if clonotype =='None':
continue
if clonotype not in clonotype2barcodes:
clonotype2barcodes[clonotype] = []
if bc in clonotype2barcodes[clonotype]:
pass
#print 'repeat barcode'
else:
clonotype2barcodes[clonotype].append( bc )
        ## experimenting here ########################################
if l.productive != 'True':
continue
if l.cdr3.lower() == 'none' or l.cdr3_nt.lower() == 'none':
continue
chain = l.chain
ab = get_ab_from_10x_chain(chain, organism)
if ab is None:
continue
if clonotype not in clonotype2tcrs:
clonotype2tcrs[ clonotype ] = {'A':Counter(), 'B':Counter() }
# stolen from below
vg = fixup_gene_name(l.v_gene, gene_suffix, expected_gene_names)
jg = fixup_gene_name(l.j_gene, gene_suffix, expected_gene_names)
if vg not in expected_gene_names:
print('unrecognized V gene:', organism, vg)
if not allow_unknown_genes:
continue
if jg not in expected_gene_names:
print('unrecognized J gene:', organism, jg)
if not allow_unknown_genes:
continue
#assert vg in all_align_fasta[organism]
#assert jg in all_align_fasta[organism]
tcr_chain = ( vg, jg, l.cdr3, l.cdr3_nt.lower() )
clonotype2tcrs[clonotype][ab][tcr_chain] += int(l.umis)
for id in clonotype2tcrs:
for ab in 'AB':
for t1,count1 in clonotype2tcrs[id][ab].items():
for t2, count2 in clonotype2tcrs[id][ab].items():
if t2<=t1:continue
if t1[3] == t2[3]:
print('repeat??', count1, count2, t1, t2)
return clonotype2tcrs, clonotype2barcodes
def _make_clones_file( organism, outfile, clonotype2tcrs, clonotype2barcodes, verbose=False ):
''' Make a clones file with information parsed from the 10X csv files
organism is one of ['mouse','human']
outfile is the name of the clones file to be created
'''
#tmpfile = outfile+'.tmp' # a temporary intermediate file
bc_mapfile = outfile+'.barcode_mapping.tsv'
outmap = open(bc_mapfile,'w')
outmap.write('clone_id\tbarcodes\n')
outfields = 'clone_id subject clone_size va_gene ja_gene va2_gene ja2_gene vb_gene jb_gene cdr3a cdr3a_nucseq cdr3a2 cdr3a2_nucseq cdr3b cdr3b_nucseq'\
.split()
extra_fields = 'alpha_umi alpha2_umi beta_umi num_alphas num_betas'.split()
outfields += extra_fields
# we used to make a temporary file and then run tcr-dist/file_converter.py on it
# for this slim version, just take the temporary file
#out = open(tmpfile,'w')
out = open(outfile,'w')
out.write('\t'.join( outfields )+'\n' )
for clonotype in sorted(clonotype2tcrs.keys()):
tcrs = clonotype2tcrs[clonotype]
if len(tcrs['A']) >= 1 and len(tcrs['B']) >= 1:
atcrs = tcrs['A'].most_common()
btcrs = tcrs['B'].most_common()
if len(atcrs)>1:
if verbose:
print('multiple alphas, picking top umi:',' '.join( str(x) for _,x in atcrs ))
atcr2, atcr2_umi = atcrs[1]
else:
atcr2, atcr2_umi = ('', '', '', ''), 0
if len(btcrs)>1 and verbose:
print('multiple betas, picking top umi:',' '.join( str(x) for _,x in btcrs ))
atcr, atcr_umi = atcrs[0]
btcr, btcr_umi = btcrs[0]
outl = {}
outl['clone_id'] = clonotype
outl['subject'] = 'UNK_S'
outl['clone_size'] = len(clonotype2barcodes[clonotype])
outl['va_gene'] = atcr[0]
outl['ja_gene'] = atcr[1]
outl['cdr3a'] = atcr[2]
outl['cdr3a_nucseq'] = atcr[3]
outl['alpha_umi'] = str(atcr_umi)
outl['va2_gene'] = atcr2[0]
outl['ja2_gene'] = atcr2[1]
outl['cdr3a2'] = atcr2[2]
outl['cdr3a2_nucseq'] = atcr2[3]
outl['alpha2_umi'] = str(atcr2_umi)
outl['num_alphas'] = str(len(atcrs))
outl['vb_gene'] = btcr[0]
outl['jb_gene'] = btcr[1]
outl['cdr3b'] = btcr[2]
outl['cdr3b_nucseq'] = btcr[3]
outl['beta_umi'] = str(btcr_umi)
outl['num_betas'] = str(len(btcrs))
if len(outl['cdr3a']) < MIN_CDR3_LEN or len(outl['cdr3b']) < MIN_CDR3_LEN:
if verbose:
print('Warning: skipping clonotype with short cdr3s: {} {} {}'\
.format(clonotype, outl['cdr3a'], outl['cdr3b'] ))
continue
out.write( '\t'.join(str(outl[x]) for x in outfields)+'\n')
outmap.write('{}\t{}\n'.format(clonotype,','.join(clonotype2barcodes[clonotype])))
out.close()
outmap.close()
# for the time being, go with the clones file we just made even though it doesn't have all the usual stupid info
def setup_filtered_clonotype_dicts(
clonotype2tcrs,
clonotype2barcodes,
min_repeat_count_fraction = 0.33,
verbose = False
):
''' returns new_clonotype2tcrs, new_clonotype2barcodes
'''
# get count of how many cells support each pairing
#
pairing_counts, pairing_counts_by_umi = Counter(), Counter()
for cid, tcrs in clonotype2tcrs.items():
size = len(clonotype2barcodes[cid])
for t1, umi1 in tcrs['A'].items():
for t2, umi2 in tcrs['B'].items():
pairing_counts[ (t1,t2) ] += size
pairing_counts_by_umi[ (t1,t2) ] += min(umi1, umi2)
# this is no longer going to be a bijection!
chain_partner = {}
valid_ab_pairings = set()
for ( (t1,t2), count ) in list(pairing_counts.most_common()): # make a copy for sanity
if t1 in chain_partner or t2 in chain_partner:
t1_p2 = chain_partner.get(t1,None)
t2_p2 = chain_partner.get(t2,None)
oldcount1 = pairing_counts[ (t1,t1_p2) ]
oldcount2 = pairing_counts[ (t2_p2,t2) ]
if count >= min_repeat_count_fraction * max(oldcount1,oldcount2):
# take it anyway -- second alpha or genuinely shared alpha?
if verbose:
print('take_rep_partners:', count, oldcount1, oldcount2, t1, t2, t1_p2, t2_p2)
# dont overwrite the old pairing... might not do either of these!
if t1 not in chain_partner:
chain_partner[t1] = t2
if t2 not in chain_partner:
chain_partner[t2] = t1
valid_ab_pairings.add( (t1, t2 ) )
else:
if verbose:
print('skip_rep_partners:', count, oldcount1, oldcount2, t1, t2, t1_p2, t2_p2)
else: # neither chain already in chain_partner
#
# NOTE: removed the code checking for TIES!!!!!!!!!!!
if verbose:
print('norep_partners:', count, t1, t2)
chain_partner[t1] = t2
chain_partner[t2] = t1
valid_ab_pairings.add( ( t1, t2 ) )
# now let's revisit the clonotypes
pairs_tuple2clonotypes = {}
ab_counts = Counter() # for diagnostics
for (clone_size, cid) in reversed( sorted( (len(y), x) for x,y in clonotype2barcodes.items() ) ):
if cid not in clonotype2tcrs:
#print('WHOAH missing tcrs for clonotype', clone_size, cid, clonotype2barcodes[cid])
continue
tcrs = clonotype2tcrs[cid]
was_good_clone = len(tcrs['A']) >= 1 and len(tcrs['B']) >= 1
pairs = []
for atcr in tcrs['A']:
for btcr in tcrs['B']:
if ( atcr,btcr ) in valid_ab_pairings:
pairs.append( (atcr,btcr) )
if pairs:
pairs_tuple = None
if len(pairs)>1:
alphas = set( x[0] for x in pairs )
betas = set( x[1] for x in pairs )
ab_counts[ (len(alphas),len(betas) ) ] += clone_size
if len(alphas) == 2 and len(betas) == 1: ## only allow double-alphas
assert len(pairs) == 2
pairs_tuple = tuple(x[1] for x in reversed(sorted( [ (pairing_counts[x],x) for x in pairs ] )))
assert len( pairs_tuple)==2
# confirm ordered by counts
assert pairing_counts[pairs_tuple[0]] >= pairing_counts[pairs_tuple[1]]
else:
ab_counts[ (1,1) ] += clone_size
pairs_tuple = tuple(pairs)
assert len(pairs_tuple) == 1
if pairs_tuple:
pairs_tuple2clonotypes.setdefault( pairs_tuple, [] ).append( cid )
else:
if verbose:
print('SKIPCLONE:', was_good_clone, cid, clone_size, pairs, 'bad_pairs')
else:
if verbose:
print('SKIPCLONE:', was_good_clone, cid, clone_size, 'no_valid_pairs')
## reorder pairs_tuples in the case of ties, using umis
reorder = []
for pt, clonotypes in pairs_tuple2clonotypes.items():
assert len(pt) in [1,2]
if len(pt) == 2:
assert pt[0][1] == pt[1][1] # same beta chain
at1, bt = pt[0]
at2, _ = pt[1]
count1, count2 = pairing_counts[(at1,bt)], pairing_counts[(at2,bt)]
assert count1 >= count2
if count1 == count2:
# no way to figure out which is the primary alpha!
# look at umis?
c1 = sum( clonotype2tcrs[x]['A'][at1] for x in clonotypes )
c2 = sum( clonotype2tcrs[x]['A'][at2] for x in clonotypes )
if verbose:
print('alphatie:', count1, count2, c1, c2, show(at1), show(at2), show(bt))
if c2 > c1:
reorder.append( pt )
for pt in reorder:
#print('reorder:', show(pt))
assert len(pt) == 2
rpt = (pt[1], pt[0])
assert pt in pairs_tuple2clonotypes and rpt not in pairs_tuple2clonotypes
pairs_tuple2clonotypes[rpt] = pairs_tuple2clonotypes[pt][:]
del pairs_tuple2clonotypes[pt]
## look for len1 pairs_tuples that overlap with two different len2 pairs_tuples
merge_into_pairs = []
for pt1 in pairs_tuple2clonotypes:
if len(pt1) == 1:
overlaps = []
for pt2 in pairs_tuple2clonotypes:
if len(pt2) == 2 and pt2[0] == pt1[0]:
overlaps.append( pt2 )
elif len(pt2) == 2 and pt2[1] == pt1[0]:
# this 'elif' was added 2020-12-12; before that we were missing some
# overlaps...
# print('MISSED OVERLAP:', pairing_counts[pt2[0]], pairing_counts[pt2[1]],
# pairing_counts_by_umi[pt2[0]], pairing_counts_by_umi[pt2[1]],
# show(pt1), show(pt2))
overlaps.append( pt2 )
if len(overlaps)>1:
if verbose:
print('badoverlaps:', len(overlaps), show(pt1), show(overlaps))
elif len(overlaps)==1:
pt2 = overlaps[0]
merge_into_pairs.append( (pt1,pt2) )
for pt1, pt2 in merge_into_pairs:
assert len(pt1) == 1 and len(pt2) == 2
#print('mergeinto:', show(pt1), show(pt2))
pairs_tuple2clonotypes[pt2].extend( pairs_tuple2clonotypes[pt1] )
del pairs_tuple2clonotypes[pt1]
## look for pairs_tuples that will give the same clones file line
if verbose:
for pt1, clonotypes in pairs_tuple2clonotypes.items():
for pt2 in pairs_tuple2clonotypes:
if pt1 < pt2 and pt1[0] == pt2[0]:
print('overlap:', len(pt1), len(pt2), pt1, pt2)
## now setup new clonotype2tcrs, clonotype2barcodes mappings
new_clonotype2tcrs = {}
new_clonotype2barcodes = {}
for pairs_tuple, clonotypes in pairs_tuple2clonotypes.items():
c0 = clonotypes[0]
if len(clonotypes)>1:
if verbose:
print('merging:', ' '.join(clonotypes))
tcrs = {'A':Counter(), 'B':Counter()}
for (atcr,btcr) in pairs_tuple:
tcrs['A'][atcr] += pairing_counts[(atcr, btcr)]
tcrs['B'][btcr] += pairing_counts[(atcr, btcr)]
if len(pairs_tuple)==2:
a1, a2 = pairs_tuple[0][0], pairs_tuple[1][0]
if tcrs['A'][a1] == tcrs['A'][a2]:
tcrs['A'][a1] += 1 # ensure the desired order
else:
assert tcrs['A'][a1] > tcrs['A'][a2]
assert len(tcrs['A']) in [1,2]
assert len(tcrs['B']) == 1
assert c0 not in new_clonotype2tcrs
new_clonotype2tcrs[c0] = tcrs
new_clonotype2barcodes[c0] = list(chain( *(clonotype2barcodes[x] for x in clonotypes)))
# print 'new:', new_clonotype2barcodes[c0]
# for c in clonotypes:
# print 'old:', clonotype2barcodes[c]
# print len(new_clonotype2barcodes[c0]), sum( len(clonotype2barcodes[x]) for x in clonotypes)
assert len(new_clonotype2barcodes[c0]) == sum( len(clonotype2barcodes[x]) for x in clonotypes)
print('ab_counts:', ab_counts.most_common())
old_good_clonotypes = [ x for x,y in clonotype2tcrs.items() if len(y['A']) >= 1 and len(y['B']) >= 1 ]
old_num_barcodes = sum( len(clonotype2barcodes[x]) for x in old_good_clonotypes )
new_num_barcodes = sum( len(x) for x in list(new_clonotype2barcodes.values()) )
print('old_num_barcodes:', old_num_barcodes, 'new_num_barcodes:', new_num_barcodes)
return new_clonotype2tcrs, new_clonotype2barcodes
def make_10x_clones_file(
filtered_contig_annotations_csvfile,
organism,
clones_file, # the OUTPUT file, the one we're making
stringent = True, # dont believe the 10x clonotypes; reduce 'duplicated' and 'fake' clones
consensus_annotations_csvfile = None,
):
clonotype2tcrs, clonotype2barcodes = read_tcr_data( organism, filtered_contig_annotations_csvfile,
consensus_annotations_csvfile )
if stringent:
clonotype2tcrs, clonotype2barcodes = setup_filtered_clonotype_dicts( clonotype2tcrs, clonotype2barcodes )
_make_clones_file( organism, clones_file, clonotype2tcrs, clonotype2barcodes )
def make_10x_clones_file_batch(
metadata_file,
organism,
clones_file, # the OUTPUT file, the one we're making
stringent = True, # dont believe the 10x clonotypes; reduce 'duplicated' and 'fake' clones
):
clonotype2tcrs, clonotype2barcodes = read_tcr_data_batch( organism, metadata_file )
if stringent:
clonotype2tcrs, clonotype2barcodes = setup_filtered_clonotype_dicts( clonotype2tcrs, clonotype2barcodes )
_make_clones_file( organism, clones_file, clonotype2tcrs, clonotype2barcodes )
| 40.211538
| 229
| 0.588125
|
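End to end, the module above turns a 10x filtered_contig_annotations.csv into a tcrdist-style clones file plus a barcode-mapping TSV sidecar. A minimal invocation sketch (the paths are placeholders; the import path mirrors this row's repo path):

```python
# Placeholder paths; make_10x_clones_file also writes
# clones.tsv.barcode_mapping.tsv next to the clones file.
from conga.tcrdist.make_10x_clones_file import make_10x_clones_file

make_10x_clones_file(
    'filtered_contig_annotations.csv',  # 10x Cell Ranger VDJ output
    'human',                            # organism key into all_genes
    'clones.tsv',                       # the OUTPUT clones file
    stringent=True,  # re-derive clonotypes from observed chain pairings
)
```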
7949bc75d3c7d1b8a78edddba0cc11fba159845c
| 1,214
|
py
|
Python
|
ginger/app/api/v1/token.py
|
MiracleWong/flask-api
|
a3e14c666284a39b3a9992558c494b869f9d864f
|
[
"MIT"
] | null | null | null |
ginger/app/api/v1/token.py
|
MiracleWong/flask-api
|
a3e14c666284a39b3a9992558c494b869f9d864f
|
[
"MIT"
] | null | null | null |
ginger/app/api/v1/token.py
|
MiracleWong/flask-api
|
a3e14c666284a39b3a9992558c494b869f9d864f
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
'''
created by MiracleWong on 2019/2/8
'''
from flask import current_app, jsonify
from app.libs.enums import ClientTypeEnum
from app.libs.redprint import Redprint
from app.modules.user import User
from app.validators.forms import ClientForm
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
__author__ = 'MiracleWong'
api = Redprint('token')
@api.route('', methods=['POST'])
def get_token():
form = ClientForm().validate_for_api()
promise = {
ClientTypeEnum.USER_MAIL: User.verify,
}
identity = promise[ClientTypeEnum(form.type.data)](
form.account.data,
form.secret.data
)
expiration = current_app.config['TOKEN_EXPIRATION']
token = generate_auth_token(identity['uid'], form.type.data,
identity['scope'], expiration)
t = {
'token': token.decode("ascii")
}
return jsonify(t), 201
def generate_auth_token(uid, ac_type, scope=None, expiration=7200):
"""生成令牌"""
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({
'uid': uid,
'type': ac_type.value,
'scope': scope
})
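# --- Verification sketch (illustrative, not part of the original module) ---
# The decode side of generate_auth_token above: loads() checks both the
# signature and the expiry. SignatureExpired and BadSignature are the
# itsdangerous 1.x exception classes matching the Serializer imported above.
from itsdangerous import SignatureExpired, BadSignature

def verify_auth_token(token):
    s = Serializer(current_app.config['SECRET_KEY'])
    try:
        data = s.loads(token)
    except SignatureExpired:
        return None  # the token was valid but has expired
    except BadSignature:
        return None  # the token is invalid
    return data  # {'uid': ..., 'type': ..., 'scope': ...}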
| 26.977778
| 70
| 0.652389
|
7949bd1d0f14c8e86af0468e100e1aa311170d9d
| 10,084
|
py
|
Python
|
firexapp/submit/arguments.py
|
FireXStuff/firexapp
|
04be9f087ca405582be6107b2b345923e25e6f5f
|
[
"BSD-3-Clause"
] | 7
|
2019-02-15T22:00:10.000Z
|
2022-02-02T02:12:35.000Z
|
firexapp/submit/arguments.py
|
FireXStuff/firexapp
|
04be9f087ca405582be6107b2b345923e25e6f5f
|
[
"BSD-3-Clause"
] | 13
|
2019-03-11T20:28:08.000Z
|
2021-02-22T16:12:59.000Z
|
firexapp/submit/arguments.py
|
FireXStuff/firexapp
|
04be9f087ca405582be6107b2b345923e25e6f5f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import re
from firexkit.argument_conversion import ConverterRegister
from typing import Union
from firexapp.submit.console import setup_console_logging
logger = setup_console_logging(__name__)
def get_chain_args(other_args: []):
"""This function converts a flat list of --key value pairs into a dictionary"""
chain_arguments = {}
# Create arguments list for the chain
it = iter(other_args)
no_value_exception = None
for x in it:
if not x.startswith('-'):
if no_value_exception:
# the error was earlier
raise no_value_exception
raise ChainArgException('Error: Argument should start with a proper dash (- or --)\n%s' % x)
try:
value = next(it)
if str(value).startswith("-"):
# there might be an error. we'll find out later
no_value_exception = ChainArgException(
'Error: Arguments must have an accompanying value\n%s' % x)
except StopIteration:
raise ChainArgException('Error: Arguments must have an accompanying value\n%s' % x)
key = x.lstrip('-')
if not re.match('^[A-Za-z].*', key):
raise ChainArgException('Error: Argument should start with a letter\n%s' % key)
chain_arguments[key] = value
return chain_arguments
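# --- Usage sketch (illustrative, not part of the module) ---
# get_chain_args flattens "--key value" pairs into a dict, e.g.:
#     get_chain_args(['--plugins', 'my_plugin.py', '--retries', '3'])
#     -> {'plugins': 'my_plugin.py', 'retries': '3'}
# A key without a value, or one without a leading dash, raises ChainArgException.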
class ChainArgException(Exception):
pass
class InputConverter:
"""
This class uses a singleton object design to store converters which parse the cli arguments. Converter functions
are stored into the singleton InputConverter object by adding the @register decorator to the top of each desired
function.
"""
_global_instance = None
pre_load_was_run = False
@classmethod
    def instance(cls) -> ConverterRegister:  # used by tests only
"""Used for unit testing only"""
if cls._global_instance is None:
cls._global_instance = ConverterRegister()
return cls._global_instance
@classmethod
def register(cls, *args):
"""
Registers a callable object to be run during conversion. The callable should take in kwargs, and return a dict
with any changes to the input arguments, or None if no changes are necessary.
:Example single argument converter:
@InputConverter.register
@SingleArgDecorator('something')
def convert_something(arg_value):
arg_value = arg_value.upper()
return arg_value
        :Optionally, dependencies can be defined at registration:
@InputConverter.register('other_converter', 'and_another_converter')
@SingleArgDecorator('something')
def convert_something(arg_value):
arg_value = arg_value.upper()
return arg_value
        Conversion occurs on two occasions: before microservices are loaded, or after. You can explicitly mark a
        converter to run pre-loading or post-loading of all microservices by passing True (pre) or False (post)
        during registration. This design is used in the spirit of failing fast, providing early failure of runs before
        the bulk of microservices are imported. If a bool is not provided, the converter is registered to run
        pre-load unless loading has already occurred.
@InputConverter.register('other_converter', False)
@SingleArgDecorator('something')
def convert_something(arg_value):
...
return arg_value
When a conversion fails the given function can simply call raise to instruct the user how to correct their
inputs.
"""
for arg in args:
if not isinstance(arg, bool):
continue
if arg and cls.pre_load_was_run:
raise Exception("Pre-microservice load conversion has already been run. "
"You can only register post load")
preload = arg
break
else:
preload = not cls.pre_load_was_run
args = args + (preload,)
if preload:
for arg in args:
if not callable(arg):
continue
converter = arg
# special handling of single argument decorator
single_arg_decorator = getattr(converter, "single_arg_decorator", None)
if not single_arg_decorator:
continue
                # need to override the append method of the single argument converters
                old_append = converter.append

                # bind the loop variables as keyword-only defaults so each
                # converter's override captures its own objects (avoids the
                # late-binding closure bug when several converters register)
                def new_append(*more_args, converter=converter, old_append=old_append,
                               single_arg_decorator=single_arg_decorator):
                    # special handling of first post load call
                    if cls.pre_load_was_run:
                        # re-register this converter, but in post
                        single_arg_decorator.args.clear()
                        InputConverter.register(converter)
                        # restore original behaviour
                        converter.append = old_append
                    old_append(*more_args)

                converter.append = new_append
return cls.instance().register(*args)
@classmethod
def convert(cls, pre_load=None, **kwargs) -> dict:
"""
Activates conversion. kwargs provided are passed to any registered converter. This function should be called
        twice, and only twice: once with the initially loaded converters, and then with the secondary ones.
        :param pre_load: Used for testing. Defaults to None and is auto-populated.
"""
# Auto set whether this is preload, unless explicitly specified
pre_load = not cls.pre_load_was_run if pre_load is None else pre_load
if pre_load and cls.pre_load_was_run:
raise Exception("Pre-microservice conversion was already run")
cls.pre_load_was_run = True
return cls.instance().convert(pre_task=pre_load, **kwargs)
@InputConverter.register
def convert_booleans(kwargs):
"""Converts standard true/false/none values to bools and None"""
for key, value in kwargs.items():
if not isinstance(value, str):
continue
if value.upper() == 'TRUE':
value = True
elif value.upper() == 'FALSE':
value = False
elif value.upper() == 'NONE':
value = None
kwargs[key] = value
return kwargs
@InputConverter.register
def auto_load_pydev_debugging_plugin(kwargs):
if not sys.gettrace() or sys.gettrace().__class__.__module__ == "coverage":
return
logger.debug("Debug detected")
plugins = kwargs.get("plugins", "")
if "pydev_debug_plugin.py" in plugins:
logger.debug("Debug plugin already included")
return
# local and include the pydev debugging plugin
logger.debug("Auto-including debug plugin")
import firexapp.testing
debugging_plugin = os.path.join(os.path.dirname(firexapp.testing.__file__), "pydev_debug_plugin.py")
if plugins:
debugging_plugin = "," + debugging_plugin
return {"plugins": plugins + debugging_plugin}
_global_argument_whitelist = set()
def whitelist_arguments(argument_list: Union[str, list]):
"""
Function for adding argument keys to the global argument whitelist. Used during validation of input arguments
:param argument_list:List of argument keys to whitelist.
:type argument_list: list
"""
    if isinstance(argument_list, str):
        argument_list = [argument_list]
global _global_argument_whitelist
_global_argument_whitelist |= set(argument_list)
def find_unused_arguments(chain_args: {}, ignore_list: [], all_tasks: []):
"""
Function to detect any arguments that are not explicitly consumed by any microservice.
:note: This should be run AFTER all microservices have been loaded.
:param chain_args: The dictionary of chain args to check
:type chain_args: dict
:param ignore_list: A list of exception arguments that are acceptable. This usually includes application args.
:type ignore_list: list
:param all_tasks: A list of all microservices. Usually app.tasks
:return: A dictionary of un-applicable arguments
"""
import Levenshtein as Lev
if len(chain_args) == 0:
return {}, {}
    # avoid mutating the caller's list when folding in the global whitelist
    ignore_list = list(ignore_list) + list(_global_argument_whitelist)
# remove any whitelisted
unused_chain_args = chain_args.copy()
for std_arg in ignore_list:
if std_arg in unused_chain_args:
unused_chain_args.pop(std_arg)
# build up used chain arg list
used_chain_args = []
for _, task in all_tasks.items():
used_chain_args.extend(getattr(task, "required_args", []))
used_chain_args.extend(getattr(task, "optional_args", []))
# Loop through used args and remove any found in unused list
for used_arg in used_chain_args:
if used_arg in unused_chain_args:
unused_chain_args.pop(used_arg)
# Loop through remaining unused chain args and build near-match dict
close_matches = {}
for unused_arg in unused_chain_args:
close_match = {}
for used_arg in used_chain_args:
# for unused args less than 10 chars long, use distance method, otherwise use ratio method.
if len(unused_arg) < 10:
distance = Lev.distance(used_arg, unused_arg)
if distance < 3:
if not close_match or close_match['distance'] > distance:
close_match['arg'] = used_arg
close_match['distance'] = distance
else:
match_ratio = Lev.ratio(used_arg, unused_arg)
if match_ratio > 0.9:
if not close_match or close_match['ratio'] < match_ratio:
close_match['arg'] = used_arg
close_match['ratio'] = match_ratio
# Store the closest match in the returned dict
if close_match:
close_matches[unused_arg] = close_match['arg']
return unused_chain_args, close_matches
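# --- Usage sketch (illustrative, not part of the module) ---
# A hypothetical task whose optional_args catch a near-miss chain argument;
# calling the demo requires python-Levenshtein, imported lazily above.
def _demo_find_unused_arguments():
    class _Task:
        required_args = []
        optional_args = ['verbose', 'plugins']
    unused, close = find_unused_arguments(
        chain_args={'verbos': 'True', 'unrelated': '1'},
        ignore_list=[],
        all_tasks={'t1': _Task()},
    )
    # unused == {'verbos': 'True', 'unrelated': '1'}
    # close == {'verbos': 'verbose'}  (Lev.distance('verbose', 'verbos') == 1)
    return unused, close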
| 37.76779
| 119
| 0.640916
|
7949bd7c87d9717796d5d7dc76f3dac2d087527d
| 9,263
|
py
|
Python
|
test/tasks_classification_task_test.py
|
jlin27/ClassyVision-1
|
113ddb0b66471eb84add9af53751d9067786a7f0
|
[
"MIT"
] | null | null | null |
test/tasks_classification_task_test.py
|
jlin27/ClassyVision-1
|
113ddb0b66471eb84add9af53751d9067786a7f0
|
[
"MIT"
] | null | null | null |
test/tasks_classification_task_test.py
|
jlin27/ClassyVision-1
|
113ddb0b66471eb84add9af53751d9067786a7f0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import shutil
import tempfile
import unittest
from test.generic.config_utils import get_fast_test_task_config, get_test_task_config
from test.generic.utils import compare_model_state, compare_samples, compare_states
import torch
from classy_vision.dataset import build_dataset
from classy_vision.generic.util import get_checkpoint_dict
from classy_vision.hooks import CheckpointHook, LossLrMeterLoggingHook
from classy_vision.losses import build_loss
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer
from classy_vision.tasks import ClassificationTask, build_task
from classy_vision.trainer import LocalTrainer
class TestClassificationTask(unittest.TestCase):
def _compare_model_state(self, model_state_1, model_state_2, check_heads=True):
compare_model_state(self, model_state_1, model_state_2, check_heads)
def _compare_samples(self, sample_1, sample_2):
compare_samples(self, sample_1, sample_2)
def _compare_states(self, state_1, state_2, check_heads=True):
compare_states(self, state_1, state_2)
def setUp(self):
# create a base directory to write checkpoints to
self.base_dir = tempfile.mkdtemp()
def tearDown(self):
# delete all the temporary data created
shutil.rmtree(self.base_dir)
def test_build_task(self):
config = get_test_task_config()
task = build_task(config)
self.assertTrue(isinstance(task, ClassificationTask))
def test_hooks_config_builds_correctly(self):
config = get_test_task_config()
config["hooks"] = [{"name": "loss_lr_meter_logging"}]
task = build_task(config)
self.assertTrue(len(task.hooks) == 1)
self.assertTrue(isinstance(task.hooks[0], LossLrMeterLoggingHook))
def test_get_state(self):
config = get_test_task_config()
loss = build_loss(config["loss"])
task = (
ClassificationTask()
.set_num_epochs(1)
.set_loss(loss)
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
task.prepare()
task = build_task(config)
task.prepare()
def test_synchronize_losses_non_distributed(self):
"""
Tests that synchronize losses has no side effects in a non-distributed setting.
"""
test_config = get_fast_test_task_config()
task = build_task(test_config)
task.prepare()
old_losses = copy.deepcopy(task.losses)
task.synchronize_losses()
self.assertEqual(old_losses, task.losses)
def test_synchronize_losses_when_losses_empty(self):
config = get_fast_test_task_config()
task = build_task(config)
task.prepare()
task.set_use_gpu(torch.cuda.is_available())
# Losses should be empty when creating task
self.assertEqual(len(task.losses), 0)
task.synchronize_losses()
def test_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the train_steps
run the same way after loading from a checkpoint.
"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task_2 = build_task(config).set_hooks([LossLrMeterLoggingHook()])
task.set_use_gpu(torch.cuda.is_available())
# prepare the tasks for the right device
task.prepare()
# test in both train and test mode
for _ in range(2):
task.advance_phase()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
task_2.prepare()
# task 2 should have the same state
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
# this tests that both states' iterators return the same samples
sample = next(task.get_data_iterator())
sample_2 = next(task_2.get_data_iterator())
self._compare_samples(sample, sample_2)
# test that the train step runs the same way on both states
# and the loss remains the same
task.train_step()
task_2.train_step()
self._compare_states(task.get_classy_state(), task_2.get_classy_state())
def test_final_train_checkpoint(self):
"""Test that a train phase checkpoint with a where of 1.0 can be loaded"""
config = get_fast_test_task_config()
task = build_task(config).set_hooks(
[CheckpointHook(self.base_dir, {}, phase_types=["train"])]
)
task_2 = build_task(config)
task.set_use_gpu(torch.cuda.is_available())
trainer = LocalTrainer()
trainer.train(task)
# make sure fetching the where raises an exception, which means that
# where is >= 1.0
with self.assertRaises(Exception):
task.where
# set task_2's state as task's final train checkpoint
task_2.set_checkpoint(self.base_dir)
task_2.prepare()
# we should be able to train the task
trainer.train(task_2)
def test_test_only_checkpointing(self):
"""
Tests checkpointing by running train_steps to make sure the
train_steps run the same way after loading from a training
task checkpoint on a test_only task.
"""
train_config = get_fast_test_task_config()
train_config["num_epochs"] = 10
test_config = get_fast_test_task_config()
test_config["test_only"] = True
train_task = build_task(train_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
# prepare the tasks for the right device
train_task.prepare()
# test in both train and test mode
trainer = LocalTrainer()
trainer.train(train_task)
# set task's state as task_2's checkpoint
test_only_task._set_checkpoint_dict(
get_checkpoint_dict(train_task, {}, deep_copy=True)
)
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect the phase idx to be different for a test only task
self.assertEqual(test_state["phase_idx"], -1)
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
        # train_phase_idx should be -1
self.assertEqual(test_state["train_phase_idx"], -1)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_test_only_task(self):
"""
Tests the task in test mode by running train_steps
to make sure the train_steps run as expected on a
test_only task
"""
test_config = get_fast_test_task_config()
test_config["test_only"] = True
# delete train dataset
del test_config["dataset"]["train"]
test_only_task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
test_only_task.prepare()
test_state = test_only_task.get_classy_state()
# We expect that test only state is test, no matter what train state is
self.assertFalse(test_state["train"])
# Num updates should be 0
self.assertEqual(test_state["num_updates"], 0)
# Verify task will run
trainer = LocalTrainer()
trainer.train(test_only_task)
def test_train_only_task(self):
"""
Tests that the task runs when only a train dataset is specified.
"""
test_config = get_fast_test_task_config()
# delete the test dataset from the config
del test_config["dataset"]["test"]
task = build_task(test_config).set_hooks([LossLrMeterLoggingHook()])
task.prepare()
        # verify that the task can still be trained
trainer = LocalTrainer()
trainer.train(task)
@unittest.skipUnless(torch.cuda.is_available(), "This test needs a gpu to run")
def test_checkpointing_different_device(self):
config = get_fast_test_task_config()
task = build_task(config)
task_2 = build_task(config)
for use_gpu in [True, False]:
task.set_use_gpu(use_gpu)
task.prepare()
# set task's state as task_2's checkpoint
task_2._set_checkpoint_dict(get_checkpoint_dict(task, {}, deep_copy=True))
# we should be able to run the trainer using state from a different device
trainer = LocalTrainer()
task_2.set_use_gpu(not use_gpu)
trainer.train(task_2)
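# --- Checkpoint round-trip sketch (illustrative, not part of the tests) ---
# The pattern the checkpointing tests above exercise, in isolation:
#
#     task = build_task(config)
#     task.prepare()
#     state = get_checkpoint_dict(task, {}, deep_copy=True)   # snapshot
#     task_2 = build_task(config)
#     task_2._set_checkpoint_dict(state)                      # restore
#     task_2.prepare()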
| 35.220532
| 87
| 0.665227
|
7949bda533828f1e8fdf163b488d976fb9c63c61
| 23,268
|
py
|
Python
|
pyxdsm/XDSM.py
|
eirikurj/pyXDSM
|
6c631150f6b78e367127c0b7f4e273c3bb37f6ce
|
[
"Apache-2.0"
] | 30
|
2018-09-30T11:47:05.000Z
|
2021-11-11T18:04:32.000Z
|
pyxdsm/XDSM.py
|
eirikurj/pyXDSM
|
6c631150f6b78e367127c0b7f4e273c3bb37f6ce
|
[
"Apache-2.0"
] | 20
|
2018-06-08T15:44:36.000Z
|
2022-01-05T19:43:50.000Z
|
pyxdsm/XDSM.py
|
eirikurj/pyXDSM
|
6c631150f6b78e367127c0b7f4e273c3bb37f6ce
|
[
"Apache-2.0"
] | 32
|
2017-11-29T09:55:35.000Z
|
2022-03-06T12:15:41.000Z
|
from __future__ import print_function
import os
import numpy as np
import json
import subprocess
from collections import namedtuple
from pyxdsm import __version__ as pyxdsm_version
OPT = "Optimization"
SUBOPT = "SubOptimization"
SOLVER = "MDA"
DOE = "DOE"
IFUNC = "ImplicitFunction"
FUNC = "Function"
GROUP = "Group"
IGROUP = "ImplicitGroup"
METAMODEL = "Metamodel"
tikzpicture_template = r"""
%%% Preamble Requirements %%%
% \usepackage{{geometry}}
% \usepackage{{amsfonts}}
% \usepackage{{amsmath}}
% \usepackage{{amssymb}}
% \usepackage{{tikz}}
% Optional packages such as sfmath set through python interface
% \usepackage{{{optional_packages}}}
% \usetikzlibrary{{arrows,chains,positioning,scopes,shapes.geometric,shapes.misc,shadows}}
%%% End Preamble Requirements %%%
\input{{"{diagram_styles_path}"}}
\begin{{tikzpicture}}
\matrix[MatrixSetup]{{
{nodes}}};
% XDSM process chains
{process}
\begin{{pgfonlayer}}{{data}}
\path
{edges}
\end{{pgfonlayer}}
\end{{tikzpicture}}
"""
tex_template = r"""
% XDSM diagram created with pyXDSM {version}.
\documentclass{{article}}
\usepackage{{geometry}}
\usepackage{{amsfonts}}
\usepackage{{amsmath}}
\usepackage{{amssymb}}
\usepackage{{tikz}}
% Optional packages such as sfmath set through python interface
\usepackage{{{optional_packages}}}
% Define the set of TikZ packages to be included in the architecture diagram document
\usetikzlibrary{{arrows,chains,positioning,scopes,shapes.geometric,shapes.misc,shadows}}
% Set the border around all of the architecture diagrams to be tight to the diagrams themselves
% (i.e. no longer need to tinker with page size parameters)
\usepackage[active,tightpage]{{preview}}
\PreviewEnvironment{{tikzpicture}}
\setlength{{\PreviewBorder}}{{5pt}}
\begin{{document}}
\input{{"{tikzpicture_path}"}}
\end{{document}}
"""
def chunk_label(label, n_chunks):
    # yield successive chunks of at most n_chunks items from label
    for i in range(0, len(label), n_chunks):
        yield label[i : i + n_chunks]
def _parse_label(label, label_width=None):
if isinstance(label, (tuple, list)):
if label_width is None:
return r"$\begin{array}{c}" + r" \\ ".join(label) + r"\end{array}$"
else:
labels = []
for chunk in chunk_label(label, label_width):
labels.append(", ".join(chunk))
return r"$\begin{array}{c}" + r" \\ ".join(labels) + r"\end{array}$"
else:
return r"${}$".format(label)
def _label_to_spec(label, spec):
if isinstance(label, str):
label = [
label,
]
for var in label:
if var:
spec.add(var)
System = namedtuple("System", "node_name style label stack faded label_width spec_name")
Input = namedtuple("Input", "node_name label label_width style stack")
Output = namedtuple("Output", "node_name label label_width style stack side")
Connection = namedtuple("Connection", "src target label label_width style stack faded")
class XDSM(object):
def __init__(self, use_sfmath=True):
self.systems = []
self.connections = []
self.left_outs = {}
self.right_outs = {}
self.ins = {}
self.processes = []
self.process_arrows = []
self.use_sfmath = use_sfmath
def add_system(self, node_name, style, label, stack=False, faded=False, label_width=None, spec_name=None):
"""
Add a "system" block, which will be placed on the diagonal of the XDSM diagram.
Parameters
----------
node_name : str
The unique name given to this component
style : str
The type of the component
label : str or list/tuple of strings
The label to appear on the diagram. There are two options for this:
- a single string
- a list or tuple of strings, which is used for line breaking
        In either case, they should probably be enclosed in \\text{} declarations to make sure
the font is upright.
stack : bool
If true, the system will be displayed as several stacked rectangles,
indicating the component is executed in parallel.
faded : bool
If true, the component will be faded, in order to highlight some other system.
label_width : int or None
If not None, AND if ``label`` is given as either a tuple or list, then this parameter
controls how many items in the tuple/list will be displayed per line.
If None, the label will be printed one item per line if given as a tuple or list,
otherwise the string will be printed on a single line.
spec_name : str
The spec name used for the spec file.
"""
if spec_name is None:
spec_name = node_name
sys = System(node_name, style, label, stack, faded, label_width, spec_name)
self.systems.append(sys)
def add_input(self, name, label, label_width=None, style="DataIO", stack=False):
"""
Add an input, which will appear in the top row of the diagram.
Parameters
----------
name : str
The unique name given to this component
label : str or list/tuple of strings
The label to appear on the diagram. There are two options for this:
- a single string
- a list or tuple of strings, which is used for line breaking
        In either case, they should probably be enclosed in \\text{} declarations to make sure
the font is upright.
label_width : int or None
If not None, AND if ``label`` is given as either a tuple or list, then this parameter
controls how many items in the tuple/list will be displayed per line.
If None, the label will be printed one item per line if given as a tuple or list,
otherwise the string will be printed on a single line.
style : str
The style given to this component. Can be one of ['DataInter', 'DataIO']
stack : bool
If true, the system will be displayed as several stacked rectangles,
indicating the component is executed in parallel.
"""
self.ins[name] = Input("output_" + name, label, label_width, style, stack)
def add_output(self, name, label, label_width=None, style="DataIO", stack=False, side="left"):
"""
Add an output, which will appear in the left or right-most column of the diagram.
Parameters
----------
name : str
The unique name given to this component
label : str or list/tuple of strings
The label to appear on the diagram. There are two options for this:
- a single string
- a list or tuple of strings, which is used for line breaking
        In either case, they should probably be enclosed in \\text{} declarations to make sure
the font is upright.
label_width : int or None
If not None, AND if ``label`` is given as either a tuple or list, then this parameter
controls how many items in the tuple/list will be displayed per line.
If None, the label will be printed one item per line if given as a tuple or list,
otherwise the string will be printed on a single line.
style : str
The style given to this component. Can be one of ``['DataInter', 'DataIO']``
stack : bool
If true, the system will be displayed as several stacked rectangles,
indicating the component is executed in parallel.
side : str
Must be one of ``['left', 'right']``. This parameter controls whether the output
is placed on the left-most column or the right-most column of the diagram.
"""
if side == "left":
self.left_outs[name] = Output("left_output_" + name, label, label_width, style, stack, side)
elif side == "right":
self.right_outs[name] = Output("right_output_" + name, label, label_width, style, stack, side)
else:
raise ValueError("The option 'side' must be given as either 'left' or 'right!'")
def connect(self, src, target, label, label_width=None, style="DataInter", stack=False, faded=False):
"""
Connects two components with a data line, and adds a label to indicate
the data being transferred.
Parameters
----------
src : str
The name of the source component.
target : str
The name of the target component.
label : str or list/tuple of strings
The label to appear on the diagram. There are two options for this:
- a single string
- a list or tuple of strings, which is used for line breaking
        In either case, they should probably be enclosed in \\text{} declarations to make sure
the font is upright.
label_width : int or None
If not None, AND if ``label`` is given as either a tuple or list, then this parameter
controls how many items in the tuple/list will be displayed per line.
If None, the label will be printed one item per line if given as a tuple or list,
otherwise the string will be printed on a single line.
style : str
The style given to this component. Can be one of ``['DataInter', 'DataIO']``
stack : bool
If true, the system will be displayed as several stacked rectangles,
indicating the component is executed in parallel.
faded : bool
If true, the component will be faded, in order to highlight some other system.
"""
if src == target:
raise ValueError("Can not connect component to itself")
if (not isinstance(label_width, int)) and (label_width is not None):
raise ValueError("label_width argument must be an integer")
self.connections.append(Connection(src, target, label, label_width, style, stack, faded))
def add_process(self, systems, arrow=True):
"""
Add a process line between a list of systems, to indicate process flow.
Parameters
----------
systems : list
The names of the components, in the order in which they should be connected.
For a complete cycle, repeat the first component as the last component.
arrow : bool
If true, arrows will be added to the process lines to indicate the direction
of the process flow.
"""
self.processes.append(systems)
self.process_arrows.append(arrow)
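    # --- Usage sketch (illustrative, not part of the module) ---
    # A minimal diagram built with the public methods defined above; node
    # names and labels are made up. write() (defined below) emits
    # ex.tikz/ex.tex.
    #
    #     x = XDSM()
    #     x.add_system('opt', OPT, r'\text{Optimizer}')
    #     x.add_system('solver', SOLVER, r'\text{Newton}')
    #     x.add_system('F', FUNC, 'F')
    #     x.connect('opt', 'solver', 'z')
    #     x.connect('solver', 'F', 'y')
    #     x.connect('F', 'opt', 'f')
    #     x.add_input('opt', 'x_0')
    #     x.add_output('opt', 'x^*', side='left')
    #     x.add_process(['opt', 'solver', 'F', 'opt'], arrow=True)
    #     x.write('ex', build=False)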
def _build_node_grid(self):
size = len(self.systems)
comps_rows = np.arange(size)
comps_cols = np.arange(size)
if self.ins:
size += 1
# move all comps down one row
comps_rows += 1
if self.left_outs:
size += 1
# shift all comps to the right by one, to make room for inputs
comps_cols += 1
if self.right_outs:
size += 1
# don't need to shift anything in this case
# build a map between comp node_names and row idx for ordering calculations
row_idx_map = {}
col_idx_map = {}
node_str = r"\node [{style}] ({node_name}) {{{node_label}}};"
grid = np.empty((size, size), dtype=object)
grid[:] = ""
# add all the components on the diagonal
for i_row, j_col, comp in zip(comps_rows, comps_cols, self.systems):
style = comp.style
if comp.stack is True: # stacking
style += ",stack"
if comp.faded is True: # fading
style += ",faded"
label = _parse_label(comp.label, comp.label_width)
node = node_str.format(style=style, node_name=comp.node_name, node_label=label)
grid[i_row, j_col] = node
row_idx_map[comp.node_name] = i_row
col_idx_map[comp.node_name] = j_col
# add all the off diagonal nodes from components
for conn in self.connections:
# src, target, style, label, stack, faded, label_width
src_row = row_idx_map[conn.src]
target_col = col_idx_map[conn.target]
loc = (src_row, target_col)
style = conn.style
if conn.stack is True: # stacking
style += ",stack"
if conn.faded is True: # fading
style += ",faded"
label = _parse_label(conn.label, conn.label_width)
node_name = "{}-{}".format(conn.src, conn.target)
node = node_str.format(style=style, node_name=node_name, node_label=label)
grid[loc] = node
# add the nodes for left outputs
for comp_name, out in self.left_outs.items():
style = out.style
if out.stack:
style += ",stack"
i_row = row_idx_map[comp_name]
loc = (i_row, 0)
label = _parse_label(out.label, out.label_width)
node = node_str.format(style=style, node_name=out.node_name, node_label=label)
grid[loc] = node
# add the nodes for right outputs
for comp_name, out in self.right_outs.items():
style = out.style
if out.stack:
style += ",stack"
i_row = row_idx_map[comp_name]
loc = (i_row, -1)
label = _parse_label(out.label, out.label_width)
node = node_str.format(style=style, node_name=out.node_name, node_label=label)
grid[loc] = node
# add the inputs to the top of the grid
for comp_name, inp in self.ins.items():
# node_name, style, label, stack = in_data
style = inp.style
if inp.stack:
style += ",stack"
j_col = col_idx_map[comp_name]
loc = (0, j_col)
label = _parse_label(inp.label, label_width=inp.label_width)
node = node_str.format(style=style, node_name=inp.node_name, node_label=label)
grid[loc] = node
# mash the grid data into a string
rows_str = ""
for i, row in enumerate(grid):
rows_str += "%Row {}\n".format(i) + "&\n".join(row) + r"\\" + "\n"
return rows_str
def _build_edges(self):
h_edges = []
v_edges = []
edge_string = "({start}) edge [DataLine] ({end})"
for conn in self.connections:
od_node_name = "{}-{}".format(conn.src, conn.target)
h_edges.append(edge_string.format(start=conn.src, end=od_node_name))
v_edges.append(edge_string.format(start=od_node_name, end=conn.target))
for comp_name, out in self.left_outs.items():
node_name = out.node_name
h_edges.append(edge_string.format(start=comp_name, end=node_name))
for comp_name, out in self.right_outs.items():
node_name = out.node_name
h_edges.append(edge_string.format(start=comp_name, end=node_name))
for comp_name, inp in self.ins.items():
node_name = inp.node_name
v_edges.append(edge_string.format(start=comp_name, end=node_name))
paths_str = "% Horizontal edges\n" + "\n".join(h_edges) + "\n"
paths_str += "% Vertical edges\n" + "\n".join(v_edges) + ";"
return paths_str
def _build_process_chain(self):
sys_names = [s.node_name for s in self.systems]
        output_names = (
            [data.node_name for _, data in self.ins.items()]
            + [data.node_name for _, data in self.left_outs.items()]
            + [data.node_name for _, data in self.right_outs.items()]
        )
chain_str = ""
for proc, arrow in zip(self.processes, self.process_arrows):
chain_str += "{ [start chain=process]\n \\begin{pgfonlayer}{process} \n"
start_tip = False
for i, sys in enumerate(proc):
if sys not in sys_names and sys not in output_names:
raise ValueError(
'process includes a system named "{}" but no system with that name exists.'.format(sys)
)
if sys in output_names and i == 0:
start_tip = True
if i == 0:
chain_str += "\\chainin ({});\n".format(sys)
else:
if sys in output_names or (i == 1 and start_tip):
if arrow:
chain_str += "\\chainin ({}) [join=by ProcessTipA];\n".format(sys)
else:
chain_str += "\\chainin ({}) [join=by ProcessTip];\n".format(sys)
else:
if arrow:
chain_str += "\\chainin ({}) [join=by ProcessHVA];\n".format(sys)
else:
chain_str += "\\chainin ({}) [join=by ProcessHV];\n".format(sys)
chain_str += "\\end{pgfonlayer}\n}"
return chain_str
def _compose_optional_package_list(self):
# Check for optional LaTeX packages
optional_packages_list = []
if self.use_sfmath:
optional_packages_list.append("sfmath")
# Join all packages into one string separated by comma
optional_packages_str = ",".join(optional_packages_list)
return optional_packages_str
def write(self, file_name, build=True, cleanup=True, quiet=False, outdir="."):
"""
Write output files for the XDSM diagram. This produces the following:
- {file_name}.tikz
A file containing the TikZ definition of the XDSM diagram.
- {file_name}.tex
A standalone document wrapped around an include of the TikZ file which can
be compiled to a pdf.
- {file_name}.pdf
An optional compiled version of the standalone tex file.
Parameters
----------
file_name : str
The prefix to be used for the output files
build : bool
Flag that determines whether the standalone PDF of the XDSM will be compiled.
Default is True.
cleanup : bool
Flag that determines if pdflatex build files will be deleted after build is complete
quiet : bool
Set to True to suppress output from pdflatex.
outdir : str
Path to an existing directory in which to place output files. If a relative
path is given, it is interpreted relative to the current working directory.
"""
nodes = self._build_node_grid()
edges = self._build_edges()
process = self._build_process_chain()
module_path = os.path.dirname(__file__)
diagram_styles_path = os.path.join(module_path, "diagram_styles")
# Hack for Windows. MiKTeX needs Linux style paths.
diagram_styles_path = diagram_styles_path.replace("\\", "/")
optional_packages_str = self._compose_optional_package_list()
tikzpicture_str = tikzpicture_template.format(
nodes=nodes,
edges=edges,
process=process,
diagram_styles_path=diagram_styles_path,
optional_packages=optional_packages_str,
)
base_output_fp = os.path.join(outdir, file_name)
with open(base_output_fp + ".tikz", "w") as f:
f.write(tikzpicture_str)
tex_str = tex_template.format(
nodes=nodes,
edges=edges,
tikzpicture_path=file_name + ".tikz",
diagram_styles_path=diagram_styles_path,
optional_packages=optional_packages_str,
version=pyxdsm_version,
)
with open(base_output_fp + ".tex", "w") as f:
f.write(tex_str)
if build:
            command = [
                "pdflatex",
                "-halt-on-error",
                # batchmode suppresses console chatter; nonstopmode keeps it
                "-interaction={}".format("batchmode" if quiet else "nonstopmode"),
                "-output-directory={}".format(outdir),
                "{}.tex".format(file_name),
            ]
            subprocess.run(command, check=True)
if cleanup:
for ext in ["aux", "fdb_latexmk", "fls", "log"]:
f_name = "{}.{}".format(base_output_fp, ext)
if os.path.exists(f_name):
os.remove(f_name)
def write_sys_specs(self, folder_name):
"""
Write I/O spec json files for systems to specified folder
An I/O spec of a system is the collection of all variables going into and out of it.
That includes any variables being passed between systems, as well as all inputs and outputs.
This information is useful for comparing implementations (such as components and groups in OpenMDAO)
to the XDSM diagrams.
The json spec files can be used to write testing utilities that compare the inputs/outputs of an implementation
to the XDSM, and thus allow you to verify that your codes match the XDSM diagram precisely.
This technique is especially useful when large engineering teams are collaborating on
model development. It allows them to use the XDSM as a shared contract between team members
so everyone can be sure that their codes will sync up.
Parameters
----------
folder_name: str
name of the folder, which will be created if it doesn't exist, to put spec files into
"""
        # find unconnected inputs to each system by looking at Inputs
specs = {}
for sys in self.systems:
specs[sys.node_name] = {"inputs": set(), "outputs": set()}
for sys_name, inp in self.ins.items():
_label_to_spec(inp.label, specs[sys_name]["inputs"])
# find connected inputs/outputs to each system by looking at Connections
for conn in self.connections:
_label_to_spec(conn.label, specs[conn.target]["inputs"])
_label_to_spec(conn.label, specs[conn.src]["outputs"])
# find unconnected outputs to each system by looking at Outputs
for sys_name, out in self.left_outs.items():
_label_to_spec(out.label, specs[sys_name]["outputs"])
for sys_name, out in self.right_outs.items():
_label_to_spec(out.label, specs[sys_name]["outputs"])
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
for sys in self.systems:
if sys.spec_name is not False:
path = os.path.join(folder_name, sys.spec_name + ".json")
with open(path, "w") as f:
spec = specs[sys.node_name]
spec["inputs"] = list(spec["inputs"])
spec["outputs"] = list(spec["outputs"])
json_str = json.dumps(spec, indent=2)
f.write(json_str)
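# --- Spec-file sketch (illustrative, not part of the module) ---
# write_sys_specs above emits one JSON file per system, shaped like
#     {"inputs": ["x_0", "f"], "outputs": ["x^*", "z"]}
# (hypothetical contents). A test can load it back for comparison:
def load_spec(path):
    # return the recorded input/output variable sets for one system
    with open(path) as f:
        spec = json.load(f)
    return set(spec['inputs']), set(spec['outputs'])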
| 37.169329
| 119
| 0.598204
|
7949be3a9ff9c4ac43a60bb4593001ffd25163e4
| 16,208
|
py
|
Python
|
venv/Lib/site-packages/twilio/rest/supersim/v1/network_access_profile/network_access_profile_network.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | 1
|
2021-02-16T00:59:29.000Z
|
2021-02-16T00:59:29.000Z
|
venv/Lib/site-packages/twilio/rest/supersim/v1/network_access_profile/network_access_profile_network.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/twilio/rest/supersim/v1/network_access_profile/network_access_profile_network.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class NetworkAccessProfileNetworkList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, network_access_profile_sid):
"""
Initialize the NetworkAccessProfileNetworkList
:param Version version: Version that contains the resource
:param network_access_profile_sid: The unique string that identifies the Network Access Profile resource
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkList
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkList
"""
super(NetworkAccessProfileNetworkList, self).__init__(version)
# Path Solution
self._solution = {'network_access_profile_sid': network_access_profile_sid, }
self._uri = '/NetworkAccessProfiles/{network_access_profile_sid}/Networks'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams NetworkAccessProfileNetworkInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists NetworkAccessProfileNetworkInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of NetworkAccessProfileNetworkInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return NetworkAccessProfileNetworkPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of NetworkAccessProfileNetworkInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return NetworkAccessProfileNetworkPage(self._version, response, self._solution)
def create(self, network):
"""
Create the NetworkAccessProfileNetworkInstance
:param unicode network: The SID that identifies the Network resource
:returns: The created NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
"""
data = values.of({'Network': network, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return NetworkAccessProfileNetworkInstance(
self._version,
payload,
network_access_profile_sid=self._solution['network_access_profile_sid'],
)
def get(self, sid):
"""
Constructs a NetworkAccessProfileNetworkContext
:param sid: The SID of the resource to fetch
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
"""
return NetworkAccessProfileNetworkContext(
self._version,
network_access_profile_sid=self._solution['network_access_profile_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a NetworkAccessProfileNetworkContext
:param sid: The SID of the resource to fetch
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
"""
return NetworkAccessProfileNetworkContext(
self._version,
network_access_profile_sid=self._solution['network_access_profile_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Supersim.V1.NetworkAccessProfileNetworkList>'
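# --- Usage sketch (illustrative, not part of the generated module) ---
# This list is normally reached through the twilio Client; the SID below is
# a placeholder:
#
#     from twilio.rest import Client
#     client = Client(account_sid, auth_token)
#     nap_sid = '...'  # Network Access Profile SID (placeholder)
#     for network in client.supersim.v1.network_access_profiles(nap_sid).networks.list(limit=20):
#         print(network.sid, network.iso_country)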
class NetworkAccessProfileNetworkPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the NetworkAccessProfileNetworkPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param network_access_profile_sid: The unique string that identifies the Network Access Profile resource
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkPage
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkPage
"""
super(NetworkAccessProfileNetworkPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of NetworkAccessProfileNetworkInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
"""
return NetworkAccessProfileNetworkInstance(
self._version,
payload,
network_access_profile_sid=self._solution['network_access_profile_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Supersim.V1.NetworkAccessProfileNetworkPage>'
class NetworkAccessProfileNetworkContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, network_access_profile_sid, sid):
"""
Initialize the NetworkAccessProfileNetworkContext
:param Version version: Version that contains the resource
:param network_access_profile_sid: The unique string that identifies the Network Access Profile resource
:param sid: The SID of the resource to fetch
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
"""
super(NetworkAccessProfileNetworkContext, self).__init__(version)
# Path Solution
self._solution = {'network_access_profile_sid': network_access_profile_sid, 'sid': sid, }
self._uri = '/NetworkAccessProfiles/{network_access_profile_sid}/Networks/{sid}'.format(**self._solution)
def delete(self):
"""
Deletes the NetworkAccessProfileNetworkInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def fetch(self):
"""
Fetch the NetworkAccessProfileNetworkInstance
:returns: The fetched NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return NetworkAccessProfileNetworkInstance(
self._version,
payload,
network_access_profile_sid=self._solution['network_access_profile_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Supersim.V1.NetworkAccessProfileNetworkContext {}>'.format(context)
class NetworkAccessProfileNetworkInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, network_access_profile_sid, sid=None):
"""
Initialize the NetworkAccessProfileNetworkInstance
:returns: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
"""
super(NetworkAccessProfileNetworkInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'network_access_profile_sid': payload.get('network_access_profile_sid'),
'friendly_name': payload.get('friendly_name'),
'iso_country': payload.get('iso_country'),
'identifiers': payload.get('identifiers'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {
'network_access_profile_sid': network_access_profile_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: NetworkAccessProfileNetworkContext for this NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkContext
"""
if self._context is None:
self._context = NetworkAccessProfileNetworkContext(
self._version,
network_access_profile_sid=self._solution['network_access_profile_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def network_access_profile_sid(self):
"""
:returns: The unique string that identifies the Network Access Profile resource
:rtype: unicode
"""
return self._properties['network_access_profile_sid']
@property
def friendly_name(self):
"""
:returns: A human readable identifier of this resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def iso_country(self):
"""
:returns: The ISO country code of the Network resource
:rtype: unicode
"""
return self._properties['iso_country']
@property
def identifiers(self):
"""
:returns: The MCC/MNCs included in the resource
:rtype: list[dict]
"""
return self._properties['identifiers']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def delete(self):
"""
Deletes the NetworkAccessProfileNetworkInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def fetch(self):
"""
Fetch the NetworkAccessProfileNetworkInstance
:returns: The fetched NetworkAccessProfileNetworkInstance
:rtype: twilio.rest.supersim.v1.network_access_profile.network_access_profile_network.NetworkAccessProfileNetworkInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Supersim.V1.NetworkAccessProfileNetworkInstance {}>'.format(context)
| 40.52
| 135
| 0.687068
|
7949be69c09b29524e5240125420a9df8f942647
| 607
|
py
|
Python
|
python/anyascii/_data/_245.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_245.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_245.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b='Na Dit Pao Ju Luo Shua Shang Su Luo Fen Wai Bao Li Xiong Vuot Dang Danh Mong Mong Mong Cheng Zhang Sou Shen Ge Cha Yu Hui Che Jiao Zhu Shu Xiao Ning Pan Jiang Jiang Diao Zong Qiang Qiu Feng Zhan Ke Die Ze Guang Se Fen Jiang Yan Zhi Riu Li Ling Yi Qu Pan Gou Jia He Peng Ju Banh Che Chua Lop Lie Shi Po Xiang Pi Luo Cu Yu Mui Kong Xie Wan Yan Pei Cheng Manh Ti Che Bi Lian Jia Ting Ti Cong Die Shu Li Lu Xia Cui Tam Bo Tui Pu Lin Fen Phuon Bo Chan Dang Tai'
| 607
| 607
| 0.574959
|
7949bfdb05efe16fbab5fc34fb448a282c8c1e7c
| 1,211
|
py
|
Python
|
cvxpy/reductions/dcp2cone/atom_canonicalizers/mul_canon.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-10-21T22:15:55.000Z
|
2020-10-21T22:15:55.000Z
|
cvxpy/reductions/dcp2cone/atom_canonicalizers/mul_canon.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/reductions/dcp2cone/atom_canonicalizers/mul_canon.py
|
mostafaelaraby/cvxpy
|
078e025be8b8315b5f579bd0209e8e3a1e2a2a19
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-11-16T08:43:33.000Z
|
2020-11-16T08:43:33.000Z
|
from cvxpy.expressions.variable import Variable
from cvxpy.utilities import scopes
# TODO(akshayka): expose as a reduction for user's convenience
def mul_canon(expr, args):
# Only allow param * var (not var * param). Associate right to left.
# TODO: Only descend if both sides have parameters
lhs = args[0]
rhs = args[1]
if not (lhs.parameters() and rhs.parameters()):
return expr.copy(args), []
op_type = type(expr)
if lhs.variables():
with scopes.dpp_scope():
assert rhs.is_affine()
t = Variable(lhs.shape)
return op_type(t, rhs), [t == lhs]
elif rhs.variables():
with scopes.dpp_scope():
assert lhs.is_affine()
t = Variable(rhs.shape)
return op_type(lhs, t), [t == rhs]
# Neither side has variables. One side must be affine in parameters.
lhs_affine = False
rhs_affine = False
with scopes.dpp_scope():
lhs_affine = lhs.is_affine()
rhs_affine = rhs.is_affine()
assert lhs_affine or rhs_affine
if lhs_affine:
t = Variable(rhs.shape)
return lhs * t, [t == rhs]
else:
t = Variable(lhs.shape)
return t * rhs, [t == lhs]
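# --- Sketch (illustrative, not part of the module) ---
# mul_canon only fires when *both* operands carry parameters, e.g. a nested
# product P @ (Q @ x) with cp.Parameter P, Q and cp.Variable x. There the
# right operand holds the variables, so the elif branch introduces
# t = Variable(rhs.shape) and rewrites the node as P @ t subject to
# t == Q @ x, leaving at most one parameter-variable product per node.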
| 30.275
| 72
| 0.615194
|
7949c04b52334035e4a93de37c2afd940e8d7ba3
| 5,272
|
py
|
Python
|
dataent/commands/scheduler.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | null | null | null |
dataent/commands/scheduler.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | 6
|
2020-03-24T17:15:56.000Z
|
2022-02-10T18:41:31.000Z
|
dataent/commands/scheduler.py
|
dataent/dataent
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, absolute_import, print_function
import click
import sys
import dataent
from dataent.utils import cint
from dataent.commands import pass_context, get_site
def _is_scheduler_enabled():
enable_scheduler = False
try:
dataent.connect()
		enable_scheduler = bool(cint(dataent.db.get_single_value("System Settings", "enable_scheduler")))
	except Exception:
		pass
	finally:
		if dataent.db:
			dataent.db.close()
return enable_scheduler
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
"Trigger a scheduler event"
import dataent.utils.scheduler
for site in context.sites:
try:
dataent.init(site=site)
dataent.connect()
dataent.utils.scheduler.trigger(site, event, now=True)
finally:
dataent.destroy()
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
"Enable scheduler"
import dataent.utils.scheduler
for site in context.sites:
try:
dataent.init(site=site)
dataent.connect()
dataent.utils.scheduler.enable_scheduler()
dataent.db.commit()
print("Enabled for", site)
finally:
dataent.destroy()
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
"Disable scheduler"
import dataent.utils.scheduler
for site in context.sites:
try:
dataent.init(site=site)
dataent.connect()
dataent.utils.scheduler.disable_scheduler()
dataent.db.commit()
print("Disabled for", site)
finally:
dataent.destroy()
@click.command('scheduler')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['pause', 'resume', 'disable', 'enable']))
@pass_context
def scheduler(context, state, site=None):
from dataent.installer import update_site_config
import dataent.utils.scheduler
if not site:
site = get_site(context)
try:
dataent.init(site=site)
if state == 'pause':
update_site_config('pause_scheduler', 1)
elif state == 'resume':
update_site_config('pause_scheduler', 0)
elif state == 'disable':
dataent.connect()
dataent.utils.scheduler.disable_scheduler()
dataent.db.commit()
elif state == 'enable':
dataent.connect()
dataent.utils.scheduler.enable_scheduler()
dataent.db.commit()
print('Scheduler {0}d for site {1}'.format(state, site))
finally:
dataent.destroy()
@click.command('set-maintenance-mode')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['on', 'off']))
@pass_context
def set_maintenance_mode(context, state, site=None):
from dataent.installer import update_site_config
if not site:
site = get_site(context)
try:
dataent.init(site=site)
update_site_config('maintenance_mode', 1 if (state == 'on') else 0)
finally:
dataent.destroy()
@click.command('doctor')  # no @pass_context here: pass_context always resolves a site, which breaks when none is set
@click.option('--site', help='site name')
def doctor(site=None):
"Get diagnostic info about background workers"
from dataent.utils.doctor import doctor as _doctor
return _doctor(site=site)
@click.command('show-pending-jobs')
@click.option('--site', help='site name')
@pass_context
def show_pending_jobs(context, site=None):
"Get diagnostic info about background jobs"
from dataent.utils.doctor import pending_jobs as _pending_jobs
if not site:
site = get_site(context)
with dataent.init_site(site):
pending_jobs = _pending_jobs(site=site)
return pending_jobs
@click.command('purge-jobs')
@click.option('--site', help='site name')
@click.option('--queue', default=None, help='one of "low", "default", "high"')
@click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"')
def purge_jobs(site=None, queue=None, event=None):
"Purge any pending periodic tasks, if event option is not given, it will purge everything for the site"
from dataent.utils.doctor import purge_pending_jobs
dataent.init(site or '')
count = purge_pending_jobs(event=event, site=site, queue=queue)
print("Purged {} jobs".format(count))
@click.command('schedule')
def start_scheduler():
from dataent.utils.scheduler import start_scheduler
start_scheduler()
@click.command('worker')
@click.option('--queue', type=str)
@click.option('--quiet', is_flag=True, default=False, help='Hide log output')
def start_worker(queue, quiet=False):
    from dataent.utils.background_jobs import start_worker
    start_worker(queue, quiet=quiet)
@click.command('ready-for-migration')
@click.option('--site', help='site name')
@pass_context
def ready_for_migration(context, site=None):
    "Check whether a site is ready for migration (i.e. has no pending background jobs)"
    from dataent.utils.doctor import get_pending_jobs
if not site:
site = get_site(context)
try:
dataent.init(site=site)
pending_jobs = get_pending_jobs(site=site)
if pending_jobs:
print('NOT READY for migration: site {0} has pending background jobs'.format(site))
sys.exit(1)
else:
print('READY for migration: site {0} does not have any background jobs'.format(site))
return 0
finally:
dataent.destroy()
commands = [
disable_scheduler,
doctor,
enable_scheduler,
purge_jobs,
ready_for_migration,
scheduler,
set_maintenance_mode,
show_pending_jobs,
start_scheduler,
start_worker,
trigger_scheduler_event,
]
| 26.761421
| 128
| 0.743361
|
7949c0897a2ff802477791b7589b0eb77d8a7ca1
| 62,075
|
py
|
Python
|
untitled.py
|
Steve-YJ/sagemaker-studio-end-to-end
|
2227bd22d531b2b41e3abcc4778a3e4d2b7aada8
|
[
"MIT"
] | 1
|
2021-04-15T00:47:16.000Z
|
2021-04-15T00:47:16.000Z
|
untitled.py
|
Steve-YJ/sagemaker-studio-end-to-end
|
2227bd22d531b2b41e3abcc4778a3e4d2b7aada8
|
[
"MIT"
] | null | null | null |
untitled.py
|
Steve-YJ/sagemaker-studio-end-to-end
|
2227bd22d531b2b41e3abcc4778a3e4d2b7aada8
|
[
"MIT"
] | null | null | null |
from pyspark.sql.session import SparkSession
from pyspark.sql.dataframe import DataFrame
# You may want to configure the Spark Context with the right credentials provider.
spark = SparkSession.builder.master('local').getOrCreate()
mode = None
def capture_stdout(func, *args, **kwargs):
"""Capture standard output to a string buffer"""
from contextlib import redirect_stdout
import io
stdout_string = io.StringIO()
with redirect_stdout(stdout_string):
func(*args, **kwargs)
return stdout_string.getvalue()
def convert_or_coerce(pandas_df, spark):
"""Convert pandas df to pyspark df and coerces the mixed cols to string"""
import re
try:
return spark.createDataFrame(pandas_df)
except TypeError as e:
match = re.search(r".*field (\w+).*Can not merge type.*", str(e))
if match is None:
raise e
mixed_col_name = match.group(1)
# Coercing the col to string
pandas_df[mixed_col_name] = pandas_df[mixed_col_name].astype("str")
return pandas_df
def default_spark(value):
return {"default": value}
def default_spark_with_stdout(df, stdout):
return {
"default": df,
"stdout": stdout,
}
def default_spark_with_trained_parameters(value, trained_parameters):
return {"default": value, "trained_parameters": trained_parameters}
def default_spark_with_trained_parameters_and_state(df, trained_parameters, state):
return {"default": df, "trained_parameters": trained_parameters, "state": state}
def dispatch(key_name, args, kwargs, funcs):
"""
Dispatches to another operator based on a key in the passed parameters.
This also slices out any parameters using the parameter_name passed in,
and will reassemble the trained_parameters correctly after invocation.
Args:
key_name: name of the key in kwargs used to identify the function to use.
args: dataframe that will be passed as the first set of parameters to the function.
        kwargs: keyword arguments in which key_name is found; these also carry the
            parameter dict for the chosen function, and trained_parameters if any.
funcs: dictionary mapping from value of key_name to (function, parameter_name)
"""
if key_name not in kwargs:
raise OperatorCustomerError(f"Missing required parameter {key_name}")
operator = kwargs[key_name]
if operator not in funcs:
raise OperatorCustomerError(f"Invalid choice selected for {key_name}. {operator} is not supported.")
func, parameter_name = funcs[operator]
# Extract out the parameters that should be available.
func_params = kwargs.get(parameter_name, {})
if func_params is None:
func_params = {}
# Extract out any trained parameters.
specific_trained_parameters = None
if "trained_parameters" in kwargs:
trained_parameters = kwargs["trained_parameters"]
if trained_parameters is not None and parameter_name in trained_parameters:
specific_trained_parameters = trained_parameters[parameter_name]
func_params["trained_parameters"] = specific_trained_parameters
# Execute the function (should return a dict).
result = func(*args, **func_params)
# Check if the result contains any trained parameters and remap them to the proper structure.
if result is not None and "trained_parameters" in result:
existing_trained_parameters = kwargs.get("trained_parameters")
updated_trained_parameters = result["trained_parameters"]
if existing_trained_parameters is not None or updated_trained_parameters is not None:
existing_trained_parameters = existing_trained_parameters if existing_trained_parameters is not None else {}
existing_trained_parameters[parameter_name] = result["trained_parameters"]
# Update the result trained_parameters so they are part of the original structure.
result["trained_parameters"] = existing_trained_parameters
else:
# If the given trained parameters were None and the returned trained parameters were None, don't return anything.
del result["trained_parameters"]
return result
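# A minimal sketch of how `dispatch` routes to an operator implementation and
# re-nests returned trained_parameters. The operator name and parameter keys
# below are invented for illustration; the function is never called.
def _dispatch_example():
    def demo_op(df, input_column=None, trained_parameters=None):
        return {"default": df, "trained_parameters": {"fitted": True}}

    kwargs = {"operator": "Demo", "demo_parameters": {"input_column": "a"}}
    # Routes to demo_op with demo_parameters sliced out as keyword arguments;
    # the result's trained_parameters end up nested under "demo_parameters".
    return dispatch("operator", ["fake-df"], kwargs, {"Demo": (demo_op, "demo_parameters")})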
def get_dataframe_with_sequence_ids(df: DataFrame):
df_cols = df.columns
rdd_with_seq = df.rdd.zipWithIndex()
df_with_seq = rdd_with_seq.toDF()
df_with_seq = df_with_seq.withColumnRenamed("_2", "_seq_id_")
for col_name in df_cols:
df_with_seq = df_with_seq.withColumn(col_name, df_with_seq["_1"].getItem(col_name))
df_with_seq = df_with_seq.drop("_1")
return df_with_seq
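# Illustrative only (assumes the local SparkSession created above; never called):
def _sequence_id_example():
    demo_df = spark.createDataFrame([("a",), ("b",)], ["col1"])
    # The result keeps "col1" and gains "_seq_id_" holding each row's zero-based index.
    return get_dataframe_with_sequence_ids(demo_df)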
def get_execution_state(status: str, message=None):
return {"status": status, "message": message}
class OperatorCustomerError(Exception):
"""Error type for Customer Errors in Spark Operators"""
def encode_categorical_ordinal_encode(
df, input_column=None, output_column=None, invalid_handling_strategy=None, trained_parameters=None
):
INVALID_HANDLING_STRATEGY_SKIP = "Skip"
INVALID_HANDLING_STRATEGY_ERROR = "Error"
INVALID_HANDLING_STRATEGY_KEEP = "Keep"
INVALID_HANDLING_STRATEGY_REPLACE_WITH_NAN = "Replace with NaN"
from pyspark.ml.feature import StringIndexer, StringIndexerModel
from pyspark.sql.functions import when
expects_column(df, input_column, "Input column")
invalid_handling_map = {
INVALID_HANDLING_STRATEGY_SKIP: "skip",
INVALID_HANDLING_STRATEGY_ERROR: "error",
INVALID_HANDLING_STRATEGY_KEEP: "keep",
INVALID_HANDLING_STRATEGY_REPLACE_WITH_NAN: "keep",
}
output_column, output_is_temp = get_temp_col_if_not_set(df, output_column)
# process inputs
handle_invalid = (
invalid_handling_strategy
if invalid_handling_strategy in invalid_handling_map
else INVALID_HANDLING_STRATEGY_ERROR
)
trained_parameters = load_trained_parameters(
trained_parameters, {"invalid_handling_strategy": invalid_handling_strategy}
)
input_handle_invalid = invalid_handling_map.get(handle_invalid)
index_model, index_model_loaded = load_pyspark_model_from_trained_parameters(
trained_parameters, StringIndexerModel, "string_indexer_model"
)
if index_model is None:
indexer = StringIndexer(inputCol=input_column, outputCol=output_column, handleInvalid=input_handle_invalid)
# fit the model and transform
try:
index_model = fit_and_save_model(trained_parameters, "string_indexer_model", indexer, df)
except Exception as e:
if input_handle_invalid == "error":
raise OperatorSparkOperatorCustomerError(
f"Encountered error calculating string indexes. Halting because error handling is set to 'Error'. Please check your data and try again: {e}"
)
else:
raise e
output_df = transform_using_trained_model(index_model, df, index_model_loaded)
# finally, if missing should be nan, convert them
if handle_invalid == INVALID_HANDLING_STRATEGY_REPLACE_WITH_NAN:
new_val = float("nan")
# convert all numLabels indices to new_val
num_labels = len(index_model.labels)
output_df = output_df.withColumn(
output_column, when(output_df[output_column] == num_labels, new_val).otherwise(output_df[output_column])
)
# finally handle the output column name appropriately.
output_df = replace_input_if_output_is_temp(output_df, input_column, output_column, output_is_temp)
return default_spark_with_trained_parameters(output_df, trained_parameters)
def encode_categorical_one_hot_encode(
df,
input_column=None,
input_already_ordinal_encoded=None,
invalid_handling_strategy=None,
drop_last=None,
output_style=None,
output_column=None,
trained_parameters=None,
):
INVALID_HANDLING_STRATEGY_SKIP = "Skip"
INVALID_HANDLING_STRATEGY_ERROR = "Error"
INVALID_HANDLING_STRATEGY_KEEP = "Keep"
OUTPUT_STYLE_VECTOR = "Vector"
OUTPUT_STYLE_COLUMNS = "Columns"
invalid_handling_map = {
INVALID_HANDLING_STRATEGY_SKIP: "skip",
INVALID_HANDLING_STRATEGY_ERROR: "error",
INVALID_HANDLING_STRATEGY_KEEP: "keep",
}
handle_invalid = invalid_handling_map.get(invalid_handling_strategy, "error")
expects_column(df, input_column, "Input column")
output_format = output_style if output_style in [OUTPUT_STYLE_VECTOR, OUTPUT_STYLE_COLUMNS] else OUTPUT_STYLE_VECTOR
drop_last = parse_parameter(bool, drop_last, "Drop Last", True)
input_ordinal_encoded = parse_parameter(bool, input_already_ordinal_encoded, "Input already ordinal encoded", False)
output_column = output_column if output_column else input_column
trained_parameters = load_trained_parameters(
trained_parameters, {"invalid_handling_strategy": invalid_handling_strategy, "drop_last": drop_last}
)
from pyspark.ml.feature import (
StringIndexer,
StringIndexerModel,
OneHotEncoder,
OneHotEncoderModel,
)
from pyspark.ml.functions import vector_to_array
import pyspark.sql.functions as sf
from pyspark.sql.types import DoubleType
# first step, ordinal encoding. Not required if input_ordinal_encoded==True
# get temp name for ordinal encoding
ordinal_name = temp_col_name(df, output_column)
if input_ordinal_encoded:
df_ordinal = df.withColumn(ordinal_name, df[input_column].cast("int"))
labels = None
else:
index_model, index_model_loaded = load_pyspark_model_from_trained_parameters(
trained_parameters, StringIndexerModel, "string_indexer_model"
)
if index_model is None:
# apply ordinal encoding
indexer = StringIndexer(inputCol=input_column, outputCol=ordinal_name, handleInvalid=handle_invalid)
try:
index_model = fit_and_save_model(trained_parameters, "string_indexer_model", indexer, df)
except Exception as e:
if handle_invalid == "error":
raise OperatorSparkOperatorCustomerError(
f"Encountered error calculating string indexes. Halting because error handling is set to 'Error'. Please check your data and try again: {e}"
)
else:
raise e
try:
df_ordinal = transform_using_trained_model(index_model, df, index_model_loaded)
except Exception as e:
if handle_invalid == "error":
raise OperatorSparkOperatorCustomerError(
f"Encountered error transforming string indexes. Halting because error handling is set to 'Error'. Please check your data and try again: {e}"
)
else:
raise e
labels = index_model.labels
# drop the input column if required from the ordinal encoded dataset
if output_column == input_column:
df_ordinal = df_ordinal.drop(input_column)
temp_output_col = temp_col_name(df_ordinal, output_column)
# apply onehot encoding on the ordinal
cur_handle_invalid = handle_invalid if input_ordinal_encoded else "error"
cur_handle_invalid = "keep" if cur_handle_invalid == "skip" else cur_handle_invalid
ohe_model, ohe_model_loaded = load_pyspark_model_from_trained_parameters(
trained_parameters, OneHotEncoderModel, "one_hot_encoder_model"
)
if ohe_model is None:
ohe = OneHotEncoder(
dropLast=drop_last, handleInvalid=cur_handle_invalid, inputCol=ordinal_name, outputCol=temp_output_col
)
try:
ohe_model = fit_and_save_model(trained_parameters, "one_hot_encoder_model", ohe, df_ordinal)
except Exception as e:
if handle_invalid == "error":
raise OperatorSparkOperatorCustomerError(
f"Encountered error calculating encoding categories. Halting because error handling is set to 'Error'. Please check your data and try again: {e}"
)
else:
raise e
output_df = transform_using_trained_model(ohe_model, df_ordinal, ohe_model_loaded)
if output_format == OUTPUT_STYLE_COLUMNS:
if labels is None:
labels = list(range(ohe_model.categorySizes[0]))
current_output_cols = set(list(output_df.columns))
old_cols = [sf.col(name) for name in df.columns if name in current_output_cols]
arr_col = vector_to_array(output_df[temp_output_col])
new_cols = [(arr_col[i]).alias(f"{output_column}_{name}") for i, name in enumerate(labels)]
output_df = output_df.select(*(old_cols + new_cols))
else:
# remove the temporary ordinal encoding
output_df = output_df.drop(ordinal_name)
output_df = output_df.withColumn(output_column, sf.col(temp_output_col))
output_df = output_df.drop(temp_output_col)
final_ordering = [col for col in df.columns]
if output_column not in final_ordering:
final_ordering.append(output_column)
output_df = output_df.select(final_ordering)
return default_spark_with_trained_parameters(output_df, trained_parameters)
import re
from pyspark.sql import functions as sf, types
def format_string_lower_case(df, input_column=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column, sf.lower(df[input_column].cast(types.StringType()))
)
)
def format_string_upper_case(df, input_column=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column, sf.upper(df[input_column].cast(types.StringType()))
)
)
def format_string_title_case(df, input_column=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.title(), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_capitalize(df, input_column=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.capitalize(), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_swap_case(df, input_column=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.swapcase(), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_left_pad(
df, input_column=None, width=None, fill_character=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
width = parse_parameter(int, width, "Width")
fill_character = parse_parameter(str, fill_character, "Fill character", " ")
MAX_WIDTH = 1000
if width > MAX_WIDTH:
raise OperatorSparkOperatorCustomerError(f"Width must be less than {MAX_WIDTH}. Received: {width}")
if len(fill_character) > 1:
raise OperatorSparkOperatorCustomerError(f"Fill character can only be a single character. Received: {fill_character}")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.lpad(df[input_column].cast(types.StringType()), len=width, pad=fill_character),
)
)
def format_string_right_pad(
df, input_column=None, width=None, fill_character=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
width = parse_parameter(int, width, "Width")
fill_character = parse_parameter(str, fill_character, "Fill character", " ")
MAX_WIDTH = 1000
if width > MAX_WIDTH:
raise OperatorSparkOperatorCustomerError(f"Width must be less than {MAX_WIDTH}. Received: {width}")
if len(fill_character) > 1:
raise OperatorSparkOperatorCustomerError(f"Fill character can only be a single character. Received: {fill_character}")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.rpad(df[input_column].cast(types.StringType()), len=width, pad=fill_character),
)
)
def format_string_center_pad_on_either_side(
df, input_column=None, width=None, fill_character=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
width = parse_parameter(int, width, "Width")
fill_character = parse_parameter(str, fill_character, "Fill character", " ")
MAX_WIDTH = 1000
if width > MAX_WIDTH:
raise OperatorSparkOperatorCustomerError(f"Width must be less than {MAX_WIDTH}. Received: {width}")
if len(fill_character) > 1:
raise OperatorSparkOperatorCustomerError(f"Fill character can only be a single character. Received: {fill_character}")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.center(width=width, fillchar=fill_character), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_strip_characters_from_left(
df, input_column=None, characters_to_remove=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.lstrip(to_strip=characters_to_remove), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_strip_characters_from_right(
df, input_column=None, characters_to_remove=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.rstrip(to_strip=characters_to_remove), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_strip_left_and_right(
df, input_column=None, characters_to_remove=None, output_column=None, trained_parameters=None
):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.pandas_udf(lambda s: s.str.strip(to_strip=characters_to_remove), returnType=types.StringType())(
df[input_column].cast(types.StringType())
),
)
)
def format_string_prepend_zeros(df, input_column=None, width=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
width = parse_parameter(int, width, "Width")
MAX_WIDTH = 1000
if width > MAX_WIDTH:
raise OperatorSparkOperatorCustomerError(f"Width must be less than {MAX_WIDTH}. Received: {width}")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.lpad(df[input_column].cast(types.StringType()), len=width, pad="0"),
)
)
def format_string_add_prefix(df, input_column=None, prefix=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.concat(sf.lit(prefix), df[input_column].cast(types.StringType())),
)
)
def format_string_add_suffix(df, input_column=None, suffix=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.concat(df[input_column].cast(types.StringType()), sf.lit(suffix)),
)
)
def format_string_remove_symbols(df, input_column=None, symbols=None, output_column=None, trained_parameters=None):
expects_column(df, input_column, "Input column")
symbols = "!@#$%^&*()_+=-/\\`~{}|<>?" if not symbols else symbols
regex = "|".join([re.escape(symbol) for symbol in symbols])
return default_spark(
df.withColumn(
output_column if output_column else input_column,
sf.regexp_replace(df[input_column].cast(types.StringType()), f"({regex})", ""),
)
)
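# For reference: the alternation built above escapes each symbol individually and
# ORs them into one pattern. A tiny illustration of that step (never called):
def _remove_symbols_regex_example():
    return "|".join([re.escape(symbol) for symbol in "!@#"])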
from enum import Enum
from pyspark.sql.types import BooleanType, DateType, DoubleType, LongType, StringType
from pyspark.sql import functions as f
class NonCastableDataHandlingMethod(Enum):
REPLACE_WITH_NULL = "replace_null"
REPLACE_WITH_NULL_AND_PUT_NON_CASTABLE_DATA_IN_NEW_COLUMN = "replace_null_with_new_col"
REPLACE_WITH_FIXED_VALUE = "replace_value"
REPLACE_WITH_FIXED_VALUE_AND_PUT_NON_CASTABLE_DATA_IN_NEW_COLUMN = "replace_value_with_new_col"
DROP_NON_CASTABLE_ROW = "drop"
@staticmethod
def get_names():
return [item.name for item in NonCastableDataHandlingMethod]
@staticmethod
def get_values():
return [item.value for item in NonCastableDataHandlingMethod]
class MohaveDataType(Enum):
BOOL = "bool"
DATE = "date"
FLOAT = "float"
LONG = "long"
STRING = "string"
OBJECT = "object"
@staticmethod
def get_names():
return [item.name for item in MohaveDataType]
@staticmethod
def get_values():
return [item.value for item in MohaveDataType]
PYTHON_TYPE_MAPPING = {
MohaveDataType.BOOL: bool,
MohaveDataType.DATE: str,
MohaveDataType.FLOAT: float,
MohaveDataType.LONG: int,
MohaveDataType.STRING: str,
}
MOHAVE_TO_SPARK_TYPE_MAPPING = {
MohaveDataType.BOOL: BooleanType,
MohaveDataType.DATE: DateType,
MohaveDataType.FLOAT: DoubleType,
MohaveDataType.LONG: LongType,
MohaveDataType.STRING: StringType,
}
SPARK_TYPE_MAPPING_TO_SQL_TYPE = {
BooleanType: "BOOLEAN",
LongType: "BIGINT",
DoubleType: "DOUBLE",
StringType: "STRING",
DateType: "DATE",
}
SPARK_TO_MOHAVE_TYPE_MAPPING = {value: key for (key, value) in MOHAVE_TO_SPARK_TYPE_MAPPING.items()}
def cast_single_column_type_helper(df, column_name_to_cast, column_name_to_add, mohave_data_type, date_formatting):
if mohave_data_type == MohaveDataType.DATE:
df = df.withColumn(column_name_to_add, f.to_date(df[column_name_to_cast], date_formatting))
else:
df = df.withColumn(
column_name_to_add, df[column_name_to_cast].cast(MOHAVE_TO_SPARK_TYPE_MAPPING[mohave_data_type]())
)
return df
def cast_single_column_type(
df, column, mohave_data_type, invalid_data_handling_method, replace_value=None, date_formatting="dd-MM-yyyy"
):
"""Cast single column to a new type
Args:
df (DataFrame): spark dataframe
column (Column): target column for type casting
mohave_data_type (Enum): Enum MohaveDataType
invalid_data_handling_method (Enum): Enum NonCastableDataHandlingMethod
replace_value (str): value to replace for invalid data when "replace_value" is specified
date_formatting (str): format for date. Default format is "dd-MM-yyyy"
Returns:
df (DataFrame): casted spark dataframe
"""
cast_to_date = f.to_date(df[column], date_formatting)
cast_to_non_date = df[column].cast(MOHAVE_TO_SPARK_TYPE_MAPPING[mohave_data_type]())
non_castable_column = f"{column}_typecast_error"
temp_column = "temp_column"
if invalid_data_handling_method == NonCastableDataHandlingMethod.REPLACE_WITH_NULL:
# Replace non-castable data to None in the same column. pyspark's default behaviour
# Original dataframe
# +---+------+
# | id | txt |
    # +---+------+
# | 1 | foo |
# | 2 | bar |
# | 3 | 1 |
# +---+------+
# cast txt column to long
# +---+------+
# | id | txt |
# +---+------+
# | 1 | None |
# | 2 | None |
# | 3 | 1 |
# +---+------+
return df.withColumn(column, cast_to_date if (mohave_data_type == MohaveDataType.DATE) else cast_to_non_date)
if invalid_data_handling_method == NonCastableDataHandlingMethod.DROP_NON_CASTABLE_ROW:
# Drop non-castable row
# Original dataframe
# +---+------+
# | id | txt |
    # +---+------+
# | 1 | foo |
# | 2 | bar |
# | 3 | 1 |
# +---+------+
    # cast txt column to long, drop the non-castable rows
# +---+----+
# | id|txt |
# +---+----+
# | 3| 1 |
# +---+----+
df = df.withColumn(column, cast_to_date if (mohave_data_type == MohaveDataType.DATE) else cast_to_non_date)
return df.where(df[column].isNotNull())
if (
invalid_data_handling_method
== NonCastableDataHandlingMethod.REPLACE_WITH_NULL_AND_PUT_NON_CASTABLE_DATA_IN_NEW_COLUMN
):
# Replace non-castable data to None in the same column and put non-castable data to a new column
# Original dataframe
# +---+------+
# | id | txt |
# +---+------+
# | 1 | foo |
# | 2 | bar |
# | 3 | 1 |
# +---+------+
# cast txt column to long
# +---+----+------------------+
# | id|txt |txt_typecast_error|
# +---+----+------------------+
# | 1|None| foo |
# | 2|None| bar |
# | 3| 1 | |
# +---+----+------------------+
df = df.withColumn(temp_column, cast_to_date if (mohave_data_type == MohaveDataType.DATE) else cast_to_non_date)
df = df.withColumn(non_castable_column, f.when(df[temp_column].isNotNull(), "").otherwise(df[column]),)
elif invalid_data_handling_method == NonCastableDataHandlingMethod.REPLACE_WITH_FIXED_VALUE:
# Replace non-castable data to a value in the same column
# Original dataframe
# +---+------+
# | id | txt |
# +---+------+
# | 1 | foo |
# | 2 | bar |
# | 3 | 1 |
# +---+------+
# cast txt column to long, replace non-castable value to 0
# +---+-----+
# | id| txt |
# +---+-----+
# | 1| 0 |
# | 2| 0 |
# | 3| 1 |
    # +---+-----+
value = _validate_and_cast_value(value=replace_value, mohave_data_type=mohave_data_type)
df = df.withColumn(temp_column, cast_to_date if (mohave_data_type == MohaveDataType.DATE) else cast_to_non_date)
replace_date_value = f.when(df[temp_column].isNotNull(), df[temp_column]).otherwise(
f.to_date(f.lit(value), date_formatting)
)
replace_non_date_value = f.when(df[temp_column].isNotNull(), df[temp_column]).otherwise(value)
df = df.withColumn(
temp_column, replace_date_value if (mohave_data_type == MohaveDataType.DATE) else replace_non_date_value
)
elif (
invalid_data_handling_method
== NonCastableDataHandlingMethod.REPLACE_WITH_FIXED_VALUE_AND_PUT_NON_CASTABLE_DATA_IN_NEW_COLUMN
):
# Replace non-castable data to a value in the same column and put non-castable data to a new column
# Original dataframe
# +---+------+
# | id | txt |
    # +---+------+
# | 1 | foo |
# | 2 | bar |
# | 3 | 1 |
# +---+------+
# cast txt column to long, replace non-castable value to 0
# +---+----+------------------+
# | id|txt |txt_typecast_error|
# +---+----+------------------+
# | 1| 0 | foo |
# | 2| 0 | bar |
# | 3| 1 | |
# +---+----+------------------+
value = _validate_and_cast_value(value=replace_value, mohave_data_type=mohave_data_type)
df = df.withColumn(temp_column, cast_to_date if (mohave_data_type == MohaveDataType.DATE) else cast_to_non_date)
df = df.withColumn(non_castable_column, f.when(df[temp_column].isNotNull(), "").otherwise(df[column]),)
replace_date_value = f.when(df[temp_column].isNotNull(), df[temp_column]).otherwise(
f.to_date(f.lit(value), date_formatting)
)
replace_non_date_value = f.when(df[temp_column].isNotNull(), df[temp_column]).otherwise(value)
df = df.withColumn(
temp_column, replace_date_value if (mohave_data_type == MohaveDataType.DATE) else replace_non_date_value
)
# drop temporary column
df = df.withColumn(column, df[temp_column]).drop(temp_column)
df_cols = df.columns
if non_castable_column in df_cols:
# Arrange columns so that non_castable_column col is next to casted column
df_cols.remove(non_castable_column)
column_index = df_cols.index(column)
arranged_cols = df_cols[: column_index + 1] + [non_castable_column] + df_cols[column_index + 1 :]
df = df.select(*arranged_cols)
return df
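# Illustrative usage (assumes the SparkSession above; never called): "foo" is
# non-castable to long, so it is replaced by the fixed value 0.
def _cast_single_column_type_example():
    demo_df = spark.createDataFrame([("1",), ("foo",)], ["txt"])
    return cast_single_column_type(
        demo_df,
        column="txt",
        mohave_data_type=MohaveDataType.LONG,
        invalid_data_handling_method=NonCastableDataHandlingMethod.REPLACE_WITH_FIXED_VALUE,
        replace_value="0",
    )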
def _validate_and_cast_value(value, mohave_data_type):
if value is None:
return value
try:
return PYTHON_TYPE_MAPPING[mohave_data_type](value)
except ValueError as e:
raise ValueError(
f"Invalid value to replace non-castable data. "
f"{mohave_data_type} is not in mohave supported date type: {MohaveDataType.get_values()}. "
f"Please use a supported type",
e,
)
import os
import collections.abc
import tempfile
import zipfile
import base64
import logging
from io import BytesIO
import numpy as np
class OperatorSparkOperatorCustomerError(Exception):
"""Error type for Customer Errors in Spark Operators"""
def temp_col_name(df, *illegal_names):
"""Generates a temporary column name that is unused.
"""
name = "temp_col"
idx = 0
name_set = set(list(df.columns) + list(illegal_names))
while name in name_set:
name = f"_temp_col_{idx}"
idx += 1
return name
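# Illustrative only (never called): only the `columns` attribute is consulted,
# so a stand-in object is enough to show the fallback to indexed names.
def _temp_col_name_example():
    class _FakeDF:
        columns = ["temp_col"]
    return temp_col_name(_FakeDF())  # -> "_temp_col_0"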
def get_temp_col_if_not_set(df, col_name):
"""Extracts the column name from the parameters if it exists, otherwise generates a temporary column name.
"""
if col_name:
return col_name, False
else:
return temp_col_name(df), True
def replace_input_if_output_is_temp(df, input_column, output_column, output_is_temp):
"""Replaces the input column in the dataframe if the output was not set
This is used with get_temp_col_if_not_set to enable the behavior where a
transformer will replace its input column if an output is not specified.
"""
if output_is_temp:
df = df.withColumn(input_column, df[output_column])
df = df.drop(output_column)
return df
else:
return df
def parse_parameter(typ, value, key, default=None, nullable=False):
if value is None:
if default is not None or nullable:
return default
else:
raise OperatorSparkOperatorCustomerError(f"Missing required input: '{key}'")
else:
try:
value = typ(value)
if isinstance(value, (int, float, complex)) and not isinstance(value, bool):
if np.isnan(value) or np.isinf(value):
raise OperatorSparkOperatorCustomerError(
f"Invalid value provided for '{key}'. Expected {typ.__name__} but received: {value}"
)
else:
return value
else:
return value
except (ValueError, TypeError):
raise OperatorSparkOperatorCustomerError(
f"Invalid value provided for '{key}'. Expected {typ.__name__} but received: {value}"
)
except OverflowError:
raise OperatorSparkOperatorCustomerError(
f"Overflow Error: Invalid value provided for '{key}'. Given value '{value}' exceeds the range of type "
f"'{typ.__name__}' for this input. Insert a valid value for type '{typ.__name__}' and try your request "
f"again."
)
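# Quick illustration of the coercion and default behaviour (never called):
def _parse_parameter_example():
    width = parse_parameter(int, "5", "Width")      # coerces the string to 5
    fill = parse_parameter(str, None, "Fill", " ")  # falls back to the default
    return width, fill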
def expects_valid_column_name(value, key, nullable=False):
if nullable and value is None:
return
if value is None or len(str(value).strip()) == 0:
raise OperatorSparkOperatorCustomerError(f"Column name cannot be null, empty, or whitespace for parameter '{key}': {value}")
def expects_parameter(value, key, condition=None):
if value is None:
raise OperatorSparkOperatorCustomerError(f"Missing required input: '{key}'")
elif condition is not None and not condition:
raise OperatorSparkOperatorCustomerError(f"Invalid value provided for '{key}': {value}")
def expects_column(df, value, key):
if not value or value not in df.columns:
raise OperatorSparkOperatorCustomerError(f"Expected column in dataframe for '{key}' however received '{value}'")
def expects_parameter_value_in_list(key, value, items):
if value not in items:
raise OperatorSparkOperatorCustomerError(f"Illegal parameter value. {key} expected to be in {items}, but given {value}")
def encode_pyspark_model(model):
with tempfile.TemporaryDirectory() as dirpath:
dirpath = os.path.join(dirpath, "model")
# Save the model
model.save(dirpath)
# Create the temporary zip-file.
mem_zip = BytesIO()
with zipfile.ZipFile(mem_zip, "w", zipfile.ZIP_DEFLATED, compresslevel=9) as zf:
# Zip the directory.
for root, dirs, files in os.walk(dirpath):
for file in files:
rel_dir = os.path.relpath(root, dirpath)
zf.write(os.path.join(root, file), os.path.join(rel_dir, file))
zipped = mem_zip.getvalue()
encoded = base64.b85encode(zipped)
return str(encoded, "utf-8")
def decode_pyspark_model(model_factory, encoded):
with tempfile.TemporaryDirectory() as dirpath:
zip_bytes = base64.b85decode(encoded)
mem_zip = BytesIO(zip_bytes)
mem_zip.seek(0)
with zipfile.ZipFile(mem_zip, "r") as zf:
zf.extractall(dirpath)
model = model_factory.load(dirpath)
return model
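# Round-trip sketch: a fitted model is saved, zipped, and base85-encoded into a
# string by encode_pyspark_model, then rebuilt by decode_pyspark_model.
# Illustrative only (assumes the SparkSession above; never called):
def _model_round_trip_example():
    from pyspark.ml.feature import StringIndexer, StringIndexerModel
    demo_df = spark.createDataFrame([("a",), ("b",)], ["c"])
    model = StringIndexer(inputCol="c", outputCol="c_idx").fit(demo_df)
    return decode_pyspark_model(StringIndexerModel, encode_pyspark_model(model))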
def hash_parameters(value):
# pylint: disable=W0702
try:
        if isinstance(value, collections.abc.Hashable):  # collections.Hashable was removed in Python 3.10
return hash(value)
if isinstance(value, dict):
return hash(frozenset([hash((hash_parameters(k), hash_parameters(v))) for k, v in value.items()]))
if isinstance(value, list):
return hash(frozenset([hash_parameters(v) for v in value]))
raise RuntimeError("Object not supported for serialization")
except: # noqa: E722
raise RuntimeError("Object not supported for serialization")
def load_trained_parameters(trained_parameters, operator_parameters):
trained_parameters = trained_parameters if trained_parameters else {}
parameters_hash = hash_parameters(operator_parameters)
stored_hash = trained_parameters.get("_hash")
if stored_hash != parameters_hash:
trained_parameters = {"_hash": parameters_hash}
return trained_parameters
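# Cache-invalidation sketch: stored parameters survive only while the operator
# configuration hashes to the same value. Illustrative only; never called.
def _load_trained_parameters_example():
    cached = {"_hash": hash_parameters({"strategy": "keep"}), "model": "..."}
    kept = load_trained_parameters(cached, {"strategy": "keep"})   # cache retained
    reset = load_trained_parameters(cached, {"strategy": "skip"})  # only a fresh _hash
    return kept, reset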
def load_pyspark_model_from_trained_parameters(trained_parameters, model_factory, name):
if trained_parameters is None or name not in trained_parameters:
return None, False
try:
model = decode_pyspark_model(model_factory, trained_parameters[name])
return model, True
except Exception as e:
logging.error(f"Could not decode PySpark model {name} from trained_parameters: {e}")
del trained_parameters[name]
return None, False
def fit_and_save_model(trained_parameters, name, algorithm, df):
model = algorithm.fit(df)
trained_parameters[name] = encode_pyspark_model(model)
return model
def transform_using_trained_model(model, df, loaded):
try:
return model.transform(df)
except Exception as e:
if loaded:
raise OperatorSparkOperatorCustomerError(
f"Encountered error while using stored model. Please delete the operator and try again. {e}"
)
else:
raise e
import re
from datetime import date
import numpy as np
import pandas as pd
from pyspark.sql.types import (
BooleanType,
IntegralType,
FractionalType,
StringType,
)
def type_inference(df): # noqa: C901 # pylint: disable=R0912
"""Core type inference logic
Args:
df: spark dataframe
Returns: dict a schema that maps from column name to mohave datatype
"""
columns_to_infer = [col for (col, col_type) in df.dtypes if col_type == "string"]
pandas_df = df.toPandas()
report = {}
    for (columnName, _) in pandas_df.items():  # .iteritems() was removed in pandas 2.0
if columnName in columns_to_infer:
column = pandas_df[columnName].values
report[columnName] = {
"sum_string": len(column),
"sum_numeric": sum_is_numeric(column),
"sum_integer": sum_is_integer(column),
"sum_boolean": sum_is_boolean(column),
"sum_date": sum_is_date(column),
"sum_null_like": sum_is_null_like(column),
"sum_null": sum_is_null(column),
}
# Analyze
numeric_threshold = 0.8
integer_threshold = 0.8
date_threshold = 0.8
bool_threshold = 0.8
column_types = {}
for col, insights in report.items():
        # Compare each inferred kind's ratio of the column against the thresholds.
proposed = MohaveDataType.STRING.value
if (insights["sum_numeric"] / insights["sum_string"]) > numeric_threshold:
proposed = MohaveDataType.FLOAT.value
if (insights["sum_integer"] / insights["sum_numeric"]) > integer_threshold:
proposed = MohaveDataType.LONG.value
elif (insights["sum_boolean"] / insights["sum_string"]) > bool_threshold:
proposed = MohaveDataType.BOOL.value
elif (insights["sum_date"] / insights["sum_string"]) > date_threshold:
proposed = MohaveDataType.DATE.value
column_types[col] = proposed
for f in df.schema.fields:
if f.name not in columns_to_infer:
if isinstance(f.dataType, IntegralType):
column_types[f.name] = MohaveDataType.LONG.value
elif isinstance(f.dataType, FractionalType):
column_types[f.name] = MohaveDataType.FLOAT.value
elif isinstance(f.dataType, StringType):
column_types[f.name] = MohaveDataType.STRING.value
elif isinstance(f.dataType, BooleanType):
column_types[f.name] = MohaveDataType.BOOL.value
else:
# unsupported types in mohave
column_types[f.name] = MohaveDataType.OBJECT.value
return column_types
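# Illustrative only (assumes the SparkSession above; never called): both columns
# arrive as strings; "a" is proposed as long and "b" as bool.
def _type_inference_example():
    demo_df = spark.createDataFrame([("1", "true"), ("2", "false")], ["a", "b"])
    return type_inference(demo_df)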
def _is_numeric_single(x):
try:
x_float = float(x)
return np.isfinite(x_float)
except ValueError:
return False
except TypeError: # if x = None
return False
def sum_is_numeric(x):
"""count number of numeric element
Args:
x: numpy array
Returns: int
"""
castables = np.vectorize(_is_numeric_single)(x)
return np.count_nonzero(castables)
def _is_integer_single(x):
try:
if not _is_numeric_single(x):
return False
return float(x) == int(x)
except ValueError:
return False
except TypeError: # if x = None
return False
def sum_is_integer(x):
castables = np.vectorize(_is_integer_single)(x)
return np.count_nonzero(castables)
def _is_boolean_single(x):
boolean_list = ["true", "false"]
try:
is_boolean = x.lower() in boolean_list
return is_boolean
except ValueError:
return False
except TypeError: # if x = None
return False
except AttributeError:
return False
def sum_is_boolean(x):
castables = np.vectorize(_is_boolean_single)(x)
return np.count_nonzero(castables)
def sum_is_null_like(x): # noqa: C901
def _is_empty_single(x):
try:
return bool(len(x) == 0)
except TypeError:
return False
def _is_null_like_single(x):
try:
return bool(null_like_regex.match(x))
except TypeError:
return False
def _is_whitespace_like_single(x):
try:
return bool(whitespace_regex.match(x))
except TypeError:
return False
    null_like_regex = re.compile(r"(?i)^(null|none|nil|na|nan)$")  # (?i) = case insensitive; match the whole cell
whitespace_regex = re.compile(r"^\s+$") # only whitespace
empty_checker = np.vectorize(_is_empty_single)(x)
num_is_null_like = np.count_nonzero(empty_checker)
null_like_checker = np.vectorize(_is_null_like_single)(x)
num_is_null_like += np.count_nonzero(null_like_checker)
whitespace_checker = np.vectorize(_is_whitespace_like_single)(x)
num_is_null_like += np.count_nonzero(whitespace_checker)
return num_is_null_like
def sum_is_null(x):
return np.count_nonzero(pd.isnull(x))
def _is_date_single(x):
try:
return bool(date.fromisoformat(x)) # YYYY-MM-DD
except ValueError:
return False
except TypeError:
return False
def sum_is_date(x):
return np.count_nonzero(np.vectorize(_is_date_single)(x))
def cast_df(df, schema):
"""Cast datafram from given schema
Args:
df: spark dataframe
schema: schema to cast to. It map from df's col_name to mohave datatype
Returns: casted dataframe
"""
# col name to spark data type mapping
col_to_spark_data_type_map = {}
# get spark dataframe's actual datatype
fields = df.schema.fields
for f in fields:
col_to_spark_data_type_map[f.name] = f.dataType
cast_expr = []
# iterate given schema and cast spark dataframe datatype
for col_name in schema:
mohave_data_type_from_schema = MohaveDataType(schema.get(col_name, MohaveDataType.OBJECT.value))
if mohave_data_type_from_schema != MohaveDataType.OBJECT:
spark_data_type_from_schema = MOHAVE_TO_SPARK_TYPE_MAPPING[mohave_data_type_from_schema]
# Only cast column when the data type in schema doesn't match the actual data type
if not isinstance(col_to_spark_data_type_map[col_name], spark_data_type_from_schema):
# use spark-sql expression instead of spark.withColumn to improve performance
expr = f"CAST (`{col_name}` as {SPARK_TYPE_MAPPING_TO_SQL_TYPE[spark_data_type_from_schema]})"
else:
# include column that has same dataType as it is
expr = f"`{col_name}`"
else:
# include column that has same mohave object dataType as it is
expr = f"`{col_name}`"
cast_expr.append(expr)
if len(cast_expr) != 0:
df = df.selectExpr(*cast_expr)
return df, schema
def validate_schema(df, schema):
"""Validate if every column is covered in the schema
Args:
schema ():
"""
columns_in_df = df.columns
columns_in_schema = schema.keys()
if len(columns_in_df) != len(columns_in_schema):
raise ValueError(
f"Invalid schema column size. "
f"Number of columns in schema should be equal as number of columns in dataframe. "
f"schema columns size: {len(columns_in_schema)}, dataframe column size: {len(columns_in_df)}"
)
for col in columns_in_schema:
if col not in columns_in_df:
raise ValueError(
f"Invalid column name in schema. "
f"Column in schema does not exist in dataframe. "
f"Non-existed columns: {col}"
)
def s3_source(spark, mode, dataset_definition):
"""Represents a source that handles sampling, etc."""
content_type = dataset_definition["s3ExecutionContext"]["s3ContentType"].upper()
has_header = dataset_definition["s3ExecutionContext"]["s3HasHeader"]
path = dataset_definition["s3ExecutionContext"]["s3Uri"].replace("s3://", "s3a://")
    if content_type not in ("CSV", "PARQUET"):
        # Guard so an unsupported type fails loudly instead of leaving `df` undefined.
        raise OperatorCustomerError(f"Unsupported content type: {content_type}")
    try:
        if content_type == "CSV":
            df = spark.read.csv(path=path, header=has_header, escape='"', quote='"')
        else:  # PARQUET
            df = spark.read.parquet(path)
        return default_spark(df)
    except Exception as e:
        raise RuntimeError("An error occurred while reading files from S3") from e
def infer_and_cast_type(df, spark, inference_data_sample_size=1000, trained_parameters=None):
"""Infer column types for spark dataframe and cast to inferred data type.
Args:
df: spark dataframe
spark: spark session
inference_data_sample_size: number of row data used for type inference
trained_parameters: trained_parameters to determine if we need infer data types
Returns: a dict of pyspark df with column data type casted and trained parameters
"""
from pyspark.sql.utils import AnalysisException
# if trained_parameters is none or doesn't contain schema key, then type inference is needed
if trained_parameters is None or not trained_parameters.get("schema", None):
# limit first 1000 rows to do type inference
limit_df = df.limit(inference_data_sample_size)
schema = type_inference(limit_df)
else:
schema = trained_parameters["schema"]
try:
validate_schema(df, schema)
except ValueError as e:
raise OperatorCustomerError(e)
try:
df, schema = cast_df(df, schema)
except (AnalysisException, ValueError) as e:
raise OperatorCustomerError(e)
trained_parameters = {"schema": schema}
return default_spark_with_trained_parameters(df, trained_parameters)
def custom_pandas(df, spark, code):
""" Apply custom pandas code written by the user on the input dataframe.
Right now only pyspark dataframe is supported as input, so the pyspark df is
converted to pandas df before the custom pandas code is being executed.
The output df is converted back to pyspark df before getting returned.
Example:
The custom code expects the user to provide an output df.
code = \"""
import pandas as pd
df = pd.get_dummies(df['country'], prefix='country')
\"""
Notes:
This operation expects the user code to store the output in df variable.
Args:
spark: Spark Session
params (dict): dictionary that has various params. Required param for this operation is "code"
df: pyspark dataframe
Returns:
df: pyspark dataframe with the custom pandas code executed on the input df.
"""
import ast
exec_block = ast.parse(code, mode="exec")
if len(exec_block.body) == 0:
return default_spark(df)
pandas_df = df.toPandas()
_globals, _locals = {}, {"df": pandas_df}
stdout = capture_stdout(exec, compile(exec_block, "<string>", mode="exec"), _locals) # pylint: disable=W0122
pandas_df = eval("df", _globals, _locals) # pylint: disable=W0123
# find list of columns with all None values and fill with empty str.
null_columns = pandas_df.columns[pandas_df.isnull().all()].tolist()
pandas_df[null_columns] = pandas_df[null_columns].fillna("")
# convert the mixed cols to str, since pyspark df does not support mixed col.
df = convert_or_coerce(pandas_df, spark)
    # loop until every mixed-type field has been coerced and the conversion succeeds
while not isinstance(df, DataFrame):
df = convert_or_coerce(df, spark)
return default_spark_with_stdout(df, stdout)
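# Illustrative only (assumes the SparkSession above; never called): the user
# code mutates the pandas `df` in place and the result round-trips to pyspark.
def _custom_pandas_example():
    demo_df = spark.createDataFrame([(1,), (2,)], ["x"])
    return custom_pandas(demo_df, spark, "df['y'] = df['x'] * 2")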
def format_string(df, spark, **kwargs):
return dispatch(
"operator",
[df],
kwargs,
{
"Lower case": (format_string_lower_case, "lower_case_parameters"),
"Upper case": (format_string_upper_case, "upper_case_parameters"),
"Title case": (format_string_title_case, "title_case_parameters"),
"Capitalize": (format_string_capitalize, "capitalize_parameters"),
"Swap case": (format_string_swap_case, "swap_case_parameters"),
"Left pad": (format_string_left_pad, "left_pad_parameters"),
"Right pad": (format_string_right_pad, "right_pad_parameters"),
"Center (pad on either side)": (
format_string_center_pad_on_either_side,
"center_pad_on_either_side_parameters",
),
"Strip characters from left": (
format_string_strip_characters_from_left,
"strip_characters_from_left_parameters",
),
"Strip characters from right": (
format_string_strip_characters_from_right,
"strip_characters_from_right_parameters",
),
"Strip left and right": (format_string_strip_left_and_right, "strip_left_and_right_parameters"),
"Prepend zeros": (format_string_prepend_zeros, "prepend_zeros_parameters"),
"Add prefix": (format_string_add_prefix, "add_prefix_parameters"),
"Add suffix": (format_string_add_suffix, "add_suffix_parameters"),
"Remove symbols": (format_string_remove_symbols, "remove_symbols_parameters"),
},
)
def encode_categorical(df, spark, **kwargs):
return dispatch(
"operator",
[df],
kwargs,
{
"Ordinal encode": (encode_categorical_ordinal_encode, "ordinal_encode_parameters"),
"One-hot encode": (encode_categorical_one_hot_encode, "one_hot_encode_parameters"),
},
)
def cast_single_data_type( # noqa: C901
df,
spark,
column,
data_type,
non_castable_data_handling_method="replace_null",
replace_value=None,
date_formatting="dd-MM-yyyy",
):
"""Cast pyspark dataframe column type
Args:
column: column name e.g.: "col_1"
data_type: data type to cast to
non_castable_data_handling_method:
supported method:
("replace_null","replace_null_with_new_col", "replace_value","replace_value_with_new_col","drop")
If not specified, it will use the default method replace_null.
see casting.NonCastableDataHandlingMethod
replace_value: value to replace non-castable data
date_formatting: date format to cast to
Returns: df: pyspark df with column data type casted
"""
from pyspark.sql.utils import AnalysisException
supported_type = MohaveDataType.get_values()
df_cols = df.columns
# Validate input params
if column not in df_cols:
raise OperatorCustomerError(
f"Invalid column name. {column} is not in current columns {df_cols}. Please use a valid column name."
)
if data_type not in supported_type:
raise OperatorCustomerError(
f"Invalid data_type. {data_type} is not in {supported_type}. Please use a supported data type."
)
support_invalid_data_handling_method = NonCastableDataHandlingMethod.get_values()
if non_castable_data_handling_method not in support_invalid_data_handling_method:
raise OperatorCustomerError(
f"Invalid data handling method. "
f"{non_castable_data_handling_method} is not in {support_invalid_data_handling_method}. "
f"Please use a supported method."
)
mohave_data_type = MohaveDataType(data_type)
spark_data_type = [f.dataType for f in df.schema.fields if f.name == column]
if isinstance(spark_data_type[0], MOHAVE_TO_SPARK_TYPE_MAPPING[mohave_data_type]):
return default_spark(df)
try:
df = cast_single_column_type(
df,
column=column,
mohave_data_type=MohaveDataType(data_type),
invalid_data_handling_method=NonCastableDataHandlingMethod(non_castable_data_handling_method),
replace_value=replace_value,
date_formatting=date_formatting,
)
except (AnalysisException, ValueError) as e:
raise OperatorCustomerError(e)
return default_spark(df)
op_1_output = s3_source(spark=spark, mode=mode, **{'dataset_definition': {'__typename': 'S3CreateDatasetDefinitionOutput', 'datasetSourceType': 'S3', 'name': 'claims.csv', 'description': None, 's3ExecutionContext': {'__typename': 'S3ExecutionContext', 's3Uri': 's3://sagemaker-us-east-1-870180618679/fraud-detect-demo/data/raw/claims.csv', 's3ContentType': 'csv', 's3HasHeader': True}}})
op_2_output = infer_and_cast_type(op_1_output['default'], spark=spark, **{})
op_3_output = custom_pandas(op_2_output['default'], spark=spark, **{'code': '# Table is available as variable `df`\ncat_cols = df.dtypes[df.dtypes == object].index\ndf[cat_cols] = df[cat_cols].apply(lambda x: x.str.lower())\n'})
op_4_output = format_string(op_3_output['default'], spark=spark, **{'operator': 'Remove symbols', 'remove_symbols_parameters': {'symbols': '!@#$%^&*()_+=-/\\`~{}|<>?', 'input_column': 'driver_relationship'}, 'lower_case_parameters': {}})
op_5_output = format_string(op_4_output['default'], spark=spark, **{'operator': 'Remove symbols', 'remove_symbols_parameters': {'symbols': '!@#$%^&*()_+=-/\\`~{}|<>?', 'input_column': 'collision_type'}, 'lower_case_parameters': {}})
op_6_output = format_string(op_5_output['default'], spark=spark, **{'operator': 'Remove symbols', 'remove_symbols_parameters': {'symbols': '!@#$%^&*()_+=-/\\`~{}|<>?', 'input_column': 'incident_type'}, 'lower_case_parameters': {}})
op_7_output = encode_categorical(op_6_output['default'], spark=spark, **{'operator': 'One-hot encode', 'one_hot_encode_parameters': {'invalid_handling_strategy': 'Keep', 'drop_last': False, 'output_style': 'Columns', 'input_column': 'driver_relationship'}, 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN'}})
op_8_output = encode_categorical(op_7_output['default'], spark=spark, **{'operator': 'One-hot encode', 'one_hot_encode_parameters': {'invalid_handling_strategy': 'Keep', 'drop_last': False, 'output_style': 'Columns', 'input_column': 'incident_type'}, 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN'}})
op_9_output = encode_categorical(op_8_output['default'], spark=spark, **{'operator': 'One-hot encode', 'one_hot_encode_parameters': {'invalid_handling_strategy': 'Keep', 'drop_last': False, 'output_style': 'Columns', 'input_column': 'collision_type'}, 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN'}})
op_10_output = encode_categorical(op_9_output['default'], spark=spark, **{'operator': 'One-hot encode', 'one_hot_encode_parameters': {'invalid_handling_strategy': 'Keep', 'drop_last': False, 'output_style': 'Columns', 'input_column': 'authorities_contacted'}, 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN'}})
op_11_output = encode_categorical(op_10_output['default'], spark=spark, **{'operator': 'Ordinal encode', 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN', 'input_column': 'incident_severity'}})
op_12_output = encode_categorical(op_11_output['default'], spark=spark, **{'operator': 'Ordinal encode', 'ordinal_encode_parameters': {'invalid_handling_strategy': 'Replace with NaN', 'input_column': 'police_report_available'}})
op_13_output = custom_pandas(op_12_output['default'], spark=spark, **{'code': "# Table is available as variable `df`\nimport pandas as pd\ndf['event_time'] = pd.to_datetime('now').timestamp()"})
op_14_output = cast_single_data_type(op_13_output['default'], spark=spark, **{'column': 'police_report_available', 'original_data_type': 'Float', 'data_type': 'long'})
op_15_output = cast_single_data_type(op_14_output['default'], spark=spark, **{'column': 'authorities_contacted_fire', 'original_data_type': 'Float', 'data_type': 'long'})
op_16_output = cast_single_data_type(op_15_output['default'], spark=spark, **{'column': 'authorities_contacted_ambulance', 'original_data_type': 'Float', 'data_type': 'long'})
op_17_output = cast_single_data_type(op_16_output['default'], spark=spark, **{'column': 'authorities_contacted_none', 'original_data_type': 'Float', 'data_type': 'long'})
op_18_output = cast_single_data_type(op_17_output['default'], spark=spark, **{'column': 'authorities_contacted_police', 'original_data_type': 'Float', 'data_type': 'long'})
op_19_output = cast_single_data_type(op_18_output['default'], spark=spark, **{'column': 'collision_type_na', 'original_data_type': 'Float', 'data_type': 'long'})
op_20_output = cast_single_data_type(op_19_output['default'], spark=spark, **{'column': 'collision_type_side', 'original_data_type': 'Float', 'data_type': 'long'})
op_21_output = cast_single_data_type(op_20_output['default'], spark=spark, **{'column': 'incident_type_theft', 'original_data_type': 'Float', 'data_type': 'long'})
op_22_output = cast_single_data_type(op_21_output['default'], spark=spark, **{'column': 'incident_type_breakin', 'original_data_type': 'Float', 'data_type': 'long'})
op_23_output = cast_single_data_type(op_22_output['default'], spark=spark, **{'column': 'incident_type_collision', 'original_data_type': 'Float', 'data_type': 'long'})
op_24_output = cast_single_data_type(op_23_output['default'], spark=spark, **{'column': 'driver_relationship_other', 'original_data_type': 'Float', 'data_type': 'long'})
op_25_output = cast_single_data_type(op_24_output['default'], spark=spark, **{'column': 'driver_relationship_child', 'original_data_type': 'Float', 'data_type': 'long'})
op_26_output = cast_single_data_type(op_25_output['default'], spark=spark, **{'column': 'driver_relationship_spouse', 'original_data_type': 'Float', 'data_type': 'long'})
op_27_output = cast_single_data_type(op_26_output['default'], spark=spark, **{'column': 'driver_relationship_na', 'original_data_type': 'Float', 'data_type': 'long'})
op_28_output = cast_single_data_type(op_27_output['default'], spark=spark, **{'column': 'driver_relationship_self', 'original_data_type': 'Float', 'data_type': 'long'})
op_29_output = cast_single_data_type(op_28_output['default'], spark=spark, **{'column': 'total_claim_amount', 'original_data_type': 'Long', 'data_type': 'float'})
op_30_output = cast_single_data_type(op_29_output['default'], spark=spark, **{'column': 'vehicle_claim', 'original_data_type': 'Long', 'data_type': 'float'})
op_31_output = cast_single_data_type(op_30_output['default'], spark=spark, **{'column': 'injury_claim', 'original_data_type': 'Long', 'data_type': 'float'})
op_32_output = cast_single_data_type(op_31_output['default'], spark=spark, **{'column': 'incident_severity', 'original_data_type': 'Float', 'data_type': 'long'})
op_33_output = cast_single_data_type(op_32_output['default'], spark=spark, **{'column': 'collision_type_rear', 'original_data_type': 'Float', 'data_type': 'long'})
op_34_output = cast_single_data_type(op_33_output['default'], spark=spark, **{'column': 'collision_type_front', 'original_data_type': 'Float', 'data_type': 'long'})
# Glossary: variable name to node_id
#
# op_1_output: e5d60d4f-6284-4a68-a788-39787909ebc9
# op_2_output: 1626aef2-cad0-4922-a496-34586d3def90
# op_3_output: 921ad4e5-3812-4651-b3cd-fecc38e0bba6
# op_4_output: e90bfd12-d702-4c79-8e11-5e4011600fda
# op_5_output: 10ca6bec-5d89-43bd-ba3e-7b5e839e9d23
# op_6_output: 09a3e1c7-29c5-46e8-8690-4cdaf905628a
# op_7_output: a5333162-b98e-41f4-8b18-5bb68b98a615
# op_8_output: de96bceb-ac8c-40d4-a9e8-5a777b44437c
# op_9_output: 2e5cb36f-b0a5-4763-9740-2ef60f3c6376
# op_10_output: e20b22bc-12aa-469e-9d6c-c4083ddedf08
# op_11_output: cd48da2b-c648-41e2-b828-38b518fc795d
# op_12_output: 9ff4c2a4-e1fe-4a66-819f-f7d8df7a5fed
# op_13_output: 67fdfb06-278a-4360-8454-3ec1abf1ddd3
# op_14_output: f324323b-8d6e-4e6f-a362-eb9405c2aabf
# op_15_output: c3e12fe4-0792-4627-a728-43bb51312196
# op_16_output: 0e239308-1ad0-421b-9e8f-36011d83a6b6
# op_17_output: 93d33455-1cf2-485d-aaf7-5ae5c7d5197e
# op_18_output: 0eff4390-66cb-423a-bad4-40fc368aa6d4
# op_19_output: 488aa71e-881e-451e-be8b-5d960530715c
# op_20_output: 38ede4e0-ac3c-4e73-9b5e-ad5216997520
# op_21_output: dc6b5e18-0423-4940-afbe-57e24560400e
# op_22_output: acb3f245-c50b-4174-8295-5b933500568b
# op_23_output: 9dfb1906-6d9a-45d3-ac92-d5f3f535ff8a
# op_24_output: a65a0721-7696-4e64-9e65-2423dc65163e
# op_25_output: 9b23353f-c840-4bed-9368-636a249038b7
# op_26_output: 1c9c8473-9ccb-4ce4-a2a0-db15c648565e
# op_27_output: a00c44fb-3704-4e1c-bda7-887f0feae6a6
# op_28_output: 7bd2e323-2ef9-4329-844a-0f2406ff980c
# op_29_output: 7132ae96-da8f-4a02-8708-0bc233a5ecd2
# op_30_output: 10a9d871-4660-44fb-aa9f-14f14a4f5a44
# op_31_output: 01b14795-ad16-4e33-af09-5cedf7a3b806
# op_32_output: 8328f4cc-9adf-47e7-9752-9ab1cf83e7b9
# op_33_output: 6d59ff64-c095-4f20-a7ba-c4a49e842c7c
# op_34_output: 62d710d9-a288-4004-b960-6cf452c0380c
| 40.545395
| 387
| 0.679613
|
7949c22e3d0f8a9f1429ac74fb8b1711cc87c423
| 5,906
|
py
|
Python
|
assignment5/linkshort/linkshort_test.py
|
rahulraj/web_projects
|
4d16cb7d7a9c0e408d5f44165dbdc050bbaceddd
|
[
"MIT"
] | 1
|
2017-04-25T07:06:22.000Z
|
2017-04-25T07:06:22.000Z
|
assignment5/linkshort/linkshort_test.py
|
rahulraj/web_projects
|
4d16cb7d7a9c0e408d5f44165dbdc050bbaceddd
|
[
"MIT"
] | null | null | null |
assignment5/linkshort/linkshort_test.py
|
rahulraj/web_projects
|
4d16cb7d7a9c0e408d5f44165dbdc050bbaceddd
|
[
"MIT"
] | null | null | null |
import unittest
import tempfile
import os
import simplejson as json
from functools import partial
import __init__ as linkshort
class LinkShortTest(unittest.TestCase):
def setUp(self):
self.database_handle, linkshort.app.database = tempfile.mkstemp()
self.app = linkshort.app.test_client()
linkshort.initialize_database()
def test_get_index(self):
"""
Send a GET request to /; it should return a template.
The test won't inspect the template too closely, because the
goal is to test the controller, not the presentation logic.
i.e. if the GUI is redesigned, these tests should not break.
"""
output = self.app.get('/')
self.assertTrue('Link Shortener' in output.data)
def register(self, username, password, confirmation):
return self.app.post('/register', data=dict(
username=username,
password=password,
confirmPassword=confirmation
), follow_redirects=True)
def register_test_user(self):
return self.register('test_user', 'test_password', 'test_password')
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def login_test_user(self):
return self.login('test_user', 'test_password')
def logout(self):
return self.app.get('/logout', follow_redirects=True)
def dict_from_request(self, request_function):
"""
Executes request_function, and then converts the JSON
response into a dict. Returns the dict.
"""
result = request_function()
return json.loads(result.data)
def test_simple_registration(self):
result = self.dict_from_request(self.register_test_user)
self.assertTrue(result['success'])
def test_invalid_confirmation_in_registration(self):
invalid_register = partial(self.register, 'another_user', 'paswod', 'password')
result = self.dict_from_request(invalid_register)
self.assertFalse(result['success'])
def test_duplicate_username_fails_registration(self):
self.register_test_user()
result = self.dict_from_request(self.register_test_user)
self.assertFalse(result['success'])
def test_blank_password_fails_registration(self):
invalid_register = partial(self.register, 'user', '', '')
result = self.dict_from_request(invalid_register)
self.assertFalse(result['success'])
def test_spaces_password_fails_registration(self):
invalid_register = partial(self.register, 'user', ' ', ' ')
result = self.dict_from_request(invalid_register)
self.assertFalse(result['success'])
def test_spaces_in_password_fails_registration(self):
invalid_register = partial(self.register, 'user', 'hi there', 'hi there')
result = self.dict_from_request(invalid_register)
self.assertFalse(result['success'])
def test_login_fails_with_new_user(self):
invalid_login = partial(self.login, 'user', 'password')
result = self.dict_from_request(invalid_login)
self.assertFalse(result['success'])
def test_login_succeeds_after_registration(self):
self.register_test_user()
result = self.dict_from_request(self.login_test_user)
self.assertTrue(result['success'])
def add_page(self, original_url, shortened_url):
return self.app.post('/pages', data=dict(
originalUrl=original_url,
outputUrl=shortened_url
), follow_redirects=True)
def get_pages(self):
return self.app.get('/pages', follow_redirects=True)
def test_no_pages_initially(self):
self.register_test_user()
pages = self.dict_from_request(self.get_pages)
self.assertEqual(0, len(pages['pages']))
def test_shorten_page_fails_for_blank_page(self):
self.register_test_user()
invalid_add_page = partial(self.add_page, '', '')
result = self.dict_from_request(invalid_add_page)
self.assertFalse(result['success'])
def test_get_pages_fails_when_no_user(self):
result = self.get_pages()
self.assertEqual(401, result.status_code)
def test_shorten_pages_fails_when_no_user(self):
result = self.add_page('www.google.com', '')
self.assertEqual(401, result.status_code)
def test_simple_shorten_page(self):
self.register_test_user()
add_page = partial(self.add_page, 'www.google.com', '')
result = self.dict_from_request(add_page)
self.assertTrue(result['success'])
def test_shorten_page_provided_value(self):
self.register_test_user()
add_page = partial(self.add_page, 'www.google.com', 'search')
result = self.dict_from_request(add_page)
self.assertTrue(result['success'])
self.assertEqual('search', result['shortenedUrl'])
def test_shortened_urls_must_be_unique(self):
self.test_shorten_page_provided_value()
add_page = partial(self.add_page, 'www.duckduckgo.com', 'search')
result = self.dict_from_request(add_page)
self.assertFalse(result['success'])
def test_shortened_urls_cant_have_forward_slashes(self):
self.register_test_user()
add_page = partial(self.add_page, 'www.google.com', 'a/search/engine')
result = self.dict_from_request(add_page)
self.assertFalse(result['success'])
def test_shortened_pages_are_added_to_users_pages(self):
self.test_simple_shorten_page()
result = self.dict_from_request(self.get_pages)
self.assertEqual(1, len(result['pages']))
def test_no_page_visits_initially(self):
self.test_simple_shorten_page()
result = self.dict_from_request(self.get_pages)
self.assertEqual(0, len(result['pages'][0]['visits']))
def test_page_visits_are_logged(self):
self.test_shorten_page_provided_value()
self.app.get('/search')
result = self.dict_from_request(self.get_pages)
self.assertEqual(1, len(result['pages'][0]['visits']))
def tearDown(self):
os.close(self.database_handle)
os.unlink(linkshort.app.database)
if __name__ == '__main__':
unittest.main()
| 35.154762
| 83
| 0.734677
|
7949c24318db3531fe578c4e1d6e70e7aa965cb8
| 1,133
|
py
|
Python
|
IntroToCS_ex4/check_update_word_pattern.py
|
nadavWeisler/IntroToCS
|
725fc8822eeb34f6917692846689dee29b24af55
|
[
"MIT"
] | null | null | null |
IntroToCS_ex4/check_update_word_pattern.py
|
nadavWeisler/IntroToCS
|
725fc8822eeb34f6917692846689dee29b24af55
|
[
"MIT"
] | null | null | null |
IntroToCS_ex4/check_update_word_pattern.py
|
nadavWeisler/IntroToCS
|
725fc8822eeb34f6917692846689dee29b24af55
|
[
"MIT"
] | null | null | null |
#############################################################
# FILE : check_update_word_pattern.py
# WRITER : Nadav Weisler , weisler , 316493758
# EXERCISE : intro2cs ex4 2019 - Hangman
# DESCRIPTION: tests several inputs for the
# function "update_word_pattern" from hangman.py
#############################################################
from hangman import update_word_pattern
def test_update_word_pattern():
"""Function that test several inputs for the
function "update_word_pattern from hangman.py"""
words = [
'hello',
'hello',
'hello',
'hello'
]
patterns = [
'_____',
'hell_',
'hell_',
'h____'
]
letters = [
'h',
'h',
'o',
'd'
]
results = [
"h____",
"hell_",
"hello",
"h____"
]
for i in range(len(words)):
if update_word_pattern(words[i], patterns[i], letters[i]) != results[i]:
print("Test failed")
return False
print("Test done!")
return True
if __name__ == '__main__':
test_update_word_pattern()
| 22.66
| 80
| 0.494263
|
7949c3ba293422c43e32702126015f51b411990d
| 7,664
|
py
|
Python
|
pet_ct/data/labeler.py
|
seyuboglu/weakly-supervised-petct
|
182821ae6b6abe1bc3623692aeba85da8083b27b
|
[
"Apache-2.0"
] | 9
|
2020-08-09T23:25:41.000Z
|
2022-03-09T20:24:02.000Z
|
pet_ct/data/labeler.py
|
seyuboglu/weakly-supervised-petct
|
182821ae6b6abe1bc3623692aeba85da8083b27b
|
[
"Apache-2.0"
] | 4
|
2020-09-17T10:47:09.000Z
|
2022-03-12T00:20:26.000Z
|
pet_ct/data/labeler.py
|
geoffreyangus/pet-ct
|
fa96a07734afade475f6a1e1587ec14965fe2de3
|
[
"Apache-2.0"
] | 1
|
2021-04-12T17:42:02.000Z
|
2021-04-12T17:42:02.000Z
|
"""
noisy labeling functions to be used for weak supervision tasks.
"""
import re
from collections import Counter
import pet_ct.data.term_graphs as term_graphs
import pet_ct.data.match_fns as match_fns
class Labeler(object):
"""
Labeler must always return a tuple of `labels` and a list of `edges`,
denoting each label by its index in `labels` list.
"""
def __init__(self, term_graph_class="", term_graph_args={}):
"""
"""
self.term_graph = getattr(term_graphs, term_graph_class)(**term_graph_args)
def _contains_term(self, c, name="", neg_regexes=[],
hit_code=2, miss_code=1, is_root=True):
"""returns hit_code if name is in s, else miss_code
Uses the term regexes specified in term_graph to find positions. If
term is the parent of another term, it will continue down the term
graph until it finds a term or reaches a leaf. If there is a hit, the
all node and all of its ancestors return hit_code, while the rest of the
graph returns abstentions. If there is no hit, the entire tree returns
the miss_code.
We use strings in order to allow for the use of more elaborate phrasal
regular expressions. Uses BFS when iterating through children.
Args:
c (string) lowercase string containing all tokens of interest
name (string) name of term of interest in term_graph.
Returns:
an integer reflecting metal's paradigm, where 0 is abstain, 1 is
a negative classification, and 2 is a positive classification.
"""
assert name in self.term_graph.name_to_term.keys(), (
f"term graph does not contain {name}"
)
term = self.term_graph.name_to_term[name]
labels = []
names = []
children = term.get("children", [])
for i, child in enumerate(children):
child_labels, child_names = self._contains_term(c, name=child, neg_regexes=neg_regexes,
miss_code=miss_code, hit_code=hit_code,
is_root=False)
labels += child_labels
names += child_names
search_results = Labeler.search(c,
regexes=term.get('match_res', []),
neg_regexes=neg_regexes,
hit_code=hit_code, miss_code=miss_code)
for match_fn_dict in term.get("match_fns", []):
fn = match_fn_dict['fn']
fn_args = match_fn_dict['args']
match = getattr(match_fns, fn)(c, **fn_args)
if match:
negated = False
pos = match['start']
start = max(c[:pos].rfind('.'), 0)
end = pos + c[pos:].find('.') # returns -1 if not found
for neg in neg_regexes:
pattern = re.compile(r"\b" + neg + r"\b")
# search for negations within the same sentence
if re.search(pattern, c[start:pos]) or re.search(pattern, c[pos:end]):
negated = True
break
if not negated:
search_results.append(hit_code)
else:
search_results.append(miss_code)
else:
search_results.append(miss_code)
if len(search_results) > 0:
labels += [max(search_results)]
names.append(name)
# no regexes matched in any of the nodes, set the whole tree to negative
if is_root:
names = {n:i for i, n in enumerate(names)}
if hit_code not in labels:
labels = [miss_code for label in labels]
return labels, names
def contains_term(self, c, names=[], neg_regexes=["no", "not", "without"],
hit_code=2, miss_code=1, aggregate=False):
"""
"""
if not isinstance(names, list):
names = [names]
res = []
for name in names:
labels, _ = self._contains_term(c, name=name, neg_regexes=neg_regexes,
hit_code=hit_code, miss_code=miss_code,
is_root=True)
res += labels
if aggregate:
if res:
res = [max(res)]
else:
res = [miss_code]
# TODO: incorporate edges of G_{source}
return res, []
@staticmethod
def contains_evidence(c, neg_regexes=["no", "not", "without"],
hit_code=2, miss_code=1, aggregate=False):
"""
"""
regexes = [
"evidence of metabolically active disease",
"evidence of metabolically active",
"evidence.*?malignancy",
"evidence.*?disease",
"significant evidence",
"significant.*?activity",
"significant.*?uptake",
"significant",
"definite evidence",
"definite scintigraphic evidence"
"scintigraphic evidence",
"sign.*?malignancy",
"abnormal.*?uptake",
"hypermetabolic activity",
"evidence",
"disease",
"activity",
"uptake",
"malignancy",
]
labels = Labeler.search(c, regexes, neg_regexes=neg_regexes,
hit_code=hit_code, miss_code=miss_code)
if aggregate:
labels = [Labeler.aggregate_labels(labels, agg_type='max')]
return labels, [] # always independent
@staticmethod
def search(c, regexes, neg_regexes=[], hit_code=2, miss_code=1):
"""
"""
labels = []
for regex in regexes:
pattern = re.compile(r"\b" + regex + r"\b")
res = re.search(pattern, c)
if res:
pos = res.start()
start = max(c[:pos].rfind('.'), 0)
end = pos + c[pos:].find('.') # returns -1 if not found
negated = False
for neg in neg_regexes:
pattern = re.compile(r"\b" + neg + r"\b")
# search for negations within the same sentence
if re.search(pattern, c[start:pos]) or re.search(pattern, c[pos:end]):
negated = True
labels.append(miss_code)
break
if not negated:
labels.append(hit_code)
else:
labels.append(0)
return labels
@staticmethod
def extract_from_sources_df(series, cols=[], agg_type=None):
"""
Extracts labels directly from a dataframe series (row).
"""
labels = []
for col in cols:
labels.append(series[col])
if agg_type:
labels = [Labeler.aggregate_labels(labels, agg_type=agg_type)]
return labels, []
@staticmethod
def aggregate_labels(labels, agg_type='max'):
"""
Returns a single (representative) value for a set of labels.
Args:
labels (list) list of prospective labels for given sources
agg_type (string)
"""
if agg_type == 'max':
return max(labels)
elif agg_type == 'majority':
value, count = Counter(labels).most_common(1)[0]
return value
else:
raise ValueError(f"agg_type {agg_type} not recognized.")
| 35.813084
| 99
| 0.522573
|
7949c607dd0195abd13aec31c53174d3358c5e0e
| 3,062
|
py
|
Python
|
src/rock_rhino_image_processor/rock_rhino_image_processor/robot_enable.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
src/rock_rhino_image_processor/rock_rhino_image_processor/robot_enable.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
src/rock_rhino_image_processor/rock_rhino_image_processor/robot_enable.py
|
househear/_ws_moveit2
|
ea5c43ddd412ade6b4bebbdb929b6e08b7a5e888
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1996-2020 Soft_illusion.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used to enable all the sensors and make the robot move
import rclpy
from webots_ros2_core.webots_node import WebotsNode
from geometry_msgs.msg import Twist
DEVICE_CONFIG = {
'camera_mid': {'topic_name': 'camera', 'timestep': 16}
}
class RobotEnable(WebotsNode):
def __init__(self, args):
super().__init__('robot_enable', args)
# self.start_device_manager()
# Front wheels
self.left_motor_front = self.robot.getMotor('left_front_wheel')
self.left_motor_front.setPosition(float('inf'))
self.left_motor_front.setVelocity(0)
self.right_motor_front = self.robot.getMotor('right_front_wheel')
self.right_motor_front.setPosition(float('inf'))
self.right_motor_front.setVelocity(0)
# Rear wheels
self.left_motor_rear = self.robot.getMotor('left_rear_wheel')
self.left_motor_rear.setPosition(float('inf'))
self.left_motor_rear.setVelocity(0)
self.right_motor_rear = self.robot.getMotor('right_rear_wheel')
self.right_motor_rear.setPosition(float('inf'))
self.right_motor_rear.setVelocity(0)
self.motor_max_speed = self.left_motor_rear.getMaxVelocity()
# Create Subscriber
self.cmd_vel_subscriber = self.create_subscription(
Twist, 'cmd_vel', self.cmd_velocity_callback, 1)
self.start_device_manager(DEVICE_CONFIG)
self.get_logger().info('Sensor enabled')
def cmd_velocity_callback(self, msg):
wheel_gap = 0.1 # in meters
wheel_radius = 0.04 # in meters
left_speed = ((2.0 * msg.linear.x - msg.angular.z *
wheel_gap) / (2.0 * wheel_radius))
right_speed = ((2.0 * msg.linear.x + msg.angular.z *
wheel_gap) / (2.0 * wheel_radius))
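# standard differential-drive kinematics: left/right wheel speed =
# (2*v -/+ w*gap) / (2*r); e.g. v = 0.2 m/s with w = 0 gives
# 0.4 / 0.08 = 5 rad/s on both wheels before the clamping below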
left_speed = min(self.motor_max_speed,
max(-self.motor_max_speed, left_speed))
right_speed = min(self.motor_max_speed,
max(-self.motor_max_speed, right_speed))
self.left_motor_front.setVelocity(left_speed)
self.right_motor_front.setVelocity(right_speed)
self.left_motor_rear.setVelocity(left_speed)
self.right_motor_rear.setVelocity(right_speed)
def main(args=None):
rclpy.init(args=args)
robot_object = RobotEnable(args=args)
rclpy.spin(robot_object)
robot_object.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 35.195402
| 74
| 0.683867
|
7949c67473b9681ec59a45d555e98a842a31b776
| 4,419
|
py
|
Python
|
data/stuff.py
|
loganstone/rogue
|
ffcdd6073178e4fe5ae4774ef9d98b185cf1cc9e
|
[
"MIT"
] | null | null | null |
data/stuff.py
|
loganstone/rogue
|
ffcdd6073178e4fe5ae4774ef9d98b185cf1cc9e
|
[
"MIT"
] | null | null | null |
data/stuff.py
|
loganstone/rogue
|
ffcdd6073178e4fe5ae4774ef9d98b185cf1cc9e
|
[
"MIT"
] | null | null | null |
import tcod
from random import randint
from components.equipment import EquipmentSlots
from components.equippable import Equippable
from components.item import Item
from entity import Entity
from render_functions import RenderOrder
from game_messages import Message
from item_functions import (cast_confuse,
cast_fireball,
cast_lightning,
heal)
class ItemBase:
chances = 0
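# spawn weight; subclasses override this with either a flat weight
# (HealingPotion) or what appear to be [[weight, dungeon_level], ...]
# pairs (e.g. Sword, Shield)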
is_consumable = False
is_equippable = False
def __init__(self, name, character, color):
self.name = name
self.character = character
self.color = color
def get_entity(self, x, y):
raise NotImplementedError
class Consumable(ItemBase):
is_consumable = True
def __init__(self, name, character, color, item_component):
ItemBase.__init__(self, name, character, color)
self.item_component = item_component
def get_entity(self, x, y):
return Entity(x, y,
self.character,
self.color,
self.name,
render_order=RenderOrder.ITEM,
item=self.item_component)
class Scroll(Consumable):
def __init__(self, name, color, item_component):
Consumable.__init__(self, name, '#', color, item_component)
class Potion(Consumable):
def __init__(self, name, color, item_component):
Consumable.__init__(self, name, '!', color, item_component)
class Equipment(ItemBase):
is_equippable = True
def __init__(self, name, character, color, equippable_component):
ItemBase.__init__(self, name, character, color)
self.equippable_component = equippable_component
def get_entity(self, x, y):
return Entity(x, y,
self.character,
self.color,
self.name,
equippable=self.equippable_component)
class HealingPotion(Potion):
chances = 35
def __init__(self):
amount = randint(30, 40)
Potion.__init__(
self,
'Healing Potion',
tcod.violet,
Item(use_function=heal, amount=amount))
class LightningBoltScroll(Scroll):
chances = [[25, 4]]
def __init__(self):
damage = randint(10, 50)
Scroll.__init__(
self,
'Lightning Bolt Scroll',
tcod.yellow,
Item(use_function=cast_lightning,
damage=damage, maximum_range=5))
class FireballScroll(Scroll):
chances = [[25, 6]]
def __init__(self):
text = ('Left-click a target tile for the fireball,'
' or right-click to cancel.')
damage = randint(20, 25)
Scroll.__init__(
self,
'Fireball Scroll',
tcod.red,
Item(use_function=cast_fireball,
targeting=True,
targeting_message=Message(text, tcod.light_cyan),
damage=damage, radius=3))
class ConfusionScroll(Scroll):
chances = [[10, 2]]
def __init__(self):
text = ('Left-click an enemy to confuse it,'
' or right-click to cancel.')
Scroll.__init__(
self,
'Confusion Scroll',
tcod.light_pink,
Item(use_function=cast_confuse,
targeting=True,
targeting_message=Message(text, tcod.light_cyan),
number_of_turns=10))
class Dagger(Equipment):
def __init__(self):
Equipment.__init__(
self,
'Dagger',
'_',
tcod.sky,
Equippable(EquipmentSlots.MAIN_HAND, power_bonus=2))
class Sword(Equipment):
chances = [[5, 4]]
def __init__(self):
Equipment.__init__(
self,
'Sword',
'/',
tcod.sky,
Equippable(EquipmentSlots.MAIN_HAND, power_bonus=4))
class Shield(Equipment):
chances = [[15, 8]]
def __init__(self):
Equipment.__init__(
self,
'Shield',
'[',
tcod.darker_orange,
Equippable(EquipmentSlots.OFF_HAND, defense_bonus=1))
ITEMS = [
# Potions
HealingPotion,
# Scrolls
LightningBoltScroll,
FireballScroll,
ConfusionScroll,
# Equipment
Sword,
Shield
]
| 24.016304
| 69
| 0.572301
|
7949c82dd193e835afd703e167c31176ce1d93c9
| 3,938
|
py
|
Python
|
tests/unit/states/grafana_datasource_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 2
|
2015-09-21T14:13:30.000Z
|
2016-02-12T11:33:46.000Z
|
tests/unit/states/grafana_datasource_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2019-09-06T13:57:28.000Z
|
2019-09-06T13:57:28.000Z
|
tests/unit/states/grafana_datasource_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 2
|
2017-01-05T16:14:59.000Z
|
2019-01-31T23:15:25.000Z
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
Mock,
MagicMock,
patch
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import grafana_datasource
grafana_datasource.__opts__ = {}
grafana_datasource.__salt__ = {}
profile = {
'grafana_url': 'http://grafana',
'grafana_token': 'token',
}
def mock_json_response(data):
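# Build a requests-style response mock whose .json() returns the given payload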
response = MagicMock()
response.json = MagicMock(return_value=data)
return Mock(return_value=response)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GrafanaDatasourceTestCase(TestCase):
def test_present(self):
with patch('requests.get', mock_json_response([])):
with patch('requests.post') as rpost:
ret = grafana_datasource.present('test', 'type', 'url', profile=profile)
rpost.assert_called_once_with(
'http://grafana/api/datasources',
grafana_datasource._get_json_data('test', 'type', 'url'),
headers={'Authorization': 'Bearer token', 'Accept': 'application/json'},
timeout=3
)
self.assertTrue(ret['result'])
self.assertEqual(ret['comment'], 'New data source test added')
data = grafana_datasource._get_json_data('test', 'type', 'url')
data.update({'id': 1, 'orgId': 1})
with patch('requests.get', mock_json_response([data])):
with patch('requests.put') as rput:
ret = grafana_datasource.present('test', 'type', 'url', profile=profile)
rput.assert_called_once_with(
'http://grafana/api/datasources/1',
grafana_datasource._get_json_data('test', 'type', 'url'),
headers={'Authorization': 'Bearer token', 'Accept': 'application/json'},
timeout=3
)
self.assertTrue(ret['result'])
self.assertEqual(ret['comment'], 'Data source test already up-to-date')
self.assertEqual(ret['changes'], None)
with patch('requests.put') as rput:
ret = grafana_datasource.present('test', 'type', 'newurl', profile=profile)
rput.assert_called_once_with(
'http://grafana/api/datasources/1',
grafana_datasource._get_json_data('test', 'type', 'newurl'),
headers={'Authorization': 'Bearer token', 'Accept': 'application/json'},
timeout=3
)
self.assertTrue(ret['result'])
self.assertEqual(ret['comment'], 'Data source test updated')
self.assertEqual(ret['changes'], {'old': {'url': 'url'}, 'new': {'url': 'newurl'}})
def test_absent(self):
with patch('requests.get', mock_json_response([])):
with patch('requests.delete') as rdelete:
ret = grafana_datasource.absent('test', profile=profile)
rdelete.assert_not_called()
self.assertTrue(ret['result'])
self.assertEqual(ret['comment'], 'Data source test already absent')
with patch('requests.get', mock_json_response([{'name': 'test', 'id': 1}])):
with patch('requests.delete') as rdelete:
ret = grafana_datasource.absent('test', profile=profile)
rdelete.assert_called_once_with(
'http://grafana/api/datasources/1',
headers={'Authorization': 'Bearer token', 'Accept': 'application/json'},
timeout=3
)
self.assertTrue(ret['result'])
self.assertEqual(ret['comment'], 'Data source test was deleted')
| 40.597938
| 99
| 0.579736
|
7949c87e8954ad60872aa0ad909597e63eadba92
| 2,425
|
py
|
Python
|
multi-scale-economic-dynamics/control.py
|
sitar777/math-modeling-2022
|
cea86c26eac5002f31a69414059e593143598f77
|
[
"Apache-2.0"
] | null | null | null |
multi-scale-economic-dynamics/control.py
|
sitar777/math-modeling-2022
|
cea86c26eac5002f31a69414059e593143598f77
|
[
"Apache-2.0"
] | null | null | null |
multi-scale-economic-dynamics/control.py
|
sitar777/math-modeling-2022
|
cea86c26eac5002f31a69414059e593143598f77
|
[
"Apache-2.0"
] | null | null | null |
from scipy.integrate import RK45
import matplotlib.pyplot as plt
COEFFS_FRANCE = [
.0221,
.4608,
-.0077,
.5585,
-2.0733,
.1453,
.0067,
1.1813,
-0.51,
]
def ode_system(t, v):
coeffs = COEFFS_FRANCE
control = [
[-0.016, 0],
[-15, 0.1],
[0, 0],
]
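# each control row [k, c] adds a linear feedback term k*var + c to the
# corresponding equation (rows map to the x, y and z equations below)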
x, y, z = v
dinputdt = [
coeffs[0] * x * (1 - x) + coeffs[1] * x * z / (1 + z) + coeffs[2] * x * y + control[0][0] * x + control[0][1],
coeffs[3] * y * (1 - y) + coeffs[4] * y * z / (1 + z) + coeffs[5] * x * y + control[1][0] * y + control[1][1],
(coeffs[6] * z * (1 - z) + coeffs[7] * z * x / (1 + x) + coeffs[8] * z * y / (1 + y) +
control[2][0] * z + control[2][1]),
]
return dinputdt
if __name__ == '__main__':
fig, axs = plt.subplots(2, 2)
axs[0, 0].remove()
axs[0, 0] = fig.add_subplot(2, 2, 1, projection='3d')
x_range = list(range(0, 80, 20))
y_range = list(range(0, 80, 20))
z_range = list(range(70, 150, 20))
v0 = (20.72, 2.71, 113.58)
solution = RK45(ode_system, 0, v0, 1000, 1)
t_values = []
y_values = []
while True:
# get solution step state
t_values.append(solution.t)
y_values.append(solution.y.tolist())
# break loop after modeling is finished
if solution.status == 'finished':
break
if solution.status == 'failed':
break
solution.step()
x, y, z = zip(*y_values)
axs[0, 0].plot(x, y, z, color='r')
axs[0, 0].plot(v0[0], v0[1], v0[2], marker='o', color='g')
axs[0, 0].plot(x[-1], y[-1], z[-1], marker='o', color='b')
print(x[-1], y[-1], z[-1])
axs[0, 1].plot(x, y, color='r')
axs[0, 1].plot(v0[0], v0[1], marker='o', color='g')
axs[0, 1].plot(x[-1], y[-1], marker='o', color='b')
axs[1, 0].plot(y, z, color='r')
axs[1, 0].plot(v0[1], v0[2], marker='o', color='g')
axs[1, 0].plot(y[-1], z[-1], marker='o', color='b')
axs[1, 1].plot(x, z, color='r')
axs[1, 1].plot(v0[0], v0[2], marker='o', color='g')
axs[1, 1].plot(x[-1], z[-1], marker='o', color='b')
axs[0, 1].set_xlabel('x')
axs[0, 1].set_ylabel('y')
axs[1, 0].set_xlabel('y')
axs[1, 0].set_ylabel('z')
axs[1, 1].set_xlabel('x')
axs[1, 1].set_ylabel('z')
axs[0, 0].set_xlabel('x')
axs[0, 0].set_ylabel('y')
axs[0, 0].set_zlabel('z')
plt.show()
| 24.744898
| 118
| 0.489072
|
7949c8853532a536be5126e01c4f6b29fb279038
| 14,319
|
py
|
Python
|
StichensisTest/TreeCalcs.py
|
northcoastmountainrat/Sitchensis
|
8e292dd91dd915495a04ab0abd9a606db7fd9ab4
|
[
"MIT"
] | null | null | null |
StichensisTest/TreeCalcs.py
|
northcoastmountainrat/Sitchensis
|
8e292dd91dd915495a04ab0abd9a606db7fd9ab4
|
[
"MIT"
] | 12
|
2018-04-04T20:27:52.000Z
|
2018-05-02T17:42:59.000Z
|
StichensisTest/TreeCalcs.py
|
northcoastmountainrat/Sitchensis
|
8e292dd91dd915495a04ab0abd9a606db7fd9ab4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 12:46:48 2017
@author: rdk10
"""
import numpy as np
import pandas as pd #For storing data as a data.frame
#import os, sys
##import numpy as np
#os.chdir('C:/Users/rdk10/Desktop/ComputerProgramming/Python/TreeCalcs') #use this to nav to where files are stored #This adds the directory of the CrownMapFunctions module to the search path
#sys.path.append('C:/Users/rdk10/Desktop/ComputerProgramming/Python/TreeCalcs')
#sys.path.append('C:/Users/rdk10/Desktop/ComputerProgramming/Python/TreeViz')
import Functions as f #This accesses the file with function definitions
import Routines as rt
#import numbers as n
import pdb
#import temp as tmp
#import numbers
def calculateTree(treeData, custRefs, mapType, treeName, outDir, intermediateFiles = False):
##This is a giant function that brings in a dictionary of tree data and does all the calculations on it,
#
#Inputs: dictionary of trunk, segment, and branch data, a dictionary with any custom references, a treeName and a logFileName
#Outputs a dictionary with calculated Trunks, Segments, Branches, a logFile written to the current directory
#The calculated dictionary can be directly input to the Plot tree routine.
treePathAndName = outDir + '/' + treeName
logFileName = '{0}_ProcessLog.txt'.format(treePathAndName)
treeName = logFileName.split('_')[0]
lf = open(logFileName,'w') #open a file for writing ('w') to, lf for log file
lf.write('\n#####################################################\n')
lf.write('############## Main Trunk Calculations ##############\n')
lf.write('#####################################################\n')
lf.close()
"""Section for calculating things on the main trunk, this includes arranging the trunk
into a segment-like form, it also calculates reference x,y locations, other locations
on the trunk with the @ symbol, and x,y of each section location"""
#Calculates trunk values, resorts columns, not sure if I need to correct this as program looks for col names anyway.
#Row sorted by names then by height
#pdb.set_trace()
trunkDat = treeData['trunk']
trunkDat = rt.trunkRoutine(trunkDat, custRefs, logFileName)
#####For testing ###########################################
#trunkDat.loc[:,['name','height','ref','ref type','ref x','ref y','x','y']]
################################################################
#Rearranges the trunk data into segment form
mainTrunks = rt.arrangeTrunkData(trunkDat, logFileName)
trunks = pd.concat(list(mainTrunks.values()),ignore_index = True)
if intermediateFiles:
trunks.to_csv('{0}_trunks.csv'.format(treeName), date_format = False, index = False)
"""#################################################################################################"""
"""This section will do all the calculations on segments, starting with determining reference x,y
locations referenced to main trunks and other trunk segments. """
"""#################################################################################################"""
###This is the log file I'll use to help error check.
lf = open(logFileName,'a') #'a' stands for append
lf.write('\n##################################################\n')
lf.write('############## Segment calculations ##############\n')
lf.write('##################################################\n')
lf.close()
if mapType == 'trunk map' or mapType == 'trunk and branch map':
f.print2Log(logFileName,'There are no segments in this tree')
if mapType == 'segment map' or mapType == 'full map':
segs = treeData['segments']
branches = treeData['branches']
#######################################################################
######## Segments to Custom References ##############################
#######################################################################
if len(custRefs)>0 and (any(segs['base ref'].isin(custRefs['name'])) or any(segs['top ref'].isin(custRefs['name']))):
segs = rt.segs2custRefs(segs, custRefs, logFileName)
#######################################################################
###### References to Main Trunks ###################################
#######################################################################
f.print2Log(logFileName,'\n############# Segments referenced to main trunks ############\n')
####This code calcualates the x,y values for segment bases and tops referenced to the main trunk.
segs = rt.segs2trunks(segs, mainTrunks, custRefs, logFileName)
if intermediateFiles:
segs.to_csv('{0}_segs2trunks.csv'.format(treeName), date_format = False, index = False)
#f.print2Log(logFileName, '\n\n############## Segments referenced to nodes #################\n')
"""The while loop bleow cycles through all the segments until either all the rows are calculated or until it has cycled though
a number of times equal to the numer of uncalculated rows + 1"""
#Count the number of missing values
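#each base/top location has both an x and a y column, so dividing the NaN
#count by 2 gives the number of uncalculated point locations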
emptyRows = int(np.isnan(segs.loc[:,['base x', 'base y','top x','top y']]).sum().sum()/2)
#origNumRows = emptyRows
counter = 0
#loop until there are no more values to calculate or until a number of iterations = to rows needing calculating + 1
# This loop is needed because there are refs to segments that aren't calculated on the first pass.
while emptyRows > 0 and counter < 10:
counter = counter + 1
previousEmptyRows = emptyRows #This is for the break statement at the end of the loop
#######################################################################
###### References to Nodes ###################################
#######################################################################
f.print2Log(logFileName, "\n############ Segments referenced to nodes round {0} #############\n".format(counter))
segs = rt.segs2nodes(segs, logFileName)
if intermediateFiles:
segs.to_csv('{0}_segs2nodes{1}.csv'.format(treeName,counter), date_format = False, index = False)
#######################################################################
###### References to Tunk segments #################################
#######################################################################
f.print2Log(logFileName, "\n############ Segments referenced to trunk segments round {0} #############\n".format(counter))
segs = rt.segs2reits(segs, logFileName)
if intermediateFiles:
segs.to_csv('{0}_segs2reits{1}.csv'.format(treeName,counter), date_format = False, index = False)
#######################################################################
############ Mid Segment Calcs ######################################
#######################################################################
f.print2Log(logFileName, "\n############ Segments referenced to mid-segments round {0} #############\n".format(counter))
segs = rt.segs2midsegs(segs, logFileName)
if intermediateFiles:
segs.to_csv('{0}_segs2midSegs{1}.csv'.format(treeName,counter), date_format = False, index = False)
emptyRows = int(np.isnan(segs.loc[:,['base x', 'base y','top x','top y']]).sum().sum()/2)
f.print2Log(logFileName,'\nAfter pass {0} there are {1} uncalculated segment base or top locations'.format(counter, emptyRows))
if emptyRows == previousEmptyRows:
f.print2Log(logFileName,'\nThere are the same number of empty rows after the last pass, \ncheck to make sure these rows have enough information to calculate them')
#locate where basex and basey are missing
t1 = segs['base x'].isnull()
t2 = segs['base y'].isnull()
test1 = f.vectorBool(t1,t2,'or')
#locate where topy and topx are missing
t1 = segs['top x'].isnull()
t2 = segs['top y'].isnull()
test2 = f.vectorBool(t1,t2,'or')
#create a combination "or" vector
test3 = f.vectorBool(test1,test2,'or')
#subset segnames using above vector
emptySegs = segs.loc[test3,'name'].tolist()
f.print2Log(logFileName,'\nThe offending segment(s) are {0}, check that the heights are between origin segment heights'.format(emptySegs))
break
lf = open(logFileName,'a') #'a' stands for append
lf.write('\n\n####################################################\n')
lf.write('############## Branch calculations ##############\n')
lf.write('####################################################\n')
lf.write('\n############# Branches bases referenced to main trunks ############\n')
lf.close()
if mapType == 'trunk and branch map' or mapType == 'full map':
branches = treeData['branches']
branches = rt.brBase2Trunks(branches, mainTrunks, logFileName)
if intermediateFiles:
branches.to_csv('{0}_brFromTrnk.csv'.format(treeName), date_format = False, index = False)
f.print2Log(logFileName, '\n############## Branch bases referenced to segments #################\n')
f.print2Log(logFileName, '####################################################################\n')
if mapType == 'full map' :
f.print2Log(logFileName, '\n############## Branch bases referenced to nodes #################\n')
branches = rt.brBase2nodes(branches, segs, logFileName)
if intermediateFiles:
branches.to_csv('{0}_brFromNodes.csv'.format(treeName), date_format = False, index = False)
f.print2Log(logFileName, "\n############ Branch bases referenced to trunk segments #############\n")
branches = rt.brBase2reits(branches, segs, logFileName)
if intermediateFiles:
branches.to_csv('{0}_brFromReits.csv'.format(treeName), date_format = False, index = False)
f.print2Log(logFileName, "\n############ Branch bases referenced to mid-segments #############\n")
branches = rt.brBase2midsegs(branches, segs, logFileName)
if intermediateFiles:
branches.to_csv('{0}_brFromMidSegs.csv'.format(treeName), date_format = False, index = False)
else: f.print2Log(logFileName, "There are no segments on this tree to map to")
f.print2Log(logFileName, "\n############ All branch tops #############\n")
branches = rt.brTops(branches, logFileName)
if intermediateFiles:
branches.to_csv('{0}_brFinal.csv'.format(treeName), date_format = False, index = False)
else:
f.print2Log(logFileName,'\nThere are no mapped branches for this tree')
lf = open(logFileName,'a') #'a' stands for append
lf.write('\n\n##################################################################\n')
lf.write('############## Final clean and output up etc.... ##############\n')
lf.write('##################################################################\n')
lf.close()
date = pd.Timestamp("today").strftime("%d%b%Y").lstrip('0')
finalFileName = '{0}_{1}.xlsx'.format(treeName, date)
f.print2Log(logFileName,"Calculated and cleaned tree file output to: '{0}'".format(finalFileName))
## Rearrange column headers to make pretty
trunkPrintCols = ['name','base radius','base x','base y','base z','top radius','top x','top y','top z', '% dead','s_fuzz','l_fuzz','notes']
segPrintCols =['position','name','o/e','type','base radius','base x','base y','base z','top radius','top x','top y','top z', '% dead','s_fuzz','l_fuzz','notes']
branchPrintCols = ['name','origin','o/e','l/d','base radius','base x','base y','base z','top radius','top x','top y','top z','slope','hd', '% dead','notes']
##Move non-main segments to segments dataframe and only include relevant columns
sparseTrunks = trunks.loc[:,trunkPrintCols]
if mapType == 'segment map' or mapType == 'full map':
sparseSegments = segs.loc[:,segPrintCols]
if mapType == 'trunk and branch map' or mapType == 'full map':
sparseBranches = branches.loc[:,branchPrintCols]
segAppendRows = sparseTrunks['name'] != 'M'
if any(segAppendRows)==True:
if mapType == 'trunk map' or mapType == 'trunk and branch map':
sparseSegments = sparseTrunks.loc[segAppendRows]
else:
sparseSegments = sparseSegments.append(sparseTrunks.loc[segAppendRows])
sparseTrunks = sparseTrunks.loc[segAppendRows == False]
if mapType == 'trunk map':
treeData = {'trunk': sparseTrunks}
elif mapType == 'segment map':
treeData = {'trunk': sparseTrunks, 'segments':sparseSegments}
elif mapType == 'trunk and branch map':
treeData = {'trunk': sparseTrunks, 'branches': sparseBranches}
elif mapType == 'full map':
treeData = {'trunk': sparseTrunks, 'segments':sparseSegments, 'branches': sparseBranches}
return(treeData)
| 55.933594
| 226
| 0.507019
|
7949c8f0c73e255221fba021043f4afdc24dd1c0
| 338
|
py
|
Python
|
tests/test_data_extractor.py
|
DavidRobertQuinn/IV
|
eaa22b0ee98cb7186f461f0ca464299c96f0f189
|
[
"Apache-2.0"
] | null | null | null |
tests/test_data_extractor.py
|
DavidRobertQuinn/IV
|
eaa22b0ee98cb7186f461f0ca464299c96f0f189
|
[
"Apache-2.0"
] | null | null | null |
tests/test_data_extractor.py
|
DavidRobertQuinn/IV
|
eaa22b0ee98cb7186f461f0ca464299c96f0f189
|
[
"Apache-2.0"
] | 1
|
2019-07-12T16:14:01.000Z
|
2019-07-12T16:14:01.000Z
|
from IV.data_extractor import create_PV_dataframe
import pandas as pd
import os
HERE = os.path.dirname(__file__)
test_data_loc = os.path.join(HERE, "test_data")
def test_create_PV_dataframe():
df = create_PV_dataframe(
"test", "test", test_data_loc, light=True, force_analysis=True)
assert isinstance(df, pd.DataFrame)
| 26
| 71
| 0.754438
|
7949c96c2527ef50872a663a1bb8d924022350ce
| 8,544
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_express_route_links_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_express_route_links_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_express_route_links_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteLinksOperations(object):
"""ExpressRouteLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
link_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteLink"
"""Retrieves the specified ExpressRouteLink resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param link_name: The name of the ExpressRouteLink resource.
:type link_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteLink, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteLink"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
'linkName': self._serialize.url("link_name", link_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links/{linkName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ExpressRouteLinkListResult"]
"""Retrieve the ExpressRouteLink sub-resources of the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteLinkListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_10_01.models.ExpressRouteLinkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteLinkListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
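# Deserialize one page and return (continuation link, iterator of items)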
deserialized = self._deserialize('ExpressRouteLinkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links'} # type: ignore
| 47.466667
| 198
| 0.668656
|
7949c9870f617671d4eb3fb0eb74359d2a70f649
| 770
|
py
|
Python
|
sample_app/_verify_user.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
sample_app/_verify_user.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
sample_app/_verify_user.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
import click
from accelbyte_py_sdk.api.iam import admin_verify_user_without_verification_code_v3
from ._utils import login_as as login_as_internal
@click.command()
@click.argument("user_id")
@click.option("--namespace")
@click.option("--doc", type=bool)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
def verify_user(
user_id,
namespace,
doc,
login_as,
):
login_as_internal(login_as)
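# authenticate first (either with client credentials or as a user); assumed
# to set up the SDK token used by the admin call below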
if doc:
click.echo(admin_verify_user_without_verification_code_v3.__doc__)
result, error = admin_verify_user_without_verification_code_v3(
user_id=user_id,
namespace=namespace,
)
if error:
raise Exception(str(error))
click.echo("Verify user success.")
| 26.551724
| 88
| 0.709091
|
7949ca2865027ea0e1f90df1d73a9c9921501aac
| 11,477
|
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_aaa_server_host.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_aaa_server_host.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_aaa_server_host.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: nxos_aaa_server_host
extends_documentation_fragment:
- cisco.nxos.nxos
short_description: Manages AAA server host-specific configuration.
description:
- Manages AAA server host-specific configuration.
version_added: 1.0.0
author: Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Changes to the host key (shared secret) are not idempotent for type 0.
- C(state=absent) removes the whole host configuration.
options:
server_type:
description:
- The server type is either radius or tacacs.
required: true
choices:
- radius
- tacacs
type: str
address:
description:
- Address or name of the radius or tacacs host.
required: true
type: str
key:
description:
- Shared secret for the specified host or keyword 'default'.
type: str
encrypt_type:
description:
- The state of encryption applied to the entered key. 0 for clear text, 7 for
encrypted. Type-6 encryption is not supported.
choices:
- '0'
- '7'
type: str
host_timeout:
description:
- Timeout period for specified host, in seconds or keyword 'default'. Range is
1-60.
type: str
auth_port:
description:
- Alternate UDP port for RADIUS authentication or keyword 'default'.
type: str
acct_port:
description:
- Alternate UDP port for RADIUS accounting or keyword 'default'.
type: str
tacacs_port:
description:
- Alternate TCP port TACACS Server or keyword 'default'.
type: str
state:
description:
- Manage the state of the resource.
default: present
choices:
- present
- absent
type: str
"""
EXAMPLES = """
# Radius Server Host Basic settings
- name: Radius Server Host Basic settings
cisco.nxos.nxos_aaa_server_host:
state: present
server_type: radius
address: 1.2.3.4
acct_port: 2084
host_timeout: 10
# Radius Server Host Key Configuration
- name: Radius Server Host Key Configuration
cisco.nxos.nxos_aaa_server_host:
state: present
server_type: radius
address: 1.2.3.4
key: hello
encrypt_type: 7
# TACACS Server Host Configuration
- name: Tacacs Server Host Configuration
cisco.nxos.nxos_aaa_server_host:
state: present
server_type: tacacs
tacacs_port: 89
host_timeout: 10
address: 5.6.7.8
"""
RETURN = """
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"address": "1.2.3.4", "auth_port": "2084",
"host_timeout": "10", "server_type": "radius"}
existing:
description:
- k/v pairs of existing configuration
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"address": "1.2.3.4", "auth_port": "2084",
"host_timeout": "10", "server_type": "radius"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["radius-server host 1.2.3.4 auth-port 2084 timeout 10"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
"""
import re
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
load_config,
run_commands,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import (
get_capabilities,
nxos_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
device_info = get_capabilities(module)
network_api = device_info.get("network_api", "nxapi")
if network_api == "cliconf":
cmds = [command]
body = run_commands(module, cmds)
elif network_api == "nxapi":
cmds = {"command": command, "output": "text"}
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_aaa_host_info(module, server_type, address):
aaa_host_info = {}
command = "show run | inc {0}-server.host.{1}".format(server_type, address)
body = execute_show_command(command, module)[0]
if body:
try:
if "radius" in body:
pattern = (
r"\S+ host \S+(?:\s+key 7\s+(\S+))?(?:\s+auth-port (\d+))?"
r"(?:\s+acct-port (\d+))?(?:\s+authentication)?"
r"(?:\s+accounting)?(?:\s+timeout (\d+))?"
)
match = re.search(pattern, body)
aaa_host_info["key"] = match.group(1)
if aaa_host_info["key"]:
aaa_host_info["key"] = aaa_host_info["key"].replace(
'"', ""
)
aaa_host_info["encrypt_type"] = "7"
aaa_host_info["auth_port"] = match.group(2)
aaa_host_info["acct_port"] = match.group(3)
aaa_host_info["host_timeout"] = match.group(4)
elif "tacacs" in body:
pattern = (
r"\S+ host \S+(?:\s+key 7\s+(\S+))?(?:\s+port (\d+))?"
r"(?:\s+timeout (\d+))?"
)
match = re.search(pattern, body)
aaa_host_info["key"] = match.group(1)
if aaa_host_info["key"]:
aaa_host_info["key"] = aaa_host_info["key"].replace(
'"', ""
)
aaa_host_info["encrypt_type"] = "7"
aaa_host_info["tacacs_port"] = match.group(2)
aaa_host_info["host_timeout"] = match.group(3)
aaa_host_info["server_type"] = server_type
aaa_host_info["address"] = address
except TypeError:
return {}
else:
return {}
return aaa_host_info
def config_aaa_host(server_type, address, params, existing):
cmds = []
cmd_str = "{0}-server host {1}".format(server_type, address)
cmd_no_str = "no " + cmd_str
key = params.get("key")
enc_type = params.get("encrypt_type", "")
defval = False
nondef = False
if key:
if key != "default":
cmds.append(cmd_str + " key {0} {1}".format(enc_type, key))
else:
cmds.append(cmd_no_str + " key 7 {0}".format(existing.get("key")))
locdict = {
"auth_port": "auth-port",
"acct_port": "acct-port",
"tacacs_port": "port",
"host_timeout": "timeout",
}
# platform CLI needs the keywords in the following order
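# e.g. (hypothetical) proposed auth_port=2084 and host_timeout=10 for radius
# host 1.2.3.4 yields: radius-server host 1.2.3.4 auth-port 2084 timeout 10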
for key in ["auth_port", "acct_port", "tacacs_port", "host_timeout"]:
item = params.get(key)
if item:
if item != "default":
cmd_str += " {0} {1}".format(locdict.get(key), item)
nondef = True
else:
cmd_no_str += " {0} 1".format(locdict.get(key))
defval = True
if defval:
cmds.append(cmd_no_str)
if nondef or not existing:
cmds.append(cmd_str)
return cmds
def main():
argument_spec = dict(
server_type=dict(choices=["radius", "tacacs"], required=True),
address=dict(type="str", required=True),
key=dict(type="str"),
encrypt_type=dict(type="str", choices=["0", "7"]),
host_timeout=dict(type="str"),
auth_port=dict(type="str"),
acct_port=dict(type="str"),
tacacs_port=dict(type="str"),
state=dict(choices=["absent", "present"], default="present"),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
warnings = list()
server_type = module.params["server_type"]
address = module.params["address"]
key = module.params["key"]
encrypt_type = module.params["encrypt_type"]
host_timeout = module.params["host_timeout"]
auth_port = module.params["auth_port"]
acct_port = module.params["acct_port"]
tacacs_port = module.params["tacacs_port"]
state = module.params["state"]
args = dict(
server_type=server_type,
address=address,
key=key,
encrypt_type=encrypt_type,
host_timeout=host_timeout,
auth_port=auth_port,
acct_port=acct_port,
tacacs_port=tacacs_port,
)
proposed = dict((k, v) for k, v in args.items() if v is not None)
changed = False
if encrypt_type and not key:
module.fail_json(msg="encrypt_type must be used with key")
if tacacs_port and server_type != "tacacs":
module.fail_json(
msg="tacacs_port can only be used with server_type=tacacs"
)
if (auth_port or acct_port) and server_type != "radius":
module.fail_json(
msg="auth_port and acct_port can only be used"
"when server_type=radius"
)
existing = get_aaa_host_info(module, server_type, address)
end_state = existing
commands = []
delta = {}
if state == "present":
if not existing:
delta = proposed
else:
for key, value in proposed.items():
if key == "encrypt_type":
delta[key] = value
if value != existing.get(key):
if value != "default" or existing.get(key):
delta[key] = value
command = config_aaa_host(server_type, address, delta, existing)
if command:
commands.append(command)
elif state == "absent":
intersect = dict(set(proposed.items()).intersection(existing.items()))
if intersect.get("address") and intersect.get("server_type"):
command = "no {0}-server host {1}".format(
intersect.get("server_type"), intersect.get("address")
)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_aaa_host_info(module, server_type, address)
results = {}
results["proposed"] = proposed
results["existing"] = existing
results["updates"] = cmds
results["changed"] = changed
results["warnings"] = warnings
results["end_state"] = end_state
module.exit_json(**results)
if __name__ == "__main__":
main()
| 29.888021
| 83
| 0.611658
|
7949ca9863e34ed9d0c57c8edc8383e5102957d6
| 541
|
py
|
Python
|
mail_alias_manager/util/debug_routes/__init__.py
|
stuvusIT/mail_alias_manager
|
260b6d1da4db03079afee159c23c3f83f4e75937
|
[
"MIT"
] | 3
|
2020-11-21T13:10:13.000Z
|
2021-06-04T12:58:21.000Z
|
mail_alias_manager/util/debug_routes/__init__.py
|
stuvusIT/mail_alias_manager
|
260b6d1da4db03079afee159c23c3f83f4e75937
|
[
"MIT"
] | 1
|
2021-11-14T18:55:44.000Z
|
2021-11-14T18:55:44.000Z
|
mail_alias_manager/util/debug_routes/__init__.py
|
stuvusIT/mail_alias_manager
|
260b6d1da4db03079afee159c23c3f83f4e75937
|
[
"MIT"
] | 2
|
2020-11-21T13:20:10.000Z
|
2021-06-04T13:03:27.000Z
|
"""
Module containing debug methods and routes.
This module should only be loaded in debug mode.
"""
from flask.app import Flask
from . import root # noqa
from . import routes # noqa
def register_debug_routes(app: Flask):
"""Register the debug routes blueprint with the flask app."""
if not app.config["DEBUG"]:
app.logger.warning("This Module should only be loaded if DEBUG mode is active!")
raise Warning("This Module should only be loaded if DEBUG mode is active!")
app.register_blueprint(root.DEBUG_BLP)
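# Usage sketch (hypothetical app factory, not part of this module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config["DEBUG"] = True
#     register_debug_routes(app)  # raises Warning when DEBUG is False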
| 30.055556
| 88
| 0.719039
|
7949caa0b3baa8d2fe9618104a4c52a16005a9c8
| 5,079
|
py
|
Python
|
script/runTazer.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
script/runTazer.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
script/runTazer.py
|
jstrube/tazer
|
18a6d66ad2c20376609f1af7940e43b2031e4fd0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import sys
import argparse
import tazerUtil as tu
# Check python version
def checkPython():
if sys.version_info.major < 3:
print("This script was tested with python version 3.7. Thus it requires python 3")
ver = "python " + str(sys.version_info.major) + "." + str(sys.version_info.minor) + "." + str(sys.version_info.micro)
print(ver)
exit(1)
# Handles if a client command is passed on command line using quotation marks (i.e. "ls -alh")
def parseClientCommand(comList):
res = []
for one in comList:
        if ' ' in one:
            # split a quoted command string (e.g. "ls -alh") into tokens
            res.extend(one.split())
        else:
            res.append(one)
return res
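# For example (illustrative values only):
#   parseClientCommand(['ls -alh', 'pwd']) -> ['ls', '-alh', 'pwd']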
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('pathToBuild', action='store', type=str, help='path to the build directory of Tazer')
parser.add_argument('-c', '--client', action='append', nargs='+', type=str, help='command for the client to run')
parser.add_argument('-s', "--server", action='store', type=str, default=None, help='server IP address')
parser.add_argument('-p', "--port", action='store', type=str, default=None, help='server port')
parser.add_argument('--clientOnly', action='store_true', default=False, help='only runs the client')
parser.add_argument('--serverOnly', action='store_true', default=False, help='only runs the sever')
parser.add_argument('--closeServer', action='store_true', default=False, help='closes the server')
parser.add_argument('--serverLog', action='store', type=str, default=None, help='outfile for server log')
parser.add_argument('--clientLog', action='store', type=str, default=None, help='outfile for client log')
parser.add_argument('--printEnv', action='store_true', default=False, help='prints Tazer environment variables')
tazerEnvVars = tu.TazerEnv().getEnvVars()
for envVar in tazerEnvVars:
envVarArg = "--" + envVar
parser.add_argument(envVarArg, action='store', default=None, help='Tazer environment variable')
args = parser.parse_args()
if sum([args.serverOnly, args.closeServer, args.clientOnly]) > 1:
parser.error("Cannot set clientOnly serverOnly closeServer at the same time")
    if args.clientOnly and args.client is None:
        parser.error('Must set a client command to run')
    if not (args.clientOnly or args.serverOnly or args.closeServer) and args.client is None:
        parser.error('Must set a client command to run')
return args
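# Example invocations (paths and addresses are hypothetical):
#   python runTazer.py ./build --serverOnly -s 127.0.0.1 -p 5001
#   python runTazer.py ./build --clientOnly -c "ls -alh"
#   python runTazer.py ./build --closeServer -s 127.0.0.1 -p 5001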
# This takes the arguments from the command line and sets the Tazer environment variables
def getEnvFromArgs(args):
tazerEnv = tu.TazerEnv()
tazerEnv.setVar(args)
tazerEnv.setEnv()
if args.printEnv:
tazerEnv.printEnv()
# This runs both the server and client
def runLocalCommand(path, server, port, comList, serverLog=None, clientLog=None):
print("--Run Server and Client--")
tazerServer = tu.TazerServer(path, serverIpAddr=server, port=port, outputFileName=serverLog)
tazerServer.run()
if tazerServer.poll():
tazerServer.ping(serverIpAddr=server, port=port)
tazerServer.pid()
try:
tazerClient = tu.TazerClient(path, outputFileName=clientLog)
tazerClient.run(parseClientCommand(comList))
        except Exception:
            print("Failed running client")
finally:
tazerServer.close(serverIpAddr=server, port=port)
tazerServer.kill()
else:
print("Could not launch server. Check to see if server:port is avaialble")
# Only runs the server. This leaves the server running.
def runServerCommand(path, server, port, serverLog=None):
print("--Server Only--")
tazerServer = tu.TazerServer(path, serverIpAddr=server, port=port, outputFileName=serverLog)
tazerServer.run()
if tazerServer.poll():
tazerServer.ping(serverIpAddr=server, port=port)
tazerServer.pid()
else:
print("Could not launch server. Check to see if server:port is avaialble")
# Send a close command to a server
def closeServerCommand(path, server, port, serverLog=None):
print("--Close server--")
tu.TazerServer(path, outputFileName=serverLog).close(serverIpAddr=server, port=port)
# Runs a client command
def runClientCommand(path, comList, clientLog=None):
print("--Client Only--")
newComList = parseClientCommand(comList)
tu.TazerClient(path, outputFileName=clientLog).run(newComList)
def main():
checkPython()
args = parseArgs()
getEnvFromArgs(args)
if args.closeServer:
closeServerCommand(args.pathToBuild, args.server, args.port, serverLog=args.serverLog)
elif args.serverOnly:
runServerCommand(args.pathToBuild, args.server, args.port, serverLog=args.serverLog)
elif args.clientOnly:
runClientCommand(args.pathToBuild, *args.client, clientLog=args.clientLog)
else:
runLocalCommand(args.pathToBuild, args.server, args.port, *args.client, serverLog=args.serverLog, clientLog=args.clientLog)
if __name__ == "__main__":
main()
| 44.165217
| 131
| 0.692065
|
7949cae2cafb04608c997156a078a9263c806b47
| 146
|
py
|
Python
|
app/backend/emailtemplates/admin.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | null | null | null |
app/backend/emailtemplates/admin.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | 41
|
2020-05-24T06:47:53.000Z
|
2022-02-27T11:10:41.000Z
|
app/backend/emailtemplates/admin.py
|
NicholasCF/meridien
|
dd00caf341d4c9979b89dc8441224ff2b97eac7f
|
[
"MIT"
] | 2
|
2020-11-26T12:19:30.000Z
|
2020-12-19T01:14:02.000Z
|
from django.contrib import admin
from emailtemplates.models import EmailTemplate
# Register your models here.
admin.site.register(EmailTemplate)
| 24.333333
| 47
| 0.842466
|
7949cb841e60289f7b5204ce7764e11ba9ab5b77
| 71,250
|
py
|
Python
|
QMTest/TestCommonTests.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1
|
2017-01-28T15:39:07.000Z
|
2017-01-28T15:39:07.000Z
|
QMTest/TestCommonTests.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1
|
2020-05-19T02:59:19.000Z
|
2020-05-21T09:05:19.000Z
|
QMTest/TestCommonTests.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 2
|
2018-01-16T11:29:16.000Z
|
2020-05-13T16:48:26.000Z
|
#!/usr/bin/env python
"""
TestCommonTests.py: Unit tests for the TestCommon.py module.
Copyright 2000-2010 Steven Knight
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommonTests.py 1.3.D001 2010/06/03 12:58:27 knight"
import difflib
import os
import re
import signal
import stat
import sys
import unittest
# Strip the current directory so we get the right TestCommon.py module.
sys.path = sys.path[1:]
import TestCmd
import TestCommon
def lstrip(s):
lines = [ _.expandtabs() for _ in s.split('\n') ]
if lines[0] == '':
lines = lines[1:]
spaces = len(re.match('^( *).*', lines[0]).group(1))
if spaces:
lines = [ l[spaces:] for l in lines ]
return '\n'.join(lines)
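# Illustrative behavior (not exercised by the tests directly):
#   lstrip('\n    aaa\n      bbb\n') -> 'aaa\n  bbb\n'
# i.e. the leading indentation of the first content line is removed from
# every line, so the embedded test scripts below can be written indented.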
if sys.version[:3] == '1.5':
expected_newline = '\\012'
else:
expected_newline = '\\n'
def assert_display(expect, result, error=None):
try:
expect = expect.pattern
except AttributeError:
pass
result = [
'\n',
('*'*80) + '\n',
expect,
('*'*80) + '\n',
result,
('*'*80) + '\n',
]
if error:
result.append(error)
return ''.join(result)
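# Schematically (values hypothetical), assert_display() returns the expected
# text and the actual result, each framed by an 80-column row of asterisks,
# followed by any captured stderr, so a failing assert prints both for
# side-by-side comparison.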
class TestCommonTestCase(unittest.TestCase):
"""Base class for TestCommon test cases, fixture and utility methods."""
create_run_env = True
def setUp(self):
self.orig_cwd = os.getcwd()
if self.create_run_env:
self.run_env = TestCmd.TestCmd(workdir = '')
def tearDown(self):
os.chdir(self.orig_cwd)
def set_up_execution_scripts(self):
run_env = self.run_env
run_env.subdir('sub dir')
self.python = sys.executable
self.pass_script = run_env.workpath('sub dir', 'pass')
self.fail_script = run_env.workpath('sub dir', 'fail')
self.stdout_script = run_env.workpath('sub dir', 'stdout')
self.stderr_script = run_env.workpath('sub dir', 'stderr')
self.signal_script = run_env.workpath('sub dir', 'signal')
self.stdin_script = run_env.workpath('sub dir', 'stdin')
preamble = "import sys"
stdout = "; sys.stdout.write(r'%s: STDOUT: ' + repr(sys.argv[1:]) + '\\n')"
stderr = "; sys.stderr.write(r'%s: STDERR: ' + repr(sys.argv[1:]) + '\\n')"
exit0 = "; sys.exit(0)"
exit1 = "; sys.exit(1)"
if sys.platform == 'win32':
wrapper = '@python -c "%s" %%1 %%2 %%3 %%4 %%5 %%6 %%7 %%8 %%9\n'
else:
wrapper = '#! /usr/bin/env python\n%s\n'
pass_body = preamble + stdout % self.pass_script + exit0
fail_body = preamble + stdout % self.fail_script + exit1
stderr_body = preamble + stderr % self.stderr_script + exit0
run_env.write(self.pass_script, wrapper % pass_body)
run_env.write(self.fail_script, wrapper % fail_body)
run_env.write(self.stderr_script, wrapper % stderr_body)
signal_body = lstrip("""\
import os
import signal
os.kill(os.getpid(), signal.SIGTERM)
""")
run_env.write(self.signal_script, wrapper % signal_body)
stdin_body = lstrip("""\
import sys
input = sys.stdin.read()[:-1]
sys.stdout.write(r'%s: STDOUT: ' + repr(input) + '\\n')
sys.stderr.write(r'%s: STDERR: ' + repr(input) + '\\n')
""" % (self.stdin_script, self.stdin_script))
run_env.write(self.stdin_script, wrapper % stdin_body)
def run_execution_test(self, script, expect_stdout, expect_stderr):
self.set_up_execution_scripts()
run_env = self.run_env
os.chdir(run_env.workpath('sub dir'))
# Everything before this prepared our "source directory."
# Now do the real test.
script = script % self.__dict__
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
expect_stdout = expect_stdout % self.__dict__
assert stdout == expect_stdout, assert_display(expect_stdout,
stdout,
stderr)
try:
match = expect_stderr.match
except AttributeError:
expect_stderr = expect_stderr % self.__dict__
assert stderr == expect_stderr, assert_display(expect_stderr,
stderr)
else:
assert expect_stderr.match(stderr), assert_display(expect_stderr,
stderr)
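    # Note: run_execution_test() %-interpolates both the script and the
    # expected output against self.__dict__, so placeholders such as
    # '%(pass_script)s' and '%(python)s' in the test scripts below expand to
    # the paths created in set_up_execution_scripts().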
class __init__TestCase(TestCommonTestCase):
def test___init__(self):
"""Test initialization"""
run_env = self.run_env
os.chdir(run_env.workdir)
script = lstrip("""\
from __future__ import print_function
from TestCommon import TestCommon
tc = TestCommon(workdir='')
import os
print(os.getcwd())
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()[:-1]
assert stdout != run_env.workdir, stdout
stderr = run_env.stderr()
assert stderr == "", stderr
class banner_TestCase(TestCommonTestCase):
create_run_env = False
def test_banner(self):
"""Test banner()"""
tc = TestCommon.TestCommon(workdir='')
b = tc.banner('xyzzy ')
assert b == "xyzzy ==========================================================================", b
tc.banner_width = 10
b = tc.banner('xyzzy ')
assert b == "xyzzy ====", b
b = tc.banner('xyzzy ', 20)
assert b == "xyzzy ==============", b
tc.banner_char = '-'
b = tc.banner('xyzzy ')
assert b == "xyzzy ----", b
class must_be_writable_TestCase(TestCommonTestCase):
def test_file_does_not_exists(self):
"""Test must_be_writable(): file does not exist"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Missing files: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_writable_file_exists(self):
"""Test must_be_writable(): writable file exists"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
f1 = tc.workpath('file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode | stat.S_IWUSR)
tc.must_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_non_writable_file_exists(self):
"""Test must_be_writable(): non-writable file exists"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
f1 = tc.workpath('file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode & ~stat.S_IWUSR)
tc.must_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Unwritable files: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_file_specified_as_list(self):
"""Test must_be_writable(): file specified as list"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
f1 = tc.workpath('sub', 'file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode | stat.S_IWUSR)
tc.must_be_writable(['sub', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_contain_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_contain(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 contents\\n")
tc.must_contain('file1', "1 c")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_missing(self):
"""Test must_contain(): file missing"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_contain('file1', "1 c\\n")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr.find("No such file or directory:") != -1, stderr
def test_failure(self):
"""Test must_contain(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 does not match\\n")
tc.must_contain('file1', "1 c")
tc.run()
""")
expect = lstrip("""\
File `file1' does not contain required string.
Required string ================================================================
1 c
file1 contents =================================================================
file1 does not match
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == expect, repr(stdout)
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_mode(self):
"""Test must_contain(): mode"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 contents\\n", mode='w')
tc.must_contain('file1', "1 c", mode='r')
tc.write('file2', "file2 contents\\n", mode='wb')
tc.must_contain('file2', "2 c", mode='rb')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_contain_all_lines_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_contain_all_lines(): success"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_contain_all_lines(output, lines)
test.must_contain_all_lines(output, ['www\\n'])
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_contain_all_lines(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_all_lines(output, lines)
test.pass_test()
""")
expect = lstrip("""\
Missing expected lines from output:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
output =========================================================================
www
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
def test_find(self):
"""Test must_contain_all_lines(): find"""
run_env = self.run_env
script = lstrip("""
import re
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'x.*',
'.*y',
]
output = '''\\
www
xxx
yyy
zzz
'''
def re_search(output, line):
return re.compile(line, re.S).search(output)
test.must_contain_all_lines(output, lines, find=re_search)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_title(self):
"""Test must_contain_all_lines(): title"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_all_lines(output, lines, title='STDERR')
test.pass_test()
""")
expect = lstrip("""\
Missing expected lines from STDERR:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
STDERR =========================================================================
www
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
class must_contain_any_line_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_contain_any_line(): success"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'aaa\\n',
'yyy\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_contain_any_line(output, lines)
test.must_contain_any_line(output, ['www\\n'])
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_contain_any_line(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_any_line(output, lines)
test.pass_test()
""")
expect = lstrip("""\
Missing any expected line from output:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
output =========================================================================
www
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
def test_find(self):
"""Test must_contain_any_line(): find"""
run_env = self.run_env
script = lstrip("""
import re
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'aaa',
'.*y',
]
output = '''\\
www
xxx
yyy
zzz
'''
def re_search(output, line):
return re.compile(line, re.S).search(output)
test.must_contain_any_line(output, lines, find=re_search)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_title(self):
"""Test must_contain_any_line(): title"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_any_line(output, lines, title='STDOUT')
test.pass_test()
""")
expect = lstrip("""\
Missing any expected line from STDOUT:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
STDOUT =========================================================================
www
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
class must_contain_exactly_lines_TestCase(TestCommonTestCase):
def test_success_list(self):
"""Test must_contain_exactly_lines(): success (input list)"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'yyy\\n',
'xxx\\n',
'zzz',
'www\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_contain_exactly_lines(output, lines)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_success_string(self):
"""Test must_contain_exactly_lines(): success (input string)"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = '''\\
yyy
xxx
zzz
www
'''
output = '''\\
www
xxx
yyy
zzz
'''
test.must_contain_exactly_lines(output, lines)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_contain_exactly_lines(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_exactly_lines(output, lines)
test.pass_test()
""")
expect = lstrip("""\
Missing expected lines from output:
'xxx'
'yyy'
Missing output =================================================================
Extra unexpected lines from output:
'www'
'zzz'
Extra output ===================================================================
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
def test_find(self):
"""Test must_contain_exactly_lines(): find"""
run_env = self.run_env
script = lstrip("""
import re
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'zzz',
'.*y',
'xxx',
'www',
]
output = '''\\\
www
xxx
yyy
zzz
'''
def re_search(output, line):
pattern = re.compile(line, re.S)
index = 0
for o in output:
if pattern.search(o):
return index
index +=1
return None
test.must_contain_exactly_lines(output, lines, find=re_search)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_title(self):
"""Test must_contain_exactly_lines(): title"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_exactly_lines(output, lines, title='STDOUT')
test.pass_test()
""")
expect = lstrip("""\
Missing expected lines from STDOUT:
'xxx'
'yyy'
Missing STDOUT =================================================================
Extra unexpected lines from STDOUT:
'www'
'zzz'
Extra STDOUT ===================================================================
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
class must_contain_lines_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_contain_lines(): success"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_contain_lines(lines, output)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_contain_lines(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_contain_lines(lines, output)
test.pass_test()
""")
expect = lstrip("""\
Missing expected lines from output:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
output =========================================================================
www
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
class must_exist_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_exist(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_exist('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_exist(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_exist('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Missing files: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_file_specified_as_list(self):
"""Test must_exist(): file specified as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
tc.must_exist(['sub', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_broken_link(self) :
"""Test must_exist(): exists but it is a broken link"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.symlink('badtarget', "brokenlink")
tc.must_exist('brokenlink')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_exist_one_of_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_exist_one_of(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_exist_one_of(['file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_exist_one_of(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_exist_one_of(['file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Missing one of: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_files_specified_as_list(self):
"""Test must_exist_one_of(): files specified as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_exist_one_of(['file2', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_files_specified_with_wildcards(self):
"""Test must_exist_one_of(): files specified with wildcards"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file7', "file7\\n")
tc.must_exist_one_of(['file?'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_given_as_list(self):
"""Test must_exist_one_of(): file given as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
tc.must_exist_one_of(['file2',
['sub', 'file1']])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_given_as_sequence(self):
"""Test must_exist_one_of(): file given as sequence"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
tc.must_exist_one_of(['file2',
('sub', 'file1')])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_match_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_match(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_match('file1', "file1\\n")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_does_not_exists(self):
"""Test must_match(): file does not exist"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_match('file1', "file1\\n")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr.find("No such file or directory:") != -1, stderr
def test_failure(self):
"""Test must_match(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 does not match\\n")
tc.must_match('file1', "file1\\n")
tc.run()
""")
expect = lstrip("""\
Unexpected contents of `file1'
contents =======================================================================
1c1
< file1
---
> file1 does not match
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == expect, stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_mode(self):
"""Test must_match(): mode"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n", mode='w')
tc.must_match('file1', "file1\\n", mode='r')
tc.write('file2', "file2\\n", mode='wb')
tc.must_match('file2', "file2\\n", mode='rb')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_not_be_writable_TestCase(TestCommonTestCase):
def test_file_does_not_exists(self):
"""Test must_not_be_writable(): file does not exist"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_not_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Missing files: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_writable_file_exists(self):
"""Test must_not_be_writable(): writable file exists"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
f1 = tc.workpath('file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode | stat.S_IWUSR)
tc.must_not_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Writable files: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_non_writable_file_exists(self):
"""Test must_not_be_writable(): non-writable file exists"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
f1 = tc.workpath('file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode & ~stat.S_IWUSR)
tc.must_not_be_writable('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_specified_as_list(self):
"""Test must_not_be_writable(): file specified as list"""
run_env = self.run_env
script = lstrip("""\
import os
import stat
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
f1 = tc.workpath('sub', 'file1')
mode = os.stat(f1)[stat.ST_MODE]
os.chmod(f1, mode & ~stat.S_IWUSR)
tc.must_not_be_writable(['sub', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_not_contain_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_not_contain(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 contents\\n")
tc.must_not_contain('file1', "1 does not contain c")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_does_not_exist(self):
"""Test must_not_contain(): file does not exist"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_not_contain('file1', "1 c\\n")
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr.find("No such file or directory:") != -1, stderr
def test_failure(self):
"""Test must_not_contain(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 does contain contents\\n")
tc.must_not_contain('file1', "1 does contain c")
tc.run()
""")
expect = lstrip("""\
File `file1' contains banned string.
Banned string ==================================================================
1 does contain c
file1 contents =================================================================
file1 does contain contents
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == expect, repr(stdout)
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_mode(self):
"""Test must_not_contain(): mode"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1 contents\\n", mode='w')
tc.must_not_contain('file1', "1 does not contain c", mode='r')
tc.write('file2', "file2 contents\\n", mode='wb')
tc.must_not_contain('file2', "2 does not contain c", mode='rb')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_not_contain_any_line_TestCase(TestCommonTestCase):
def test_failure(self):
"""Test must_not_contain_any_line(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
'www\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_not_contain_any_line(output, lines)
test.pass_test()
""")
expect = lstrip("""\
Unexpected lines in output:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
'www%(expected_newline)s'
output =========================================================================
www
xxx
yyy
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
def test_find(self):
"""Test must_not_contain_any_line(): find"""
run_env = self.run_env
script = lstrip("""
import re
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
            'x.*',
            '.*y',
]
output = '''\\
www
zzz
'''
def re_search(output, line):
return re.compile(line, re.S).search(output)
test.must_not_contain_any_line(output, lines, find=re_search)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_success(self):
"""Test must_not_contain_any_line(): success"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
            'xxx\\n',
            'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_not_contain_any_line(output, lines)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_title(self):
"""Test must_not_contain_any_line(): title"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_not_contain_any_line(output, lines, title='XYZZY')
test.pass_test()
""")
expect = lstrip("""\
Unexpected lines in XYZZY:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
XYZZY ==========================================================================
www
xxx
yyy
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
class must_not_contain_lines_TestCase(TestCommonTestCase):
def test_failure(self):
"""Test must_not_contain_lines(): failure"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
'xxx\\n',
'yyy\\n',
]
output = '''\\
www
xxx
yyy
zzz
'''
test.must_not_contain_lines(lines, output)
test.pass_test()
""")
expect = lstrip("""\
Unexpected lines in output:
'xxx%(expected_newline)s'
'yyy%(expected_newline)s'
output =========================================================================
www
xxx
yyy
zzz
""" % globals())
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
stderr = run_env.stderr()
assert stdout == expect, assert_display(expect, stdout, stderr)
assert stderr.find("FAILED") != -1, stderr
def test_success(self):
"""Test must_not_contain_lines(): success"""
run_env = self.run_env
script = lstrip("""
import TestCommon
test = TestCommon.TestCommon(workdir='')
lines = [
            'xxx\\n',
            'yyy\\n',
]
output = '''\\
www
zzz
'''
test.must_not_contain_lines(lines, output)
test.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class must_not_exist_TestCase(TestCommonTestCase):
def test_failure(self):
"""Test must_not_exist(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_not_exist('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Unexpected files exist: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_success(self):
"""Test must_not_exist(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_not_exist('file1')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_specified_as_list(self):
"""Test must_not_exist(): file specified as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.must_not_exist(['sub', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_existing_broken_link(self):
"""Test must_not_exist(): exists but it is a broken link"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.symlink('badtarget', 'brokenlink')
tc.must_not_exist('brokenlink')
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Unexpected files exist: `brokenlink'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
class must_not_exist_any_of_TestCase(TestCommonTestCase):
def test_success(self):
"""Test must_not_exist_any_of(): success"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_not_exist_any_of(['file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_failure(self):
"""Test must_not_exist_any_of(): failure"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file1', "file1\\n")
tc.must_not_exist_any_of(['file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Unexpected files exist: `file1'\n", stdout
stderr = run_env.stderr()
assert stderr.find("FAILED") != -1, stderr
def test_files_specified_as_list(self):
"""Test must_not_exist_any_of(): files specified as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.must_not_exist_any_of(['file2', 'file1'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_files_specified_with_wildcards(self):
"""Test must_not_exist_any_of(): files specified with wildcards"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.write('file7', "file7\\n")
tc.must_not_exist_any_of(['files?'])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_given_as_list(self):
"""Test must_not_exist_any_of(): file given as list"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
tc.must_not_exist_any_of(['file2',
['sub', 'files*']])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
def test_file_given_as_sequence(self):
"""Test must_not_exist_any_of(): file given as sequence"""
run_env = self.run_env
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(workdir='')
tc.subdir('sub')
tc.write(['sub', 'file1'], "sub/file1\\n")
tc.must_not_exist_any_of(['file2',
('sub', 'files?')])
tc.pass_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
class run_TestCase(TestCommonTestCase):
def test_argument_handling(self):
"""Test run(): argument handling"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
tc.run(arguments = "arg1 arg2 arg3",
stdout = r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n")
""")
self.run_execution_test(script, "", "")
def test_default_pass(self):
"""Test run(): default arguments, script passes"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(pass_script)s',
interpreter=r'%(python)s',
workdir='')
tc.run()
""")
self.run_execution_test(script, "", "")
def test_default_fail(self):
"""Test run(): default arguments, script fails"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(fail_script)s',
interpreter='%(python)s',
workdir='')
tc.run()
""")
expect_stdout = lstrip("""\
%(fail_script)s returned 1
STDOUT =========================================================================
%(fail_script)s: STDOUT: []
STDERR =========================================================================
""")
expect_stderr = lstrip("""\
FAILED test of .*fail
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>( \(<module>\))?
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_default_stderr(self):
"""Test run(): default arguments, error output"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(stderr_script)s',
interpreter='%(python)s',
workdir='')
tc.run()
""")
expect_stdout = lstrip("""\
STDOUT =========================================================================
STDERR =========================================================================
0a1
> %(stderr_script)s: STDERR: []
""")
expect_stderr = lstrip("""\
FAILED test of .*stderr
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_exception_handling(self):
"""Test run(): exception handling"""
script = lstrip("""\
import TestCmd
from TestCommon import TestCommon
def raise_exception(*args, **kw):
raise TypeError("forced TypeError")
TestCmd.TestCmd.start = raise_exception
tc = TestCommon(program='%(pass_script)s',
interpreter='%(python)s',
workdir='')
tc.run()
""")
expect_stdout = lstrip("""\
STDOUT =========================================================================
STDERR =========================================================================
""")
expect_stderr = lstrip("""\
Exception trying to execute: \\[%s, '[^']*pass'\\]
Traceback \\((innermost|most recent call) last\\):
File "<stdin>", line \\d+, in (\\?|<module>)
File "[^"]+TestCommon.py", line \\d+, in run
TestCmd.run\\(self, \\*\\*kw\\)
File "[^"]+TestCmd.py", line \\d+, in run
.*
File "[^"]+TestCommon.py", line \\d+, in start
raise e
TypeError: forced TypeError
""" % re.escape(repr(sys.executable)))
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_ignore_stderr(self):
"""Test run(): ignore stderr"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(stderr_script)s',
interpreter='%(python)s',
workdir='')
tc.run(stderr = None)
""")
self.run_execution_test(script, "", "")
def test_match_function_stdout(self):
"""Test run(): explicit match function, stdout"""
script = lstrip("""\
def my_match_exact(actual, expect): return actual == expect
from TestCommon import TestCommon, match_re_dotall
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_re_dotall)
tc.run(arguments = "arg1 arg2 arg3",
stdout = r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n",
match = my_match_exact)
""")
self.run_execution_test(script, "", "")
def test_match_function_stderr(self):
"""Test run(): explicit match function, stderr"""
script = lstrip("""\
def my_match_exact(actual, expect): return actual == expect
from TestCommon import TestCommon, match_re_dotall
tc = TestCommon(program=r'%(stderr_script)s',
interpreter='%(python)s',
workdir="",
match=match_re_dotall)
tc.run(arguments = "arg1 arg2 arg3",
stderr = r"%(stderr_script)s: STDERR: ['arg1', 'arg2', 'arg3']" + "\\n",
match = my_match_exact)
""")
self.run_execution_test(script, "", "")
def test_matched_status_fails(self):
"""Test run(): matched status, script fails"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(fail_script)s',
interpreter='%(python)s',
workdir='')
tc.run(status = 1)
""")
self.run_execution_test(script, "", "")
def test_matched_stdout(self):
"""Test run(): matched stdout"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
tc.run(stdout = r"%(pass_script)s: STDOUT: []" + "\\n")
""")
self.run_execution_test(script, "", "")
def test_matched_stderr(self):
"""Test run(): matched stderr"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(stderr_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
tc.run(stderr = r"%(stderr_script)s: STDERR: []" + "\\n")
""")
self.run_execution_test(script, "", "")
def test_mismatched_status_pass(self):
"""Test run(): mismatched status, script passes"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir='')
tc.run(status = 1)
""")
expect_stdout = lstrip("""\
%(pass_script)s returned 0 (expected 1)
STDOUT =========================================================================
%(pass_script)s: STDOUT: []
STDERR =========================================================================
""")
expect_stderr = lstrip("""\
FAILED test of .*pass
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>( \(<module>\))?
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_mismatched_status_fail(self):
"""Test run(): mismatched status, script fails"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(fail_script)s',
interpreter='%(python)s',
workdir='')
tc.run(status = 2)
""")
expect_stdout = lstrip("""\
%(fail_script)s returned 1 (expected 2)
STDOUT =========================================================================
%(fail_script)s: STDOUT: []
STDERR =========================================================================
""")
expect_stderr = lstrip("""\
FAILED test of .*fail
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>( \(<module>\))?
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_mismatched_stdout(self):
"""Test run(): mismatched stdout"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir='')
tc.run(stdout = "Not found\\n")
""")
expect_stdout = lstrip("""\
STDOUT =========================================================================
1c1
< Not found
---
> %(pass_script)s: STDOUT: []
""")
expect_stderr = lstrip("""\
FAILED test of .*pass
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>( \(<module>\))?
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_mismatched_stderr(self):
"""Test run(): mismatched stderr"""
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(stderr_script)s',
interpreter='%(python)s',
workdir='')
tc.run(stderr = "Not found\\n")
""")
expect_stdout = lstrip("""\
STDOUT =========================================================================
STDERR =========================================================================
1c1
< Not found
---
> %(stderr_script)s: STDERR: []
""")
expect_stderr = lstrip("""\
FAILED test of .*stderr
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>( \(<module>\))?
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_option_handling(self):
"""Test run(): option handling"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
tc.run(options = "opt1 opt2 opt3",
stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n")
""")
self.run_execution_test(script, "", "")
def test_options_plus_arguments(self):
"""Test run(): option handling with arguments"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
tc.run(options = "opt1 opt2 opt3",
arguments = "arg1 arg2 arg3",
stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n")
""")
self.run_execution_test(script, "", "")
def test_signal_handling(self):
"""Test run(): signal handling"""
try:
os.kill
except AttributeError:
sys.stderr.write('can not test, no os.kill ... ')
return
script = lstrip("""\
from TestCommon import TestCommon
tc = TestCommon(program=r'%(signal_script)s',
interpreter='%(python)s',
workdir='')
tc.run()
""")
self.SIGTERM = signal.SIGTERM
# Script returns the signal value as a negative number.
expect_stdout = lstrip("""\
%(signal_script)s returned -%(SIGTERM)s
STDOUT =========================================================================
STDERR =========================================================================
""")
expect_stderr = lstrip("""\
FAILED test of .*signal
\\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
\\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
\\tfrom line \\d+ of <stdin>
""")
expect_stderr = re.compile(expect_stderr, re.M)
self.run_execution_test(script, expect_stdout, expect_stderr)
def test_stdin(self):
"""Test run(): stdin handling"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(stdin_script)s',
interpreter='%(python)s',
workdir='',
match=match_exact)
expect_stdout = r"%(stdin_script)s: STDOUT: 'input'" + "\\n"
expect_stderr = r"%(stdin_script)s: STDERR: 'input'" + "\\n"
tc.run(stdin="input\\n", stdout = expect_stdout, stderr = expect_stderr)
""")
self.run_execution_test(script, "", "")
class start_TestCase(TestCommonTestCase):
def test_option_handling(self):
"""Test start(): option handling"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
p = tc.start(options = "opt1 opt2 opt3")
expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n"
tc.finish(p, stdout = expect)
""")
self.run_execution_test(script, "", "")
def test_options_plus_arguments(self):
"""Test start(): option handling with arguments"""
script = lstrip("""\
from TestCommon import TestCommon, match_exact
tc = TestCommon(program=r'%(pass_script)s',
interpreter='%(python)s',
workdir="",
match=match_exact)
p = tc.start(options = "opt1 opt2 opt3",
arguments = "arg1 arg2 arg3")
expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n"
tc.finish(p, stdout = expect)
""")
self.run_execution_test(script, "", "")
class skip_test_TestCase(TestCommonTestCase):
def test_skip_test(self):
"""Test skip_test()"""
run_env = self.run_env
script = lstrip("""\
import TestCommon
test = TestCommon.TestCommon(workdir='')
test.skip_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Skipping test.\n", stdout
stderr = run_env.stderr()
expect = [
"NO RESULT for test at line 3 of <stdin>\n",
"NO RESULT for test at line 3 of <stdin> (<module>)\n",
]
assert stderr in expect, repr(stderr)
script = lstrip("""\
import TestCommon
test = TestCommon.TestCommon(workdir='')
test.skip_test("skipping test because I said so\\n")
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "skipping test because I said so\n", stdout
stderr = run_env.stderr()
expect = [
"NO RESULT for test at line 3 of <stdin>\n",
"NO RESULT for test at line 3 of <stdin> (<module>)\n",
]
assert stderr in expect, repr(stderr)
import os
os.environ['TESTCOMMON_PASS_SKIPS'] = '1'
try:
script = lstrip("""\
import TestCommon
test = TestCommon.TestCommon(workdir='')
test.skip_test()
""")
run_env.run(program=sys.executable, stdin=script)
stdout = run_env.stdout()
assert stdout == "Skipping test.\n", stdout
stderr = run_env.stderr()
assert stderr == "PASSED\n", stderr
finally:
del os.environ['TESTCOMMON_PASS_SKIPS']
class variables_TestCase(TestCommonTestCase):
def test_variables(self):
"""Test global variables"""
run_env = self.run_env
variables = [
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python',
'_python_',
'TestCmd',
'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
]
script = "from __future__ import print_function" + \
"import TestCommon\n" + \
'\n'.join([ "print(TestCommon.%s)\n" % v for v in variables ])
run_env.run(program=sys.executable, stdin=script)
stderr = run_env.stderr()
assert stderr == "", stderr
script = "from __future__ import print_function" + \
"from TestCommon import *\n" + \
'\n'.join([ "print(%s)" % v for v in variables ])
run_env.run(program=sys.executable, stdin=script)
stderr = run_env.stderr()
assert stderr == "", stderr
if __name__ == "__main__":
tclasses = [
__init__TestCase,
banner_TestCase,
must_be_writable_TestCase,
must_contain_TestCase,
must_contain_all_lines_TestCase,
must_contain_any_line_TestCase,
must_contain_exactly_lines_TestCase,
must_contain_lines_TestCase,
must_exist_TestCase,
must_exist_one_of_TestCase,
must_match_TestCase,
must_not_be_writable_TestCase,
must_not_contain_TestCase,
must_not_contain_any_line_TestCase,
must_not_contain_lines_TestCase,
must_not_exist_TestCase,
must_not_exist_any_of_TestCase,
run_TestCase,
start_TestCase,
skip_test_TestCase,
variables_TestCase,
]
suite = unittest.TestSuite()
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests([ tclass(n) for n in names ])
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 30.435711
| 111
| 0.525235
|
7949cc2babbaaaffa5e411764d29ed573a0c7c69
| 8,092
|
py
|
Python
|
test/test_database.py
|
darcyabjones/acc-to-tax
|
608651a4f055c75839eef26a9b4a0582f5840ed2
|
[
"MIT"
] | null | null | null |
test/test_database.py
|
darcyabjones/acc-to-tax
|
608651a4f055c75839eef26a9b4a0582f5840ed2
|
[
"MIT"
] | null | null | null |
test/test_database.py
|
darcyabjones/acc-to-tax
|
608651a4f055c75839eef26a9b4a0582f5840ed2
|
[
"MIT"
] | null | null | null |
"""
"""
import os
import re
from unittest.mock import MagicMock
import pytest
from acc2tax.database import int2bool
from acc2tax.database import bool2int
from acc2tax.database import Base
from acc2tax.database import BaseTable
from acc2tax.database import Acc2Tax
from acc2tax.database import Nodes
from acc2tax.database import Names
from acc2tax.database import Division
from acc2tax.database import GenCode
# Define fixtures
@pytest.fixture()
def session():
""" Sets up an in-memory sqlite database for testing. """
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Setup
engine = create_engine("sqlite:///:memory:", echo=False)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
yield session
# Teardown
session.close()
@pytest.fixture()
def nodes_table(session):
Nodes.from_file("test/data/sample_nodes.dmp", session)
return Nodes
@pytest.mark.parametrize("expected,i", [
(True, '1'),
(False, '0'),
])
def test_int2bool(i, expected):
assert int2bool(i) == expected
return
@pytest.mark.parametrize("i,expected", [
(True, '1'),
(False, '0'),
])
def test_bool2int(i, expected):
assert bool2int(i) == expected
return
class TestBaseTable(object):
parser_test_lines = [
{
"line": "one\t2\t0\n",
"sep": "\t",
"end": "\n",
"columns": [("a", str, str), ("b", int, str), ("c", int2bool, bool2int)],
"expected": {"a": "one", "b": 2, "c": False},
},
{
"line": "1234\t|\t567\t|\teight\t|\n",
"sep": "\t|\t",
"end": "\t|\n",
"columns": [("a", int, str), ("b", str, str), ("c", str, str)],
"expected": {"a": 1234, "b": "567", "c": "eight"}
},
{
"line": "1234\t|\t567\t|\t\t|\n",
"sep": "\t|\t",
"end": "\t|\n",
"columns": [("a", int, str), ("b", str, str), ("c", str, str)],
"expected": {"a": 1234, "b": "567", "c": ""}
},
]
parser_test_files = [
{
"path": "test/data/sample_nodes.dmp",
"sep": Nodes.sep,
"end": Nodes.end,
"columns": Nodes.columns,
"header": Nodes.header,
},
{
"path": "test/data/sample_names.dmp",
"sep": Names.sep,
"end": Names.end,
"columns": Names.columns,
"header": Names.header,
},
{
"path": "test/data/sample_division.dmp",
"sep": Division.sep,
"end": Division.end,
"columns": Division.columns,
"header": Division.header,
},
]
writer_test_records = [
{
"records": [
MagicMock(
taxid=2,
parent_taxid=1,
rank="species",
embl_code='',
division_id=0,
inherited_div_flag=False,
genetic_code_id='TEST',
inherited_genetic_code_flag=True,
mitochonchondrial_genetic_code_id="TEST",
inherited_mitochonchondrial_genetic_code_flag=True,
genbank_hidden_flag=False,
hidden_subtree_root_flag=False,
comments=''
),
MagicMock(
taxid=3,
parent_taxid=1,
rank="species",
embl_code='',
division_id=0,
inherited_div_flag=False,
genetic_code_id='TEST',
inherited_genetic_code_flag=True,
mitochonchondrial_genetic_code_id="TEST",
inherited_mitochonchondrial_genetic_code_flag=True,
genbank_hidden_flag=True,
hidden_subtree_root_flag=False,
comments=''
),
],
"sep": Nodes.sep,
"end": Nodes.end,
"columns": Nodes.columns,
"header": Nodes.header,
"expected": [
[["2", "1", "species", "", "0", "0", "TEST", "1", "TEST","1", "0", "0", ""],
["3", "1", "species", "", "0", "0", "TEST", "1", "TEST","1", "1", "0", ""],]
],
},
{
"records": [
MagicMock(
taxid=2,
name_="species1",
unique_name="",
name_class="Scientific name",
),
MagicMock(
taxid=3,
name_="species2",
unique_name="species2_1",
name_class="Common name",
),
],
"sep": Names.sep,
"end": Names.end,
"columns": Names.columns,
"header": Names.header,
"expected": [
"\t|\t".join(x) + "\t|\n" for x in
[["2", "species1", "", "Scientific name"],
["3", "species2", "species2_1", "Common name"],]
],
},
]
@pytest.mark.parametrize("line,sep,end,columns,expected",
[
(l["line"], l["sep"], l["end"], l["columns"], l["expected"])
for l in parser_test_lines
]
)
def test_line_trans(self, line, sep, end, columns, expected):
result = BaseTable.line_trans(line, sep, end, columns)
print("result:", result)
for k_exp, v_exp in expected.items():
assert v_exp == result[k_exp]
return
@pytest.mark.parametrize("line,sep,end,columns,expected",
[
(l["line"], l["sep"], l["end"], l["columns"], l["expected"])
for l in parser_test_lines
]
)
def test_string_fmt(self, line, sep, end, columns, expected):
obj = MagicMock(**expected)
result = BaseTable.string_fmt(obj, sep, end, columns)
print("result:", result)
assert result == line
return
@pytest.mark.parametrize("path,sep,end,columns,header",
[
(l["path"], l["sep"], l["end"], l["columns"], l["header"])
for l in parser_test_files
]
)
def test_from_file(self, path, sep, end, columns, header):
session = MagicMock()
session.commit = MagicMock(return_value=None)
session.bulk_insert_mappings = MagicMock(return_value=None)
BaseTable.from_file(
filepath=path,
session=session,
sep=sep,
columns=columns,
header=header,
)
session.bulk_insert_mappings.assert_called()
session.commit.assert_called_once()
return
@pytest.mark.parametrize("records,sep,end,columns,header,expected",
[
(l["records"], l["sep"], l["end"], l["columns"], l["header"], l["expected"])
for l in writer_test_records
]
)
def test_to_table(self, records, sep, end, columns, header, expected):
        # Since name is a special attribute for Mock, we need to assign it
        # after creation time.
for record in records:
record.name = record.name_
results = BaseTable.to_table(records, sep, end, columns, header)
for result, exp in zip(results, expected):
assert result == exp
return
class TestNodes(object):
def test_from_file(self, session):
Nodes.from_file("test/data/sample_nodes.dmp", session)
record = session.query(Nodes).filter(Nodes.taxid == 1224).one()
assert record.parent_taxid == 2
assert record.rank == "phylum"
return
def test_get_parents(self, session, nodes_table):
# Nodes table is already populated with fixture
Nodes.get_parents()
return
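# These tests are run with pytest from the repository root (illustrative):
#   pytest test/test_database.py -v
# The sample fixtures under test/data/ (sample_nodes.dmp, sample_names.dmp,
# sample_division.dmp) must be present.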
| 30.421053
| 96
| 0.498023
|
7949cd037e31307fbc988a596decd4ecf4e0dfad
| 6,702
|
py
|
Python
|
lib/surface/compute/url_maps/remove_host_rule.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/url_maps/remove_host_rule.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/compute/url_maps/remove_host_rule.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for removing a host rule from a URL map."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.url_maps import flags
from googlecloudsdk.command_lib.compute.url_maps import url_maps_utils
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class RemoveHostRule(base.UpdateCommand):
"""Remove a host rule from a URL map.
*{command}* is used to remove a host rule from a URL map. When
a host rule is removed, its path matcher is only removed if
it is not referenced by any other host rules and
`--delete-orphaned-path-matcher` is provided.
## EXAMPLES
To remove a host rule that contains the host `example.com`
from the URL map named `MY-URL-MAP`, you can use this
command:
$ {command} MY-URL-MAP --host example.com
"""
URL_MAP_ARG = None
@classmethod
def Args(cls, parser):
cls.URL_MAP_ARG = flags.UrlMapArgument()
cls.URL_MAP_ARG.AddArgument(parser)
parser.add_argument(
'--host',
required=True,
help='One of the hosts in the host rule to remove.')
parser.add_argument(
'--delete-orphaned-path-matcher',
action='store_true',
default=False,
help=('If provided and a path matcher is orphaned as a result of this '
'command, the command removes the orphaned path matcher instead '
'of failing.'))
def _GetGetRequest(self, client, url_map_ref):
"""Returns the request for the existing URL map resource."""
if url_maps_utils.IsGlobalUrlMapRef(url_map_ref):
return (client.apitools_client.urlMaps, 'Get',
client.messages.ComputeUrlMapsGetRequest(
urlMap=url_map_ref.Name(), project=url_map_ref.project))
else:
return (client.apitools_client.regionUrlMaps, 'Get',
client.messages.ComputeRegionUrlMapsGetRequest(
urlMap=url_map_ref.Name(),
project=url_map_ref.project,
region=url_map_ref.region))
def _GetSetRequest(self, client, url_map_ref, replacement):
if url_maps_utils.IsGlobalUrlMapRef(url_map_ref):
return (client.apitools_client.urlMaps, 'Update',
client.messages.ComputeUrlMapsUpdateRequest(
urlMap=url_map_ref.Name(),
urlMapResource=replacement,
project=url_map_ref.project))
else:
return (client.apitools_client.regionUrlMaps, 'Update',
client.messages.ComputeRegionUrlMapsUpdateRequest(
urlMap=url_map_ref.Name(),
urlMapResource=replacement,
project=url_map_ref.project,
region=url_map_ref.region))
def _Modify(self, args, existing):
"""Returns a modified URL map message."""
replacement = encoding.CopyProtoMessage(existing)
path_matcher_to_remove = None
new_host_rules = []
for host_rule in existing.hostRules:
if args.host in host_rule.hosts:
path_matcher_to_remove = host_rule.pathMatcher
else:
new_host_rules.append(host_rule)
if not path_matcher_to_remove:
raise exceptions.ToolException(
'No host rule contains the host [{0}].'.format(args.host))
replacement.hostRules = new_host_rules
path_matcher_is_used_by_other_rules = False
for host_rule in replacement.hostRules:
if host_rule.pathMatcher == path_matcher_to_remove:
path_matcher_is_used_by_other_rules = True
break
if not path_matcher_is_used_by_other_rules:
if args.delete_orphaned_path_matcher:
replacement.pathMatchers = [
path_matcher for path_matcher in existing.pathMatchers
if path_matcher.name != path_matcher_to_remove]
else:
raise exceptions.ToolException(
'This operation will orphan the path matcher [{0}]. To '
'delete the orphan path matcher, rerun this command with '
'[--delete-orphaned-path-matcher] or use [gcloud compute '
'url-maps edit] to modify the URL map by hand.'.format(
                path_matcher_to_remove))
return replacement
def Run(self, args):
"""Issues requests necessary to remove host rule on URL maps."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
url_map_ref = self.URL_MAP_ARG.ResolveAsResource(args, holder.resources)
get_request = self._GetGetRequest(client, url_map_ref)
objects = client.MakeRequests([get_request])
new_object = self._Modify(args, objects[0])
return client.MakeRequests(
[self._GetSetRequest(client, url_map_ref, new_object)])
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class RemoveHostRuleAlpha(RemoveHostRule):
"""Remove a host rule from a URL map.
*{command}* is used to remove a host rule from a URL map. When
a host rule is removed, its path matcher is only removed if
it is not referenced by any other host rules and
`--delete-orphaned-path-matcher` is provided.
## EXAMPLES
To remove a host rule that contains the host `example.com`
from the URL map named `MY-URL-MAP`, you can use this
command:
$ {command} MY-URL-MAP --host example.com --global
"""
URL_MAP_ARG = None
@classmethod
def Args(cls, parser):
cls.URL_MAP_ARG = flags.UrlMapArgument(include_alpha=True)
cls.URL_MAP_ARG.AddArgument(parser)
parser.add_argument(
'--host',
required=True,
help='One of the hosts in the host rule to remove.')
parser.add_argument(
'--delete-orphaned-path-matcher',
action='store_true',
default=False,
help=('If provided and a path matcher is orphaned as a result of this '
'command, the command removes the orphaned path matcher instead '
'of failing.'))
| 35.839572
| 79
| 0.693524
|
7949ce4693772bd404fef33280f01f30cf041320
| 3,135
|
py
|
Python
|
pt_bt/pso.py
|
LukeKort/Opp_lite
|
e47b0a1e38dc1e675663c83dc2d75f4506a859e5
|
[
"MIT"
] | null | null | null |
pt_bt/pso.py
|
LukeKort/Opp_lite
|
e47b0a1e38dc1e675663c83dc2d75f4506a859e5
|
[
"MIT"
] | null | null | null |
pt_bt/pso.py
|
LukeKort/Opp_lite
|
e47b0a1e38dc1e675663c83dc2d75f4506a859e5
|
[
"MIT"
] | null | null | null |
# Particle Swarm (Aug. 04, 2021)
import time
import numpy as np
import random as rand
from importlib import reload #to reload a previously loaded file
def pso(n_particles,n_variables,n_iterations,tolerance,a,b,pso_only_w,pso_only_c1,pso_only_c2):
    from random_matrix import radom_generator #random generator: takes the x and y vector dimensions and the limits a and b
import functions
    reload(functions) #update with the changes made in the functions file
    from functions import objective #objective function(s)
from functions import constraints #constraint function(s)
best_result_acum = np.empty((n_iterations)) #preallocation
v = radom_generator(n_variables,n_particles,a,b) #velocity matrix
x_aux = radom_generator(n_variables,n_particles,a,b)
x = radom_generator(n_variables,n_particles,a,b)
x_best = np.zeros((n_variables))
    cc = 1 #controller counter
t_0 = time.time() #start time
for i in range(n_iterations):
        x_0=x.copy() #stores the last x before the update
for j in range(n_particles):
v[:,j]= pso_only_w*v[:,j] + rand.random()*pso_only_c1*(x_aux[:,j]-x[:,j]) + rand.random()*pso_only_c2*(x_best - x[:,j]) #new velocity matrix
x[:,j]=x_0[:,j]+v[:,j] #new position matrix
for k in range(n_variables): #test with the limits (a,b)
if x[k,j]<a[k]:
x[k,j]=a[k]+(b[k]-a[k])*rand.random()
if x[k,j]>b[k]:
x[k,j]=a[k]+(b[k]-a[k])*rand.random()
            if (constraints(x[:,j])) is True: #test the new x against the constraint functions
                if (objective(x[:,j])) < objective(x_aux[:,j]): #test the new x against the objective function
x_aux[:,j] = x[:,j].copy() #setting new best particle position
if cc == 1:
results = np.full(n_particles,objective(x_aux[:,j])) #the 1st best value will fill the results vector
best_result_acum = np.full(n_iterations,objective(x_aux[:,j]))
cc += 1
else:
results[j] = objective(x_aux[:,j]) #save result per particle
best_result = min(results).copy() #find best result of all particles
best_result_acum[i] = best_result.copy()
idx = results.tolist().index(best_result) #find the best result's index inside the results vector
x_best = x_aux[:,idx] #find the best result's position
        if tolerance >= np.amax(abs(x-x_0)): #stop early once no particle moved more than the tolerance
break
t_end = time.time() #finish time
t_total = t_end - t_0 #total processing time
if cc == 1:
        best_result = x_best = 'Not found!' #if the problem can't be solved, report it with this message
    print('#Particle Swarm\nBest result:',best_result,'\nPosition:',x_best,'\nExecution time:',(t_total),'s\n')
return({'best_result':best_result,'acumulate_result':best_result_acum,'x_best':x_best,'t_total':t_total,'max_n_iteration':i})
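# Usage sketch (added for illustration; not part of the original module).
# The bounds and PSO coefficients below are assumed example values; running
# it requires a functions.py providing objective(x) and constraints(x), and
# random_matrix.py providing radom_generator.
if __name__ == '__main__':
    a = np.array([-5.0, -5.0]) #lower bound per variable
    b = np.array([5.0, 5.0])   #upper bound per variable
    out = pso(n_particles=30, n_variables=2, n_iterations=200, tolerance=1e-6,
              a=a, b=b, pso_only_w=0.7, pso_only_c1=1.5, pso_only_c2=1.5)
    print(out['best_result'], out['x_best'])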
| 49.761905
| 152
| 0.610845
|
7949ced8450baf9835425acb6c6215ccc23e19c3
| 7,723
|
py
|
Python
|
cvlib/object_detection.py
|
wajahatali93/cvlib
|
2722470f22b44141b989c31acbaa1ec08d694050
|
[
"MIT"
] | null | null | null |
cvlib/object_detection.py
|
wajahatali93/cvlib
|
2722470f22b44141b989c31acbaa1ec08d694050
|
[
"MIT"
] | null | null | null |
cvlib/object_detection.py
|
wajahatali93/cvlib
|
2722470f22b44141b989c31acbaa1ec08d694050
|
[
"MIT"
] | null | null | null |
import cv2
import os
import numpy as np
from .utils import download_file
initialize = True
net = None
dest_dir = os.path.expanduser('~') + os.path.sep + '.cvlib' + os.path.sep + 'object_detection' + os.path.sep + 'yolo' + os.path.sep + 'yolov3'
classes = None
COLORS = np.random.uniform(0, 255, size=(80, 3))
def populate_class_labels():
class_file_name = 'yolov3_classes.txt'
class_file_abs_path = dest_dir + os.path.sep + class_file_name
url = 'https://github.com/arunponnusamy/object-detection-opencv/raw/master/yolov3.txt'
if not os.path.exists(class_file_abs_path):
download_file(url=url, file_name=class_file_name, dest_dir=dest_dir)
f = open(class_file_abs_path, 'r')
classes = [line.strip() for line in f.readlines()]
return classes
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def draw_bbox(img, bbox, labels, confidence, colors=None, write_conf=False):
global COLORS
global classes
if classes is None:
classes = populate_class_labels()
for i, label in enumerate(labels):
if colors is None:
color = COLORS[classes.index(label)]
else:
color = colors[classes.index(label)]
if write_conf:
label += ' ' + str(format(confidence[i] * 100, '.2f')) + '%'
cv2.rectangle(img, (bbox[i][0],bbox[i][1]), (bbox[i][2],bbox[i][3]), color, 2)
cv2.putText(img, label, (bbox[i][0],bbox[i][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
return img
def detect_common_objects(image, confidence=0.5, nms_thresh=0.3, model='yolov3', enable_gpu=False):
Height, Width = image.shape[:2]
scale = 0.00392
global classes
global dest_dir
config_file_abs_path = ''
weights_file_abs_path = ''
if model == 'yolov3-tiny':
config_file_name = 'yolov3-tiny.cfg'
cfg_url = "https://github.com/pjreddie/darknet/raw/master/cfg/yolov3-tiny.cfg"
weights_file_name = 'yolov3-tiny.weights'
weights_url = 'https://pjreddie.com/media/files/yolov3-tiny.weights'
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
config_file_abs_path = dest_dir + os.path.sep + config_file_name
weights_file_abs_path = dest_dir + os.path.sep + weights_file_name
    else:
        config_file_name = 'yolov3.cfg'
        cfg_url = 'https://github.com/arunponnusamy/object-detection-opencv/raw/master/yolov3.cfg'
        weights_file_name = 'yolov3.weights'
        weights_url = 'https://pjreddie.com/media/files/yolov3.weights'
        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
        config_file_abs_path = dest_dir + os.path.sep + config_file_name
        weights_file_abs_path = dest_dir + os.path.sep + weights_file_name
if not os.path.exists(config_file_abs_path):
download_file(url=cfg_url, file_name=config_file_name, dest_dir=dest_dir)
if not os.path.exists(weights_file_abs_path):
download_file(url=weights_url, file_name=weights_file_name, dest_dir=dest_dir)
global initialize
global net
if initialize:
classes = populate_class_labels()
net = cv2.dnn.readNet(weights_file_abs_path, config_file_abs_path)
initialize = False
# enables opencv dnn module to use CUDA on Nvidia card instead of cpu
if enable_gpu:
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
max_conf = scores[class_id]
if max_conf > confidence:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - (w / 2)
y = center_y - (h / 2)
class_ids.append(class_id)
confidences.append(float(max_conf))
boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, confidence, nms_thresh)
bbox = []
label = []
conf = []
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
bbox.append([int(x), int(y), int(x+w), int(y+h)])
label.append(str(classes[class_ids[i]]))
conf.append(confidences[i])
return bbox, label, conf
class YOLO:
def __init__(self, weights, config, labels, version='yolov3'):
print('[INFO] Initializing YOLO ..')
self.config = config
self.weights = weights
self.version = version
with open(labels, 'r') as f:
self.labels = [line.strip() for line in f.readlines()]
self.colors = np.random.uniform(0, 255, size=(len(self.labels), 3))
self.net = cv2.dnn.readNet(self.weights, self.config)
layer_names = self.net.getLayerNames()
self.output_layers = [layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
def detect_objects(self, image, confidence=0.5, nms_thresh=0.3,
enable_gpu=False):
if enable_gpu:
            self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
Height, Width = image.shape[:2]
scale = 0.00392
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True,
crop=False)
self.net.setInput(blob)
outs = self.net.forward(self.output_layers)
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
max_conf = scores[class_id]
if max_conf > confidence:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - (w / 2)
y = center_y - (h / 2)
class_ids.append(class_id)
confidences.append(float(max_conf))
boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, confidence, nms_thresh)
bbox = []
label = []
conf = []
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
bbox.append([int(x), int(y), int(x+w), int(y+h)])
label.append(str(self.labels[class_ids[i]]))
conf.append(confidences[i])
return bbox, label, conf
def draw_bbox(self, img, bbox, labels, confidence, colors=None, write_conf=False):
if colors is None:
colors = self.colors
for i, label in enumerate(labels):
color = colors[self.labels.index(label)]
if write_conf:
label += ' ' + str(format(confidence[i] * 100, '.2f')) + '%'
cv2.rectangle(img, (bbox[i][0],bbox[i][1]), (bbox[i][2],bbox[i][3]), color, 2)
cv2.putText(img, label, (bbox[i][0],bbox[i][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
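# Usage sketch (added for illustration; 'image.jpg' and 'output.jpg' are
# placeholder paths). Shows the module-level API; the YOLO class above is
# the alternative for custom weight/config/label files.
if __name__ == '__main__':
    frame = cv2.imread('image.jpg')
    bbox, label, conf = detect_common_objects(frame)
    frame = draw_bbox(frame, bbox, label, conf, write_conf=True)
    cv2.imwrite('output.jpg', frame)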
| 31.522449
| 142
| 0.582546
|
7949cef9db58642cf4708cbd41d38ef4a91abb86
| 252
|
py
|
Python
|
falmer/matte/utils.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 2
|
2017-04-27T19:35:59.000Z
|
2017-06-13T16:19:33.000Z
|
falmer/matte/utils.py
|
sussexstudent/falmer
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 975
|
2017-04-13T11:31:07.000Z
|
2022-02-10T07:46:18.000Z
|
falmer/matte/utils.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 3
|
2018-05-09T06:42:25.000Z
|
2020-12-10T18:29:30.000Z
|
def create_image_from_bytes(file, file_name, internal_source):
from .models import MatteImage
image = MatteImage()
image.file = file
image.title = file_name
image.internal_source = internal_source
image.save()
return image
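# Usage sketch (added for illustration; assumes Django's ContentFile as the
# file wrapper, and the internal_source label is a hypothetical value):
def example_upload(raw_bytes):
    from django.core.files.base import ContentFile
    return create_image_from_bytes(
        ContentFile(raw_bytes, name='photo.png'), 'photo.png', 'example')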
| 25.2
| 62
| 0.722222
|
7949cf3833d15ea38a31bff5eebb536f90a0a27f
| 16,396
|
py
|
Python
|
scripts/initial.py
|
thunlp/HNRE
|
ab34ed9849f761d7e69d3dc42e3095d0e567b63c
|
[
"MIT"
] | 96
|
2018-08-29T08:57:25.000Z
|
2021-12-26T08:19:48.000Z
|
scripts/initial.py
|
thunlp/HNRE
|
ab34ed9849f761d7e69d3dc42e3095d0e567b63c
|
[
"MIT"
] | 14
|
2018-10-19T10:33:40.000Z
|
2022-01-08T23:37:26.000Z
|
scripts/initial.py
|
thunlp/HNRE
|
ab34ed9849f761d7e69d3dc42e3095d0e567b63c
|
[
"MIT"
] | 25
|
2018-10-18T09:46:24.000Z
|
2021-12-01T09:10:48.000Z
|
import numpy as np
import os
import sys
import json
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
makedirs('data/pn/')
makedirs('outputs/summary/')
makedirs('outputs/ckpt/')
makedirs('outputs/logits/')
# folder of training datasets
if len(sys.argv) > 1:
data_path = sys.argv[1]
else:
data_path = "./raw_data/"
# files to export data
if len(sys.argv) > 2:
export_path = sys.argv[2]
else:
export_path = "./data/"
#length of sentence
fixlen = 120
#max length of position embedding is 100 (-100~+100)
maxlen = 100
word2id = {}
relation2id = {}
word_size = 0
word_vec = None
def pos_embed(x):
return max(0, min(x + maxlen, maxlen + maxlen + 1))
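# Quick sanity check of the clipping above (added for illustration): the
# relative position is shifted by maxlen and clipped into [0, 2*maxlen + 1].
assert pos_embed(0) == maxlen
assert pos_embed(-2 * maxlen) == 0
assert pos_embed(2 * maxlen) == 2 * maxlen + 1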
def find_index(x,y):
for index, item in enumerate(y):
if x == item:
return index
return -1
def assert_equal(x, y):
assert x==y, 'ERROR: {} != {}'.format(x, y)
def init_word():
# reading word embedding data...
global word2id, word_size
print('reading word embedding data...')
f = open(data_path + 'vec.txt', "r")
total, size = f.readline().strip().split()[:2]
total = (int)(total)
word_size = (int)(size)
vec = np.ones((total, word_size), dtype = np.float32)
for i in range(total):
content = f.readline().strip().split()
word2id[content[0]] = len(word2id)
for j in range(word_size):
vec[i][j] = (float)(content[j+1])
f.close()
word2id['UNK'] = len(word2id)
word2id['BLANK'] = len(word2id)
global word_vec
word_vec = vec
def init_relation():
# reading relation ids...
global relation2id
print('reading relation ids...')
f = open(data_path + "relation2id.txt","r")
total = (int)(f.readline().strip())
for i in range(total):
content = f.readline().strip().split()
relation2id[content[0]] = int(content[1])
f.close()
def sort_files(name):
hash = {}
f = open(data_path + name + '.txt','r')
s = 0
while True:
content = f.readline()
if content == '':
break
s = s + 1
origin_data = content
content = content.strip().split()
en1_id = content[0]
en2_id = content[1]
rel_name = content[4]
if rel_name in relation2id:
relation = relation2id[rel_name]
else:
relation = relation2id['NA']
id = str(en1_id)+"#"+str(en2_id)+"#"+str(relation)
if not id in hash:
hash[id] = []
hash[id].append(origin_data)
f.close()
f = open(data_path + name + "_sort.txt", "w")
f.write("%d\n"%(s))
for i in hash:
for j in hash[i]:
f.write(j)
f.close()
def sort_test_files(name):
hash = {}
f = open(data_path + name + '.txt','r')
s = 0
while True:
content = f.readline()
if content == '':
break
s = s + 1
origin_data = content
content = content.strip().split()
en1_id = content[0]
en2_id = content[1]
rel_name = content[4]
if rel_name in relation2id:
relation = relation2id[rel_name]
else:
relation = relation2id['NA']
id = str(en1_id)+"#"+str(en2_id)
if not id in hash:
hash[id] = []
hash[id].append(origin_data)
f.close()
f = open(data_path + name + "_sort.txt", "w")
f.write("%d\n"%(s))
for i in hash:
for j in hash[i]:
f.write(j)
f.close()
def init_train_files(name):
print('reading ' + name +' data...')
f = open(data_path + name + '.txt','r')
total = (int)(f.readline().strip())
sen_word = np.zeros((total, fixlen), dtype = np.int32)
sen_pos1 = np.zeros((total, fixlen), dtype = np.int32)
sen_pos2 = np.zeros((total, fixlen), dtype = np.int32)
sen_mask = np.zeros((total, fixlen), dtype = np.int32)
sen_len = np.zeros((total), dtype = np.int32)
sen_label = np.zeros((total), dtype = np.int32)
instance_scope = []
instance_triple = []
for s in range(total):
content = f.readline().strip().split()
sentence = content[5:-1]
en1_id = content[0]
en2_id = content[1]
en1_name = content[2]
en2_name = content[3]
rel_name = content[4]
if rel_name in relation2id:
relation = relation2id[rel_name]
else:
relation = relation2id['NA']
for i in range(len(sentence)):
if sentence[i] == en1_name:
en1pos = i
if sentence[i] == en2_name:
en2pos = i
en_first = min(en1pos,en2pos)
en_second = en1pos + en2pos - en_first
for i in range(fixlen):
sen_word[s][i] = word2id['BLANK']
sen_pos1[s][i] = pos_embed(i - en1pos)
sen_pos2[s][i] = pos_embed(i - en2pos)
if i >= len(sentence):
sen_mask[s][i] = 0
elif i - en_first<=0:
sen_mask[s][i] = 1
elif i - en_second<=0:
sen_mask[s][i] = 2
else:
sen_mask[s][i] = 3
for i, word in enumerate(sentence):
if i >= fixlen:
break
elif not word in word2id:
sen_word[s][i] = word2id['UNK']
else:
sen_word[s][i] = word2id[word]
sen_len[s] = min(fixlen, len(sentence))
sen_label[s] = relation
tup = (en1_id,en2_id,relation)
if instance_triple == [] or instance_triple[len(instance_triple) - 1] != tup:
instance_triple.append(tup)
instance_scope.append([s,s])
instance_scope[len(instance_triple) - 1][1] = s
if (s+1) % 100 == 0:
sys.stdout.write(str(s)+'\r')
sys.stdout.flush()
return np.array(instance_triple), np.array(instance_scope), sen_len, sen_label, sen_word, sen_pos1, sen_pos2, sen_mask
def init_test_files(name):
print('reading ' + name +' data...')
f = open(data_path + name + '.txt','r')
total = (int)(f.readline().strip())
sen_word = np.zeros((total, fixlen), dtype = np.int32)
sen_pos1 = np.zeros((total, fixlen), dtype = np.int32)
sen_pos2 = np.zeros((total, fixlen), dtype = np.int32)
sen_mask = np.zeros((total, fixlen), dtype = np.int32)
sen_len = np.zeros((total), dtype = np.int32)
sen_label = np.zeros((total), dtype = np.int32)
entity_pair = []
entity_scope = []
for s in range(total):
content = f.readline().strip().split()
sentence = content[5:-1]
en1_id = content[0]
en2_id = content[1]
en1_name = content[2]
en2_name = content[3]
rel_name = content[4]
if rel_name in relation2id:
relation = relation2id[rel_name]
else:
relation = relation2id['NA']
for i in range(len(sentence)):
if sentence[i] == en1_name:
en1pos = i
if sentence[i] == en2_name:
en2pos = i
en_first = min(en1pos,en2pos)
en_second = en1pos + en2pos - en_first
for i in range(fixlen):
sen_word[s][i] = word2id['BLANK']
sen_pos1[s][i] = pos_embed(i - en1pos)
sen_pos2[s][i] = pos_embed(i - en2pos)
if i >= len(sentence):
sen_mask[s][i] = 0
elif i - en_first<=0:
sen_mask[s][i] = 1
elif i - en_second<=0:
sen_mask[s][i] = 2
else:
sen_mask[s][i] = 3
for i, word in enumerate(sentence):
if i >= fixlen:
break
elif not word in word2id:
sen_word[s][i] = word2id['UNK']
else:
sen_word[s][i] = word2id[word]
sen_len[s] = min(fixlen, len(sentence))
sen_label[s] = relation
pair = (en1_id,en2_id)
if entity_pair == [] or entity_pair[-1] != pair:
entity_pair.append(pair)
entity_scope.append([s,s])
entity_scope[-1][1] = s
if (s+1) % 100 == 0:
sys.stdout.write(str(s)+'\r')
sys.stdout.flush()
return np.array(entity_pair), np.array(entity_scope), sen_len, sen_label, sen_word, sen_pos1, sen_pos2, sen_mask
def init_test_files_pn(name):
print('reading ' + name +' data...')
f = open(data_path + name + '.txt','r')
total = (int)(f.readline().strip())
print(total)
sen_word = np.zeros((total, fixlen), dtype = np.int32)
sen_pos1 = np.zeros((total, fixlen), dtype = np.int32)
sen_pos2 = np.zeros((total, fixlen), dtype = np.int32)
sen_mask = np.zeros((total, fixlen), dtype = np.int32)
sen_len = np.zeros((total), dtype = np.int32)
sen_label = np.zeros((total), dtype = np.int32)
entity_pair = []
entity_pair_pall = []
entity_pair_palli = []
entity_scope = []
entity_scope_pall = []
sall = 0
for s in range(total):
content = f.readline().strip().split()
sentence = content[5:-1]
en1_id = content[0]
en2_id = content[1]
en1_name = content[2]
en2_name = content[3]
rel_name = content[4]
if rel_name in relation2id:
relation = relation2id[rel_name]
else:
relation = relation2id['NA']
for i in range(len(sentence)):
if sentence[i] == en1_name:
en1pos = i
if sentence[i] == en2_name:
en2pos = i
en_first = min(en1pos,en2pos)
en_second = en1pos + en2pos - en_first
for i in range(fixlen):
sen_word[s][i] = word2id['BLANK']
sen_pos1[s][i] = pos_embed(i - en1pos)
sen_pos2[s][i] = pos_embed(i - en2pos)
if i >= len(sentence):
sen_mask[s][i] = 0
elif i - en_first<=0:
sen_mask[s][i] = 1
elif i - en_second<=0:
sen_mask[s][i] = 2
else:
sen_mask[s][i] = 3
for i, word in enumerate(sentence):
if i >= fixlen:
break
elif not word in word2id:
sen_word[s][i] = word2id['UNK']
else:
sen_word[s][i] = word2id[word]
sen_len[s] = min(fixlen, len(sentence))
sen_label[s] = relation
pair = (en1_id,en2_id)
if entity_pair == [] or entity_pair[-1] != pair:
if len(entity_pair) > 0:
first_t = entity_scope[-1][0]
last_t = entity_scope[-1][1]
if last_t - first_t >= 0:
entity_pair_pall.append(entity_pair[-1])
entity_pair_palli.append(len(entity_pair) - 1)
entity_scope_pall.append([sall, sall + last_t - first_t])
sall = sall + 1 + last_t - first_t
entity_pair.append(pair)
entity_scope.append([s,s])
entity_scope[-1][1] = s
if (s+1) % 100 == 0:
sys.stdout.write(str(s)+'\r')
sys.stdout.flush()
f.close()
first_t = entity_scope[-1][0]
last_t = entity_scope[-1][1]
if last_t - first_t >= 0:
entity_pair_pall.append(entity_pair[-1])
entity_pair_palli.append(len(entity_pair) - 1)
entity_scope_pall.append([sall, sall + last_t - first_t])
sall = sall + 1 + last_t - first_t
index_pall = np.hstack([np.arange(entity_scope[x][0], entity_scope[x][1] + 1) for x in entity_pair_palli])
index_pone = np.hstack([np.random.randint(entity_scope[x][0], entity_scope[x][1] + 1) for x in entity_pair_palli])
index_ptwo = []
for x in entity_pair_palli:
if entity_scope[x][0] == entity_scope[x][1]:
index_ptwo.append(np.array([entity_scope[x][0],entity_scope[x][0]]))
else:
index_ptwo.append(np.random.choice(np.arange(entity_scope[x][0], entity_scope[x][1] + 1), 2, replace=False))
index_ptwo = np.hstack(index_ptwo)
arrays = {}
arrays['entity_pair'] = np.array(entity_pair)
arrays['word'] = sen_word
arrays['label'] = sen_label
arrays['len'] = sen_len
arrays['mask'] = sen_mask
arrays['pos1'] = sen_pos1
arrays['pos2'] = sen_pos2
arrays['entity_scope'] = np.array(entity_scope)
arrays['entity_pair_pn'] = np.array(entity_pair_pall)
arrays['word_pall'] = sen_word[index_pall]
arrays['label_pall'] = sen_label[index_pall]
arrays['len_pall'] = sen_len[index_pall]
arrays['mask_pall'] = sen_mask[index_pall]
arrays['pos1_pall'] = sen_pos1[index_pall]
arrays['pos2_pall'] = sen_pos2[index_pall]
arrays['entity_scope_pall'] = np.array(entity_scope_pall)
arrays['word_pone'] = sen_word[index_pone]
arrays['label_pone'] = sen_label[index_pone]
arrays['len_pone'] = sen_len[index_pone]
arrays['mask_pone'] = sen_mask[index_pone]
arrays['pos1_pone'] = sen_pos1[index_pone]
arrays['pos2_pone'] = sen_pos2[index_pone]
arrays['entity_scope_pone'] = np.tile(np.arange(arrays['word_pone'].shape[0]).reshape((-1, 1)), 2)
arrays['word_ptwo'] = sen_word[index_ptwo]
arrays['label_ptwo'] = sen_label[index_ptwo]
arrays['len_ptwo'] = sen_len[index_ptwo]
arrays['mask_ptwo'] = sen_mask[index_ptwo]
arrays['pos1_ptwo'] = sen_pos1[index_ptwo]
arrays['pos2_ptwo'] = sen_pos2[index_ptwo]
arrays['entity_scope_ptwo'] = np.tile(2 * np.arange(arrays['word_pone'].shape[0]).reshape((-1, 1)), 2)
arrays['entity_scope_ptwo'][:, 1] = arrays['entity_scope_ptwo'][:, 1] + 1
fin = open(data_path + name + '.txt', 'r').readlines()[1:]
fout = open(data_path + name + '_pall.txt', 'w')
fout.write('{}\n'.format(sall))
_ = [fout.write(fin[x]) for x in index_pall]
assert_equal(len(_), sall)
fout.close()
fout = open(data_path + name + '_pone.txt', 'w')
fout.write('{}\n'.format(arrays['word_pone'].shape[0]))
_ = [fout.write(fin[x]) for x in index_pone]
fout.close()
fout = open(data_path + name + '_ptwo.txt', 'w')
fout.write('{}\n'.format(arrays['word_ptwo'].shape[0]))
_ = [fout.write(fin[x]) for x in index_pall]
fout.close()
return arrays
init_word()
init_relation()
np.save(export_path+'vec', word_vec)
json.dump({
"word2id":word2id,
"relation2id":relation2id,
"word_size":word_size,
"fixlen":fixlen,
"maxlen":maxlen
}, open(export_path+'config', "wt"))
sort_files("train")
instance_triple, instance_scope, train_len, train_label, train_word, train_pos1, train_pos2, train_mask = init_train_files("train_sort")
np.save(export_path+'train_instance_triple', instance_triple)
np.save(export_path+'train_instance_scope', instance_scope)
np.save(export_path+'train_len', train_len)
np.save(export_path+'train_label', train_label)
np.save(export_path+'train_word', train_word)
np.save(export_path+'train_pos1', train_pos1)
np.save(export_path+'train_pos2', train_pos2)
np.save(export_path+'train_mask', train_mask)
sort_test_files("test")
instance_triple, instance_scope, test_len, test_label, test_word, test_pos1, test_pos2, test_mask = init_test_files("test_sort")
np.save(export_path+'test_entity_pair', instance_triple)
np.save(export_path+'test_entity_scope', instance_scope)
np.save(export_path+'test_len', test_len)
np.save(export_path+'test_label', test_label)
np.save(export_path+'test_word', test_word)
np.save(export_path+'test_pos1', test_pos1)
np.save(export_path+'test_pos2', test_pos2)
np.save(export_path+'test_mask', test_mask)
for name, data in init_test_files_pn("test_sort").items():
np.save(export_path + 'pn/test_' + name + '.npy', data)
# initialize bag label for test
label = np.load('./data/test_label.npy')
scope = np.load('./data/test_entity_scope.npy')
all_true_label = np.zeros((scope.shape[0], 53))
for pid in range(scope.shape[0]):
all_true_label[pid][label[scope[pid][0]:scope[pid][1]+1]] = 1
all_true_label = np.reshape(all_true_label[:, 1:], -1)
np.save('./data/all_true_label.npy', all_true_label)
label = np.load('./data/pn/test_label_pone.npy')
scope = np.load('./data/pn/test_entity_scope_pone.npy')
all_true_label = np.zeros((scope.shape[0], 53))
for pid in range(scope.shape[0]):
all_true_label[pid][label[scope[pid][0]:scope[pid][1]+1]] = 1
all_true_label = np.reshape(all_true_label[:, 1:], -1)
np.save('./data/pn/true_label.npy', all_true_label)
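# Usage sketch (illustrative; the defaults above apply when no arguments
# are given):
#   python scripts/initial.py ./raw_data/ ./data/
# The raw data folder must contain vec.txt, relation2id.txt, train.txt and
# test.txt; the processed .npy arrays are written under the export folder.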
| 34.517895
| 136
| 0.582886
|
7949cf3e53dc7fa3d65da4affdc230ffc54a5f82
| 1,302
|
py
|
Python
|
test.py
|
samadams13/Linux-Email-Automation
|
202fcac287f3c4cb71892d85b47a7c8a6dd6dc42
|
[
"MIT"
] | null | null | null |
test.py
|
samadams13/Linux-Email-Automation
|
202fcac287f3c4cb71892d85b47a7c8a6dd6dc42
|
[
"MIT"
] | null | null | null |
test.py
|
samadams13/Linux-Email-Automation
|
202fcac287f3c4cb71892d85b47a7c8a6dd6dc42
|
[
"MIT"
] | null | null | null |
import smtplib, ssl, time, os
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
'''
This script takes the SMTP sender email address and receiver email address as inputs and
sends a mail with a specific message, using Gmail as the SMTP server.
It is useful for sending mail from the command line or programmatically, without opening a browser.
'''
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "" # Enter sender's email
receiver_email = "" # Enter receiver address
password = "" #Enter sender's pass
date = time.ctime()
message = MIMEMultipart()
message["Subject"] = "[Web Development Course Reminder]"
message["From"] = sender_email
message["To"] = receiver_email
text = MIMEText("""\
This message is sent because you have taken an oath to study web development daily. So, start now.
Current Time/Date: {date}
--Abhishek Raj
--Naam to suna hi hoga.""".format(date=date))
message.attach(text) #attaches text to the email
img_data = open('', 'rb').read() #Enter image filename
image = MIMEImage(img_data, name=os.path.basename()) #Enter image filename
message.attach(image) #attaches image to the email
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port) as server:
server.login(sender_email, password)
    server.sendmail(sender_email, receiver_email, message.as_string())
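# Note (added for clarity): the empty-string placeholders above (sender,
# receiver, password, and the image filename) must be filled in before
# running; the script then sends one multipart email over Gmail's SSL SMTP.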
| 36.166667
| 105
| 0.764209
|
7949cf46c6daf77a9c65c056672897139376c8b0
| 4,127
|
py
|
Python
|
mypy/erasetype.py
|
crusaderky/mypy
|
b29a4330d2d7d1032c30a1990e7f1c477db070fd
|
[
"PSF-2.0"
] | null | null | null |
mypy/erasetype.py
|
crusaderky/mypy
|
b29a4330d2d7d1032c30a1990e7f1c477db070fd
|
[
"PSF-2.0"
] | null | null | null |
mypy/erasetype.py
|
crusaderky/mypy
|
b29a4330d2d7d1032c30a1990e7f1c477db070fd
|
[
"PSF-2.0"
] | null | null | null |
from typing import Optional, Container, Callable
from mypy.types import (
Type, TypeVisitor, UnboundType, AnyType, NoneType, TypeVarId, Instance, TypeVarType,
CallableType, TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType,
DeletedType, TypeTranslator, UninhabitedType, TypeType, TypeOfAny, LiteralType,
)
from mypy.nodes import ARG_STAR, ARG_STAR2
def erase_type(typ: Type) -> Type:
"""Erase any type variables from a type.
Also replace tuple types with the corresponding concrete types.
Examples:
A -> A
B[X] -> B[Any]
Tuple[A, B] -> tuple
Callable[[A1, A2, ...], R] -> Callable[..., Any]
Type[X] -> Type[Any]
"""
return typ.accept(EraseTypeVisitor())
class EraseTypeVisitor(TypeVisitor[Type]):
def visit_unbound_type(self, t: UnboundType) -> Type:
# TODO: replace with an assert after UnboundType can't leak from semantic analysis.
return AnyType(TypeOfAny.from_error)
def visit_any(self, t: AnyType) -> Type:
return t
def visit_none_type(self, t: NoneType) -> Type:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
return t
def visit_erased_type(self, t: ErasedType) -> Type:
# Should not get here.
raise RuntimeError()
def visit_partial_type(self, t: PartialType) -> Type:
# Should not get here.
raise RuntimeError()
def visit_deleted_type(self, t: DeletedType) -> Type:
return t
def visit_instance(self, t: Instance) -> Type:
return Instance(t.type, [AnyType(TypeOfAny.special_form)] * len(t.args), t.line)
def visit_type_var(self, t: TypeVarType) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_callable_type(self, t: CallableType) -> Type:
# We must preserve the fallback type for overload resolution to work.
any_type = AnyType(TypeOfAny.special_form)
return CallableType(
arg_types=[any_type, any_type],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=[None, None],
ret_type=any_type,
fallback=t.fallback,
is_ellipsis_args=True,
implicit=True,
)
def visit_overloaded(self, t: Overloaded) -> Type:
return t.fallback.accept(self)
def visit_tuple_type(self, t: TupleType) -> Type:
return t.partial_fallback.accept(self)
def visit_typeddict_type(self, t: TypedDictType) -> Type:
return t.fallback.accept(self)
def visit_literal_type(self, t: LiteralType) -> Type:
# The fallback for literal types should always be either
# something like int or str, or an enum class -- types that
# don't contain any TypeVars. So there's no need to visit it.
return t
def visit_union_type(self, t: UnionType) -> Type:
erased_items = [erase_type(item) for item in t.items]
return UnionType.make_simplified_union(erased_items)
def visit_type_type(self, t: TypeType) -> Type:
return TypeType.make_normalized(t.item.accept(self), line=t.line)
def erase_typevars(t: Type, ids_to_erase: Optional[Container[TypeVarId]] = None) -> Type:
"""Replace all type variables in a type with any,
or just the ones in the provided collection.
"""
def erase_id(id: TypeVarId) -> bool:
if ids_to_erase is None:
return True
return id in ids_to_erase
return t.accept(TypeVarEraser(erase_id, AnyType(TypeOfAny.special_form)))
def replace_meta_vars(t: Type, target_type: Type) -> Type:
"""Replace unification variables in a type with the target type."""
return t.accept(TypeVarEraser(lambda id: id.is_meta_var(), target_type))
class TypeVarEraser(TypeTranslator):
"""Implementation of type erasure"""
def __init__(self, erase_id: Callable[[TypeVarId], bool], replacement: Type) -> None:
self.erase_id = erase_id
self.replacement = replacement
def visit_type_var(self, t: TypeVarType) -> Type:
if self.erase_id(t.id):
return self.replacement
return t
| 33.827869
| 91
| 0.665617
|
7949d04136353b10331d93a21f465e4406fa8185
| 1,573
|
py
|
Python
|
setup.py
|
tspp520/PaddleX
|
5f3974c7ba7b378cc2969b8609a7ee682e59a5c1
|
[
"Apache-2.0"
] | 1
|
2020-05-06T13:07:18.000Z
|
2020-05-06T13:07:18.000Z
|
setup.py
|
chunfeng0301/PaddleX
|
5f3974c7ba7b378cc2969b8609a7ee682e59a5c1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
chunfeng0301/PaddleX
|
5f3974c7ba7b378cc2969b8609a7ee682e59a5c1
|
[
"Apache-2.0"
] | null | null | null |
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import sys
long_description = "PaddleX. A end-to-end deeplearning model development toolkit base on PaddlePaddle\n\n"
setuptools.setup(
name="paddlex",
version='0.1.5',
author="paddlex",
author_email="paddlex@baidu.com",
description=long_description,
long_description=long_description,
long_description_content_type="text/plain",
url="https://github.com/PaddlePaddle/PaddleX",
packages=setuptools.find_packages(),
setup_requires=['cython', 'numpy', 'sklearn'],
install_requires=[
'pycocotools', 'pyyaml', 'colorama', 'tqdm', 'visualdl==1.3.0',
'paddleslim==1.0.1', 'paddlehub>=1.6.2'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
license='Apache 2.0',
entry_points={'console_scripts': [
'paddlex=paddlex.command:main',
]})
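# Usage sketch (illustrative): installing the package exposes a `paddlex`
# console command mapped to paddlex.command:main via entry_points above:
#   pip install .
#   paddlex --help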
| 35.75
| 106
| 0.699936
|
7949d06455dc32bde6b56923227a5e16ecd95225
| 577
|
py
|
Python
|
Python/venv/lib/python3.6/site-packages/png/__init__.py
|
Nyahmi/ET-WorldGenerator
|
7506102ca1116536ebcdcdb352cf96b93cd53120
|
[
"MIT"
] | null | null | null |
Python/venv/lib/python3.6/site-packages/png/__init__.py
|
Nyahmi/ET-WorldGenerator
|
7506102ca1116536ebcdcdb352cf96b93cd53120
|
[
"MIT"
] | null | null | null |
Python/venv/lib/python3.6/site-packages/png/__init__.py
|
Nyahmi/ET-WorldGenerator
|
7506102ca1116536ebcdcdb352cf96b93cd53120
|
[
"MIT"
] | null | null | null |
try:
exec("from .png import *", globals(), locals())
    # The following names are not part of the API and are imported only for unittest
exec("from .png import _main", globals(), locals())
exec("from .png import strtobytes", globals(), locals())
exec("from .png import array", globals(), locals())
except SyntaxError:
# On Python < 2.5 relative import cause syntax error
from png import *
    # The following names are not part of the API and are imported only for unittest
from png import _main
from png import strtobytes
from png import array
| 41.214286
| 76
| 0.668977
|
7949d0c48c5314b5696589328376a79ccc061186
| 5,131
|
py
|
Python
|
colossus/apps/campaigns/api.py
|
hrithik098/colossus
|
9544838dfc2ab75895d8605d1480fd019b107828
|
[
"MIT"
] | 6
|
2021-02-08T02:46:48.000Z
|
2021-03-29T10:26:58.000Z
|
colossus/apps/campaigns/api.py
|
qube-ai/colossus
|
9544838dfc2ab75895d8605d1480fd019b107828
|
[
"MIT"
] | null | null | null |
colossus/apps/campaigns/api.py
|
qube-ai/colossus
|
9544838dfc2ab75895d8605d1480fd019b107828
|
[
"MIT"
] | null | null | null |
import logging
import re
from smtplib import SMTPException
import html2text
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.utils import timezone
from django.utils.translation import gettext as _
from colossus.apps.campaigns.constants import CampaignStatus
from colossus.apps.subscribers.constants import ActivityTypes
from colossus.utils import get_absolute_url, get_campaign_connection
logger = logging.getLogger(__name__)
def get_test_email_context(**kwargs):
if 'sub' not in kwargs:
kwargs['sub'] = '#'
if 'unsub' not in kwargs:
kwargs['unsub'] = '#'
if 'name' not in kwargs:
kwargs['name'] = '<< Test Name >>'
if 'uuid' not in kwargs:
kwargs['uuid'] = '[SUBSCRIBER_UUID]'
return kwargs
def send_campaign_email(email, context, to, connection=None, is_test=False):
if isinstance(to, str):
to = [to, ]
subject = email.subject
if is_test:
subject = '[%s] %s' % (_('Test'), subject)
rich_text_message = email.render(context)
plain_text_message = html2text.html2text(rich_text_message, bodywidth=2000)
# Remove track open from plain text version
plain_text_message = re.sub(r'(!\[\]\(https?://.*/track/open/.*/\)\n\n)', '', plain_text_message, 1)
headers = dict()
if not is_test:
headers['List-ID'] = '%s <%s.list-id.%s>' % (
email.campaign.mailing_list.name,
email.campaign.mailing_list.uuid,
context['domain']
        )
        headers['List-Post'] = 'NO'
headers['List-Unsubscribe-Post'] = 'List-Unsubscribe=One-Click'
list_subscribe_header = ['<%s>' % context['sub']]
list_unsubscribe_header = ['<%s>' % context['unsub']]
if email.campaign.mailing_list.list_manager:
list_subscribe_header.append('<mailto:%s?subject=subscribe>' % email.campaign.mailing_list.list_manager)
list_unsubscribe_header.append(
'<mailto:%s?subject=unsubscribe>' % email.campaign.mailing_list.list_manager
)
headers['List-Subscribe'] = ', '.join(list_subscribe_header)
headers['List-Unsubscribe'] = ', '.join(list_unsubscribe_header)
message = EmailMultiAlternatives(
subject=subject,
body=plain_text_message,
from_email=email.get_from(),
to=to,
connection=connection,
headers=headers
)
message.attach_alternative(rich_text_message, 'text/html')
try:
message.send(fail_silently=False)
return True
except SMTPException:
logger.exception('Could not send email "%s" due to SMTP error.' % email.uuid)
return False
def send_campaign_email_subscriber(email, subscriber, site, connection=None):
unsubscribe_absolute_url = get_absolute_url('subscribers:unsubscribe', kwargs={
'mailing_list_uuid': email.campaign.mailing_list.uuid,
'subscriber_uuid': subscriber.uuid,
'campaign_uuid': email.campaign.uuid
})
subscribe_absolute_url = get_absolute_url('subscribers:subscribe', kwargs={
'mailing_list_uuid': email.campaign.mailing_list.uuid
})
context = {
'domain': site.domain,
'uuid': subscriber.uuid,
'name': subscriber.name,
'sub': subscribe_absolute_url,
'unsub': unsubscribe_absolute_url
}
return send_campaign_email(email, context, subscriber.get_email(), connection)
def send_campaign_email_test(email, recipient_list, connection=None):
if email.campaign.mailing_list is not None:
unsubscribe_absolute_url = get_absolute_url('subscribers:unsubscribe_manual', kwargs={
'mailing_list_uuid': email.campaign.mailing_list.uuid
})
else:
unsubscribe_absolute_url = '#'
context = get_test_email_context(unsub=unsubscribe_absolute_url)
return send_campaign_email(email, context, recipient_list, is_test=True, connection=connection)
def send_campaign(campaign):
connection = get_campaign_connection(campaign=campaign)
campaign.status = CampaignStatus.DELIVERING
campaign.save(update_fields=['status'])
site = get_current_site(request=None) # get site based on SITE_ID
if campaign.track_clicks:
campaign.email.enable_click_tracking()
if campaign.track_opens:
campaign.email.enable_open_tracking()
with connection:
for subscriber in campaign.get_recipients():
if not subscriber.activities.filter(activity_type=ActivityTypes.SENT, email=campaign.email).exists():
sent = send_campaign_email_subscriber(campaign.email, subscriber, site, connection)
if sent:
subscriber.create_activity(ActivityTypes.SENT, email=campaign.email)
subscriber.update_open_and_click_rate()
subscriber.last_sent = timezone.now()
subscriber.save(update_fields=['last_sent'])
campaign.mailing_list.update_open_and_click_rate()
campaign.status = CampaignStatus.SENT
campaign.save(update_fields=['status'])
| 36.913669
| 116
| 0.682908
|
7949d0da585f467394c19cbf96fb5353ce29be6d
| 1,076
|
py
|
Python
|
src/games/domino/module/players/strategies/alphazero.py
|
2kodevs/cooperAItive
|
910c4c1cb356e3c404ce4b9a64c812cab3333742
|
[
"MIT"
] | 1
|
2021-09-16T12:58:24.000Z
|
2021-09-16T12:58:24.000Z
|
src/games/domino/module/players/strategies/alphazero.py
|
2kodevs/cooperAItive
|
910c4c1cb356e3c404ce4b9a64c812cab3333742
|
[
"MIT"
] | 3
|
2021-09-08T23:20:20.000Z
|
2022-01-30T22:45:44.000Z
|
src/games/domino/module/players/strategies/alphazero.py
|
2kodevs/cooperAItive
|
910c4c1cb356e3c404ce4b9a64c812cab3333742
|
[
"MIT"
] | null | null | null |
from ..player import BasePlayer
from .utils.alphazero import encoder_generator, rollout_maker, selector_maker
from .utils.mc import monte_carlo
from .models import AlphaZeroNet
from .utils import parse_bool
class AlphaZero(BasePlayer):
def __init__(self, name, handouts, rollouts, NN, tag='', load_model=True):
super().__init__(f'AlphaZero::{name}')
if isinstance(NN, str):
_, self.NN = AlphaZeroNet().load(NN, tag, load_model=parse_bool(load_model))
else:
self.NN = NN
self.handouts = int(handouts)
self.rollouts = int(rollouts)
def filter(self, valids):
data = {}
selector = selector_maker(data, self.valid_moves(), self.pieces_per_player - len(self.pieces), False, 6)
encoder = encoder_generator(self.max_number)
rollout = rollout_maker(data, self.NN)
_, action, *_ = monte_carlo(
self,
encoder,
rollout,
selector,
self.handouts,
self.rollouts,
)
return [action]
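# A minimal construction sketch (argument names taken from __init__ above;
# the checkpoint path, tag and counts are hypothetical):
#
#   player = AlphaZero('bot-1', handouts=10, rollouts=50,
#                      NN='checkpoints/net', tag='best')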
| 30.742857
| 112
| 0.618959
|
7949d1c2f88e8fdb6e561cdfd1e2e96f9ed13e1f
| 779
|
py
|
Python
|
qmpy/analysis/thermodynamics/tests.py
|
tachyontraveler/qmpy
|
f024de3aa85d4367cd31775bd53eede30c74c083
|
[
"MIT"
] | 103
|
2015-02-13T16:51:59.000Z
|
2022-03-24T22:08:54.000Z
|
qmpy/analysis/thermodynamics/tests.py
|
tachyontraveler/qmpy
|
f024de3aa85d4367cd31775bd53eede30c74c083
|
[
"MIT"
] | 59
|
2015-12-02T22:43:21.000Z
|
2022-03-28T03:54:44.000Z
|
qmpy/analysis/thermodynamics/tests.py
|
tachyontraveler/qmpy
|
f024de3aa85d4367cd31775bd53eede30c74c083
|
[
"MIT"
] | 62
|
2015-02-24T21:58:59.000Z
|
2022-03-21T16:49:09.000Z
|
from django.test import TestCase
from qmpy.analysis.thermodynamics import *
class PhaseTestCase(TestCase):
def test_create(self):
test = Phase(composition={"Li": 2, "Fe": 4, "O": 6}, energy=-1.2345)
self.assertEqual(test.name, "LiFe2O3")
self.assertEqual(test.space, set(["Fe", "Li", "O"]))
self.assertEqual(
test.unit_comp,
{"Fe": 0.33333333333333331, "O": 0.5, "Li": 0.16666666666666666},
)
self.assertEqual(test.nom_comp, {"Fe": 2, "O": 3, "Li": 1})
self.assertEqual(test.comp, {"Fe": 4, "O": 6, "Li": 2})
self.assertEqual(test.latex, "Li$_{}$Fe$_{2}$O$_{3}$")
class PhaseSpaceTestCase(TestCase):
def test_create(self):
test = PhaseSpace("Li-Fe-O", load="legacy.dat")
| 35.409091
| 77
| 0.591784
|
7949d1ccac73ed84545ab848531e848ec4474175
| 3,421
|
py
|
Python
|
pympler/mprofile.py
|
mrJean1/pympler
|
a24dd8f22c427e9acd7ebfd8525b3db2f4b9abfe
|
[
"Apache-2.0"
] | null | null | null |
pympler/mprofile.py
|
mrJean1/pympler
|
a24dd8f22c427e9acd7ebfd8525b3db2f4b9abfe
|
[
"Apache-2.0"
] | null | null | null |
pympler/mprofile.py
|
mrJean1/pympler
|
a24dd8f22c427e9acd7ebfd8525b3db2f4b9abfe
|
[
"Apache-2.0"
] | 2
|
2018-04-04T17:13:01.000Z
|
2018-04-17T21:15:25.000Z
|
"""
Memory usage profiler for Python.
"""
import inspect
import sys
from pympler import muppy
class MProfiler(object):
"""A memory usage profiler class.
Memory data for each function is stored as a 3-element list in the
dictionary self.memories. The index is always a codepoint (see below).
The following are the definitions of the members:
[0] = The number of times this function was called
[1] = Minimum memory consumption when this function was measured.
[2] = Maximum memory consumption when this function was measured.
    A codepoint is a 3-tuple of the form
    (filename, functionname, linenumber). You can omit any element (set it
    to None), which will cause the profiling to be triggered if the
    remaining criteria match. E.g.
- (None, foo, None), will profile any foo function,
- (bar, foo, None) will profile only the foo function from the bar file,
- (bar, foo, 17) will profile only line 17 of the foo function defined
in the file bar.
Additionally, you can define on what events you want the profiling be
triggered. Possible events are defined in
http://docs.python.org/lib/debugger-hooks.html.
    If you do not define either codepoints or events, the profiler will
    record the memory usage at every codepoint and event.
"""
def __init__(self, codepoints=None, events=None):
"""
keyword arguments:
codepoints -- a list of points in code to monitor (defaults to all
codepoints)
events -- a list of events to monitor (defaults to all events)
"""
self.memories = {}
self.codepoints = codepoints
self.events = events
def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints is None:
return True
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if (cp[i] is not None) and (cp[i] != codepoint[i]):
mismatch = True
break
if not mismatch:
return True
return False
def profile(self, frame, event, arg): # arg req to match signature
"""Profiling method used to profile matching codepoints and events."""
if (self.events is None) or (event in self.events):
frame_info = inspect.getframeinfo(frame)
cp = (frame_info[0], frame_info[2], frame_info[1])
if self.codepoint_included(cp):
objects = muppy.get_objects()
size = muppy.get_size(objects)
if cp not in self.memories:
                    self.memories[cp] = [0, 0, 0]
self.memories[cp][0] = 1
self.memories[cp][1] = size
self.memories[cp][2] = size
else:
self.memories[cp][0] += 1
if self.memories[cp][1] > size:
self.memories[cp][1] = size
if self.memories[cp][2] < size:
self.memories[cp][2] = size
def run(self, cmd):
sys.setprofile(self.profile)
try:
exec(cmd)
finally:
sys.setprofile(None)
return self
if __name__ == "__main__":
p = MProfiler()
    p.run("print('hello')")
print(p.memories)
| 35.268041
| 78
| 0.590763
|
7949d218e56a09f778ca6ed4aeefa53bf31bdd01
| 5,147
|
py
|
Python
|
pl_bolts/datamodules/ssl_imagenet_datamodule.py
|
btwardow/pytorch-lightning-bolts
|
4a7b6ffe0fcbeee37f8bac6af1e926469b2052bf
|
[
"Apache-2.0"
] | 1
|
2021-06-28T03:24:11.000Z
|
2021-06-28T03:24:11.000Z
|
pl_bolts/datamodules/ssl_imagenet_datamodule.py
|
btwardow/pytorch-lightning-bolts
|
4a7b6ffe0fcbeee37f8bac6af1e926469b2052bf
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/datamodules/ssl_imagenet_datamodule.py
|
btwardow/pytorch-lightning-bolts
|
4a7b6ffe0fcbeee37f8bac6af1e926469b2052bf
|
[
"Apache-2.0"
] | null | null | null |
import os
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from pl_bolts.datasets.imagenet_dataset import UnlabeledImagenet
from pl_bolts.transforms.dataset_normalizations import imagenet_normalization
from pl_bolts.utils import _TORCHVISION_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transform_lib
else:
warn_missing_pkg('torchvision') # pragma: no-cover
class SSLImagenetDataModule(LightningDataModule): # pragma: no cover
name = 'imagenet'
def __init__(
self,
data_dir,
meta_dir=None,
num_workers=16,
batch_size: int = 32,
shuffle: bool = False,
pin_memory: bool = False,
drop_last: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if not _TORCHVISION_AVAILABLE:
            raise ModuleNotFoundError(  # pragma: no-cover
                'You want to use the ImageNet dataset loaded from `torchvision`, which is not installed yet.'
            )
self.data_dir = data_dir
self.num_workers = num_workers
self.meta_dir = meta_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
@property
def num_classes(self):
return 1000
def _verify_splits(self, data_dir, split):
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f'a {split} Imagenet split was not found in {data_dir}, make sure the'
f' folder contains a subfolder named {split}'
)
def prepare_data(self):
# imagenet cannot be downloaded... must provide path to folder with the train/val splits
self._verify_splits(self.data_dir, 'train')
self._verify_splits(self.data_dir, 'val')
for split in ['train', 'val']:
files = os.listdir(os.path.join(self.data_dir, split))
if 'meta.bin' not in files:
raise FileNotFoundError(
"""
no meta.bin present. Imagenet is no longer automatically downloaded by PyTorch.
To get imagenet:
1. download yourself from http://www.image-net.org/challenges/LSVRC/2012/downloads
2. download the devkit (ILSVRC2012_devkit_t12.tar.gz)
3. generate the meta.bin file using the devkit
4. copy the meta.bin file into both train and val split folders
To generate the meta.bin do the following:
from pl_bolts.datamodules.imagenet_dataset import UnlabeledImagenet
path = '/path/to/folder/with/ILSVRC2012_devkit_t12.tar.gz/'
UnlabeledImagenet.generate_meta_bins(path)
"""
)
def train_dataloader(self, num_images_per_class=-1, add_normalize=False):
transforms = self._default_transforms() if self.train_transforms is None else self.train_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split='train',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def val_dataloader(self, num_images_per_class=50, add_normalize=False):
transforms = self._default_transforms() if self.val_transforms is None else self.val_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class_val_split=num_images_per_class,
meta_dir=self.meta_dir,
split='val',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
def test_dataloader(self, num_images_per_class, add_normalize=False):
transforms = self._default_transforms() if self.test_transforms is None else self.test_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split='test',
transform=transforms
)
loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory
)
return loader
    def _default_transforms(self):
        return transform_lib.Compose([transform_lib.ToTensor(), imagenet_normalization()])
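# A minimal usage sketch (the data_dir path below is hypothetical):
#
#   dm = SSLImagenetDataModule(data_dir='/data/imagenet', num_workers=8)
#   dm.prepare_data()          # verifies the train/val splits and meta.bin
#   train_loader = dm.train_dataloader(num_images_per_class=100)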
| 34.543624
| 107
| 0.628133
|
7949d26e5a187b3306ad26f86ec2b4816ea9b988
| 25,225
|
py
|
Python
|
Integrations/python/test/testTableTools.py
|
AlSpinks/deephaven-core
|
19686fb518229fd7e331071205cc56fca42ad943
|
[
"MIT"
] | null | null | null |
Integrations/python/test/testTableTools.py
|
AlSpinks/deephaven-core
|
19686fb518229fd7e331071205cc56fca42ad943
|
[
"MIT"
] | 1
|
2022-03-03T21:24:40.000Z
|
2022-03-03T21:24:54.000Z
|
Integrations/python/test/testTableTools.py
|
AlSpinks/deephaven-core
|
19686fb518229fd7e331071205cc56fca42ad943
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
##############################################################################
# NOTE: the jvm should have been initialized, or this test will certainly fail
##############################################################################
import sys
import jpy
import numpy
from datetime import datetime, date
from deephaven import TableTools
if sys.version_info[0] < 3:
import unittest2 as unittest
# not part of the standard library, installed via pip (or the like)
# it provides backward compatibility with python3 style subTest context manager (handy for complex tests)
else:
import unittest
class TestTableTools(unittest.TestCase):
"""
    Test cases for the deephaven.TableTools module (performed locally).
"""
@classmethod
def setUpClass(self):
self.nByteArray = numpy.array([1, 2, 3], dtype=numpy.int8)
self.nShortArray = numpy.array([1, 2, 3], dtype=numpy.int16)
self.nIntArray = numpy.array([1, 2, 3], dtype=numpy.int32)
self.nLongArray = numpy.array([1, 2, 3], dtype=numpy.int64)
self.nFloatArray = numpy.array([1, 2, 3], dtype=numpy.float32)
self.nDoubleArray = numpy.array([1, 2, 3], dtype=numpy.float64)
self.nCharArray = numpy.array(['A', 'B', 'C'])
self.nStringArray = numpy.array([u'one', u'two', u'three'])
        self.nBooleanArray = numpy.array([True, False, True], dtype=numpy.bool_)
self.nTimeArray = numpy.array([1, 2, 3], dtype='datetime64[s]')
self.intList = [1, 2, None]
self.floatList = [1., 2., None]
self.charList = ['A', 'B', None]
self.stringList = [u'one', u'two', None]
self.booleanList = [True, False, None]
self.timeList = [datetime.utcnow(), datetime.utcnow(), datetime.utcnow()]
def testTableBasics(self):
"""
Test cases for table creation, and a few other basic table methods:
diff(), html(), show(), showCommaDelimited(), showWithIndex(), string(),
roundDecimalColumns(), roundDecimalColumnsExcept(), merge(), mergeSorted()
"""
tab, tab2, tab3, tab4, tab5, tab6 = None, None, None, None, None, None
with self.subTest(msg="emptyTable(long)"):
tab = TableTools.emptyTable(3)
# set some cols which aren't dumb
tab = tab.update("intCol=(int)i", "fltCol=(float)i*0.5", "dblCol=(double)i*0.3")
with self.subTest(msg="newTable(TableDefinition)"):
# assuming the first test passed...
tab3 = TableTools.newTable(tab.getDefinition())
# Essentially table to string methods
with self.subTest(msg="html test"):
print("html rendering = \n{}".format(TableTools.html(tab)))
with self.subTest(msg="show(Table, *cols)"):
print("show =")
TableTools.show(tab, "intCol", "dblCol")
with self.subTest(msg="show(Table, 2, *cols)"):
print("show & row limit =")
TableTools.show(tab, 2, "intCol", "dblCol")
with self.subTest(msg="showCommaDelimited(Table, *cols)"):
print("showCommaDelimited =")
TableTools.showCommaDelimited(tab, "intCol", "dblCol")
with self.subTest(msg="showCommaDelimited(Table, 2, *cols)"):
print("showCommaDelimited & row limit =")
TableTools.showCommaDelimited(tab, 2, "intCol", "dblCol")
with self.subTest(msg="showWithIndex(Table, *cols)"):
print("showWithIndex =")
TableTools.showWithIndex(tab, "intCol", "dblCol")
with self.subTest(msg="showWithIndex(Table, 2, *cols)"):
print("showWithIndex & row limit =")
TableTools.showWithIndex(tab, 2, "intCol", "dblCol")
with self.subTest(msg="string(Table, *cols)"):
print("string =\n {}".format(TableTools.string(tab, "intCol", "dblCol")))
with self.subTest(msg="string(Table, 2, *cols)"):
print("string & row limit =\n {}".format(TableTools.string(tab, 2, "intCol", "dblCol")))
with self.subTest(msg="roundDecimalColumns"):
tab4 = TableTools.roundDecimalColumns(tab)
with self.subTest(msg="roundDecimalColumns(*cols)"):
tab5 = TableTools.roundDecimalColumns(tab, "fltCol", "dblCol")
with self.subTest(msg="roundDecimalColumnsExcept(*cols)"):
            tab6 = TableTools.roundDecimalColumnsExcept(tab, "fltCol")
with self.subTest(msg="diff test of a table with itself"):
print("diff output of table with itself = \n{}".format(TableTools.diff(tab, tab, 3)))
with self.subTest(msg="diff test of a table with rounded version of itself"):
print("diff output of table with rounded version of itself = \n{}".format(TableTools.diff(tab, tab4, 3)))
with self.subTest(msg="merge(*tables)"):
tab4 = TableTools.merge(tab, tab)
with self.subTest(msg="merge([tables])"):
tab4 = TableTools.merge([tab, tab])
with self.subTest(msg="mergeSorted(col, [tables])"):
tab4 = TableTools.mergeSorted("intCol", [tab, tab])
        with self.subTest(msg="mergeSorted(col, *tables)"):
tab4 = TableTools.mergeSorted("intCol", tab, tab)
del tab, tab2, tab3, tab4, tab5, tab6
def testColumnHolder(self):
"""
Test cases for <primitive>Col() methods & associated newTable() method
"""
holders = []
junk, tab = None, None
with self.subTest(msg="byteCol"):
holders.append(TableTools.byteCol("byteCol", self.nByteArray))
with self.subTest(msg="shortCol"):
holders.append(TableTools.shortCol("shortCol", self.nShortArray))
with self.subTest(msg="intCol"):
holders.append(TableTools.intCol("intCol", self.nIntArray))
with self.subTest(msg="longCol"):
holders.append(TableTools.longCol("longCol", self.nLongArray))
with self.subTest(msg="floatCol"):
holders.append(TableTools.floatCol("floatCol", self.floatList))
with self.subTest(msg="doubleCol"):
holders.append(TableTools.doubleCol("doubleCol", self.nDoubleArray))
with self.subTest(msg="charCol"):
holders.append(TableTools.charCol("charCol", self.nCharArray))
with self.subTest(msg="newTable with column holders"):
self.assertGreater(len(holders), 0) # make sure that we even do something useful
tab = TableTools.newTable(*holders)
print("tab =\n{}".format(TableTools.html(tab)))
del holders, tab
holders = []
with self.subTest(msg="byteCol with list"):
holders.append(TableTools.byteCol("byteList", self.intList))
with self.subTest(msg="shortCol with list"):
holders.append(TableTools.shortCol("shortList", self.intList))
with self.subTest(msg="intCol with list"):
holders.append(TableTools.intCol("intList", self.intList))
with self.subTest(msg="longCol with list"):
holders.append(TableTools.longCol("longList", self.intList))
with self.subTest(msg="floatCol with list"):
holders.append(TableTools.doubleCol("floatList", self.floatList))
with self.subTest(msg="doubleCol with list"):
holders.append(TableTools.doubleCol("doubleList", self.floatList))
with self.subTest(msg="charCol with list"):
holders.append(TableTools.charCol("charList", self.charList))
print('prim col from list = \n{}'.format(TableTools.html(TableTools.newTable(*holders))))
del holders
holders = []
with self.subTest(msg="col with string array"):
holders.append(TableTools.col("stringCol", self.nStringArray))
with self.subTest(msg="col with boolean array"):
holders.append(TableTools.col("booleanCol", self.nBooleanArray))
with self.subTest(msg="col with time array"):
holders.append(TableTools.col("timeCol", self.nTimeArray))
print('obj col = \n{}'.format(TableTools.html(TableTools.newTable(*holders))))
del holders
holders = []
with self.subTest(msg="col with int list"):
holders.append(TableTools.col("intList2", self.intList))
with self.subTest(msg="col with double list"):
holders.append(TableTools.col("doubleList2", self.floatList))
with self.subTest(msg="col with char list"):
holders.append(TableTools.col("stringList", self.stringList))
with self.subTest(msg="col with char list"):
holders.append(TableTools.col("charList2", self.charList))
with self.subTest(msg="col with boolean list"):
holders.append(TableTools.col("booleanList", self.booleanList))
with self.subTest(msg="col with time list"):
holders.append(TableTools.col("timeList", self.timeList))
print('col from list = \n{}'.format(TableTools.html(TableTools.newTable(*holders))))
del holders
holders = []
with self.subTest(msg="col with byte"):
holders.append(TableTools.col("byteCol", self.nByteArray))
with self.subTest(msg="col with short"):
holders.append(TableTools.col("shortCol", self.nShortArray))
with self.subTest(msg="col with int"):
holders.append(TableTools.col("intCol", self.nIntArray))
with self.subTest(msg="col with long"):
holders.append(TableTools.col("longCol", self.nLongArray))
with self.subTest(msg="col with float"):
holders.append(TableTools.col("floatCol", self.nFloatArray))
with self.subTest(msg="col with double"):
holders.append(TableTools.col("doubleCol", self.nDoubleArray))
with self.subTest(msg="col with char"):
holders.append(TableTools.col("charCol", self.nCharArray))
print("primitive from col =\n{}".format(TableTools.html(TableTools.newTable(*holders))))
del holders
def testColumnSource(self):
"""
Test cases for colSource(), objColSource() methods & associated newTable() method
"""
# type inference does not work for list and dicts (i.e. not converted to java collections & maps)
colSources = []
names = []
mapSources = {}
with self.subTest(msg="colSource with byte"):
key, val = "byte", TableTools.colSource(self.nByteArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with short"):
key, val = "short", TableTools.colSource(self.nShortArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with int"):
key, val = "int", TableTools.colSource(self.nIntArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with long"):
key, val = "long", TableTools.colSource(self.nLongArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with float"):
key, val = "float", TableTools.colSource(self.nFloatArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with double"):
key, val = "double", TableTools.colSource(self.nDoubleArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with char"):
key, val = "char", TableTools.colSource(self.nCharArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with string"):
key, val = "string", TableTools.colSource(self.nStringArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with boolean"):
key, val = "boolean", TableTools.colSource(self.nBooleanArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="colSource with time"):
key, val = "time", TableTools.colSource(self.nTimeArray)
names.append(key)
colSources.append(val)
mapSources[key] = val
with self.subTest(msg="newTable with name and column source list"):
self.assertGreater(len(colSources), 0) # make sure that we even do something useful
print("table from [names], [sources] = \n{}".format(TableTools.html(TableTools.newTable(3, names, colSources))))
with self.subTest(msg="newTable with {name: column source} map"):
self.assertGreater(len(mapSources), 0) # make sure that we even do something useful
print("table from [names : sources] = \n{}".format(TableTools.html(TableTools.newTable(3, mapSources))))
del names, colSources, mapSources
names, colSources = [], []
with self.subTest(msg="colSource with int list"):
key, val = "intList", TableTools.colSource(self.intList)
names.append(key)
colSources.append(val)
with self.subTest(msg="colSource with float list"):
key, val = "floatList", TableTools.colSource(self.floatList)
names.append(key)
colSources.append(val)
with self.subTest(msg="colSource with string list"):
key, val = "strList", TableTools.colSource(self.stringList)
names.append(key)
colSources.append(val)
with self.subTest(msg="colSource with char list"):
key, val = "charList", TableTools.colSource(self.charList)
names.append(key)
colSources.append(val)
with self.subTest(msg="colSource with boolean list"):
key, val = "boolList", TableTools.colSource(self.booleanList)
names.append(key)
colSources.append(val)
with self.subTest(msg="colSource with time list"):
key, val = "time", TableTools.colSource(self.timeList)
names.append(key)
colSources.append(val)
print("table from colSource with lists = \n{}".format(TableTools.html(TableTools.newTable(3, names, colSources))))
del names, colSources
names, colSources = [], []
with self.subTest(msg="objColSource with string"):
key, val = "string", TableTools.objColSource(self.nStringArray)
names.append(key)
colSources.append(val)
with self.subTest(msg="objColSource with boolean"):
key, val = "boolean", TableTools.objColSource(self.nBooleanArray)
names.append(key)
colSources.append(val)
with self.subTest(msg="objColSource with time"):
key, val = "time", TableTools.objColSource(self.nTimeArray)
names.append(key)
colSources.append(val)
# NOTE: this one is kinda dumb...probably not what anyone wants
with self.subTest(msg="objColSource with double primitive"):
key, val = "double", TableTools.objColSource(self.nDoubleArray)
names.append(key)
colSources.append(val)
print("table from objColSource = \n{}".format(TableTools.html(TableTools.newTable(3, names, colSources))))
del names, colSources
names, colSources = [], []
with self.subTest(msg="objColSource with string list"):
key, val = "string", TableTools.objColSource(self.stringList)
names.append(key)
colSources.append(val)
with self.subTest(msg="objColSource with boolean list"):
key, val = "boolean", TableTools.objColSource(self.booleanList)
names.append(key)
colSources.append(val)
with self.subTest(msg="objColSource with time list"):
key, val = "time", TableTools.objColSource(self.timeList)
names.append(key)
colSources.append(val)
# NOTE: this one is kinda dumb...probably not what anyone wants
with self.subTest(msg="objColSource with float list"):
key, val = "double", TableTools.objColSource(self.floatList)
names.append(key)
colSources.append(val)
print("table from objColSource with lists = \n{}".format(TableTools.html(TableTools.newTable(3, names, colSources))))
del names, colSources
@unittest.skip("what to do?")
def testGetKey(self):
"""
Test for getKey() & getPrevKey() methods?
"""
# TODO: getKey(), getPrevKey()
pass
@unittest.skip("what to do?")
def testReadFiles(self):
"""
Test for file reading capability?
"""
# TODO: readBin(), readCsv(), readHeaderlessCsv(), writeCsv()
# need data files for these tests
pass
def testPrimitiveColCases(self):
"""
Testing column construction from primitive cases
"""
with self.subTest(msg="col with bool"):
            col1 = TableTools.col('bool1', numpy.array([True], dtype=numpy.bool_))
self.assertEqual(col1.dataType.toString(), 'class java.lang.Boolean')
# print("table from bool = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('bool2', True, False, None)
self.assertEqual(col2.dataType.toString(), 'class java.lang.Boolean')
# print("table from bool varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with int"):
col1 = TableTools.col('int1', 1)
self.assertEqual(col1.dataType.toString(), 'long')
# print("table from int = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('int2', 1, 2, None)
self.assertEqual(col2.dataType.toString(), 'long')
# print("table from int varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with float"):
col1 = TableTools.col('float1', 1.0)
self.assertEqual(col1.dataType.toString(), 'double')
# print("table from float = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('float2', 1.0, 2.0, None)
self.assertEqual(col2.dataType.toString(), 'double')
# print("table from float varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with string"):
col1 = TableTools.col('string1', 'one')
self.assertEqual(col1.dataType.toString(), 'class java.lang.String')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('string2', 'one', 'two', None)
self.assertEqual(col2.dataType.toString(), 'class java.lang.String')
# print("table from string varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with datetime"):
col1 = TableTools.col('datetime1', datetime.utcnow())
self.assertEqual(col1.dataType.toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('datetime2', datetime.utcnow(), datetime.utcnow(), None)
self.assertEqual(col2.dataType.toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from datetime varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with date"):
col1 = TableTools.col('date1', date.today())
self.assertEqual(col1.dataType.toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(col1))))
col2 = TableTools.col('date2', date.today(), date.today(), None)
self.assertEqual(col2.dataType.toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from date varargs = \n{}".format(TableTools.html(TableTools.newTable(col2))))
with self.subTest(msg="col with no argument"):
col1 = TableTools.col('empty', [])
self.assertEqual(col1.dataType.toString(), 'class java.lang.Object')
# print("table from empty = \n{}".format(TableTools.html(TableTools.newTable(col1))))
def testPrimitiveColSourceCases(self):
"""
Testing column source construction from primitive cases
"""
with self.subTest(msg="colSource with bool"):
col1 = TableTools.colSource(True)
self.assertEqual(col1.getType().toString(), 'class java.lang.Boolean')
# print("table from bool = \n{}".format(TableTools.html(TableTools.newTable(1, {'bool1': col1}))))
col2 = TableTools.colSource(True, False, None)
self.assertEqual(col2.getType().toString(), 'class java.lang.Boolean')
# print("table from bool varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'bool2': col2}))))
with self.subTest(msg="colSource with int"):
col1 = TableTools.colSource(1)
self.assertEqual(col1.getType().toString(), 'long')
# print("table from int = \n{}".format(TableTools.html(TableTools.newTable(1, {'int1': col1}))))
col2 = TableTools.colSource(1, 2, None)
self.assertEqual(col2.getType().toString(), 'long')
# print("table from int varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'int2': col2}))))
with self.subTest(msg="colSource with float"):
col1 = TableTools.colSource(1.0)
self.assertEqual(col1.getType().toString(), 'double')
# print("table from float = \n{}".format(TableTools.html(TableTools.newTable(1, {'float1': col1}))))
col2 = TableTools.colSource(1.0, 2.0, None)
self.assertEqual(col2.getType().toString(), 'double')
# print("table from float varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'float2': col2}))))
with self.subTest(msg="colSource with string"):
col1 = TableTools.colSource('one')
self.assertEqual(col1.getType().toString(), 'class java.lang.String')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(1, {'string1': col1}))))
col2 = TableTools.colSource('one', 'two', None)
self.assertEqual(col2.getType().toString(), 'class java.lang.String')
# print("table from string varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'string2': col2}))))
with self.subTest(msg="colSource with datetime"):
col1 = TableTools.colSource(datetime.utcnow())
self.assertEqual(col1.getType().toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(1, {'datetime1': col1}))))
col2 = TableTools.colSource(datetime.utcnow(), datetime.utcnow(), None)
self.assertEqual(col2.getType().toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from datetime varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'datetime2': col2}))))
with self.subTest(msg="colSource with date"):
col1 = TableTools.colSource(date.today())
self.assertEqual(col1.getType().toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from string = \n{}".format(TableTools.html(TableTools.newTable(1, {'date1': col1}))))
col2 = TableTools.colSource(date.today(), date.today(), None)
self.assertEqual(col2.getType().toString(), 'class io.deephaven.db.tables.utils.DBDateTime')
# print("table from date varargs = \n{}".format(TableTools.html(TableTools.newTable(3, {'date2': col2}))))
def testBreakingCases(self):
"""
Testing some cases observed to fail previously
"""
tab1, tab2, tab3 = None, None, None
with self.subTest(msg="charCol with list"):
tab1 = TableTools.newTable(TableTools.charCol('tab1', ['A', 'B', 'C', 'D']))
print("tab1 = \n{}".format(TableTools.html(tab1)))
with self.subTest(msg="charCol with string"):
tab2 = TableTools.newTable(TableTools.charCol('tab2', 'EFGH'))
print("tab2 = \n{}".format(TableTools.html(tab2)))
with self.subTest(msg="col with single string"):
tab3 = TableTools.newTable(TableTools.col('tab3', 'EFGH'))
print("tab3 = \n{}".format(TableTools.html(tab3)))
del tab1, tab2, tab3
| 49.075875
| 126
| 0.614073
|
7949d279bf67566ef448917878b1798a9c6560fc
| 221
|
py
|
Python
|
contrib/wallettools/walletchangepass.py
|
thachpv91/bapcoin85
|
3f889681d8487846f98f1b077ff03928b15038d8
|
[
"MIT"
] | null | null | null |
contrib/wallettools/walletchangepass.py
|
thachpv91/bapcoin85
|
3f889681d8487846f98f1b077ff03928b15038d8
|
[
"MIT"
] | null | null | null |
contrib/wallettools/walletchangepass.py
|
thachpv91/bapcoin85
|
3f889681d8487846f98f1b077ff03928b15038d8
|
[
"MIT"
] | null | null | null |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:45882")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
| 36.833333
| 49
| 0.769231
|
7949d37ea786d83a734f17e4bc7fcfa9ab835b97
| 1,292
|
bzl
|
Python
|
dotnet/private/deps/nunit.bzl
|
lb5tr/rules_dotnet
|
43479c10ef07156a29a4265caa72db33279219db
|
[
"Apache-2.0"
] | null | null | null |
dotnet/private/deps/nunit.bzl
|
lb5tr/rules_dotnet
|
43479c10ef07156a29a4265caa72db33279219db
|
[
"Apache-2.0"
] | null | null | null |
dotnet/private/deps/nunit.bzl
|
lb5tr/rules_dotnet
|
43479c10ef07156a29a4265caa72db33279219db
|
[
"Apache-2.0"
] | null | null | null |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def dotnet_repositories_nunit():
http_file(
name = "nuget",
urls = ["https://dist.nuget.org/win-x86-commandline/v4.6.2/nuget.exe"],
sha256 = "2c562c1a18d720d4885546083ec8eaad6773a6b80befb02564088cc1e55b304e",
)
http_archive(
name = "nunit2",
url = "https://github.com/nunit/nunitv2/archive/2.6.4.zip",
sha256 = "2db7b4356e7cd9ac022c3f211853e39ae7b3587915124b555c7c39f712902c28",
strip_prefix = "nunitv2-2.6.4",
build_file = "@io_bazel_rules_dotnet//dotnet/externals:BUILD.nunit2",
)
http_archive(
name = "nunit3",
url = "https://www.nuget.org/api/v2/package/NUnit/3.10.1",
sha256 = "3529193f6028d7f7ccc65c6cb83d62d1b1c39a7d7ba5f74036cd6b69f55b10b6",
build_file = "@io_bazel_rules_dotnet//dotnet/externals:BUILD.nunit3",
type = "zip",
)
http_archive(
name = "nunit3_consolerunner",
url = "https://www.nuget.org/api/v2/package/NUnit.ConsoleRunner/3.8.0",
sha256 = "785d80095c50f142727e741578297b2ef5e1a0e537e2511697ac25e8bd9fa2ae",
build_file = "@io_bazel_rules_dotnet//dotnet/externals:BUILD.nunit3-consolerunner",
type = "zip",
)
| 39.151515
| 91
| 0.674923
|
7949d43fef3b196646bb8660a2bf6d9f524e0e1f
| 17,283
|
py
|
Python
|
omnibot/processor.py
|
lyft/omnibot
|
8e9000e872595010e4c3e294e2df39a33f05d491
|
[
"Apache-2.0"
] | 72
|
2019-11-21T00:02:18.000Z
|
2022-01-20T02:23:09.000Z
|
omnibot/processor.py
|
lyft/omnibot
|
8e9000e872595010e4c3e294e2df39a33f05d491
|
[
"Apache-2.0"
] | 30
|
2019-11-22T04:24:57.000Z
|
2021-09-24T22:50:12.000Z
|
omnibot/processor.py
|
lyft/omnibot
|
8e9000e872595010e4c3e294e2df39a33f05d491
|
[
"Apache-2.0"
] | 6
|
2019-12-26T00:37:41.000Z
|
2021-04-09T04:21:17.000Z
|
"""
Core processing logic.
"""
import re
import json
import importlib
import requests
from omnibot import logging
from omnibot import settings
from omnibot.services import slack
from omnibot.services.slack.message import Message, MessageUnsupportedError
from omnibot.services.slack.slash_command import SlashCommand
from omnibot.services.slack.interactive_component import InteractiveComponent
from omnibot.services import stats
from omnibot.services.slack import parser
from omnibot.services.slack.team import Team
from omnibot.services.slack.bot import Bot
from omnibot.utils import get_callback_id, merge_logging_context
logger = logging.getLogger(__name__)
def process_event(event):
"""
Dispatcher for slack api events.
"""
statsd = stats.get_statsd_client()
team = Team.get_team_by_id(event['team_id'])
bot = Bot.get_bot_by_bot_id(team, event['api_app_id'])
event_info = event['event']
event_type = event_info['type']
event_trace = merge_logging_context(
{
'event_ts': event_info['event_ts'],
'event_type': event_type,
},
bot.logging_context,
)
statsd.incr('event.process.attempt.{}'.format(event_type))
if event_type == 'message' or event_type == 'app_mention':
try:
with statsd.timer('process_event'):
logger.debug(
'Processing message: {}'.format(
json.dumps(event, indent=2)
),
extra=event_trace
)
try:
message = Message(bot, event_info, event_trace)
_process_message_handlers(message)
except MessageUnsupportedError:
pass
except Exception:
statsd.incr('event.process.failed.{}'.format(event_type))
logger.exception(
'Could not process message.',
exc_info=True,
extra=event_trace
)
else:
logger.debug(
'Event is not a message type.',
extra=event_trace
)
logger.debug(event)
def _process_message_handlers(message):
bot = message.bot
statsd = stats.get_statsd_client()
command_matched = False
handler_called = False
for handler in bot.message_handlers:
# We only match commands against directed messages
if handler['match_type'] == 'command':
if not _should_handle_command(handler, message):
continue
# We only match against a single command
if command_matched:
continue
if message.command_text.startswith(handler['match']):
command_matched = True
message.set_match('command', handler['match'])
for callback in handler['callbacks']:
_handle_message_callback(message, callback)
handler_called = True
if handler['match_type'] == 'regex':
match = bool(re.search(handler['match'], message.parsed_text))
regex_should_not_match = handler.get('regex_type') == 'absence'
# A matched regex should callback only if the regex is supposed to
# match. An unmatched regex should callback only if the regex is
# not supposed to match.
if match != regex_should_not_match:
message.set_match('regex', handler['match'])
for callback in handler['callbacks']:
_handle_message_callback(message, callback)
handler_called = True
    if handler_called:
        statsd.incr('event.handled')
    else:
        _handle_help(message)
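# Illustrative shape of the handler dicts consumed above (field names are
# inferred from the lookups in this function; the match values and callback
# module paths are hypothetical):
#
#   bot.message_handlers = [
#       {'match_type': 'command', 'match': 'deploy', 'match_mention': True,
#        'callbacks': [{'module': 'mybot.callbacks:deploy'}]},
#       {'match_type': 'regex', 'match': r'\bhelp\b', 'regex_type': 'absence',
#        'callbacks': [{'module': 'mybot.callbacks:nudge'}]},
#   ]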
def process_slash_command(command):
"""
Dispatcher for slack slash commands.
"""
statsd = stats.get_statsd_client()
team = Team.get_team_by_id(command['team_id'])
bot = Bot.get_bot_by_bot_id(team, command['omnibot_bot_id'])
if command['command'].startswith('/'):
command_name = command['command'][1:]
else:
command_name = command['command']
event_trace = merge_logging_context(
{
'trigger_id': command['trigger_id'],
'command': command_name,
},
bot.logging_context,
)
statsd.incr('slash_command.process.attempt.{}'.format(command_name))
try:
with statsd.timer('process_slash_command'):
logger.debug(
'Processing slash_command: {}'.format(
json.dumps(command, indent=2)
),
extra=event_trace
)
slash_command = SlashCommand(bot, command, event_trace)
_process_slash_command_handlers(slash_command)
except Exception:
statsd.incr('slash_command.process.failed.{}'.format(command_name))
logger.exception(
'Could not process slash command.',
exc_info=True,
extra=event_trace
)
def process_interactive_component(component):
"""
Dispatcher for slack interactive components
"""
statsd = stats.get_statsd_client()
team = Team.get_team_by_id(component['team']['id'])
bot = Bot.get_bot_by_bot_id(team, component['omnibot_bot_id'])
event_trace = merge_logging_context(
{
'callback_id': get_callback_id(component),
'component_type': component['type'],
},
bot.logging_context,
)
statsd.incr(
'interactive_component.process.attempt.{}'.format(
get_callback_id(component)
)
)
try:
with statsd.timer('process_interactive_component'):
logger.debug(
'Processing interactive component: {}'.format(
json.dumps(component, indent=2)
),
extra=event_trace
)
interactive_component = InteractiveComponent(
bot,
component,
event_trace
)
_process_interactive_component(interactive_component)
except Exception:
statsd.incr(
'interactive_component.process.failed.{}'.format(
get_callback_id(component)
)
)
logger.exception(
'Could not process interactive component.',
exc_info=True,
extra=event_trace
)
def _process_slash_command_handlers(command):
handler_called = False
for handler in command.bot.slash_command_handlers:
if command.command != handler.get('command'):
continue
for callback in handler['callbacks']:
_handle_slash_command_callback(
command,
callback,
handler.get('response_type', 'ephemeral')
)
handler_called = True
if not handler_called:
# TODO: send back a default help message here.
pass
def _process_interactive_component(component):
handler_called = False
for handler in component.bot.interactive_component_handlers:
if component.callback_id != handler.get('callback_id'):
continue
for callback in handler.get('callbacks', []):
_handle_interactive_component_callback(
component,
callback,
handler.get('response_type', 'ephemeral')
)
handler_called = True
if not handler_called:
# TODO: send back a default help message here.
pass
def _handle_help(message):
statsd = stats.get_statsd_client()
if message.directed:
statsd.incr('event.defaulted')
if settings.HELP_CALLBACK:
_handle_message_callback(
message,
settings.HELP_CALLBACK['callback']
)
elif settings.DEFAULT_TO_HELP:
_handle_message_callback(
message,
{
'module': 'omnibot.callbacks.message_callbacks:help_callback'
}
)
else:
# TODO: respond with error message here
pass
else:
statsd.incr('event.ignored')
def _should_handle_command(handler, message):
handle_mention = (
handler.get('match_mention', False) and message.mentioned
)
    return message.directed or handle_mention
def parse_kwargs(kwargs, bot, event_trace=None):
if event_trace is None:
event_trace = {}
statsd = stats.get_statsd_client()
omnibot_parse = kwargs.pop('omnibot_parse', {})
for attr, to_parse in omnibot_parse.items():
if attr not in kwargs:
logger.warning(
'{} not found in kwargs when parsing post response.'.format(attr),
extra=event_trace
            )
            continue
with statsd.timer('unexpand_metadata'):
if 'specials' in to_parse:
kwargs[attr] = parser.unextract_specials(kwargs[attr])
if 'channels' in to_parse:
kwargs[attr] = parser.unextract_channels(
kwargs[attr],
bot
)
if 'users' in to_parse:
kwargs[attr] = parser.unextract_users(
kwargs[attr],
bot
)
def _handle_post_message(message, kwargs):
try:
channel = kwargs.pop('channel')
except KeyError:
channel = message.channel_id
try:
thread_ts = kwargs.pop('thread_ts')
except KeyError:
if message.channel.get('is_im'):
thread_ts = None
else:
thread_ts = message.ts
if thread_ts:
kwargs['thread_ts'] = thread_ts
parse_kwargs(kwargs, message.bot, message.event_trace)
try:
ret = slack.client(
message.bot,
).api_call(
'chat.postMessage',
channel=channel,
**kwargs
)
except json.decoder.JSONDecodeError:
logger.exception(
'JSON decode failure when parsing {}'.format(kwargs),
extra=message.event_trace,
)
return
logger.debug(ret, extra=message.event_trace)
if not ret['ok']:
logger.error(ret, extra=message.event_trace)
def _handle_action(action, container, kwargs):
parse_kwargs(kwargs, container.bot, container.event_trace)
ret = slack.client(
container.bot,
).api_call(
action,
**kwargs
)
logger.debug(
'return from action {}: {}'.format(action, ret),
extra=container.event_trace,
)
if not ret['ok']:
if ret.get('error') == 'missing_scope':
logger.warning(
'action {} failed, attempting as user.'.format(action),
extra=container.event_trace
)
try:
ret = slack.client(
container.bot,
client_type='user'
).api_call(
action,
**kwargs
)
except json.decoder.JSONDecodeError:
logger.exception(
'JSON decode failure when parsing {}'.format(kwargs),
extra=container.event_trace,
)
return
logger.debug(
'return from action {}: {}'.format(action, ret),
extra=container.event_trace,
)
if not ret['ok']:
logger.debug(
'return from failed action {}: {}'.format(action, ret),
extra=container.event_trace,
)
        else:
            logger.debug(
                'return from failed action {}: {}'.format(action, ret),
                extra=container.event_trace,
            )
def _handle_message_callback(message, callback):
logger.info(
'Handling callback for message: match_type="{}" match="{}"'.format(
message.match_type,
message.match
),
extra={
**message.event_trace,
'module': callback['module'],
'request_kwargs': callback.get('kwargs', {}).get('request_kwargs', {}),
'client_kwargs': {'service': callback.get('kwargs', {}).get('client_kwargs', {}).get('service', "")},
},
)
response = _handle_callback(message, callback)
for action in response.get('actions', []):
if not isinstance(action, dict):
logger.error(
'Action in response is not a dict.',
extra=message.event_trace
)
continue
logger.debug(
'action for callback: {}'.format(action),
extra=message.event_trace,
)
if action['action'] == 'chat.postMessage':
_handle_post_message(message, action['kwargs'])
else:
_handle_action(action['action'], message, action['kwargs'])
def _handle_slash_command_callback(command, callback, response_type):
logger.info(
'Handling callback for slash_command: command="{}"'.format(
command.command
),
extra={**command.event_trace, 'callback': callback},
)
response = _handle_callback(command, callback)
for command_response in response.get('responses', []):
logger.debug(
'Handling response for callback (pre-parse): {}'.format(
json.dumps(command_response)
),
extra=command.event_trace,
)
if 'response_type' not in command_response:
command_response['response_type'] = response_type
parse_kwargs(command_response, command.bot, command.event_trace)
logger.debug(
'Handling response for callback (post-parse): {}'.format(
json.dumps(command_response)
),
extra=command.event_trace,
)
r = requests.post(
command.response_url,
json=command_response
)
if r.status_code != requests.codes.ok:
msg = 'Got status code {0} for {1}, with response: {2}'
logger.error(
msg.format(
r.status_code,
command.response_url,
r.text
),
extra=command.event_trace
)
for action in response.get('actions', []):
if not isinstance(action, dict):
logger.error(
'Action in response is not a dict.',
extra=command.event_trace
)
continue
logger.debug(
'Action in response: {}'.format(action),
extra=command.event_trace,
)
_handle_action(action['action'], command, action['kwargs'])
def _handle_interactive_component_callback(component, callback, response_type):
logger.info(
'Handling callback for interactive component',
extra={**component.event_trace, 'callback': callback},
)
response = _handle_callback(component, callback)
for component_response in response.get('responses', []):
logger.debug(
'Handling response for callback (pre-parse): {}'.format(
json.dumps(component_response)
),
extra=component.event_trace,
)
if 'response_type' not in component_response:
component_response['response_type'] = response_type
parse_kwargs(component_response, component.bot, component.event_trace)
logger.debug(
'Handling response for callback (post-parse): {}'.format(
json.dumps(component_response)
),
extra=component.event_trace,
)
r = requests.post(
component.response_url,
json=component_response
)
if r.status_code != requests.codes.ok:
msg = 'Got status code {0} for {1}, with response: {2}'
logger.error(
msg.format(
r.status_code,
component.response_url,
r.text
),
extra=component.event_trace
)
for action in response.get('actions', []):
if not isinstance(action, dict):
logger.error(
'Action in response is not a dict.',
extra=component.event_trace
)
continue
logger.debug(
'Action in response: {}'.format(action),
extra=component.event_trace,
)
if action['action'] == 'chat.postMessage':
_handle_post_message(component, action['kwargs'])
else:
_handle_action(action['action'], component, action['kwargs'])
def _handle_callback(container, callback):
module_name, function_name = callback['module'].split(':')
module = importlib.import_module(module_name)
function = getattr(module, function_name)
kwargs = callback.get('kwargs', {})
response = function(
container=container,
**kwargs
)
return response
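# e.g. a callback spec of the form (module path and kwargs hypothetical)
#   {'module': 'mybot.callbacks:greet', 'kwargs': {'greeting': 'hello'}}
# resolves to mybot.callbacks.greet and is invoked as
# greet(container=container, greeting='hello').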
| 33.690058
| 113
| 0.567494
|
7949d51ef0576402bbde4f048ab92655db724063
| 7,646
|
py
|
Python
|
tests/extras/test_postgres.py
|
bharel/Yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | 1
|
2020-08-07T20:02:12.000Z
|
2020-08-07T20:02:12.000Z
|
tests/extras/test_postgres.py
|
bharel/yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | null | null | null |
tests/extras/test_postgres.py
|
bharel/yellowbox
|
d397d878ccd074af21f552cb1375714ba97e9e22
|
[
"MIT"
] | null | null | null |
from pytest import fixture, mark
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, select
from tests.util import unique_name_generator
from yellowbox import connect, temp_network
from yellowbox.containers import upload_file
from yellowbox.extras.postgresql import POSTGRES_INTERNAL_PORT, PostgreSQLService
from yellowbox.utils import docker_host_name
@mark.parametrize('spinner', [True, False])
def test_make_pg(docker_client, spinner):
with PostgreSQLService.run(docker_client, spinner=spinner):
pass
@mark.asyncio
async def test_local_connection_async(docker_client):
service: PostgreSQLService
async with PostgreSQLService.arun(docker_client) as service:
with service.connection() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
connection.execute("""
DELETE FROM foo WHERE x = 10;
""")
with service.connection() as connection:
results = connection.execute("""
SELECT x, y FROM foo WHERE y like 't%%'
""")
vals = [row['x'] for row in results]
assert vals == [2, 3]
@fixture(scope='module')
def service(docker_client):
with PostgreSQLService.run(docker_client, spinner=False) as service:
yield service
db_name = fixture(unique_name_generator())
@fixture
def db(service, db_name):
with service.database(db_name) as db:
yield db
@fixture
def engine(db):
engine = create_engine(db.local_connection_string())
yield engine
engine.dispose()
def test_local_connection(engine):
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
connection.execute("""
DELETE FROM foo WHERE x = 10;
""")
with engine.connect() as connection:
results = connection.execute("""
SELECT x, y FROM foo WHERE y like 't%%'
""")
vals = [row['x'] for row in results]
assert vals == [2, 3]
def test_sibling(docker_client, create_and_pull, engine, service, db_name):
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
container = create_and_pull(
docker_client,
"postgres:latest",
f'psql -h {docker_host_name} -p {service.external_port()} -U {service.user} -d {db_name}'
" -c 'DELETE FROM foo WHERE x < 3'",
environment={'PGPASSWORD': service.password},
detach=True,
)
container.start()
return_status = container.wait()
assert return_status["StatusCode"] == 0
with engine.connect() as connection:
results = connection.execute("""SELECT y from foo""")
vals = [row['y'] for row in results]
assert vals == ['three', 'ten']
def test_sibling_network(docker_client, create_and_pull, engine, service, db_name):
with temp_network(docker_client) as network, \
connect(network, service) as service_alias:
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
container = create_and_pull(
docker_client,
"postgres:latest",
f'psql -h {service_alias[0]} -p {POSTGRES_INTERNAL_PORT} -U {service.user} -d {db_name}'
" -c 'DELETE FROM foo WHERE x < 3'",
environment={'PGPASSWORD': service.password},
detach=True,
)
with connect(network, container):
container.start()
return_status = container.wait()
assert return_status["StatusCode"] == 0
with engine.connect() as connection:
results = connection.execute("SELECT y from foo")
vals = [row['y'] for row in results]
assert vals == ['three', 'ten']
def test_alchemy_usage(docker_client, engine):
table = Table('foo', MetaData(),
Column('x', Integer),
Column('y', String))
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
results = connection.execute(select([table.c.x]).where(table.c.y.like('t%')))
vals = [row['x'] for row in results]
assert vals == [2, 3, 10]
def test_remote_connection_string(docker_client, create_and_pull, service, engine, db):
with temp_network(docker_client) as network, \
connect(network, service) as service_alias:
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
conn_string = db.container_connection_string(service_alias[0])
container = create_and_pull(
docker_client,
"python:latest",
'sh -c "pip install sqlalchemy psycopg2 && python ./main.py"',
detach=True,
)
upload_file(
container, './main.py',
bytes(
"import sqlalchemy as sa;"
f"e = sa.create_engine('{conn_string}');"
"e.execute('DELETE FROM foo WHERE x < 3');",
'ascii')
)
with connect(network, container):
container.start()
return_status = container.wait()
assert return_status["StatusCode"] == 0
with engine.connect() as connection:
results = connection.execute("SELECT y from foo")
vals = [row['y'] for row in results]
assert vals == ['three', 'ten']
def test_remote_connection_string_host(docker_client, create_and_pull, service, engine, db):
with engine.connect() as connection:
connection.execute("""
CREATE TABLE foo (x INTEGER, y TEXT);
INSERT INTO foo VALUES (1,'one'), (2, 'two'), (3, 'three'), (10, 'ten');
""")
conn_string = db.host_connection_string()
container = create_and_pull(
docker_client,
"python:latest",
'sh -c "pip install sqlalchemy psycopg2 && python ./main.py"',
detach=True,
)
upload_file(
container, './main.py',
bytes(
"import sqlalchemy as sa;"
f"e = sa.create_engine('{conn_string}');"
"e.execute('DELETE FROM foo WHERE x < 3');",
'ascii')
)
container.start()
return_status = container.wait()
assert return_status["StatusCode"] == 0
with engine.connect() as connection:
results = connection.execute("SELECT y from foo")
vals = [row['y'] for row in results]
assert vals == ['three', 'ten']
def test_mk_db(docker_client):
with PostgreSQLService.run(docker_client, default_db='foo') as service:
assert service.database_exists('foo')
assert not service.database_exists('bar')
with service.database('bar'):
assert service.database_exists('foo')
assert service.database_exists('bar')
assert service.database_exists('foo')
assert not service.database_exists('bar')
| 34.754545
| 100
| 0.59639
|
7949d6aa835152feeeb100320ea47f2697bff0aa
| 744
|
py
|
Python
|
language/Basics/dictionaries3.py
|
Binary-bug/Python
|
233425ded6abc26c889599a82a181487789e3bab
|
[
"MIT"
] | null | null | null |
language/Basics/dictionaries3.py
|
Binary-bug/Python
|
233425ded6abc26c889599a82a181487789e3bab
|
[
"MIT"
] | null | null | null |
language/Basics/dictionaries3.py
|
Binary-bug/Python
|
233425ded6abc26c889599a82a181487789e3bab
|
[
"MIT"
] | null | null | null |
fruit = {"orange": "a sweet, orange, citrus fruit",
         "apple": "good for making cider",
         "lemon": "a sour, yellow citrus fruit",
         "grape": "a small, sweet fruit growing in bunches",
         "lime": "a sour, green citrus fruit",
         "lime": "it's yellow"}  # duplicate "lime" key: the later value silently overwrites the earlier one
print(fruit)
# .items will produce a dynamic view object that looks like tuples
print(fruit.items())
f_tuple = tuple(fruit.items())
print(f_tuple)
for snack in f_tuple:
item, description = snack
print(item + "-" + description)
print("-"*80)
# Python can construct a dict from a sequence of (key, value) tuples
print(dict(f_tuple))
# strings are immutable objects, so repeatedly concatenating them in a for loop
# can be inefficient
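# e.g. building one string from many pieces with join instead of repeated
# concatenation (a sketch using the f_tuple defined above):
#   names = [item for item, description in f_tuple]
#   print(", ".join(names))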
| 25.655172
| 85
| 0.642473
|
7949d6d5a03d611505284f723cda9b7838fa3f81
| 578
|
py
|
Python
|
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_AX.py
|
AMuratTuran/mkn
|
557086426773ced10d82c969304bd349414a601e
|
[
"BSD-3-Clause"
] | 4
|
2018-10-19T04:36:20.000Z
|
2020-02-13T16:14:09.000Z
|
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_AX.py
|
AMuratTuran/mkn
|
557086426773ced10d82c969304bd349414a601e
|
[
"BSD-3-Clause"
] | 5
|
2020-03-24T16:37:25.000Z
|
2021-06-10T21:24:54.000Z
|
upibo-venv/Lib/site-packages/phonenumbers/shortdata/region_AX.py
|
smbpgroup/upibo
|
625dcda9f9692c62aeb9fe8f7123a5d407c610ae
|
[
"BSD-3-Clause"
] | null | null | null |
"""Auto-generated file, do not edit by hand. AX metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AX = PhoneMetadata(id='AX', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}|75[12]\\d{2}', possible_length=(3, 5)),
emergency=PhoneNumberDesc(national_number_pattern='112', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='112|75[12]\\d{2}', example_number='112', possible_length=(3, 5)),
short_data=True)
| 64.222222
| 121
| 0.766436
|
7949d6d8c6e513d0c3d178447a46b48751790cd4
| 10,688
|
py
|
Python
|
fanficfare/writers/base_writer.py
|
C133742/FanFicFare
|
66b7de966825ddf1b44f53ea72f8ebcdeeeab73b
|
[
"Apache-2.0"
] | 1
|
2020-08-27T03:49:02.000Z
|
2020-08-27T03:49:02.000Z
|
fanficfare/writers/base_writer.py
|
C133742/FanFicFare
|
66b7de966825ddf1b44f53ea72f8ebcdeeeab73b
|
[
"Apache-2.0"
] | null | null | null |
fanficfare/writers/base_writer.py
|
C133742/FanFicFare
|
66b7de966825ddf1b44f53ea72f8ebcdeeeab73b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re
import os.path
import datetime
import string
import zipfile
from zipfile import ZipFile, ZIP_DEFLATED
import logging
# py2 vs py3 transition
from .. import six
from ..six import text_type as unicode
from ..six import ensure_text
from ..six import ensure_binary
from ..six import BytesIO # StringIO under py2
from ..configurable import Configurable
from ..htmlcleanup import removeEntities, removeAllEntities, stripHTML
logger = logging.getLogger(__name__)
class BaseStoryWriter(Configurable):
@staticmethod
def getFormatName():
return 'base'
@staticmethod
def getFormatExt():
return '.bse'
def __init__(self, configuration, adapter):
Configurable.__init__(self, configuration)
self.adapter = adapter
self.story = adapter.getStoryMetadataOnly() # only cache the metadata initially.
self.story.setMetadata('formatname',self.getFormatName())
self.story.setMetadata('formatext',self.getFormatExt())
def getMetadata(self,key, removeallentities=False):
return stripHTML(self.story.getMetadata(key, removeallentities))
def getOutputFileName(self):
if self.getConfig('zip_output'):
return self.getZipFileName()
else:
return self.getBaseFileName()
def getBaseFileName(self):
return self.story.formatFileName(self.getConfig('output_filename'),self.getConfig('allow_unsafe_filename'))
def getZipFileName(self):
return self.story.formatFileName(self.getConfig('zip_filename'),self.getConfig('allow_unsafe_filename'))
def _write(self, out, text):
out.write(ensure_binary(text))
def writeTitlePage(self, out, START, ENTRY, END, WIDE_ENTRY=None, NO_TITLE_ENTRY=None):
"""
Write the title page, but only include entries that there's
metadata for. START, ENTRY and END are expected to already be
string.Template() instances. START and END are expected to use the same
names as Story.metadata, but ENTRY should use label and value.
"""
if self.getConfig("include_titlepage"):
if self.hasConfig("titlepage_start"):
START = string.Template(self.getConfig("titlepage_start"))
if self.hasConfig("titlepage_entry"):
ENTRY = string.Template(self.getConfig("titlepage_entry"))
if self.hasConfig("titlepage_end"):
END = string.Template(self.getConfig("titlepage_end"))
if self.hasConfig("titlepage_wide_entry"):
WIDE_ENTRY = string.Template(self.getConfig("titlepage_wide_entry"))
if self.hasConfig("titlepage_no_title_entry"):
NO_TITLE_ENTRY = string.Template(self.getConfig("titlepage_no_title_entry"))
self._write(out,START.substitute(self.story.getAllMetadata()))
if WIDE_ENTRY is None:
WIDE_ENTRY=ENTRY
titleEntriesList = self.getConfigList("titlepage_entries") + self.getConfigList("extra_titlepage_entries")
wideTitleEntriesList = self.getConfigList("wide_titlepage_entries")
for entry in titleEntriesList:
if self.isValidMetaEntry(entry):
if self.story.getMetadata(entry):
if entry in wideTitleEntriesList:
TEMPLATE=WIDE_ENTRY
else:
TEMPLATE=ENTRY
label=self.get_label(entry)
# if self.hasConfig(entry+"_label"):
# label=self.getConfig(entry+"_label")
# elif entry in self.titleLabels:
# logger.debug("Using fallback label for %s_label"%entry)
# label=self.titleLabels[entry]
# else:
# label="%s"%entry.title()
# logger.debug("No known label for %s, fallback to '%s'"%(entry,label))
# If the label for the title entry is empty, use the
# 'no title' option if there is one.
if label == "" and NO_TITLE_ENTRY:
TEMPLATE= NO_TITLE_ENTRY
self._write(out,TEMPLATE.substitute({'label':label,
'id':entry,
'value':self.story.getMetadata(entry)}))
else:
self._write(out, entry)
self._write(out,END.substitute(self.story.getAllMetadata()))
def writeTOCPage(self, out, START, ENTRY, END):
"""
Write the Table of Contents page. START, ENTRY and END are expected to already be
string.Template() instances. START and END are expected to use the same
names as Story.metadata, but ENTRY should use index and chapter.
"""
# Only do TOC if there's more than one chapter and it's configured.
if self.story.getChapterCount() > 1 and self.getConfig("include_tocpage") and not self.metaonly:
if self.hasConfig("tocpage_start"):
START = string.Template(self.getConfig("tocpage_start"))
if self.hasConfig("tocpage_entry"):
ENTRY = string.Template(self.getConfig("tocpage_entry"))
if self.hasConfig("tocpage_end"):
END = string.Template(self.getConfig("tocpage_end"))
self._write(out,START.substitute(self.story.getAllMetadata()))
for index, chap in enumerate(self.story.getChapters(fortoc=True)):
if chap['html']:
self._write(out,ENTRY.substitute(chap))
self._write(out,END.substitute(self.story.getAllMetadata()))
# if no outstream is given, write to file.
def writeStory(self,outstream=None, metaonly=False, outfilename=None, forceOverwrite=False):
self.metaonly = metaonly
if outfilename is None:
outfilename=self.getOutputFileName()
self.outfilename = outfilename
# minor cheat, tucking css into metadata.
if self.getConfig("output_css"):
self.story.setMetadata("output_css",
self.getConfig("output_css"),
condremoveentities=False)
else:
self.story.setMetadata("output_css",'')
if not outstream:
close=True
logger.info("Save directly to file: %s" % outfilename)
if self.getConfig('make_directories'):
path=""
outputdirs = os.path.dirname(ensure_text(outfilename)).split('/')
for dir in outputdirs:
path+=dir+"/"
if not os.path.exists(path):
os.mkdir(path) ## os.makedirs() doesn't work in 2.5.2?
## Check for output file date vs updated date here
if not (self.getConfig('always_overwrite') or forceOverwrite):
if os.path.exists(outfilename):
## date() truncs off time, which files have, but sites don't report.
lastupdated=self.story.getMetadataRaw('dateUpdated').date()
fileupdated=datetime.datetime.fromtimestamp(os.stat(outfilename)[8]).date()
if fileupdated > lastupdated:
logger.warn("File(%s) Updated(%s) more recently than Story(%s) - Skipping" % (outfilename,fileupdated,lastupdated))
return
if not metaonly:
self.story = self.adapter.getStory() # get full story
# now, just
# before writing.
# Fetch before
# opening file.
outstream = open(outfilename,"wb")
else:
close=False
logger.debug("Save to stream")
if not metaonly:
self.story = self.adapter.getStory() # get full story now,
# just before
# writing. Okay if
# double called with
# above, it will only
# fetch once.
if self.getConfig('zip_output'):
out = BytesIO()
self.zipout = ZipFile(outstream, 'w', compression=ZIP_DEFLATED)
self.writeStoryImpl(out)
self.zipout.writestr(self.getBaseFileName(),out.getvalue())
# declare all the files as created by Windows (create_system=0); otherwise,
# when it runs in appengine, Windows unzips the files with 000 perms.
for zf in self.zipout.filelist:
zf.create_system = 0
self.zipout.close()
out.close()
else:
self.writeStoryImpl(outstream)
if close:
outstream.close()
def writeFile(self, filename, data):
logger.debug("writeFile:%s"%filename)
if self.getConfig('zip_output'):
outputdirs = os.path.dirname(self.getBaseFileName())
if outputdirs:
filename=outputdirs+'/'+filename
self.zipout.writestr(filename,data)
else:
outputdirs = os.path.dirname(self.outfilename)
if outputdirs:
filename=outputdirs+'/'+filename
dir = os.path.dirname(filename)
if not os.path.exists(dir):
os.mkdir(dir) ## os.makedirs() doesn't work in 2.5.2?
outstream = open(filename,"wb")
outstream.write(data)
outstream.close()
def writeStoryImpl(self, out):
"Must be overriden by sub classes."
pass
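# A minimal sketch of a concrete writer built on this base class
# (hypothetical subclass, for illustration only):
#   class TxtWriter(BaseStoryWriter):
#       @staticmethod
#       def getFormatName():
#           return 'txt'
#       @staticmethod
#       def getFormatExt():
#           return '.txt'
#       def writeStoryImpl(self, out):
#           for chap in self.story.getChapters():
#               self._write(out, stripHTML(chap['html']))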
| 40.950192
| 139
| 0.573915
|
7949d73dc8abd8b23ca090be1eca08da0142d808
| 972
|
py
|
Python
|
mmseg/datasets/pipelines/__init__.py
|
Togal-ai-Team/mmsegmentation
|
55f50ec84865270024d847b6e2a787693b90835d
|
[
"Apache-2.0"
] | null | null | null |
mmseg/datasets/pipelines/__init__.py
|
Togal-ai-Team/mmsegmentation
|
55f50ec84865270024d847b6e2a787693b90835d
|
[
"Apache-2.0"
] | null | null | null |
mmseg/datasets/pipelines/__init__.py
|
Togal-ai-Team/mmsegmentation
|
55f50ec84865270024d847b6e2a787693b90835d
|
[
"Apache-2.0"
] | 1
|
2022-01-04T01:16:12.000Z
|
2022-01-04T01:16:12.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
from .compose import Compose
from .formatting import (Collect, ImageToTensor, ToDataContainer, ToTensor,
Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
PhotoMetricDistortion, RandomCrop, RandomCutOut,
RandomFlip, RandomMosaic, RandomRotate, Rerange,
Resize, RGB2Gray, SegRescale, Corrupt)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray', 'RandomCutOut',
'RandomMosaic', 'Corrupt'
]
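# A minimal sketch of composing these transforms into a pipeline
# (illustrative parameters, not a recommended training config):
#   pipeline = Compose([
#       LoadImageFromFile(),
#       LoadAnnotations(),
#       RandomFlip(prob=0.5),
#   ])
#   results = pipeline(data)  # data dict as produced by the dataset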
| 48.6
| 75
| 0.679012
|
7949d7bd3525ca9e3e3fbff3c6337b5c402d6886
| 41,857
|
py
|
Python
|
cup/util/conf.py
|
0xflotus/CUP
|
5e4330cb2e4ccdc67ad94f0084e745eed6f96d6b
|
[
"Apache-2.0"
] | null | null | null |
cup/util/conf.py
|
0xflotus/CUP
|
5e4330cb2e4ccdc67ad94f0084e745eed6f96d6b
|
[
"Apache-2.0"
] | null | null | null |
cup/util/conf.py
|
0xflotus/CUP
|
5e4330cb2e4ccdc67ad94f0084e745eed6f96d6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Liu.Jia@baidu, Guannan Ma (@mythmgn),
"""
:description:
Complex and constructive conf support
"""
import os
import time
import copy
import shutil
from xml.dom import minidom
# import subprocess
import json
import cup
# G_TOOL_PATH = None
__all__ = [
'Configure2Dict', 'Dict2Configure',
'HdfsXmlConf'
]
class CConf(object):
"""
Deprecated class. Please do not use it. Use python configparser instead.
"""
def __init__(self, path, name, revert_version=''):
self.name = name
self.path = path
self.file_abspath = self.path + '/' + self.name
self.exclude = ['#', '[']  # compared against line[0], so single characters only
self.sep = ':'
self.bakfile = self.path + '/' + self.name + '.bak.' + revert_version
def __del__(self):
if os.path.exists(self.bakfile):
os.unlink(self.bakfile)
def _backup(self, new_bakfile):
shutil.copyfile(self.file_abspath, new_bakfile)
def __getitem__(self, key):
with open(self.file_abspath) as src:
value = ''
for line in src.readlines():
if len(line) > 0 and line[0] not in self.exclude:
spstrs = line.split(':')
k = spstrs[0].strip()
if k == key:
value = spstrs[1].strip()
return value
def __len__(self):
"""
This function should not be used
"""
return 0
def update(self, kvs):
"""
update conf with a dict.
dict = {'key' : 'value', 'key1': 'value'}
"""
self._backup(self.bakfile)
with open(self.bakfile) as src:
with open(self.file_abspath, 'w') as trg:
for line in src.readlines():
if len(line) > 0 and line[0] not in self.exclude:
splist = line.split(':')
k = splist[0].strip()
if k in kvs.keys():
line = k + ' : ' + kvs[k] + '\n'
trg.write(line)
def revert(self):
"""
revert the conf
"""
os.rename(self.bakfile, self.file_abspath)
def write_kv_into_conf(self, kvkvs):
"""
Write the key-value pairs into the conf file
"""
with open(self.file_abspath, 'w+') as fhandle:
for i in kvkvs.keys():
fhandle.write('%s:%s\n' % (i, kvkvs[i]))
class CConfModer(object):
"""
Deprecated. Recommend using Configure2Dict / Dict2Configure instead.
"""
def __init__(self, toolpath):
if not os.path.exists(toolpath):
raise IOError(
'File not found - The cfmod tool cannot be found: %s'
% toolpath
)
self._modtool = toolpath
def updatekv(self, confpath, key, val):
"""
update key with value
"""
cmd = "%s -c %s -u %s:%s " % (self._modtool, confpath, key, val)
try_times = 0
while True:
ret = cup.shell.ShellExec().run(cmd, 120)
if(
ret['returncode'] == 0
or not ret['returncode']
or try_times > 1
):
ret['stdout'] = ret['stdout'].decode('gbk')
ret['stdout'] = ret['stdout'].encode('utf-8')
# print ret['stdout']
break
else:
try_times += 1
print 'err:updatekv'
time.sleep(1)
def updatekvlist(self, confpath, kvlist):
"""
update a list of key/value
"""
strcmd = ''
for key_value in kvlist:
strcmd += ' -u %s:%s ' % (key_value['key'], key_value['value'])
cmd = "%s -c %s %s" % (self._modtool, confpath, strcmd)
try_times = 0
while True:
ret = cup.shell.ShellExec().run(cmd, 120)
if(
ret['returncode'] == 0
or not ret['returncode']
or try_times > 1
):
ret['stdout'] = ret['stdout'].decode('gbk')
ret['stdout'] = ret['stdout'].encode('utf-8')
print ret['stdout']
break
else:
try_times += 1
print 'err:updatekvlist'
time.sleep(1)
def addkv(self, confpath, key, val):
"""
add key value into a conf
"""
cmd = "%s -c %s -i %s:%s &>/dev/null" % (
self._modtool, confpath, key, val
)
try_times = 0
while True:
ret = cup.shell.ShellExec().run(cmd, 120)
if(
ret['returncode'] == 0
or not ret['returncode']
or try_times > 1
):
ret['stdout'] = ret['stdout'].decode('gbk')
ret['stdout'] = ret['stdout'].encode('utf-8')
print ret['stdout']
break
else:
try_times += 1
print 'err:addkv'
time.sleep(1)
def delkv(self, confpath, key):
"""
del a key from a conf file
"""
cmd = "%s -c %s -d %s " % (self._modtool, confpath, key)
try_times = 0
while True:
ret = cup.shell.ShellExec().run(cmd, 120)
if(
ret['returncode'] == 0
or not ret['returncode']
or try_times > 1
):
ret['stdout'] = ret['stdout'].decode('gbk')
ret['stdout'] = ret['stdout'].encode('utf-8')
print ret['stdout']
break
else:
try_times += 1
print 'err:delkv'
time.sleep(1)
class ArrayFormatError(cup.err.BaseCupException):
"""
array format error for Configure2Dict
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class LineFormatError(cup.err.BaseCupException):
"""
Line error class
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class KeyFormatError(cup.err.BaseCupException):
"""
Key error class
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class ValueFormatError(cup.err.BaseCupException):
"""
value error class
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class UnknowConfError(cup.err.BaseCupException):
"""
unkown error class
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class ConfDictSetItemError(cup.err.BaseCupException):
"""
ConfDict Error
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class ConfListSetItemError(cup.err.BaseCupException):
"""
ConfList Error
"""
def __init__(self, errmsg):
super(self.__class__, self).__init__(errmsg)
class ConfList(list):
"""
Conf list attributes.
e.g.
@disk: /home/disk1
@disk: /home/disk2
"""
def __init__(self):
super(self.__class__, self).__init__()
self._ind = 0
self._comments = []
def append_ex(self, item, comments):
"""
append a item with conf comments
"""
assert isinstance(comments, list), 'comments should be a list'
super(self.__class__, self).append(item)
self._ind += 1
self._comments.append(comments)
def get_ex(self, ind):
"""
get conf list item with its comments
"""
try:
return (self.__getitem__(ind), self._comments[ind])
except IndexError:
return (self.__getitem__(ind), [])
def __delitem__(self, index):
list.__delitem__(self, index)
del self._comments[index]
def append(self, item):
"""append item"""
self.append_ex(item, [])
def insert(self, ind, item):
"""plz do not use this function"""
raise ConfDictSetItemError(
'Do not support "insert". Use "append" instead'
)
def extend(self, seqs):
"""plz do not use extend"""
raise ConfDictSetItemError(
'Do not support "extend". Use "append" instead'
)
class ConfDict(dict):
"""
ConfDict that Configure2Dict and Dict2Configure can use.
"""
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
self._index = 0
self._extra_dict = {}
self._tail = None
self._reverse_ind = -99999999
def __delitem__(self, key):
dict.__delitem__(self, key)
del self._extra_dict[key]
def set_ex(self, key, value, comments):
"""
In addition to dict['key'] = value, set_ex also sets comments along with
the key.
"""
super(self.__class__, self).__setitem__(key, value)
if key not in self._extra_dict:
if isinstance(value, list) or isinstance(value, dict):
self._extra_dict[key] = (self._index, comments)
self._index += 1
else:
self._extra_dict[key] = (self._reverse_ind, comments)
self._reverse_ind += 1
def get_ex(self, key):
"""
get (value, (index, comments)) for key; comments is a list
"""
value = self.get(key)
comments = self._extra_dict.get(key)
if comments is None:
comments = []
return (value, comments)
def __setitem__(self, key, value):
super(self.__class__, self).__setitem__(key, value)
if key not in self._extra_dict:
if isinstance(value, list) or isinstance(value, dict):
self._extra_dict[key] = (self._index, [])
self._index += 1
else:
self._extra_dict[key] = (self._reverse_ind, [])
self._reverse_ind += 1
def _compare_keys(self, keyx, keyy):
if self._extra_dict[keyx][0] == self._extra_dict[keyy][0]:
return 0
elif self._extra_dict[keyx][0] < self._extra_dict[keyy][0]:
return -1
else:
return 1
def get_ordered_keys(self):
"""
get keys in order
"""
keys = sorted(self.keys(), self._compare_keys)
return keys
# @brief translate configure(public/configure) conf to dict
class Configure2Dict(object): # pylint: disable=R0903
"""
Configure2Dict supports the conf features below:
1. comments
As we support accessing/modifying comments in a conf file, you should obey
the rules below:
Comment closely above the object you want to comment on.
Do NOT comment at the end of a line.
Otherwise, you might get/set the wrong comment for the object.
2. sections
2.1 global section
- if key:value is not under any [section], it is under the global layer
by default
- global section is the 0th layer section
e.g.
test.conf:
# test.conf
global-key: value
global-key1: value1
2.2 child section
- [section1] means a child section under Global. And it's the
1st layer section
- [.section2] means a child section under the nearest section
above. It's the 2nd layer section.
- [..section3] means a child section under the nearest section
above. And the prefix .. means it is the 3rd layer section
e.g.:
test.conf:
::
global-key: value
[section]
host: abc.com
port: 8080
[.section_child]
child_key: child_value
[..section_child_child]
control: ssh
[...section_child_child_child]
wow_key: wow_value
2.3 section access method
get_dict method will convert conf into a ConfDict which is derived
from python dict.
- Access the section with confdict['section']['section-child'].
- Access the section with confdict.get_ex('section') with (value,
comments)
3. key:value and key:value array
3.1 key:value
key:value pairs can be set under the Global section, i.e. before the
first [section] line appears.
key:value can also be set under sections.
::
# test.conf
key1: value1
[section]
key_section: value_in_section
[.seciton]
key_section_child: value_section_child
3.2 key:value arrays
key:value arrays can be accessed with confdict['section']['disk'].
You will get a ConfList derived from python list.
::
# test.conf
# Global layer, key:value
host: abc.com
port: 12345
# 1st layer [monitor]
@disk: /home/data0
@disk: /home/data1
[section]
@disk: /home/disk/disk1
@disk: /home/disk/disk2
4. Example
::
# test.conf
# Global layer, key:value
host: abc.com
port: 12345
# 1st layer [monitor]
@disk: /home/data0
@disk: /home/data1
[section]
@disk: /home/disk/disk1
@disk: /home/disk/disk2
[monitor]
timeout: 100
regex: sshd
# 2nd layer that belongs to [monitor]
[.timeout]
# key:value in timeout
max: 100
# 3rd layer that belongs to [monitor] [timeout]
[..handler]
default: exit
"""
def __init__(self, configure_file, remove_comments=True, separator=':'):
"""
@param configure_file:
configure file path
@param remove_comments:
if you comment after key:value # comment, whether we should
remove it when you access the key
@raise:
IOError configure_file not found
cup.util.conf.KeyFormatError Key format error
cup.util.conf.ValueFormatError value format error
cup.util.conf.LineFormatError line format error
cup.util.conf.ArrayFormatError @array format error
cup.util.conf.UnknowConfError unknown error
"""
self._file = configure_file
if not os.path.exists(configure_file):
raise IOError('%s does not exists' % configure_file)
if not os.path.isfile(configure_file):
raise IOError('%s is not a file' % configure_file)
self._lines = []
self._dict = ConfDict()
self._remove_comments = remove_comments
self._blank_and_comments = {}
self._separator = separator
def _strip_value(self, value):
if self._remove_comments:
rev = value.split('#')[0].strip()
else:
rev = value
return rev
def _handle_key_value_tuple(self, linenum, conf_dict_now, comments):
num = linenum
line = self._lines[num]
key, value = line
rev_comments = comments
if not key.startswith('@'):
conf_dict_now.set_ex(key, value, rev_comments)
rev_comments = []
else:
# @key: value
# it's a conf array.
# e.g.
# @disk : /home/disk1
# @disk : /home/disk2
# conf_dict_now[key[1:]] = [value]
if not key[1:] in conf_dict_now:
conf_array = ConfList()
conf_dict_now.set_ex(key[1:], conf_array, rev_comments)
else:
conf_array = conf_dict_now[key[1:]]
conf_array.append_ex(value, [])
rev_comments = []
num += 1
while num < len(self._lines): # get all items
if self._handle_comments(rev_comments, self._lines[num]):
num += 1
continue
if not type(self._lines[num]) == tuple or \
self._lines[num][0] != key:
num -= 1
break
conf_dict_now[key[1:]].append_ex(
self._lines[num][1], rev_comments
)
rev_comments = []
num += 1
return (num, rev_comments)
@classmethod
def _handle_comments(cls, comments, line):
if line[0] == '__comments__':
comments.append(line[1])
return True
return False
@classmethod
def _handle_group_keys(
cls, key, conf_dict_now, conf_layer_stack, comments
):
for groupkey in key.split('.'):
conf_dict_now = conf_layer_stack[-1]
while isinstance(conf_dict_now, list):
conf_dict_now = conf_dict_now[-1]
if groupkey in conf_dict_now:
conf_dict_now = conf_dict_now[groupkey]
# push this layer into the stack
conf_layer_stack.append(conf_dict_now)
else:
if groupkey[0] == '@':
groupkey = groupkey[1:]
if groupkey in conf_dict_now:
conf_dict_now[groupkey].append_ex(
{}, comments
)
comments = []
else:
conflist = ConfList()
conf_dict_now.set_ex(
groupkey, conflist, comments
)
comments = []
conflist.append(ConfDict())
else:
conf_dict_now.set_ex(
groupkey, ConfDict(), comments
)
comments = []
conf_layer_stack.append(conf_dict_now[groupkey])
return comments
# GLOBAL level 1
# [groupA] level 2
# [.@groupB] level 3
# [..@groupC] level 4
# pylint: disable=R0912, R0915
def get_dict(self, ignore_error=False):
"""
get conf_dict which you can use to access conf info.
:param ignore_error:
True, CUP will parse the conf file without catching exceptions
"""
comments = []
self._get_input_lines(ignore_error)
conf_layer_stack = [self._dict]
num = 0
length = len(self._lines)
last_list_key = None
while num < length:
line = self._lines[num]
if self._handle_comments(comments, line):
num += 1
continue
conf_dict_now = conf_layer_stack[-1] # conf_dict_now is current
while isinstance(conf_dict_now, list): # [], find the working dict
conf_dict_now = conf_dict_now[-1]
if isinstance(line, tuple): # key value
num, comments = self._handle_key_value_tuple(
num, conf_dict_now, comments
)
else:
key = line.lstrip('.')
level = len(line) - len(key) + 2 # determine the level
if key == 'GLOBAL': # GLOBAL is the 1st level
level = 1
conf_layer_stack = [self._dict]
# [Group1.SubGroup1] sub-key: Value
# for dotted group keys the level cannot be computed as len(line) - len(key)
elif '.' in key: # conf_layer_stack back to [self._dict]
conf_layer_stack = [self._dict]
comments = self._handle_group_keys(
key, conf_dict_now, conf_layer_stack, comments
)
elif level > len(conf_layer_stack) + 1:
raise ArrayFormatError(line)
elif level == len(conf_layer_stack) + 1:
# new group
if key[0] == '@':
key = key[1:]
conflist = ConfList()
conflist.append(ConfDict())
conf_dict_now.set_ex(key, conflist, [])
else:
conf_dict_now.set_ex(key, ConfDict(), comments)
comments = []
conf_layer_stack.append(conf_dict_now[key])
elif level == len(conf_layer_stack):
# -1 means the last item. -2 means the second from the end
conf_dict_now = conf_layer_stack[-2] # back one
while isinstance(conf_dict_now, list):
conf_dict_now = conf_dict_now[-1]
if key[0] == '@':
tmpkey = key[1:]
if tmpkey in conf_dict_now: # the same group
# pylint: disable=E1101
# conf_dict_now[tmpkey] = ConfDict()
if tmpkey != last_list_key:
conf_layer_stack[-1] = conf_dict_now[tmpkey]
conf_layer_stack[-1].append_ex(
ConfDict(), comments
)
comments = []
else: # different group
conflist = ConfList()
conflist.append(ConfDict())
conf_dict_now.set_ex(tmpkey, conflist, comments)
# conf_dict_now.set_ex(tmpkey, conflist, [])
comments = []
conf_layer_stack[-1] = conf_dict_now[tmpkey]
last_list_key = tmpkey
else:
conf_dict_now.set_ex(key, ConfDict(), comments)
comments = []
conf_layer_stack[-1] = conf_dict_now[key]
elif level < len(conf_layer_stack):
conf_dict_now = conf_layer_stack[level - 2] # get back
while isinstance(conf_dict_now, list):
conf_dict_now = conf_dict_now[-1]
if key[0] == '@':
tmpkey = key[1:]
if tmpkey in conf_dict_now: # the same group
tmpdict = ConfDict()
conf_layer_stack[level - 1].append(tmpdict)
else: # different group
conflist = ConfList()
conflist.append(ConfDict())
conf_dict_now.set_ex(tmpkey, conflist, [])
conf_layer_stack[level - 1] = conf_dict_now[tmpkey]
else:
conf_dict_now.set_ex(key, ConfDict(), comments)
comments = []
conf_layer_stack[level - 1] = conf_dict_now[key]
conf_layer_stack = conf_layer_stack[:level]
else:
raise UnknowConfError('exception occurred')
num += 1
return self._dict
# Check the key id format
def _check_key_valid(self, key): # pylint: disable=R0201
if key == '' or key == '@':
raise KeyFormatError(key)
if key[0] == '@':
key = key[1:]
for char in key:
if not char.isalnum() and char != '_' \
and char != '-' and char != '.' and char != '$':
raise KeyFormatError(key)
# Check the [GROUP] key format
def _check_groupkey_valid(self, key):
for groupkey in key.split('.'):
self._check_key_valid(groupkey)
def _handle_include_syntx(self, line, ignore_error):
"""handle $include file.conf """
if ignore_error:
try:
include_file = line.split()[-1].strip('"')
include_dict = Configure2Dict(include_file).get_dict(
ignore_error
)
if '$include' not in self._dict:
newdict = ConfDict()
self._dict.set_ex('$include', newdict, '')
newdict.set_ex(
include_file, include_dict, ''
)
# pylint: disable=W0703
# Does not know exact exception type
except Exception:
cup.log.warn(
'failed to handle include file, line:{0}'.format(
line)
)
else:
include_file = line.split()[-1].strip('"')
include_dict = Configure2Dict(include_file).get_dict(
ignore_error
)
if '$include' not in self._dict:
newdict = ConfDict()
self._dict.set_ex('$include', newdict, '')
newdict.set_ex(
include_file, include_dict, ''
)
# Read in the file content, with format check
def _get_input_lines(self, ignore_error): # pylint: disable=R0912,R0915
"""
read conf lines
"""
try:
fhandle = open(self._file, 'r')
except IOError as error:
cup.log.error('open file failed:%s, err:%s' % (self._file, error))
raise IOError(str(error))
for line in fhandle.readlines():
line = line.strip()
# if it's a blank line or a line with comments only
if line == '':
line = '__comments__%s%s' % (self._separator, '\n')
if line.startswith('#'):
# fall through so the comment line is recorded and re-attached to the item below
line = '__comments__%s%s\n' % (self._separator, line)
if line.startswith('$include'):
self._handle_include_syntx(line, ignore_error)
continue
# if it's a section
if line.startswith('['):
if line.find('#') > 0:
line = line[:line.find('#')].strip()
if not line.endswith(']'):
raise LineFormatError('Parse line error, line:\n' + line)
line = line[1:-1]
key = line.lstrip('.')
self._check_groupkey_valid(key) # check if key is valid
self._lines.append(line)
continue
# key, value = line.split(':', 1)
key, value = line.split(self._separator, 1)
key = key.strip()
value = value.strip(' \t')
# if remove_comments is True, delete comments in value.
self._check_key_valid(key)
if value.startswith('"'): # if the value is a string
if not value.endswith('"'):
raise ValueFormatError(line)
else:
if key != '__comments__':
value = self._strip_value(value)
if value.startswith('"'):
tmp_value = ''
# reserve escape in the value string
escape = False
for single in value:
if escape:
if single == '0':
tmp_value += '\0'
elif single == 'n':
tmp_value += '\n'
elif single == 'r':
tmp_value += '\r'
elif single == 't':
tmp_value += '\t'
elif single == 'v':
tmp_value += '\v'
elif single == 'a':
tmp_value += '\a'
elif single == 'b':
tmp_value += '\b'
elif single == 'd':
tmp_value += r'\d'
elif single == 'f':
tmp_value += '\f'
elif single == "'":
tmp_value += "'"
elif single == '"':
tmp_value += '"'
elif single == '\\':
tmp_value += '\\'
else:
# raise ValueFormatError(line)
pass
escape = False
elif single == '\\':
escape = True
else:
tmp_value += single
if escape:
raise ValueFormatError(line)
value = tmp_value
self._lines.append((key, value))
fhandle.close()
class Dict2Configure(object):
"""
Convert Dict into Configure.
You can convert a ConfDict or python dict into a conf file.
"""
##
# @param dict the conf dict, make sure the type format is right
#
def __init__(self, conf_dict, separator=':'):
self._dict = None
self.set_dict(conf_dict)
self._level = 0
self._str = ''
self._separator = separator
def _get_field_value_sep(self):
return self._separator
# The separator between each line
@classmethod
def _get_linesep(cls):
return '\n'
# The flag of an array
@classmethod
def _get_arrayflag(cls):
return '@'
def _get_levelsep(self):
return '.' * self._level
def _get_arraylevel_sep(self):
return '.' * self._level + self._get_arrayflag()
def _get_indents(self):
return ' ' * self._level * 4
def _get_write_string(self):
self._str = ''
self._level = 0
self._get_confstring(self._dict)
return self._str
def write_conf(self, conf_file):
"""
write the conf into of the dict into a conf_file
"""
with open(conf_file, 'w') as fhandle:
fhandle.write(self._get_write_string())
# pylint: disable=R0911
@classmethod
def _comp_write_keys(cls, valuex, valuey):
_py_type = [bool, int, float, str, unicode]
if type(valuex) == type(valuey):
return 0
for py_type in _py_type:
if isinstance(valuex, py_type):
return -1
for py_type in _py_type:
if isinstance(valuey, py_type):
return 1
if isinstance(valuex, list) and isinstance(valuey, list):
try:
if isinstance(valuex[0], dict) or isinstance(valuex[0], list):
return 1
else:
return -1
# pylint: disable=W0703
except Exception:
return -1
else:
return -1
# if isinstance(valuex, list) and isinstance(valuey, str):
# return 1
if isinstance(valuex, dict):
return 1
if isinstance(valuey, dict):
return -1
return 1
# pylint: disable=R0912
def _get_confstring(self, _dict):
# for item in sorted(
# _dict.items(), lambda x, y: self._comp_type(x[1], y[1])
# ):
try:
order_keys = _dict.get_ordered_keys()
except AttributeError:
order_keys = sorted(
_dict.keys(), lambda x, y: self._comp_write_keys(
_dict[x], _dict[y]
)
)
if '$include' in order_keys:
for filepath in _dict['$include']:
self._str += '$include "{0}"{1}'.format(
filepath, self._get_linesep()
)
order_keys.remove('$include')
for key in order_keys:
if key == '$include':
cup.log.warn('cup.conf does not support $include writeback yet')
continue
try:
item = _dict.get_ex(key)
value = item[0]
comments = item[1][1]
except AttributeError:
value = _dict.get(key)
comments = []
for comment in comments:
self._str += self._get_indents() + comment
if isinstance(value, tuple) or isinstance(value, list):
if isinstance(value, tuple):
print "it's a tuple, key:%s, value:%s" % (key, value)
if len(value) > 0 and isinstance(value[0], dict):
# items are all arrays
# [..@section]
# abc:
# [..@section]
# abc:
for ind in xrange(0, len(value)):
try:
item = value.get_ex(ind)
except AttributeError:
item = (value[ind], [])
for comment in item[1]:
self._str += self._get_indents() + comment
self._add_arraylevel(key)
self._get_confstring(item[0])
self._minus_level()
else:
# a array list and array list has no sub-dict
# @item
# @item
for ind in xrange(0, len(value)):
try:
item = value.get_ex(ind)
except AttributeError:
item = (value[ind], [])
for comment in item[1]:
self._str += self._get_indents() + comment
self._appendline(
self._get_arrayflag() + str(key), item[0]
)
elif isinstance(value, dict):
self._addlevel(key)
self._get_confstring(value)
self._minus_level()
else:
# type(value) == type(""):
self._appendline(key, value)
def _get_confstring_ex(self, _dict):
pass
def _appendline(self, key, value):
self._str += (
self._get_indents() + str(key) +
self._get_field_value_sep()+str(value)+self._get_linesep()
)
def _addlevel(self, key):
self._str += (
self._get_indents() + '[' + self._get_levelsep() + str(key) + ']'
+ self._get_linesep()
)
self._level += 1
def _add_arraylevel(self, key):
self._str += (
self._get_indents() + '[' + self._get_arraylevel_sep() +
str(key) + ']' + self._get_linesep()
)
self._level += 1
def _minus_level(self):
self._level -= 1
# Set the conf dict
def set_dict(self, conf_dict):
"""
set a new conf_dict
"""
if not isinstance(conf_dict, dict):
raise TypeError('conf_dict is not a type of dict')
self._dict = conf_dict
# itemlist=sorted(dict.items(), lambda x,y: _comp_type(x[1],y[1]))
# sort the dict, make type{dict} last
@classmethod
def _comp_type(cls, item_a, item_b):
if type(item_a) in (tuple, list):
if len(item_a) > 0:
item_a = item_a[0]
if type(item_b) in (tuple, list):
if len(item_b) > 0:
item_b = item_b[0]
if type(item_a) == type(item_b):
return 0
elif type(item_b) == dict:
return -1
elif type(item_a) == dict:
return 1
# if type(item_a)!=type({}) or type(item_b)==type({}):
# return -1
return 1
class HdfsXmlConf(object):
"""
hdfs xmlconf modifier.
Example:
::
# modify and write new conf into hadoop-site.xmlconf
xmlobj = xmlconf.HdfsXmlConf(xmlfile)
# get hdfs conf items into a python dict
key_values = xmlobj.get_items()
# modify hdfs conf items
for name in self._confdict['journalnode']['hadoop_site']:
if name in key_values:
key_values[name]['value'] = \
self._confdict['journalnode']['hadoop_site'][name]
else:
key_values[name] = {
'value': self._confdict['journalnode']['hadoop_site'][name],
'description': ' '
}
hosts = ','.join(self._confdict['journalnode']['host'])
key_values['dfs.journalnode.hosts'] = {
'value': hosts, 'description':' journalnode hosts'
}
# write back conf items with new values
xmlobj.write_conf(key_values)
"""
def __init__(self, filepath):
if not os.path.exists(filepath):
raise IOError('file not found:{0}'.format(filepath))
if not os.path.isfile(filepath):
raise IOError('{0} not a file'.format(filepath))
self._xmlpath = filepath
self._confdict = None
def _load_items(self):
self._confdict = {}
dom = minidom.parse(self._xmlpath)
properties = dom.getElementsByTagName('property')
for pro in properties:
tmpdict = {}
try:
tmpdict['value'] = pro.getElementsByTagName(
'value'
)[0].childNodes[0].nodeValue
except IndexError:
tmpdict['value'] = None
try:
tmpdict['description'] = pro.getElementsByTagName(
'description'
)[0].childNodes[0].nodeValue
except IndexError:
tmpdict['description'] = None
self._confdict[
pro.getElementsByTagName('name')[0].childNodes[0].nodeValue
] = tmpdict
return self._confdict
def get_items(self):
"""
return hadoop config items as a dict.
::
{
'dfs.datanode.max.xcievers': {
'value': 'true', 'description': 'xxxxxxxxxx'
},
......
}
"""
return self._load_items()
def _write_to_conf(self, new_confdict):
dom = minidom.parse(self._xmlpath)
properties = dom.getElementsByTagName('property')
tmpdict = copy.deepcopy(new_confdict)
# modify if name exists
for pro in properties:
name = pro.getElementsByTagName('name')[0].childNodes[0].nodeValue
valuenode = pro.getElementsByTagName('value')[0]
if name in tmpdict:
need_modify = False
if valuenode.firstChild is None:
if tmpdict[name]['value'] is not None:
valuenode.appendChild(dom.createTextNode(''))
need_modify = True
else:
need_modify = True
if need_modify:
valuenode.firstChild.replaceWholeText(
tmpdict[name]['value']
)
del tmpdict[name]
else:
parent = pro.parentNode
parent.insertBefore(dom.createComment(pro.toxml()), pro)
parent.removeChild(pro)
configuration_node = dom.getElementsByTagName('configuration')[0]
for name in tmpdict:
new_pro = dom.createElement('property')
new_name = dom.createElement('name')
new_name.appendChild(dom.createTextNode(name))
new_pro.appendChild(new_name)
# value in the new property
new_value = dom.createElement('value')
if new_value is not None:
new_value.appendChild(
dom.createTextNode(tmpdict[name]['value'])
)
new_pro.appendChild(new_value)
# description
new_desc = dom.createElement('description')
new_desc.appendChild(
dom.createTextNode(tmpdict[name]['description'])
)
new_pro.appendChild(new_desc)
configuration_node.appendChild(new_pro)
return dom.toprettyxml(newl='\n')
def write_conf(self, kvs):
"""
update config items with a dict kvs. Refer to the example above.
::
{
key : { 'value': value, 'description': 'description'},
......
}
"""
self._load_items()
str_xml = self._write_to_conf(kvs)
with open(self._xmlpath, 'w') as fhandle:
fhandle.write(str_xml)
self._confdict = kvs
def _main_handle():
dict4afs = Configure2Dict('/tmp/metaserver.conf')
dictafs = dict4afs.get_dict()
print json.dumps(dictafs, sort_keys=True, indent=4)
if __name__ == "__main__":
pass
# conf = CConf(g_prodUnitRuntime + 'Unitserver0/conf/','UnitServer.conf')
# conf.update({'MasterPort':'1234','ProxyPortDelta':'0'})
# conf.addAfterKeywordIfNoexist(
# 'SnapshotPatchLimit', ('DelServerPerHourLimit', '99')
# )
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
| 33.62008
| 83
| 0.488855
|
7949d814ca2438d5469a43697e6e0f18d9762aca
| 3,353
|
py
|
Python
|
Lab2/Lab.py
|
GnomGad/PythonLabs
|
e04e03aee2e3f1c6960e7af707f1c56887c45d32
|
[
"MIT"
] | null | null | null |
Lab2/Lab.py
|
GnomGad/PythonLabs
|
e04e03aee2e3f1c6960e7af707f1c56887c45d32
|
[
"MIT"
] | null | null | null |
Lab2/Lab.py
|
GnomGad/PythonLabs
|
e04e03aee2e3f1c6960e7af707f1c56887c45d32
|
[
"MIT"
] | null | null | null |
import os
import hashlib
import random
import re
import subprocess
def Ex1():
rus = "йцукенгшщзхъфывапролджэячсмитьбюё"
eng = "qwertyuiopasdfghjklzxcvbnm"
Ex1_Result(rus,"Ex1Text.txt")
def Ex1_Result(lang,path):
with open(path, "r") as file:
text = file.read().lower()
dtext = {i: text.count(i) for i in lang}
total = max(sum(dtext.values()), 1)  # total counted letters, so the output is a real percentage
[print(i[0], "-", round(100 * i[1] / total, 2), "%") for i in sorted(dtext.items(), key=lambda i: i[1], reverse=True)]
def Ex2():
path = os.getcwd()
hashes = {}
for filename in os.listdir(path):
if os.path.isfile(filename):
with open(filename, "rb") as file:
hashes.setdefault(hashlib.md5(file.read()).digest(),[]).append(filename)
for filenames in hashes:
if len(hashes[filenames]) > 1:
print("---Файлы Копии---\n", ", ".join(hashes[filenames]), sep="")
def Ex3():
path = os.getcwd()+"\\Music"
getMusic = "E:\\VKMusic 4\\Music"
if not os.path.exists(path):
os.mkdir(path)
Ex3_CreateMusics(os.getcwd())
Ex3_CreateDurations(os.getcwd())
Ex3_CreateList(os.getcwd())
Ex3_Rename(path)
def Ex3_Rename(path):
for filename in os.listdir(path):
if filename[-4:] ==".mp3":
for i in Ex3_GetList(path+"\\Musics.txt"):
if (filename[:-4]) in i and os.path.exists(path+"\\"+filename):
os.replace(path+"\\"+filename,path+"\\"+i[:-1])
break
def Ex3_CreateMusics(path):
for i in Ex3_GetList(path+"\\MusicNames.txt"):
with open(path+"\\Music\\"+i[:-1], "w+",encoding="UTF-8") as file:
pass
def Ex3_CreateDurations(path):
with open(path+"\\MusicDurations.txt", "w+",encoding="UTF-8") as file:
for i in Ex3_GetList(path+"\\MusicNames.txt"):
file.write("[{0}.{1}]\n".format(random.randint(0,9),random.randint(10,60)))
def Ex3_CreateList(path):
with open(path+"\\Music\\Musics.txt", "w+",encoding="UTF-8") as file:
mus =Ex3_GetList(path+"\\MusicNames.txt")
dur =Ex3_GetList(path+"\\MusicDurations.txt")
for i in range(len(mus)):
file.write(str(i+1)+". "+mus[i][:-5]+" "+dur[i][:-1]+".mp3\n")
def Ex3_GetList(path):
with open(path, "r",encoding="UTF-8") as file:
return file.readlines()
def Ex4():
lst = []
reg = "(:(-)?[\)]+)|\){3,}"
while True:
t = input()
if t == "":
break
else:
lst.append(t)
[print("Строка,",i,"позиция",j,": найдено",res) for i, j, res in TestReg(lst,reg)]
def TestReg(lst,reg):
for i in range(len(lst)):
parser = re.search(reg,lst[i])
count = 0
while parser is not None:
yield i+1,parser.regs[0][0]+count+1,parser.group()
count+=parser.regs[0][1]
lst[i] = lst[i][parser.regs[0][1]:]
parser = re.search(reg,lst[i])
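# A minimal sketch of TestReg on its own (illustrative input):
#   for line_no, pos, match in TestReg(["ab:)cd"], r"(:(-)?[\)]+)|\){3,}"):
#       print(line_no, pos, match)  # -> 1 3 :)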
def Ex5():
reg = "[A-ZА-ЯЁ][A-ZА-ЯЁa-zа-яё]+([0-9]{4}$|[0-9]{2}$)"
[print(res) for i, j, res in TestReg(input().split(),reg)]
def Ex6():
subprocess.call("reorganize.py --source \"D:\\TestDir\" --days 2 --size 4096",shell=True)
def Ex7():
subprocess.call("trackmix.py -s \"D:\\Application\\Git\\PythonLabs\\Lab2\\TestM\" -d \"kek.mp3\" -f 15 -l --extended",shell=True)
Ex4()
| 28.415254
| 133
| 0.555025
|
7949d8337ed3b54a85fec27c1c99e5e140f0754c
| 2,345
|
py
|
Python
|
tests/test_env_spec.py
|
bobh66/hera-workflows
|
7dc94de3725b13fcba7ff54cd96cb90119ae2591
|
[
"MIT"
] | null | null | null |
tests/test_env_spec.py
|
bobh66/hera-workflows
|
7dc94de3725b13fcba7ff54cd96cb90119ae2591
|
[
"MIT"
] | null | null | null |
tests/test_env_spec.py
|
bobh66/hera-workflows
|
7dc94de3725b13fcba7ff54cd96cb90119ae2591
|
[
"MIT"
] | null | null | null |
import json
from argo_workflows.models import ConfigMapKeySelector, EnvVarSource, SecretKeySelector
from pydantic import BaseModel
from hera.env import ConfigMapEnvSpec, EnvSpec, SecretEnvSpec
class MockModel(BaseModel):
field1: int = 1
field2: int = 2
def test_env_spec_sets_base_model(mock_model):
m = mock_model()
env = EnvSpec(name="model_string", value=m)
argo_spec = env.argo_spec
assert argo_spec.value == '{"field1": 1, "field2": 2}'
model_dict = json.loads(argo_spec.value)
test_model = MockModel(**model_dict)
assert test_model.field1 == m.field1
assert test_model.field2 == m.field2
def test_env_spec_sets_primitive_types_as_expected():
int_val = 1
int_env = EnvSpec(name="int", value=int_val)
int_spec = int_env.argo_spec
assert int_spec.value == '1'
assert json.loads(int_spec.value) == int_val
# values are stringified into env variable values, but strings are already strings,
# so this test ensures strings are passed through unquoted, in contrast to the
# JSON encoding applied to other primitive types
str_val = 'str'
str_env = EnvSpec(name="str", value=str_val)
str_spec = str_env.argo_spec
assert str_spec.value == 'str'
dict_val = {'a': 42}
dict_env = EnvSpec(name="dict", value=dict_val)
dict_spec = dict_env.argo_spec
assert dict_spec.value == '{"a": 42}'
assert json.loads(dict_spec.value) == dict_val
def test_secret_env_spec_contains_expected_fields():
env = SecretEnvSpec(name='s', secret_name='a', secret_key='b')
spec = env.argo_spec
assert not hasattr(spec, 'value')
assert spec.name == 's'
assert isinstance(spec.value_from, EnvVarSource)
assert isinstance(spec.value_from.secret_key_ref, SecretKeySelector)
assert spec.value_from.secret_key_ref.name == 'a'
assert spec.value_from.secret_key_ref.key == 'b'
def test_config_map_env_spec_contains_expected_fields():
env = ConfigMapEnvSpec(name='s', config_map_name='a', config_map_key='b')
spec = env.argo_spec
assert not hasattr(spec, 'value')
assert spec.name == 's'
assert isinstance(spec.value_from, EnvVarSource)
assert isinstance(spec.value_from.config_map_key_ref, ConfigMapKeySelector)
assert spec.value_from.config_map_key_ref.name == 'a'
assert spec.value_from.config_map_key_ref.key == 'b'
| 33.985507
| 88
| 0.725373
|
7949d83a10c2a20ae19d5a513ffe3561de54084b
| 6,356
|
py
|
Python
|
sygnal/helper/context_factory.py
|
kibablu/sygnal
|
4f4d7ce014d680a9336bc0723014c40f87393b91
|
[
"Apache-2.0"
] | 98
|
2015-02-04T21:30:52.000Z
|
2022-02-27T18:44:40.000Z
|
sygnal/helper/context_factory.py
|
kibablu/sygnal
|
4f4d7ce014d680a9336bc0723014c40f87393b91
|
[
"Apache-2.0"
] | 224
|
2015-02-21T12:54:29.000Z
|
2022-03-28T18:52:47.000Z
|
sygnal/helper/context_factory.py
|
kibablu/sygnal
|
4f4d7ce014d680a9336bc0723014c40f87393b91
|
[
"Apache-2.0"
] | 94
|
2015-02-21T12:49:04.000Z
|
2022-02-17T12:11:46.000Z
|
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from Synapse:
# https://github.com/matrix-org/synapse/blob/1016f303e58b1305ed5b3572fde002e1273e0fc0/synapse/crypto/context_factory.py#L77
import logging
import idna
from OpenSSL import SSL
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, TLSVersion, platformTrust
from twisted.python.failure import Failure
from twisted.web.iweb import IPolicyForHTTPS
from zope.interface import implementer
logger = logging.getLogger(__name__)
@implementer(IPolicyForHTTPS)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.
Uses one of two OpenSSL context objects for all connections, depending on whether
we should do SSL certificate verification.
get_options decides whether we should do SSL certificate verification and
constructs an SSLClientConnectionCreator factory accordingly.
"""
def __init__(self):
# Use CA root certs provided by OpenSSL
trust_root = platformTrust()
# "insecurelyLowerMinimumTo" is the argument that will go lower than
# Twisted's default, which is why it is marked as "insecure" (since
# Twisted's defaults are reasonably secure). But, since Twisted is
# moving to TLS 1.2 by default, we want to respect the config option if
# it is set to 1.0 (which the alternate option, raiseMinimumTo, will not
# let us do).
minTLS = TLSVersion.TLSv1_2
self._verify_ssl = CertificateOptions(
trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS
)
self._verify_ssl_context = self._verify_ssl.getContext()
self._verify_ssl_context.set_info_callback(self._context_info_cb)
def get_options(self, host):
ssl_context = self._verify_ssl_context
return SSLClientConnectionCreator(host, ssl_context)
@staticmethod
def _context_info_cb(ssl_connection, where, ret):
"""The 'information callback' for our openssl context object."""
# we assume that the app_data on the connection object has been set to
# a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
tls_protocol = ssl_connection.get_app_data()
try:
# ... we further assume that SSLClientConnectionCreator has set the
# '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
tls_protocol._synapse_tls_verifier.verify_context_info_cb(
ssl_connection, where
)
except: # noqa: E722, taken from the twisted implementation
logger.exception("Error during info_callback")
f = Failure()
tls_protocol.failVerification(f)
def creatorForNetloc(self, hostname, port):
"""Implements the IPolicyForHTTPS interace so that this can be passed
directly to agents.
"""
return self.get_options(hostname)
@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):
"""Creates openssl connection objects for client connections.
Replaces twisted.internet.ssl.ClientTLSOptions
"""
def __init__(self, hostname, ctx):
self._ctx = ctx
self._verifier = ConnectionVerifier(hostname)
def clientConnectionForTLS(self, tls_protocol):
context = self._ctx
connection = SSL.Connection(context, None)
# as per twisted.internet.ssl.ClientTLSOptions, we set the application
# data to our TLSMemoryBIOProtocol...
connection.set_app_data(tls_protocol)
# ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
# tls_protocol so that the SSL context's info callback has something to
# call to do the cert verification.
setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
return connection
class ConnectionVerifier(object):
"""Set the SNI, and do cert verification
This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
the ssl context's info callback.
"""
# This code is based on twisted.internet.ssl.ClientTLSOptions.
def __init__(self, hostname):
if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode("ascii")
self._is_ip_address = True
else:
# twisted's ClientTLSOptions falls back to the stdlib impl here if
# idna is not installed, but points out that lacks support for
# IDNA2008 (http://bugs.python.org/issue17305).
#
# We can rely on having idna.
self._hostnameBytes = idna.encode(hostname)
self._is_ip_address = False
self._hostnameASCII = self._hostnameBytes.decode("ascii")
def verify_context_info_cb(self, ssl_connection, where):
if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
ssl_connection.set_tlsext_host_name(self._hostnameBytes)
if where & SSL.SSL_CB_HANDSHAKE_DONE:
try:
if self._is_ip_address:
verify_ip_address(ssl_connection, self._hostnameASCII)
else:
verify_hostname(ssl_connection, self._hostnameASCII)
except VerificationError:
f = Failure()
tls_protocol = ssl_connection.get_app_data()
tls_protocol.failVerification(f)
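# A minimal usage sketch (hypothetical, not part of this module): pass the
# factory to a twisted Agent so outgoing HTTPS requests get SNI plus
# hostname/IP certificate verification:
#   from twisted.internet import reactor
#   from twisted.web.client import Agent
#   agent = Agent(reactor, contextFactory=ClientTLSOptionsFactory())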
| 40.484076
| 124
| 0.708779
|
7949d889d1b95927344aaf68774b0c6e011afe1b
| 87
|
py
|
Python
|
spektral/__init__.py
|
sjiang87/spektral
|
cef8575f9aa5a724fb125fc5596f52f71580793d
|
[
"MIT"
] | 1
|
2020-06-25T02:08:40.000Z
|
2020-06-25T02:08:40.000Z
|
spektral/__init__.py
|
sjiang87/spektral
|
cef8575f9aa5a724fb125fc5596f52f71580793d
|
[
"MIT"
] | null | null | null |
spektral/__init__.py
|
sjiang87/spektral
|
cef8575f9aa5a724fb125fc5596f52f71580793d
|
[
"MIT"
] | null | null | null |
from . import datasets
from . import layers
from . import utils
__version__ = '0.5.1'
| 14.5
| 22
| 0.724138
|
7949d8f0c43df42defb1c222051cdc77687524a5
| 3,145
|
py
|
Python
|
mistral/db/sqlalchemy/types.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 3
|
2015-08-28T04:57:56.000Z
|
2017-03-27T10:59:56.000Z
|
mistral/db/sqlalchemy/types.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 21
|
2015-04-14T22:41:53.000Z
|
2019-02-20T09:30:10.000Z
|
mistral/db/sqlalchemy/types.py
|
soda-research/mistral
|
550a3de9c2defc7ce26336cb705d9c8d87bbaddd
|
[
"Apache-2.0"
] | 12
|
2015-08-14T02:27:37.000Z
|
2020-12-31T10:09:21.000Z
|
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module implements SQLAlchemy-based types for dict and list
# expressed by json-strings
#
from oslo_serialization import jsonutils
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import mutable
class JsonEncoded(sa.TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = sa.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = jsonutils.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = jsonutils.loads(value)
return value
class MutableList(mutable.Mutable, list):
@classmethod
def coerce(cls, key, value):
"""Convert plain lists to MutableList."""
if not isinstance(value, MutableList):
if isinstance(value, list):
return MutableList(value)
# this call will raise ValueError
return mutable.Mutable.coerce(key, value)
return value
def __add__(self, value):
"""Detect list add events and emit change events."""
result = list.__add__(self, value)  # __add__ must return the resulting list
self.changed()
return result
def append(self, value):
"""Detect list add events and emit change events."""
list.append(self, value)
self.changed()
def __setitem__(self, key, value):
"""Detect list set events and emit change events."""
list.__setitem__(self, key, value)
self.changed()
def __delitem__(self, i):
"""Detect list del events and emit change events."""
list.__delitem__(self, i)
self.changed()
def JsonDictType():
"""Returns an SQLAlchemy Column Type suitable to store a Json dict."""
return mutable.MutableDict.as_mutable(JsonEncoded)
def JsonListType():
"""Returns an SQLAlchemy Column Type suitable to store a Json array."""
return MutableList.as_mutable(JsonEncoded)
def MediumText():
# TODO(rakhmerov): Need to do for postgres.
return sa.Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
class JsonEncodedMediumText(JsonEncoded):
impl = MediumText()
def JsonMediumDictType():
return mutable.MutableDict.as_mutable(JsonEncodedMediumText)
def LongText():
# TODO(rakhmerov): Need to do for postgres.
return sa.Text().with_variant(mysql.LONGTEXT(), 'mysql')
class JsonEncodedLongText(JsonEncoded):
impl = LongText()
def JsonLongDictType():
return mutable.MutableDict.as_mutable(JsonEncodedLongText)
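# A minimal usage sketch (hypothetical model, not part of this module):
#   import sqlalchemy as sa
#   from sqlalchemy.ext.declarative import declarative_base
#   Base = declarative_base()
#   class Execution(Base):
#       __tablename__ = 'executions'
#       id = sa.Column(sa.Integer, primary_key=True)
#       params = sa.Column(JsonDictType())  # mutation-tracked JSON dict
#       tags = sa.Column(JsonListType())    # mutation-tracked JSON list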
| 29.12037
| 77
| 0.683625
|
7949d981499b6ba8071165c493de24027ed6ba54
| 1,025
|
py
|
Python
|
test/test_v1_access_credential_secret_source.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | 21
|
2018-02-21T23:59:28.000Z
|
2021-12-08T05:47:37.000Z
|
test/test_v1_access_credential_secret_source.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | 47
|
2018-02-01T15:35:01.000Z
|
2022-02-11T07:45:54.000Z
|
test/test_v1_access_credential_secret_source.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | 19
|
2018-04-03T09:20:52.000Z
|
2021-06-01T06:07:28.000Z
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_access_credential_secret_source import V1AccessCredentialSecretSource
class TestV1AccessCredentialSecretSource(unittest.TestCase):
""" V1AccessCredentialSecretSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1AccessCredentialSecretSource(self):
"""
Test V1AccessCredentialSecretSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_access_credential_secret_source.V1AccessCredentialSecretSource()
pass
if __name__ == '__main__':
unittest.main()
| 22.777778
| 100
| 0.732683
|
7949da0a237a80e832d317063c1bc70ac02b8909
| 8,260
|
py
|
Python
|
cairis/test/test_DependencyAPI.py
|
RachelLar/Cairis
|
93498fb5334ef1f88f16554823a3b9121c3675b8
|
[
"Apache-2.0"
] | null | null | null |
cairis/test/test_DependencyAPI.py
|
RachelLar/Cairis
|
93498fb5334ef1f88f16554823a3b9121c3675b8
|
[
"Apache-2.0"
] | null | null | null |
cairis/test/test_DependencyAPI.py
|
RachelLar/Cairis
|
93498fb5334ef1f88f16554823a3b9121c3675b8
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import jsonpickle
from cairis.core.Dependency import Dependency
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.tools.ModelDefinitions import DependencyModel
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Robin Quetin, Shamal Faily'
class DependencyAPITests(CairisDaemonTestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
self.working_name_1 = ('Stroke', 'all', 'all', 'all')
self.working_name_2 = ('Stroke', 'Data%20Consumer', 'Certificate%20Authority', 'Personal%20certificate')
self.existing_environment_1 = 'Stroke'
self.existing_environment_2 = 'Psychosis'
self.existing_role_1 = 'Data Consumer'
self.existing_role_2 = 'Certificate Authority'
self.existing_type = 'goal'
self.existing_dependency = 'Upload authorisation'
def test_all_get(self):
method = 'test_all_get'
url = '/api/dependencies?session_id=test'
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'The response is not a valid JSON dictionary')
self.assertGreater(len(json_dict), 0, 'No dependencies found')
assert isinstance(json_dict, dict)
        item = list(json_dict.items())[0]
self.logger.info('[%s] First dependency: %s [%d]\n', method, item[0], item[1]['theId'])
def test_dependencies_name_get(self):
method = 'test_dependencies_name_get'
url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % self.working_name_1
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, list, 'The response is not a valid JSON dictionary')
self.assertGreater(len(json_dict), 0, 'No dependencies found')
assert isinstance(json_dict, list)
ids = []
for dep in json_dict:
ids.append(str(dep['theId']))
self.logger.info('[%s] Dependency IDs: %s\n', method, ', '.join(ids))
def test_dependency_name_get(self):
method = 'test_dependency_name_get'
url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % self.working_name_2
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, list, 'The response is not a valid JSON dictionary')
self.assertEqual(len(json_dict), 1, 'Result is not unique')
assert isinstance(json_dict, list)
item = json_dict[0]
        has_keys = all(k in item for k in DependencyModel.required)
self.assertTrue(has_keys, 'Result is not a dependency')
dep_name = '/'.join([item['theEnvironmentName'], item['theDepender'], item['theDependee'], item['theDependency']])
self.logger.info('[%s] Dependency: %s [%d]\n', method, dep_name, item['theId'])
def test_dependency_post(self):
method = 'test_dependency_post'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
self.app.delete(delete_url)
rv = self.app.post(url, data=json_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s', method, message)
dep_id = json_dict.get('dependency_id', None)
self.assertIsNotNone(dep_id, 'No dependency ID returned')
self.logger.info('[%s] New dependency ID: %d\n', method, dep_id)
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
self.app.delete(delete_url)
def test_dependency_name_delete(self):
method = 'test_dependency_name_delete'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
self.app.post(url, data=json_body, content_type='application/json')
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
rv = self.app.delete(delete_url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s\n', method, message)
def test_dependency_name_put(self):
method = 'test_dependency_name_put'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
self.app.post(url, data=json_body, content_type='application/json')
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
upd_dep = new_dep
upd_dep.theEnvironmentName = self.existing_environment_2
json_dict = {
'session_id': 'test',
'object': upd_dep
}
json_body = jsonpickle.encode(json_dict)
upd_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
rv = self.app.put(upd_url, data=json_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s\n', method, message)
delete_name = (upd_dep.theEnvironmentName, upd_dep.theDepender, upd_dep.theDependee, upd_dep.theDependency)
del_get_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % delete_name
rv = self.app.get(del_get_url)
self.logger.debug('[%s] Updated dependency:\n%s\n', method, rv.data)
self.app.delete(del_get_url)
def prepare_new_dependency(self):
d = Dependency(
-1,
self.existing_environment_1,
self.existing_role_1,
self.existing_role_2,
self.existing_type,
self.existing_dependency,
'This is a test dependency'
)
return d
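# A minimal sketch (not part of the original tests) showing how the
# '%20'-escaped fixture names above can be built from raw strings instead:
def _dependency_url(environment, depender, dependee, dependency):
    from urllib import quote  # urllib.parse.quote on Python 3
    parts = tuple(quote(p) for p in (environment, depender, dependee, dependency))
    return ('/api/dependencies/environment/%s/depender/%s'
            '/dependee/%s/dependency/%s?session_id=test' % parts)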
| 44.648649
| 120
| 0.719492
|
7949dab1f6e0425bb2f4eb025f7238d57b02b5b3
| 19,705
|
py
|
Python
|
multiagent/environment.py
|
RavenPillmann/multiagent-particle-envs
|
466cc6953dfc57a0bfba5c37609db3ee7e2508a8
|
[
"MIT"
] | null | null | null |
multiagent/environment.py
|
RavenPillmann/multiagent-particle-envs
|
466cc6953dfc57a0bfba5c37609db3ee7e2508a8
|
[
"MIT"
] | null | null | null |
multiagent/environment.py
|
RavenPillmann/multiagent-particle-envs
|
466cc6953dfc57a0bfba5c37609db3ee7e2508a8
|
[
"MIT"
] | null | null | null |
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from multiagent.multi_discrete import MultiDiscrete
from multiagent.scenarios.constants import D_LINE, O_LINE, Q_BACK
NOT_DONE = 0
Q_BACK_FIRST_DOWN_LINE = 1
AGENT_OUT_OF_BOUNDS = 2
D_LINE_REACHED_Q_BACK = 3
Q_BACK_NOT_IN_BOUNDS = 4
Q_BACK_THREW_BALL = 5
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, world, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None, shared_viewer=True):
self.world = world
self.agents = self.world.policy_agents
# set required vectorized gym env property
self.n = len(world.policy_agents)
# scenario callbacks
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
# self.done_callback = _done_callback
# environment parameters
self.discrete_action_space = True
# if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
self.discrete_action_input = False
# if true, even the action is continuous, action will be performed discretely
self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
# if true, every agent has the same reward
self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
self.time = 0
# configure spaces
self.action_space = []
self.observation_space = []
for agent in self.get_agents():
total_action_space = []
# physical action space
if self.discrete_action_space:
u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
if agent.movable:
total_action_space.append(u_action_space)
# communication action space
if self.discrete_action_space:
c_action_space = spaces.Discrete(world.dim_c)
else:
c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
if not agent.silent:
total_action_space.append(c_action_space)
# total action space
if len(total_action_space) > 1:
# all action spaces are discrete, so simplify to MultiDiscrete action space
if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
else:
act_space = spaces.Tuple(total_action_space)
self.action_space.append(act_space)
else:
self.action_space.append(total_action_space[0])
# observation space
obs_dim = len(observation_callback(agent, self.world))
self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
agent.action.c = np.zeros(self.world.dim_c)
# rendering
self.shared_viewer = shared_viewer
if self.shared_viewer:
self.viewer = None
else:
self.viewers = [None] * self.n
self._reset_render()
def get_agents(self):
return [agent if not agent.is_done else None for agent in self.agents]
def step(self, action_n):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
self.agents = self.world.policy_agents
# print(self.agents)
# set action for each agent
for i, agent in enumerate(self.agents):
self._set_action(action_n[i], agent, self.action_space[i])
# advance world state
self.world.step()
# record observation for each agent
# print("New step")
chance_of_completion = np.random.uniform(0.0, 1.0)
made_throw = chance_of_completion < list(filter(lambda player: player.position == 'q_back', self.world.agents))[0].completion_percentage
for agent in self.agents:
obs_n.append(self._get_obs(agent))
reward = self._get_reward(agent)
is_done = self.done_callback(agent, self.world)
done_n.append(is_done)
# TODO:
# If done, I need to somehow indicate that so that no more actions are taken...
# if (agent.position == 'q_back'):
# print(agent.state.p_pos, self.world.line_of_scrimmage, agent.state.p_pos[1], self.world.line_of_scrimmage - agent.state.p_pos[1])
if is_done != NOT_DONE:
agent.is_done = True
# print("agent position", agent.position)
# print(agent.state.p_pos)
additional_reward = self.get_final_reward(is_done, agent, made_throw)
# print("additional_reward", additional_reward)
reward = reward + additional_reward
reward_n.append(reward)
info_n['n'].append(self._get_info(agent))
# all agents get total reward in cooperative case
reward = np.sum(reward_n)
if self.shared_reward:
reward_n = [reward] * self.n
return obs_n, reward_n, done_n, info_n
def get_final_reward(self, is_done, agent, made_throw):
# print(is_done, Q_BACK_NOT_IN_BOUNDS)
if (is_done == Q_BACK_FIRST_DOWN_LINE):
if (agent.position == O_LINE) or (agent.position == Q_BACK):
return 120
else:
return -120
elif (is_done == AGENT_OUT_OF_BOUNDS):
return -80
elif (is_done == D_LINE_REACHED_Q_BACK):
if (agent.position == O_LINE) or (agent.position == Q_BACK):
return -120 # TODO
else:
return 120
elif is_done == Q_BACK_NOT_IN_BOUNDS:
if (agent.position == O_LINE) or (agent.position == Q_BACK):
return -80
else:
return 80
        elif is_done == Q_BACK_THREW_BALL:
            if (agent.position == O_LINE) or (agent.position == Q_BACK):
                return 80 if made_throw else -80
            else:
                return -80 if made_throw else 80
        # defensive default: unknown done codes yield no bonus instead of None
        return 0
def reset(self):
# reset world
self.reset_callback(self.world)
# reset renderer
self._reset_render()
# record observations for each agent
obs_n = []
self.agents = self.world.policy_agents
for agent in self.get_agents():
obs = None
if agent:
obs = self._get_obs(agent)
obs_n.append(obs)
return obs_n
# get info used for benchmarking
def _get_info(self, agent):
if self.info_callback is None:
return {}
return self.info_callback(agent, self.world)
# get observation for a particular agent
def _get_obs(self, agent):
if self.observation_callback is None:
return np.zeros(0)
return self.observation_callback(agent, self.world)
# get dones for a particular agent
# unused right now -- agents are allowed to go beyond the viewing screen
# def _get_done(self, agent):
# if self.done_callback is None:
# return False
# return self.done_callback(agent, self.world)
def done_callback(self, agent, world):
# Use world.timeout, and see what's wrong
if world.time > world.timeout:
return Q_BACK_THREW_BALL
# Agent is done if it is out of bounds
if (not agent.in_bounds):
return AGENT_OUT_OF_BOUNDS
q_back = list(filter(lambda player: player.position == 'q_back', world.agents))[0]
d_line = list(filter(lambda player: player.position == 'd_line', world.agents))
line_of_scrimmage = world.line_of_scrimmage
q_pos = q_back.state.p_pos
        # Quarterback has crossed the first down line
if (q_pos[1] > (line_of_scrimmage + world.first_down_line)):
return Q_BACK_FIRST_DOWN_LINE
if (not q_back.in_bounds):
return Q_BACK_NOT_IN_BOUNDS
for d_player in d_line:
# Check if d_player is close to q_back (ie touching, look into how to find that out)
# If so return True
d_pos = d_player.state.p_pos
dist_min = q_back.size + d_player.size
# If the quarterback and defensive player are touching, set agents to done
if (((d_pos[0] - q_pos[0])**2 + (d_pos[1] - q_pos[1])**2)**0.5 < dist_min):
return D_LINE_REACHED_Q_BACK
return NOT_DONE
# get reward for a particular agent
def _get_reward(self, agent):
if self.reward_callback is None:
return 0.0
return self.reward_callback(agent, self.world)
# set env action for a particular agent
def _set_action(self, action, agent, action_space, time=None):
agent.action.u = np.zeros(self.world.dim_p)
agent.action.c = np.zeros(self.world.dim_c)
# process action
if isinstance(action_space, MultiDiscrete):
act = []
size = action_space.high - action_space.low + 1
index = 0
for s in size:
act.append(action[index:(index+s)])
index += s
action = act
else:
action = [action]
if agent.movable:
# physical action
if self.discrete_action_input:
agent.action.u = np.zeros(self.world.dim_p)
# process discrete action
if action[0] == 1: agent.action.u[0] = -1.0
if action[0] == 2: agent.action.u[0] = +1.0
if action[0] == 3: agent.action.u[1] = -1.0
if action[0] == 4: agent.action.u[1] = +1.0
else:
if self.force_discrete_action:
d = np.argmax(action[0])
action[0][:] = 0.0
action[0][d] = 1.0
if self.discrete_action_space:
agent.action.u[0] += action[0][1] - action[0][2]
agent.action.u[1] += action[0][3] - action[0][4]
else:
agent.action.u = action[0]
sensitivity = 5.0
if agent.accel is not None:
sensitivity = agent.accel
agent.action.u *= sensitivity
action = action[1:]
if not agent.silent:
# communication action
if self.discrete_action_input:
agent.action.c = np.zeros(self.world.dim_c)
agent.action.c[action[0]] = 1.0
else:
agent.action.c = action[0]
action = action[1:]
# make sure we used all elements of action
assert len(action) == 0
# reset rendering assets
def _reset_render(self):
self.render_geoms = None
self.render_geoms_xform = None
# render environment
def render_whole_field(self, mode='human'):
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent: continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
# for i in range(len(self.viewers)):
# # create viewers (if necessary)
# if self.viewers[i] is None:
# # import rendering only if we need it (and don't import for headless machines)
# #from gym.envs.classic_control import rendering
# from multiagent import rendering
# self.viewers[i] = rendering.Viewer(700,700)
        # this whole-field path assumes shared_viewer=True (a single self.viewer)
        if self.viewer is None:
from multiagent import rendering
self.viewer = rendering.Viewer(53*7, 120*7)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from multiagent import rendering
self.render_geoms = []
self.render_geoms_xform = []
for entity in self.world.entities:
size = 2*entity.size
# if entity.position == 'q_back':
# size = 2*size
geom = rendering.make_circle(size)
xform = rendering.Transform()
if 'q_back' == entity.position:
geom.set_color(0, 1, 0, alpha=0.5)
else:
geom.set_color(*entity.color)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
self.viewer.geoms = []
for geom in self.render_geoms:
self.viewer.add_geom(geom)
line_of_scrimmage = self.world.line_of_scrimmage
first_down_line = line_of_scrimmage + self.world.first_down_line
self.viewer.draw_line((0, line_of_scrimmage), (53, line_of_scrimmage))
self.viewer.draw_line((0, first_down_line), (53, first_down_line))
results = []
from multiagent import rendering
        # whole-field rendering keeps a fixed camera over the 53x120 field
        self.viewer.set_bounds(0, 53, 0, 120)
# update geometry positions
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
# render to display or array
results.append(self.viewer.render(return_rgb_array = mode=='rgb_array'))
return results
# render environment
def render(self, mode='human'):
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent: continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
        # per-agent render path; requires shared_viewer=False so self.viewers exists
        for i in range(len(self.viewers)):
# create viewers (if necessary)
if self.viewers[i] is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from multiagent import rendering
self.viewers[i] = rendering.Viewer(700,700)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from multiagent import rendering
self.render_geoms = []
self.render_geoms_xform = []
for entity in self.world.entities:
geom = rendering.make_circle(entity.size)
xform = rendering.Transform()
if 'q_back' == entity.position:
geom.set_color(0, 1, 0, alpha=0.5)
else:
geom.set_color(*entity.color)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
# add geoms to viewer
for viewer in self.viewers:
viewer.geoms = []
for geom in self.render_geoms:
viewer.add_geom(geom)
results = []
for i in range(len(self.viewers)):
from multiagent import rendering
# update bounds to center around agent
cam_range = 1
if self.shared_viewer:
pos = np.zeros(self.world.dim_p)
else:
pos = self.agents[i].state.p_pos
self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
# update geometry positions
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
# render to display or array
results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
return results
# create receptor field locations in local coordinate frame
def _make_receptor_locations(self, agent):
receptor_type = 'polar'
range_min = 0.05 * 2.0
range_max = 1.00
dx = []
# circular receptive field
if receptor_type == 'polar':
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
for distance in np.linspace(range_min, range_max, 3):
dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
# add origin
dx.append(np.array([0.0, 0.0]))
# grid receptive field
if receptor_type == 'grid':
for x in np.linspace(-range_max, +range_max, 5):
for y in np.linspace(-range_max, +range_max, 5):
dx.append(np.array([x,y]))
return dx
# vectorized wrapper for a batch of multi-agent environments
# assumes all environments have the same observation and action space
class BatchMultiAgentEnv(gym.Env):
metadata = {
'runtime.vectorized': True,
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, env_batch):
self.env_batch = env_batch
@property
def n(self):
return np.sum([env.n for env in self.env_batch])
@property
def action_space(self):
return self.env_batch[0].action_space
@property
def observation_space(self):
return self.env_batch[0].observation_space
def step(self, action_n, time):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
i = 0
for env in self.env_batch:
            obs, reward, done, _ = env.step(action_n[i:(i+env.n)])
i += env.n
obs_n += obs
# reward = [r / len(self.env_batch) for r in reward]
reward_n += reward
done_n += done
return obs_n, reward_n, done_n, info_n
def reset(self):
obs_n = []
for env in self.env_batch:
obs_n += env.reset()
return obs_n
# render environment
def render(self, mode='human', close=True):
results_n = []
for env in self.env_batch:
            results_n += env.render(mode)
return results_n
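# A minimal sketch (not part of the original module) of driving MultiAgentEnv;
# `scenario` with make_world/reset_world/reward/observation is a hypothetical
# scenario object, and each action space is assumed to implement sample().
def _run_episode(scenario, steps=100):
    world = scenario.make_world()
    env = MultiAgentEnv(world,
                        reset_callback=scenario.reset_world,
                        reward_callback=scenario.reward,
                        observation_callback=scenario.observation)
    env.reset()
    for _ in range(steps):
        act_n = [space.sample() for space in env.action_space]
        obs_n, rew_n, done_n, info_n = env.step(act_n)
        if any(d != NOT_DONE for d in done_n):
            break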
| 38.713163
| 147
| 0.574372
|
7949db00de54c07f91399857d4370b1f9dde9ead
| 8,430
|
py
|
Python
|
tfx/orchestration/experimental/interactive/interactive_context_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/experimental/interactive/interactive_context_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/experimental/interactive/interactive_context_test.py
|
rtg0795/tfx
|
63c31b719896eef645df3850d0e6b946e44cd059
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.interactive.interactive_context."""
import builtins
import os
import shutil
import tempfile
import textwrap
from typing import Any, Dict, List
from unittest import mock
import jinja2
import nbformat
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.experimental.interactive import interactive_context
from tfx.orchestration.experimental.interactive import standard_visualizations
from tfx.orchestration.launcher.in_process_component_launcher import InProcessComponentLauncher
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.utils import telemetry_utils
class InteractiveContextTest(tf.test.TestCase):
def setUp(self):
super().setUp()
builtins.__dict__['__IPYTHON__'] = True
self._tmpdir = None
def tearDown(self):
if self._tmpdir:
shutil.rmtree(self._tmpdir, ignore_errors=True)
super().tearDown()
def _setupTestNotebook(self, notebook_name='test_notebook.ipynb'):
notebook = nbformat.v4.new_notebook(
cells=[
nbformat.v4.new_markdown_cell(source='A markdown cell.'),
nbformat.v4.new_code_cell(source='foo = 1'),
nbformat.v4.new_markdown_cell(source='Another markdown cell.'),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
%%skip_for_export
!pip install something
!ls
x = 1
y = 2
print('this cell should not be exported')''')),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
def bar():
%some_line_magic print('this line should not be exported')
a = "hello"
b = "world"
return a + b''')),
nbformat.v4.new_code_cell(source=textwrap.dedent('''\
def baz():
c = "nyan"
d = "cat"
return c + d''')),
]
)
self._tmpdir = tempfile.mkdtemp()
self._exportdir = tempfile.mkdtemp()
self._notebook_fp = os.path.join(self._tmpdir, notebook_name)
nbformat.write(notebook, self._notebook_fp)
def testBasicRun(self):
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
CALLED = False
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
_FakeExecutor.CALLED = True
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self, spec: types.ComponentSpec):
super().__init__(spec=spec)
c = interactive_context.InteractiveContext()
component = _FakeComponent(_FakeComponentSpec())
c.run(component)
self.assertTrue(_FakeExecutor.CALLED)
def testRunMethodRequiresIPython(self):
del builtins.__dict__['__IPYTHON__']
c = interactive_context.InteractiveContext()
self.assertIsNone(c.run(None))
def testUnresolvedChannel(self):
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {
'input':
component_spec.ChannelParameter(type=standard_artifacts.Examples)
}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
CALLED = False
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
_FakeExecutor.CALLED = True
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self, spec: types.ComponentSpec):
super().__init__(spec=spec)
c = interactive_context.InteractiveContext()
foo = types.Channel(type=standard_artifacts.Examples).set_artifacts(
[standard_artifacts.Examples()])
component = _FakeComponent(_FakeComponentSpec(input=foo))
with self.assertRaisesRegex(ValueError, 'Unresolved input channel'):
c.run(component)
@mock.patch.object(jinja2.Environment, 'get_template',
return_value=jinja2.Template('{{ notebook_content }}'))
def testExportToPipeline(self, mock_get_template):
self._setupTestNotebook()
c = interactive_context.InteractiveContext()
export_filepath = os.path.join(self._exportdir, 'exported_pipeline.py')
c.export_to_pipeline(notebook_filepath=self._notebook_fp,
export_filepath=export_filepath,
runner_type='beam')
with open(export_filepath, 'r') as exported_pipeline:
code = exported_pipeline.read()
self.assertEqual(code, textwrap.dedent('''\
foo = 1
def bar():
a = "hello"
b = "world"
return a + b
def baz():
c = "nyan"
d = "cat"
return c + d'''))
def testExportToPipelineRaisesErrorInvalidRunnerType(self):
self._setupTestNotebook()
c = interactive_context.InteractiveContext()
export_filepath = os.path.join(self._exportdir, 'exported_pipeline.py')
with self.assertRaisesRegex(ValueError, 'runner_type'):
c.export_to_pipeline(notebook_filepath=self._notebook_fp,
export_filepath=export_filepath,
runner_type='foobar')
@mock.patch('tfx.orchestration.experimental.interactive.'
'standard_visualizations.ExampleAnomaliesVisualization.display')
def testShow(self, *unused_mocks):
context = interactive_context.InteractiveContext()
mock_object = mock.MagicMock()
standard_visualizations.ExampleAnomaliesVisualization.display = mock_object
mock_object.assert_not_called()
artifact = standard_artifacts.ExampleAnomalies()
context.show(
types.Channel(type=standard_artifacts.ExampleAnomalies).set_artifacts(
[artifact]))
mock_object.assert_called_with(artifact)
@mock.patch('tfx.orchestration.launcher.in_process_component_launcher.'
'InProcessComponentLauncher.create')
def testTelemetry(self, mock_launcher_create):
class _FakeLauncher:
def __init__(self):
self.recorded_labels = []
def launch(self):
self.recorded_labels = telemetry_utils.make_beam_labels_args()
return mock.MagicMock()
class _FakeComponentSpec(types.ComponentSpec):
PARAMETERS = {}
INPUTS = {}
OUTPUTS = {}
class _FakeExecutor(base_executor.BaseExecutor):
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
pass
class _FakeComponent(base_component.BaseComponent):
SPEC_CLASS = _FakeComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(_FakeExecutor)
def __init__(self):
super().__init__(spec=_FakeComponentSpec())
# Set up fake on launcher.
fake_launcher = _FakeLauncher()
mock_launcher_create.side_effect = [fake_launcher]
InProcessComponentLauncher.create = mock_launcher_create
context = interactive_context.InteractiveContext()
context.run(_FakeComponent())
self.assertIn('--labels tfx_runner=interactivecontext',
' '.join(fake_launcher.recorded_labels))
if __name__ == '__main__':
tf.test.main()
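# A minimal sketch (not part of the original tests) of the export flow the
# tests above exercise; paths are hypothetical. Markdown cells, line magics
# and %%skip_for_export cells are stripped from the exported pipeline file.
def _export_sketch(notebook_path, out_path):
    context = interactive_context.InteractiveContext()
    context.export_to_pipeline(notebook_filepath=notebook_path,
                               export_filepath=out_path,
                               runner_type='beam')  # invalid types raise ValueError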
| 34.834711
| 95
| 0.681851
|
7949dcc654078584a7db402a3f685b97bc1a40e0
| 7,150
|
py
|
Python
|
experiments/murtaza/off_policy_ssl/gym/sac.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/off_policy_ssl/gym/sac.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/off_policy_ssl/gym/sac.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
"""
Test twin sac against various environments.
"""
import gym
from railrl.data_management.env_replay_buffer import EnvReplayBuffer
from railrl.launchers.launcher_util import run_experiment
import railrl.torch.pytorch_util as ptu
from railrl.samplers.data_collector import MdpPathCollector
from railrl.samplers.data_collector.step_collector import MdpStepCollector
from railrl.torch.networks import FlattenMlp
from railrl.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from railrl.torch.sac.sac import SACTrainer
import railrl.misc.hyperparameter as hyp
from railrl.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
ENV_PARAMS = {
'half-cheetah': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 10000,
'env_id':'HalfCheetah-v2'
},
'hopper': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 10000,
'env_id':'Hopper-v2'
},
'humanoid': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 10000,
'env_id':'Humanoid-v2'
},
'inv-double-pendulum': { # 2 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 100,
'env_id':'InvertedDoublePendulum-v2'
},
'pendulum': { # 2 DoF
'num_expl_steps_per_train_loop': 200,
'max_path_length': 200,
'num_epochs': 200,
'min_num_steps_before_training': 2000,
'target_update_period': 200,
'env_id':'InvertedPendulum-v2'
},
'ant': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 10000,
'env_id':'Ant-v2'
},
'walker': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 10000,
'env_id':'Walker2d-v2'
},
'swimmer': { # 6 DoF
'num_expl_steps_per_train_loop': 1000,
'max_path_length': 1000,
'num_epochs': 2000,
'env_id':'Swimmer-v2'
},
}
def experiment(variant):
env_params = ENV_PARAMS[variant['env']]
variant.update(env_params)
if 'env_id' in env_params:
expl_env = gym.make(env_params['env_id'])
eval_env = gym.make(env_params['env_id'])
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
M = variant['layer_size']
qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
hidden_sizes=[M, M],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
replay_buffer = EnvReplayBuffer(
variant['replay_buffer_size'],
expl_env,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
if variant['collection_mode'] == 'online':
expl_path_collector = MdpStepCollector(
expl_env,
policy,
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
max_path_length=variant['max_path_length'],
batch_size=variant['batch_size'],
num_epochs=variant['num_epochs'],
num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
num_trains_per_train_loop=variant['num_trains_per_train_loop'],
min_num_steps_before_training=variant['min_num_steps_before_training'],
)
else:
expl_path_collector = MdpPathCollector(
expl_env,
policy,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
max_path_length=variant['max_path_length'],
batch_size=variant['batch_size'],
num_epochs=variant['num_epochs'],
num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
num_trains_per_train_loop=variant['num_trains_per_train_loop'],
min_num_steps_before_training=variant['min_num_steps_before_training'],
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
num_epochs=3000,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
replay_buffer_size=int(1E6),
layer_size=256,
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'dev'
n_seeds = 2
mode = 'ec2'
exp_prefix = 'sac_mujoco_envs_unnormalized_run_longer'
search_space = {
'env': [
# 'half-cheetah',
# 'inv-double-pendulum',
# 'pendulum',
# 'ant',
'walker',
# 'hopper',
# 'humanoid',
# 'swimmer',
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
num_exps_per_instance=3,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
exp_id=exp_id,
time_in_mins=int(2.8 * 24 * 60), # if you use mode=sss
)
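# A minimal sketch (not part of the original script) for a quick local smoke
# test instead of an EC2 sweep: pick an environment and shrink the run, e.g.
#
#   variant['env'] = 'pendulum'
#   variant['num_epochs'] = 1
#   variant['num_eval_steps_per_epoch'] = 200
#   experiment(variant)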
| 30.952381
| 83
| 0.614965
|
7949dceb549381f448a11eab464a933b8176a3b0
| 987
|
py
|
Python
|
src/dashboards/context_processors.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | 2
|
2021-09-10T19:40:30.000Z
|
2022-01-31T07:15:51.000Z
|
src/dashboards/context_processors.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | null | null | null |
src/dashboards/context_processors.py
|
PhaseDMS/phase
|
4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e
|
[
"MIT"
] | 1
|
2021-09-10T19:40:42.000Z
|
2021-09-10T19:40:42.000Z
|
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from dashboards.models import Dashboard
def dashboards(request):
"""Fetch and cache data required to render the dashboards menu."""
user = getattr(request, "user")
context = {}
if not isinstance(user, AnonymousUser):
context.update({"user_dashboards": get_user_dashboards(user)})
return context
def get_user_dashboards(user):
cache_key = "user_{}_dashboards".format(user.id)
dashboards = cache.get(cache_key)
if dashboards is None:
qs = Dashboard.objects.filter(authorized_users=user).select_related(
"category__organisation"
)
dashboards = [
(
dashboard.category.organisation.name,
dashboard.title,
dashboard.get_absolute_url(),
)
for dashboard in qs
]
cache.set(cache_key, dashboards, 500)
return dashboards
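# A minimal sketch (not part of the original module) of wiring this processor
# into Django settings so `user_dashboards` is available to every template:
#
# TEMPLATES = [{
#     ...
#     'OPTIONS': {
#         'context_processors': [
#             'django.contrib.auth.context_processors.auth',
#             'dashboards.context_processors.dashboards',
#         ],
#     },
# }]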
| 27.416667
| 76
| 0.646403
|
7949dd0d3e2864e058e3c1c9efd9731adcf25e32
| 326
|
py
|
Python
|
setup.py
|
Nimphal/world-news
|
a424603c55245e0922d1b887f4cd7dd0711e6305
|
[
"MIT"
] | null | null | null |
setup.py
|
Nimphal/world-news
|
a424603c55245e0922d1b887f4cd7dd0711e6305
|
[
"MIT"
] | null | null | null |
setup.py
|
Nimphal/world-news
|
a424603c55245e0922d1b887f4cd7dd0711e6305
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name = 'world-news',
version = '1.0.0',
packages = find_packages(),
install_requires = [
"Flask"
],
author = 'Nevelina Aleksandrova',
author_email = 'nevelina.aleksandrova@gmail.com',
description = 'Map-based world news aggregator'
)
| 23.285714
| 53
| 0.650307
|
7949dd2e40fb0cd1649823ac0e3c4c83fbeb354c
| 2,554
|
py
|
Python
|
test_toolbox.py
|
atravitz/toolbox
|
3c98d1a107110655529caa40f1c768466ce52421
|
[
"MIT"
] | null | null | null |
test_toolbox.py
|
atravitz/toolbox
|
3c98d1a107110655529caa40f1c768466ce52421
|
[
"MIT"
] | null | null | null |
test_toolbox.py
|
atravitz/toolbox
|
3c98d1a107110655529caa40f1c768466ce52421
|
[
"MIT"
] | null | null | null |
"""tests for toolbox.py module."""
import numpy as np
from toolbox import *
import unittest
import numpy.testing as nptest
import os
import matplotlib.pyplot as plt
class PotentialTests(unittest.TestCase):
def setUp(self):
self.epsilon = 1.0
self.sigma = 1.0
self.R = 0
self.r_values = np.linspace(1+self.R, 3.5*self.sigma+self.R, 100)
self.V_true, self.F_true = lj(r=self.r_values,
epsilon=self.epsilon,
sigma=self.sigma,
R=self.R)
def test_lj(self):
pass
def test_force_to_potential(self):
"""Uses a lj potential to test force_to_potential()."""
# TODO: Come up with reasonable and universal criteria
print(self.F_true[-1])
V_test = force_to_potential(self.r_values, self.F_true)
plt.figure(dpi=120)
plt.plot(self.r_values, self.F_true, 'k--', label='F_true')
plt.plot(self.r_values, self.V_true, '.', label='V_true')
plt.plot(self.r_values[1:], V_test,'.', label='V_test')
plt.ylim(-10,10)
plt.legend()
plt.savefig('test_f2p.png')
        abs_error = np.abs(V_test - self.V_true[1:])  # absolute, not percent, error
        self.assertLess(np.max(abs_error), 2)
def test_potential_to_force(self):
"""Uses a lj potential to test force_to_potential()."""
# TODO: Come up with reasonable and universal criteria
F_test = potential_to_force(self.r_values, self.V_true)
plt.figure(dpi=120)
plt.plot(self.r_values, self.V_true, 'k--', label='V_true')
plt.plot(self.r_values, self.F_true, '.', label='F_true')
plt.plot(self.r_values, F_test,'.', label='F_test')
plt.savefig('test_p2f.png')
percent_error = np.abs((F_test-self.F_true))/np.abs(self.F_true)
self.assertLess(np.max(percent_error), 1)
class CorrTests(unittest.TestCase):
def test_scalar_sin(self):
"""test autocorr with a sin function """
x_range = np.linspace(0,3,100)
sin_x = np.sin(x_range*2*np.pi)
autocorr(sin_x)
def test_scalar_random(self):
random = np.random.standard_normal(size=50)
ac = autocorr(random)
self.assertEqual(ac[0], 1.0)
    def test_vec_unit(self):
        unit_vecs = np.array([[0, 0, 1], [0, 0, 1]])
        ac = vector_autocorr(unit_vecs)  # identical vectors: full correlation
        self.assertEqual(ac[0], 1.0)
def test_vec_random(self):
random = np.random.standard_normal(size=(50,3))
ac = vector_autocorr(random)
self.assertEqual(ac[0], 1.0)
if __name__ == '__main__':
unittest.main()
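# A reference sketch (not part of the original tests) of the shifted
# Lennard-Jones form the fixtures assume; the actual toolbox.lj may differ:
#   V(r) = 4*eps*[(sigma/(r-R))**12 - (sigma/(r-R))**6],  F(r) = -dV/dr
def _lj_reference(r, epsilon=1.0, sigma=1.0, R=0.0):
    x = sigma / (r - R)
    V = 4 * epsilon * (x**12 - x**6)
    F = 24 * epsilon / (r - R) * (2 * x**12 - x**6)
    return V, F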
| 34.053333
| 73
| 0.612373
|
7949ddd4fff0edadc9cca18b4de1cea87ad8f725
| 1,436
|
py
|
Python
|
example/test.py
|
movingpictures83/OTUSummary
|
2cc84a2beca4b3ed217a6b6d3d0498fa1c8f24e4
|
[
"MIT"
] | null | null | null |
example/test.py
|
movingpictures83/OTUSummary
|
2cc84a2beca4b3ed217a6b6d3d0498fa1c8f24e4
|
[
"MIT"
] | null | null | null |
example/test.py
|
movingpictures83/OTUSummary
|
2cc84a2beca4b3ed217a6b6d3d0498fa1c8f24e4
|
[
"MIT"
] | null | null | null |
def isnumber(mystring):
digitcount = 0
for i in range(0, len(mystring)):
c = mystring[i]
if ((not c.isdigit()) and (c != '.') and (not (i == 0 and c == '-'))):
return False
elif (c.isdigit()):
digitcount += 1
    if (digitcount != 0):
        return True
    return False
def test(file1, file2):
firstfile = open(file1, 'r')
secondfile = open(file2, 'r')
lines1 = []
lines2 = []
for line in firstfile:
lines1.append(line.strip())
for line in secondfile:
lines2.append(line.strip())
if (len(lines1) != len(lines2)):
return False
for i in range(0, len(lines1)):
contents1 = lines1[i].split('\t')
contents2 = lines2[i].split('\t')
for j in range(0, len(contents1)):
if (isnumber(contents1[j]) and not isnumber(contents2[j])):
return False
elif (not isnumber(contents1[j]) and isnumber(contents2[j])):
return False
elif (isnumber(contents1[j])):
f1 = float(contents1[j])
f2 = float(contents2[j])
if (f1 == 0):
if (f2 != 0):
return False
                elif (abs((f2 - f1) / f1) > 0.01):  # symmetric 1% relative tolerance
                    return False
else:
if (contents1[j] != contents2[j]):
print(contents1[j])
print(contents2[j])
return False
return True
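# A minimal usage sketch (not part of the original script); file names are
# hypothetical. Numeric fields match within a 1% relative tolerance,
# everything else must match exactly:
#
#   if test('expected.tsv', 'actual.tsv'):
#       print('outputs match')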
| 30.553191
| 78
| 0.491643
|
7949dfc2b5c18d7de0268d2f3a2ed448d884b61c
| 78
|
py
|
Python
|
Python/01.py
|
Jose0Cicero1Ribeiro0Junior/Python_Praticas
|
ad7ae517735e51f25c40d9c38d3efc214d749dfb
|
[
"MIT"
] | null | null | null |
Python/01.py
|
Jose0Cicero1Ribeiro0Junior/Python_Praticas
|
ad7ae517735e51f25c40d9c38d3efc214d749dfb
|
[
"MIT"
] | null | null | null |
Python/01.py
|
Jose0Cicero1Ribeiro0Junior/Python_Praticas
|
ad7ae517735e51f25c40d9c38d3efc214d749dfb
|
[
"MIT"
] | null | null | null |
# Write a program that reads an integer and prints it
n = int(input())
print(n)
| 19.5
| 57
| 0.717949
|
7949dfdfd88eb377da76287b249cdae853691acb
| 2,127
|
py
|
Python
|
integrationtest/vm/virtualrouter/volume/test_crt_data_volume_tmpt_from_d.py
|
sherry546/zstack-woodpecker
|
54a37459f2d72ce6820974feaa6eb55772c3d2ce
|
[
"Apache-2.0"
] | 2
|
2016-03-23T08:45:44.000Z
|
2017-06-26T02:40:46.000Z
|
integrationtest/vm/virtualrouter/volume/test_crt_data_volume_tmpt_from_d.py
|
KevinDavidMitnick/zstack-woodpecker
|
96257faaf3c362168d008bdb47002025ad669b24
|
[
"Apache-2.0"
] | null | null | null |
integrationtest/vm/virtualrouter/volume/test_crt_data_volume_tmpt_from_d.py
|
KevinDavidMitnick/zstack-woodpecker
|
96257faaf3c362168d008bdb47002025ad669b24
|
[
"Apache-2.0"
] | 2
|
2020-03-12T03:11:28.000Z
|
2021-07-26T01:57:58.000Z
|
'''
Create Volume Template from Data Volume
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm and check')
vm = test_stub.create_vlan_vm()
test_obj_dict.add_vm(vm)
test_util.test_dsc('Create volume and check')
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
volume = test_stub.create_volume(volume_creation_option)
test_obj_dict.add_volume(volume)
test_util.test_dsc('Attach volume and check')
#vm.check()
volume.attach(vm)
test_util.test_dsc('Detach volume and check')
volume.detach()
test_util.test_dsc('Create volume template and check')
bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm())
bs_uuid_list = []
for bs in bs_list:
bs_uuid_list.append(bs.uuid)
vol_tmpt = volume.create_template(bs_uuid_list, 'new_data_template')
test_obj_dict.add_image(vol_tmpt)
vol_tmpt.check()
volume.check()
volume.delete()
test_obj_dict.rm_volume(volume)
test_util.test_dsc('Create volume from template and check')
ps_uuid = vm.get_vm().allVolumes[0].primaryStorageUuid
volume2 = vol_tmpt.create_data_volume(ps_uuid, 'new_volume_from_template')
test_obj_dict.add_volume(volume2)
vol_tmpt.delete()
test_obj_dict.rm_image(vol_tmpt)
volume2.check()
volume2.attach(vm)
vm.check()
volume2.check()
volume2.detach()
volume2.delete()
test_obj_dict.rm_volume(volume2)
vm.destroy()
test_util.test_pass('Create Data Volume Template from Data Volume Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| 31.279412
| 100
| 0.728256
|
7949e01866ac1973fb577ee31a5d5f5847d8ca52
| 1,117
|
py
|
Python
|
steamapi/getleaguelisting.py
|
NNTin/Dota-2-Meta-Analyzer
|
ce19ab414cbe0a8ff720539fd766db69604e3488
|
[
"MIT"
] | 22
|
2016-03-27T21:28:18.000Z
|
2021-09-13T14:48:43.000Z
|
steamapi/getleaguelisting.py
|
NNTin/Dota-2-Meta-Analyzer
|
ce19ab414cbe0a8ff720539fd766db69604e3488
|
[
"MIT"
] | 24
|
2016-03-28T09:39:41.000Z
|
2018-11-06T20:42:30.000Z
|
steamapi/getleaguelisting.py
|
NNTin/Reply-Dota-2-Reddit
|
e47d20f3d5bfacc9f39625324e57781e8d3f029a
|
[
"MIT"
] | null | null | null |
import requests
import time
from steamapi.steamapikey import SteamAPIKey
#message = True
leagueListingDictionary = {}
def requestGetLeagueListing():
keyValues = ['name', 'description', 'tournament_url', 'itemdef']
response = {}
attempt = 0
while response == {}:
URL = "https://api.steampowered.com/IDOTA2Match_570/GetLeagueListing/v1?key=" + SteamAPIKey
response = requests.get(URL)
response.connection.close()
response = response.json()
if response == {}:
attempt += 1
if (attempt == 5):
                print('Tried %s times, cancelling API request. (Skipped counter increases)' % attempt)
break
print('Failed API request, retrying in %s seconds' %(attempt * 2))
time.sleep(attempt * 2)
continue
else:
break
global leagueListingDictionary
for league in response['result']['leagues']:
leagueListingDictionary[league['leagueid']] = {}
for key in keyValues:
leagueListingDictionary[league['leagueid']][key] = league.get(key, 0)
| 26.595238
| 99
| 0.606088
|
7949e0ae3310068542e313570a433cc3f6fee63e
| 5,339
|
py
|
Python
|
kubernetes/client/models/v1beta1_network_policy_egress_rule.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 11
|
2020-10-13T05:27:59.000Z
|
2021-09-23T02:56:32.000Z
|
kubernetes/client/models/v1beta1_network_policy_egress_rule.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 48
|
2020-10-15T09:53:36.000Z
|
2021-07-05T15:33:24.000Z
|
kubernetes/client/models/v1beta1_network_policy_egress_rule.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 4
|
2020-12-04T08:51:35.000Z
|
2022-03-27T09:42:20.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1NetworkPolicyEgressRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ports': 'list[V1beta1NetworkPolicyPort]',
'to': 'list[V1beta1NetworkPolicyPeer]'
}
attribute_map = {
'ports': 'ports',
'to': 'to'
}
def __init__(self, ports=None, to=None):
"""
V1beta1NetworkPolicyEgressRule - a model defined in Swagger
"""
self._ports = None
self._to = None
self.discriminator = None
if ports is not None:
self.ports = ports
if to is not None:
self.to = to
@property
def ports(self):
"""
Gets the ports of this V1beta1NetworkPolicyEgressRule.
List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:return: The ports of this V1beta1NetworkPolicyEgressRule.
:rtype: list[V1beta1NetworkPolicyPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1beta1NetworkPolicyEgressRule.
List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:param ports: The ports of this V1beta1NetworkPolicyEgressRule.
:type: list[V1beta1NetworkPolicyPort]
"""
self._ports = ports
@property
def to(self):
"""
Gets the to of this V1beta1NetworkPolicyEgressRule.
List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.
:return: The to of this V1beta1NetworkPolicyEgressRule.
:rtype: list[V1beta1NetworkPolicyPeer]
"""
return self._to
@to.setter
def to(self, to):
"""
Sets the to of this V1beta1NetworkPolicyEgressRule.
List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.
:param to: The to of this V1beta1NetworkPolicyEgressRule.
:type: list[V1beta1NetworkPolicyPeer]
"""
self._to = to
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1NetworkPolicyEgressRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
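# A minimal sketch (not part of the generated module): both fields are
# optional, so an empty rule is valid and, per the docstrings above, matches
# all ports and all destinations.
if __name__ == '__main__':
    rule = V1beta1NetworkPolicyEgressRule()
    print(rule.to_dict())                            # {'ports': None, 'to': None}
    print(rule == V1beta1NetworkPolicyEgressRule())  # True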
| 34.445161
| 398
| 0.622588
|
7949e0e3ffeae813308c77f2755779fdaf13cf1c
| 355
|
py
|
Python
|
src/anyconfig/api/datatypes.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 213
|
2015-01-14T22:09:20.000Z
|
2022-02-02T17:23:41.000Z
|
src/anyconfig/api/datatypes.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 120
|
2015-03-13T15:47:43.000Z
|
2022-03-31T01:55:34.000Z
|
src/anyconfig/api/datatypes.py
|
Terrance-forks/python-anyconfig
|
21d7c0e30287569b394972557b5a54fab03bcd5c
|
[
"MIT"
] | 34
|
2015-01-12T05:03:30.000Z
|
2021-09-09T14:40:56.000Z
|
#
# Copyright (C) 2012 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=unused-import,import-error,invalid-name
r"""Public APIs to dump configurations data.
"""
import typing
from ..backend import base
from ..common import InDataT
MaybeDataT = typing.Optional[InDataT]
ParserT = base.Parser
# vim:sw=4:ts=4:et:
| 19.722222
| 58
| 0.746479
|
7949e11430f9258dd2a181a66625fa88cfe610ac
| 1,892
|
py
|
Python
|
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
url_tldr.py
|
AmanKishore/GPT3-Experiments
|
ea661b66d6cab843e406c65f7a1db1de41c62a57
|
[
"MIT"
] | null | null | null |
import os
import openai
import re
from collections import OrderedDict
import requests, PyPDF2
from io import BytesIO
from WebScraper import scraper
openai.api_key = os.environ["OPENAI_API_KEY"]
def getTLDRfromURL():
# creating a pdf file object
url = input("Enter the pdf url: ")
response = requests.get(url)
my_raw_data = response.content
final_text = ""
with BytesIO(my_raw_data) as data:
try:
read_pdf = PyPDF2.PdfFileReader(data)
# Iterate through pages (max of 1 to save money)
for page in range(min(read_pdf.getNumPages(), 1)):
ai_text = read_pdf.getPage(page).extractText()
response = openai.Completion.create(
engine="davinci",
prompt=ai_text + "\n\ntl;dr:",
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
final_text += response["choices"][0]["text"]
except:
data = scraper(url)
response = openai.Completion.create(
engine="davinci",
prompt=data[500:] + "\n\ntl;dr:",
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
final_text = response["choices"][0]["text"]
"\n".join(list(OrderedDict.fromkeys(final_text.split("\n"))))
final_text = final_text.replace('Click to expand... ', '')
final_text = final_text.replace('\n\n', '\n')
    # trim leading/trailing whitespace per line and around the whole text
    final_text = "\n".join(line.strip() for line in final_text.splitlines()).strip()
return final_text
if __name__ == "__main__":
response = getTLDRfromURL()
print(response)
| 30.031746
| 83
| 0.534355
|
7949e19e91d9d1b850a38856ca4f3f5948f99af4
| 1,624
|
py
|
Python
|
main_api/migrations/0004_auto_20180917_1332.py
|
connorrunyan1/ragnarok
|
4c8e7754a6b3c316da982d142f461775cc25d644
|
[
"MIT"
] | 6
|
2018-08-11T14:52:40.000Z
|
2021-01-06T01:39:15.000Z
|
main_api/migrations/0004_auto_20180917_1332.py
|
connorrunyan1/ragnarok
|
4c8e7754a6b3c316da982d142f461775cc25d644
|
[
"MIT"
] | 10
|
2018-10-25T18:31:36.000Z
|
2022-02-10T07:28:39.000Z
|
main_api/migrations/0004_auto_20180917_1332.py
|
connorrunyan1/ragnarok
|
4c8e7754a6b3c316da982d142f461775cc25d644
|
[
"MIT"
] | 5
|
2018-10-25T19:41:07.000Z
|
2020-07-11T09:26:50.000Z
|
# Generated by Django 2.1 on 2018-09-17 10:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_api', '0003_auto_20180809_1001'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.IntegerField(default=0, verbose_name='Account balance')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL, verbose_name='Master account')),
],
options={
'db_table': 'cp_user_profile',
},
),
migrations.AlterModelOptions(
name='userverification',
options={'verbose_name': 'E-Mail Verification', 'verbose_name_plural': 'E-Mail Verifications'},
),
migrations.AlterField(
model_name='userverification',
name='is_available',
field=models.BooleanField(default=True, verbose_name='Is token available?'),
),
migrations.AlterField(
model_name='userverification',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='verification', to=settings.AUTH_USER_MODEL, verbose_name='Master account'),
),
]
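# --- Illustrative sketch (assumption, not part of the migration): the
# UserProfile created above is reached from a user instance through the
# `related_name='profile'` reverse accessor; `user` is an example argument.
def _example_profile_balance(user):
    return user.profile.balance  # IntegerField, defaults to 0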
| 38.666667
| 173
| 0.632389
|
7949e1a5a81f7da54eb5a73694f6d6c8335363b3
| 544
|
py
|
Python
|
administrator/logger.py
|
flifloo/BotBDE
|
9c508b81b0f5cbde80f29a8d1972d6c901478e41
|
[
"MIT"
] | null | null | null |
administrator/logger.py
|
flifloo/BotBDE
|
9c508b81b0f5cbde80f29a8d1972d6c901478e41
|
[
"MIT"
] | 2
|
2020-04-10T16:43:50.000Z
|
2020-05-28T15:49:46.000Z
|
administrator/logger.py
|
flifloo/BotBDE
|
9c508b81b0f5cbde80f29a8d1972d6c901478e41
|
[
"MIT"
] | null | null | null |
import logging
from logging import handlers
from os import mkdir
from os.path import isdir
if not isdir("../logs"):
mkdir("../logs")
log_format = "{%(levelname)s}[%(asctime)s]: %(name)s | %(message)s"
logging.basicConfig(
format=log_format,
level=logging.INFO
)
logger = logging.getLogger("Administrator")
handler = handlers.TimedRotatingFileHandler("../logs/current.log", when="d", interval=1)
handler.suffix = "%Y-%m-%d"
handler.setFormatter(logging.Formatter(log_format))  # apply the shared format
logger.addHandler(handler)
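# --- Illustrative usage sketch (assumption, not upstream code): modules
# obtain a child logger so records propagate to the handler configured above.
example_logger = logging.getLogger("Administrator.example")
example_logger.info("logger configured")  # written to console and current.log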
| 25.904762
| 88
| 0.731618
|
7949e226b7a80d0d404a2d9be9a4bb57c091f6f4
| 304
|
py
|
Python
|
main.py
|
jaideng1/self-deletion
|
74b7ac42757e3c8313843bbe3629c852eedb74b8
|
[
"MIT"
] | null | null | null |
main.py
|
jaideng1/self-deletion
|
74b7ac42757e3c8313843bbe3629c852eedb74b8
|
[
"MIT"
] | null | null | null |
main.py
|
jaideng1/self-deletion
|
74b7ac42757e3c8313843bbe3629c852eedb74b8
|
[
"MIT"
] | null | null | null |
import os
inp = input("Do you want to delete this program? ")  # Python 3: input() returns str
if inp.lower() in ("yes", "y"):
if os.path.exists("main.py"):
print("cya <o/")
os.remove("main.py")
else:
print("doesn't exist.")
else:
print("oh. ok. guess i live another day.")
| 21.714286
| 60
| 0.5625
|
7949e249e2d536248b4cd881d61f4c4f6ef7b04d
| 40,811
|
py
|
Python
|
05-data_acquisition/telegram_bot/echo_bot/myvenv/lib/python3.8/site-packages/telegram/user.py
|
sachinpr0001/data_science
|
d028233ff7bbcbbb6b26f01806d1c5ccf788df9a
|
[
"MIT"
] | null | null | null |
05-data_acquisition/telegram_bot/echo_bot/myvenv/lib/python3.8/site-packages/telegram/user.py
|
sachinpr0001/data_science
|
d028233ff7bbcbbb6b26f01806d1c5ccf788df9a
|
[
"MIT"
] | null | null | null |
05-data_acquisition/telegram_bot/echo_bot/myvenv/lib/python3.8/site-packages/telegram/user.py
|
sachinpr0001/data_science
|
d028233ff7bbcbbb6b26f01806d1c5ccf788df9a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=W0622
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram User."""
from datetime import datetime
from typing import TYPE_CHECKING, Any, List, Optional, Union, Tuple
from telegram import TelegramObject, constants
from telegram.utils.helpers import (
mention_html as util_mention_html,
DEFAULT_NONE,
DEFAULT_20,
)
from telegram.utils.helpers import mention_markdown as util_mention_markdown
from telegram.utils.types import JSONDict, FileInput, ODVInput, DVInput
if TYPE_CHECKING:
from telegram import (
Bot,
Message,
UserProfilePhotos,
MessageId,
InputMediaAudio,
InputMediaDocument,
InputMediaPhoto,
InputMediaVideo,
MessageEntity,
ReplyMarkup,
PhotoSize,
Audio,
Contact,
Document,
InlineKeyboardMarkup,
LabeledPrice,
Location,
Animation,
Sticker,
Video,
Venue,
VideoNote,
Voice,
)
class User(TelegramObject):
"""This object represents a Telegram user or bot.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`id` is equal.
Args:
id (:obj:`int`): Unique identifier for this user or bot.
is_bot (:obj:`bool`): :obj:`True`, if this user is a bot.
first_name (:obj:`str`): User's or bots first name.
last_name (:obj:`str`, optional): User's or bots last name.
username (:obj:`str`, optional): User's or bots username.
language_code (:obj:`str`, optional): IETF language tag of the user's language.
can_join_groups (:obj:`str`, optional): :obj:`True`, if the bot can be invited to groups.
Returned only in :attr:`telegram.Bot.get_me` requests.
can_read_all_group_messages (:obj:`str`, optional): :obj:`True`, if privacy mode is
disabled for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.
supports_inline_queries (:obj:`str`, optional): :obj:`True`, if the bot supports inline
queries. Returned only in :attr:`telegram.Bot.get_me` requests.
bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.
Attributes:
id (:obj:`int`): Unique identifier for this user or bot.
is_bot (:obj:`bool`): :obj:`True`, if this user is a bot.
first_name (:obj:`str`): User's or bot's first name.
last_name (:obj:`str`): Optional. User's or bot's last name.
username (:obj:`str`): Optional. User's or bot's username.
language_code (:obj:`str`): Optional. IETF language tag of the user's language.
can_join_groups (:obj:`str`): Optional. :obj:`True`, if the bot can be invited to groups.
Returned only in :attr:`telegram.Bot.get_me` requests.
can_read_all_group_messages (:obj:`str`): Optional. :obj:`True`, if privacy mode is
disabled for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.
supports_inline_queries (:obj:`str`): Optional. :obj:`True`, if the bot supports inline
queries. Returned only in :attr:`telegram.Bot.get_me` requests.
bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.
"""
__slots__ = (
'is_bot',
'can_read_all_group_messages',
'username',
'first_name',
'last_name',
'can_join_groups',
'supports_inline_queries',
'id',
'bot',
'language_code',
'_id_attrs',
)
def __init__(
self,
id: int,
first_name: str,
is_bot: bool,
last_name: str = None,
username: str = None,
language_code: str = None,
can_join_groups: bool = None,
can_read_all_group_messages: bool = None,
supports_inline_queries: bool = None,
bot: 'Bot' = None,
**_kwargs: Any,
):
# Required
self.id = int(id) # pylint: disable=C0103
self.first_name = first_name
self.is_bot = is_bot
# Optionals
self.last_name = last_name
self.username = username
self.language_code = language_code
self.can_join_groups = can_join_groups
self.can_read_all_group_messages = can_read_all_group_messages
self.supports_inline_queries = supports_inline_queries
self.bot = bot
self._id_attrs = (self.id,)
@property
def name(self) -> str:
""":obj:`str`: Convenience property. If available, returns the user's :attr:`username`
prefixed with "@". If :attr:`username` is not available, returns :attr:`full_name`.
"""
if self.username:
return f'@{self.username}'
return self.full_name
@property
def full_name(self) -> str:
""":obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if
available) :attr:`last_name`.
"""
if self.last_name:
return f'{self.first_name} {self.last_name}'
return self.first_name
@property
def link(self) -> Optional[str]:
""":obj:`str`: Convenience property. If :attr:`username` is available, returns a t.me link
of the user.
"""
if self.username:
return f"https://t.me/{self.username}"
return None
def get_profile_photos(
self,
offset: int = None,
limit: int = 100,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> Optional['UserProfilePhotos']:
"""
Shortcut for::
bot.get_user_profile_photos(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see
:meth:`telegram.Bot.get_user_profile_photos`.
"""
return self.bot.get_user_profile_photos(
user_id=self.id,
offset=offset,
limit=limit,
timeout=timeout,
api_kwargs=api_kwargs,
)
def mention_markdown(self, name: str = None) -> str:
"""
Note:
:attr:`telegram.ParseMode.MARKDOWN` is a legacy mode, retained by Telegram for
backward compatibility. You should use :meth:`mention_markdown_v2` instead.
Args:
name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
Returns:
:obj:`str`: The inline mention for the user as markdown (version 1).
"""
if name:
return util_mention_markdown(self.id, name)
return util_mention_markdown(self.id, self.full_name)
def mention_markdown_v2(self, name: str = None) -> str:
"""
Args:
name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
Returns:
:obj:`str`: The inline mention for the user as markdown (version 2).
"""
if name:
return util_mention_markdown(self.id, name, version=2)
return util_mention_markdown(self.id, self.full_name, version=2)
def mention_html(self, name: str = None) -> str:
"""
Args:
name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
Returns:
:obj:`str`: The inline mention for the user as HTML.
"""
if name:
return util_mention_html(self.id, name)
return util_mention_html(self.id, self.full_name)
def pin_message(
self,
message_id: int,
disable_notification: ODVInput[bool] = DEFAULT_NONE,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> bool:
"""Shortcut for::
bot.pin_chat_message(chat_id=update.effective_user.id,
*args,
**kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.pin_chat_message`.
Returns:
:obj:`bool`: On success, :obj:`True` is returned.
"""
return self.bot.pin_chat_message(
chat_id=self.id,
message_id=message_id,
disable_notification=disable_notification,
timeout=timeout,
api_kwargs=api_kwargs,
)
def unpin_message(
self,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
message_id: int = None,
) -> bool:
"""Shortcut for::
bot.unpin_chat_message(chat_id=update.effective_user.id,
*args,
**kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.unpin_chat_message`.
Returns:
:obj:`bool`: On success, :obj:`True` is returned.
"""
return self.bot.unpin_chat_message(
chat_id=self.id,
timeout=timeout,
api_kwargs=api_kwargs,
message_id=message_id,
)
def unpin_all_messages(
self,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> bool:
"""Shortcut for::
bot.unpin_all_chat_messages(chat_id=update.effective_user.id,
*args,
**kwargs)
For the documentation of the arguments, please see
:meth:`telegram.Bot.unpin_all_chat_messages`.
Returns:
:obj:`bool`: On success, :obj:`True` is returned.
"""
return self.bot.unpin_all_chat_messages(
chat_id=self.id,
timeout=timeout,
api_kwargs=api_kwargs,
)
def send_message(
self,
text: str,
parse_mode: ODVInput[str] = DEFAULT_NONE,
disable_web_page_preview: ODVInput[bool] = DEFAULT_NONE,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
) -> 'Message':
"""Shortcut for::
bot.send_message(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_message`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_message(
chat_id=self.id,
text=text,
parse_mode=parse_mode,
disable_web_page_preview=disable_web_page_preview,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
entities=entities,
)
def send_photo(
self,
photo: Union[FileInput, 'PhotoSize'],
caption: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
parse_mode: ODVInput[str] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_photo(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_photo`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_photo(
chat_id=self.id,
photo=photo,
caption=caption,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
parse_mode=parse_mode,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
filename=filename,
)
def send_media_group(
self,
media: List[
Union['InputMediaAudio', 'InputMediaDocument', 'InputMediaPhoto', 'InputMediaVideo']
],
disable_notification: ODVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
timeout: DVInput[float] = DEFAULT_20,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> List['Message']:
"""Shortcut for::
bot.send_media_group(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_media_group`.
Returns:
List[:class:`telegram.Message`:] On success, instance representing the message posted.
"""
return self.bot.send_media_group(
chat_id=self.id,
media=media,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_audio(
self,
audio: Union[FileInput, 'Audio'],
duration: int = None,
performer: str = None,
title: str = None,
caption: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
parse_mode: ODVInput[str] = DEFAULT_NONE,
thumb: FileInput = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_audio(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_audio`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_audio(
chat_id=self.id,
audio=audio,
duration=duration,
performer=performer,
title=title,
caption=caption,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
parse_mode=parse_mode,
thumb=thumb,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
filename=filename,
)
def send_chat_action(
self,
action: str,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> bool:
"""Shortcut for::
bot.send_chat_action(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_chat_action`.
Returns:
:obj:`True`: On success.
"""
return self.bot.send_chat_action(
chat_id=self.id,
action=action,
timeout=timeout,
api_kwargs=api_kwargs,
)
send_action = send_chat_action
"""Alias for :attr:`send_chat_action`"""
def send_contact(
self,
phone_number: str = None,
first_name: str = None,
last_name: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
contact: 'Contact' = None,
vcard: str = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_contact(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_contact`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_contact(
chat_id=self.id,
phone_number=phone_number,
first_name=first_name,
last_name=last_name,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
contact=contact,
vcard=vcard,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_dice(
self,
disable_notification: ODVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
emoji: str = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_dice(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_dice`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_dice(
chat_id=self.id,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
emoji=emoji,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_document(
self,
document: Union[FileInput, 'Document'],
filename: str = None,
caption: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
parse_mode: ODVInput[str] = DEFAULT_NONE,
thumb: FileInput = None,
api_kwargs: JSONDict = None,
disable_content_type_detection: bool = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
) -> 'Message':
"""Shortcut for::
bot.send_document(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_document`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_document(
chat_id=self.id,
document=document,
filename=filename,
caption=caption,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
parse_mode=parse_mode,
thumb=thumb,
api_kwargs=api_kwargs,
disable_content_type_detection=disable_content_type_detection,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
)
def send_game(
self,
game_short_name: str,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'InlineKeyboardMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_game(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_game`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_game(
chat_id=self.id,
game_short_name=game_short_name,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_invoice(
self,
title: str,
description: str,
payload: str,
provider_token: str,
currency: str,
prices: List['LabeledPrice'],
start_parameter: str = None,
photo_url: str = None,
photo_size: int = None,
photo_width: int = None,
photo_height: int = None,
need_name: bool = None,
need_phone_number: bool = None,
need_email: bool = None,
need_shipping_address: bool = None,
is_flexible: bool = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'InlineKeyboardMarkup' = None,
provider_data: Union[str, object] = None,
send_phone_number_to_provider: bool = None,
send_email_to_provider: bool = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
max_tip_amount: int = None,
suggested_tip_amounts: List[int] = None,
) -> 'Message':
"""Shortcut for::
bot.send_invoice(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_invoice`.
Warning:
As of API 5.2 :attr:`start_parameter` is an optional argument and therefore the order
of the arguments had to be changed. Use keyword arguments to make sure that the
arguments are passed correctly.
.. versionchanged:: 13.5
As of Bot API 5.2, the parameter :attr:`start_parameter` is optional.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_invoice(
chat_id=self.id,
title=title,
description=description,
payload=payload,
provider_token=provider_token,
currency=currency,
prices=prices,
start_parameter=start_parameter,
photo_url=photo_url,
photo_size=photo_size,
photo_width=photo_width,
photo_height=photo_height,
need_name=need_name,
need_phone_number=need_phone_number,
need_email=need_email,
need_shipping_address=need_shipping_address,
is_flexible=is_flexible,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
provider_data=provider_data,
send_phone_number_to_provider=send_phone_number_to_provider,
send_email_to_provider=send_email_to_provider,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
max_tip_amount=max_tip_amount,
suggested_tip_amounts=suggested_tip_amounts,
)
def send_location(
self,
latitude: float = None,
longitude: float = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
location: 'Location' = None,
live_period: int = None,
api_kwargs: JSONDict = None,
horizontal_accuracy: float = None,
heading: int = None,
proximity_alert_radius: int = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_location(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_location`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_location(
chat_id=self.id,
latitude=latitude,
longitude=longitude,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
location=location,
live_period=live_period,
api_kwargs=api_kwargs,
horizontal_accuracy=horizontal_accuracy,
heading=heading,
proximity_alert_radius=proximity_alert_radius,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_animation(
self,
animation: Union[FileInput, 'Animation'],
duration: int = None,
width: int = None,
height: int = None,
thumb: FileInput = None,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_animation(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_animation`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_animation(
chat_id=self.id,
animation=animation,
duration=duration,
width=width,
height=height,
thumb=thumb,
caption=caption,
parse_mode=parse_mode,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
filename=filename,
)
def send_sticker(
self,
sticker: Union[FileInput, 'Sticker'],
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_sticker(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_sticker`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_sticker(
chat_id=self.id,
sticker=sticker,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_video(
self,
video: Union[FileInput, 'Video'],
duration: int = None,
caption: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
width: int = None,
height: int = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
supports_streaming: bool = None,
thumb: FileInput = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_video(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_video`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_video(
chat_id=self.id,
video=video,
duration=duration,
caption=caption,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
width=width,
height=height,
parse_mode=parse_mode,
supports_streaming=supports_streaming,
thumb=thumb,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
filename=filename,
)
def send_venue(
self,
latitude: float = None,
longitude: float = None,
title: str = None,
address: str = None,
foursquare_id: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
venue: 'Venue' = None,
foursquare_type: str = None,
api_kwargs: JSONDict = None,
google_place_id: str = None,
google_place_type: str = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
) -> 'Message':
"""Shortcut for::
bot.send_venue(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_venue`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_venue(
chat_id=self.id,
latitude=latitude,
longitude=longitude,
title=title,
address=address,
foursquare_id=foursquare_id,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
venue=venue,
foursquare_type=foursquare_type,
api_kwargs=api_kwargs,
google_place_id=google_place_id,
google_place_type=google_place_type,
allow_sending_without_reply=allow_sending_without_reply,
)
def send_video_note(
self,
video_note: Union[FileInput, 'VideoNote'],
duration: int = None,
length: int = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
thumb: FileInput = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_video_note(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_video_note`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_video_note(
chat_id=self.id,
video_note=video_note,
duration=duration,
length=length,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
thumb=thumb,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
filename=filename,
)
def send_voice(
self,
voice: Union[FileInput, 'Voice'],
duration: int = None,
caption: str = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: DVInput[float] = DEFAULT_20,
parse_mode: ODVInput[str] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
caption_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
filename: str = None,
) -> 'Message':
"""Shortcut for::
bot.send_voice(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_voice`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_voice(
chat_id=self.id,
voice=voice,
duration=duration,
caption=caption,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
parse_mode=parse_mode,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
caption_entities=caption_entities,
filename=filename,
)
def send_poll(
self,
question: str,
options: List[str],
is_anonymous: bool = True,
        # We use constants.POLL_REGULAR instead of Poll.REGULAR here to avoid circular imports
type: str = constants.POLL_REGULAR, # pylint: disable=W0622
allows_multiple_answers: bool = False,
correct_option_id: int = None,
is_closed: bool = None,
disable_notification: ODVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
explanation: str = None,
explanation_parse_mode: ODVInput[str] = DEFAULT_NONE,
open_period: int = None,
close_date: Union[int, datetime] = None,
api_kwargs: JSONDict = None,
allow_sending_without_reply: ODVInput[bool] = DEFAULT_NONE,
explanation_entities: Union[List['MessageEntity'], Tuple['MessageEntity', ...]] = None,
) -> 'Message':
"""Shortcut for::
bot.send_poll(update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.send_poll`.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
"""
return self.bot.send_poll(
chat_id=self.id,
question=question,
options=options,
is_anonymous=is_anonymous,
            type=type,  # pylint: disable=W0622
allows_multiple_answers=allows_multiple_answers,
correct_option_id=correct_option_id,
is_closed=is_closed,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
timeout=timeout,
explanation=explanation,
explanation_parse_mode=explanation_parse_mode,
open_period=open_period,
close_date=close_date,
api_kwargs=api_kwargs,
allow_sending_without_reply=allow_sending_without_reply,
explanation_entities=explanation_entities,
)
def send_copy(
self,
from_chat_id: Union[str, int],
message_id: int,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
caption_entities: Union[Tuple['MessageEntity', ...], List['MessageEntity']] = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
allow_sending_without_reply: DVInput[bool] = DEFAULT_NONE,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> 'MessageId':
"""Shortcut for::
bot.copy_message(chat_id=update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.copy_message`.
Returns:
            :class:`telegram.MessageId`: On success, instance representing the message posted.
"""
return self.bot.copy_message(
chat_id=self.id,
from_chat_id=from_chat_id,
message_id=message_id,
caption=caption,
parse_mode=parse_mode,
caption_entities=caption_entities,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
allow_sending_without_reply=allow_sending_without_reply,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
)
def copy_message(
self,
chat_id: Union[int, str],
message_id: int,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
caption_entities: Union[Tuple['MessageEntity', ...], List['MessageEntity']] = None,
disable_notification: DVInput[bool] = DEFAULT_NONE,
reply_to_message_id: int = None,
allow_sending_without_reply: DVInput[bool] = DEFAULT_NONE,
reply_markup: 'ReplyMarkup' = None,
timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> 'MessageId':
"""Shortcut for::
bot.copy_message(from_chat_id=update.effective_user.id, *args, **kwargs)
For the documentation of the arguments, please see :meth:`telegram.Bot.copy_message`.
Returns:
            :class:`telegram.MessageId`: On success, instance representing the message posted.
"""
return self.bot.copy_message(
from_chat_id=self.id,
chat_id=chat_id,
message_id=message_id,
caption=caption,
parse_mode=parse_mode,
caption_entities=caption_entities,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
allow_sending_without_reply=allow_sending_without_reply,
reply_markup=reply_markup,
timeout=timeout,
api_kwargs=api_kwargs,
)
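# --- Illustrative sketch (assumption, not upstream code): how the shortcut
# methods above are typically used from a handler callback. The handler
# signature follows python-telegram-bot conventions; the names are examples.
def _example_handler(update, context):
    user = update.effective_user              # a telegram.User instance
    greeting = f"Hi {user.full_name}!"        # convenience property above
    # send_message() proxies bot.send_message(chat_id=user.id, ...):
    user.send_message(greeting)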
| 35.705162
| 99
| 0.613879
|
7949e33b8ccab2e1d3483759e84c0ea1967b2a51
| 972
|
py
|
Python
|
kubernetes/test/test_v1_stateful_set_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_stateful_set_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_stateful_set_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_stateful_set_list import V1StatefulSetList
class TestV1StatefulSetList(unittest.TestCase):
""" V1StatefulSetList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1StatefulSetList(self):
"""
Test V1StatefulSetList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_stateful_set_list.V1StatefulSetList()
pass
if __name__ == '__main__':
unittest.main()
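# --- Illustrative sketch of the FIXME above (assumption, not generated
# code): constructing the model with example values; the field names follow
# common swagger-codegen conventions and may differ in this client version.
def _example_model():
    return V1StatefulSetList(api_version='apps/v1', kind='StatefulSetList', items=[])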
| 21.6
| 105
| 0.711934
|
7949e46e353f23ae7af80c14510eff90961e1175
| 737
|
py
|
Python
|
setup.py
|
znstrider/speedo
|
238d591bbf374596b22fb44abba5ba849b4ed32b
|
[
"MIT"
] | 4
|
2021-02-08T11:52:59.000Z
|
2021-03-15T13:01:13.000Z
|
setup.py
|
znstrider/speedo
|
238d591bbf374596b22fb44abba5ba849b4ed32b
|
[
"MIT"
] | null | null | null |
setup.py
|
znstrider/speedo
|
238d591bbf374596b22fb44abba5ba849b4ed32b
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="speedo",
version="0.0.1",
author="znstrider",
author_email="mindfulstrider@gmail.com",
description="Class to make speedometer plots",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/znstrider/speedo",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: Matplotlib",
"Topic :: Scientific/Engineering :: Visualization"
],
python_requires='>=3.6',
)
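# --- Illustrative sketch (assumption, not part of the package): after an
# editable install ("pip install -e ."), the metadata above can be read back.
def _installed_version():
    import importlib.metadata
    return importlib.metadata.version("speedo")  # -> "0.0.1" once installed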
| 29.48
| 58
| 0.658073
|
7949e4aefdb1df4f55956456aa8c9de081acfe30
| 752
|
py
|
Python
|
spinup/user_config.py
|
liushunyu/stable-spinningup
|
7ece777a564b05334c78005eda094ed729d31304
|
[
"MIT"
] | null | null | null |
spinup/user_config.py
|
liushunyu/stable-spinningup
|
7ece777a564b05334c78005eda094ed729d31304
|
[
"MIT"
] | null | null | null |
spinup/user_config.py
|
liushunyu/stable-spinningup
|
7ece777a564b05334c78005eda094ed729d31304
|
[
"MIT"
] | null | null | null |
import os
import os.path as osp
# Default neural network backend for each algo
# (Must be either 'tf1' or 'pytorch')
DEFAULT_BACKEND = {
'vpg': 'pytorch',
'trpo': 'tf1',
'ppo': 'pytorch',
'ddpg': 'pytorch',
'td3': 'pytorch',
'sac': 'pytorch',
'dqn': 'pytorch'
}
# Where experiment outputs are saved by default:
DEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(__file__))),'data')
# Whether to automatically insert a date and time stamp into the names of
# save directories:
FORCE_DATESTAMP = True
# Whether GridSearch provides automatically-generated default shorthands:
DEFAULT_SHORTHAND = True
# Tells the GridSearch how many seconds to pause for before launching
# experiments.
WAIT_BEFORE_LAUNCH = 5
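# --- Illustrative sketch (assumption, not upstream code): resolving the
# neural network backend for an algorithm by name, with a fallback for
# entries missing from DEFAULT_BACKEND.
def _resolve_backend(algo_name):
    return DEFAULT_BACKEND.get(algo_name, 'pytorch')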
| 26.857143
| 83
| 0.716755
|
7949e6a877eca0756074bee0ab2b3ea9c3bdd100
| 13,062
|
py
|
Python
|
tests/linear/goland_wing/test_goland_flutter.py
|
jomorlier/sharpy-1
|
9800df617efd046d4c4af048bbab17f88a91e6a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/linear/goland_wing/test_goland_flutter.py
|
jomorlier/sharpy-1
|
9800df617efd046d4c4af048bbab17f88a91e6a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/linear/goland_wing/test_goland_flutter.py
|
jomorlier/sharpy-1
|
9800df617efd046d4c4af048bbab17f88a91e6a8
|
[
"BSD-3-Clause"
] | 1
|
2020-05-25T17:11:09.000Z
|
2020-05-25T17:11:09.000Z
|
import numpy as np
import os
import unittest
import cases.templates.flying_wings as wings
import sharpy.sharpy_main
import sharpy.utils.sharpydir as sharpydir
class TestGolandFlutter(unittest.TestCase):
def setup(self):
# Problem Set up
u_inf = 1.
alpha_deg = 0.
rho = 1.02
num_modes = 4
# Lattice Discretisation
M = 16
N = 32
M_star_fact = 10
# Linear UVLM settings
integration_order = 2
remove_predictor = False
use_sparse = True
# ROM Properties
rom_settings = dict()
rom_settings['algorithm'] = 'mimo_rational_arnoldi'
rom_settings['r'] = 6
frequency_continuous_k = np.array([0.])
# Case Admin - Create results folders
case_name = 'goland_cs'
case_nlin_info = 'M%dN%dMs%d_nmodes%d' % (M, N, M_star_fact, num_modes)
case_rom_info = 'rom_MIMORA_r%d_sig%04d_%04dj' % (rom_settings['r'], frequency_continuous_k[-1].real * 100,
frequency_continuous_k[-1].imag * 100)
case_name += case_nlin_info + case_rom_info
self.route_test_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
fig_folder = self.route_test_dir + '/figures/'
os.makedirs(fig_folder, exist_ok=True)
# SHARPy nonlinear reference solution
ws = wings.GolandControlSurface(M=M,
N=N,
Mstar_fact=M_star_fact,
u_inf=u_inf,
alpha=alpha_deg,
cs_deflection=[0, 0],
rho=rho,
sweep=0,
physical_time=2,
n_surfaces=2,
route=self.route_test_dir + '/cases',
case_name=case_name)
ws.gust_intensity = 0.01
ws.sigma = 1
ws.clean_test_files()
ws.update_derived_params()
ws.set_default_config_dict()
ws.generate_aero_file()
ws.generate_fem_file()
frequency_continuous_w = 2 * u_inf * frequency_continuous_k / ws.c_ref
rom_settings['frequency'] = frequency_continuous_w
rom_settings['tangent_input_file'] = ws.route + '/' + ws.case_name + '.rom.h5'
ws.config['SHARPy'] = {
'flow':
['BeamLoader', 'AerogridLoader',
'StaticCoupled',
'AerogridPlot',
'BeamPlot',
'Modal',
'LinearAssembler',
'FrequencyResponse',
'AsymptoticStability',
],
'case': ws.case_name, 'route': ws.route,
'write_screen': 'off', 'write_log': 'on',
'log_folder': self.route_test_dir + '/output/' + ws.case_name + '/',
'log_file': ws.case_name + '.log'}
ws.config['BeamLoader'] = {
'unsteady': 'off',
'orientation': ws.quat}
ws.config['AerogridLoader'] = {
'unsteady': 'off',
'aligned_grid': 'on',
'mstar': ws.Mstar_fact * ws.M,
'freestream_dir': ws.u_inf_direction
}
ws.config['StaticUvlm'] = {
'rho': ws.rho,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {
'u_inf': ws.u_inf,
'u_inf_direction': ws.u_inf_direction},
'rollup_dt': ws.dt,
'print_info': 'on',
'horseshoe': 'off',
'num_cores': 4,
'n_rollup': 0,
'rollup_aic_refresh': 0,
'rollup_tolerance': 1e-4}
ws.config['StaticCoupled'] = {
'print_info': 'on',
'max_iter': 200,
'n_load_steps': 1,
'tolerance': 1e-10,
'relaxation_factor': 0.,
'aero_solver': 'StaticUvlm',
'aero_solver_settings': {
'rho': ws.rho,
'print_info': 'off',
'horseshoe': 'off',
'num_cores': 4,
'n_rollup': 0,
'rollup_dt': ws.dt,
'rollup_aic_refresh': 1,
'rollup_tolerance': 1e-4,
'velocity_field_generator': 'SteadyVelocityField',
'velocity_field_input': {
'u_inf': ws.u_inf,
'u_inf_direction': ws.u_inf_direction}},
'structural_solver': 'NonLinearStatic',
'structural_solver_settings': {'print_info': 'off',
'max_iterations': 150,
'num_load_steps': 4,
'delta_curved': 1e-1,
'min_delta': 1e-10,
'gravity_on': 'on',
'gravity': 9.754}}
ws.config['AerogridPlot'] = {'folder': self.route_test_dir + '/output/',
'include_rbm': 'off',
'include_applied_forces': 'on',
'minus_m_star': 0}
ws.config['AeroForcesCalculator'] = {'folder': self.route_test_dir + '/output/forces',
'write_text_file': 'on',
'text_file_name': ws.case_name + '_aeroforces.csv',
'screen_output': 'on',
'unsteady': 'off'}
ws.config['BeamPlot'] = {'folder': self.route_test_dir + '/output/',
'include_rbm': 'off',
'include_applied_forces': 'on'}
ws.config['BeamCsvOutput'] = {'folder': self.route_test_dir + '/output/',
'output_pos': 'on',
'output_psi': 'on',
'screen_output': 'on'}
ws.config['Modal'] = {'folder': self.route_test_dir + '/output/',
'NumLambda': 20,
'rigid_body_modes': 'off',
'print_matrices': 'on',
'keep_linear_matrices': 'on',
'write_dat': 'off',
'rigid_modes_cg': 'off',
'continuous_eigenvalues': 'off',
'dt': 0,
'plot_eigenvalues': False,
'max_rotation_deg': 15.,
'max_displacement': 0.15,
'write_modes_vtk': True,
'use_undamped_modes': True}
ws.config['LinearAssembler'] = {'linear_system': 'LinearAeroelastic',
'linear_system_settings': {
'beam_settings': {'modal_projection': 'on',
'inout_coords': 'modes',
'discrete_time': 'on',
'newmark_damp': 0.5e-4,
'discr_method': 'newmark',
'dt': ws.dt,
'proj_modes': 'undamped',
'use_euler': 'off',
'num_modes': num_modes,
'print_info': 'on',
'gravity': 'on',
'remove_sym_modes': 'on',
'remove_dofs': []},
'aero_settings': {'dt': ws.dt,
'ScalingDict': {'length': 0.5 * ws.c_ref,
'speed': u_inf,
'density': rho},
'integr_order': integration_order,
'density': ws.rho,
'remove_predictor': remove_predictor,
'use_sparse': use_sparse,
'rigid_body_motion': 'off',
'use_euler': 'off',
'remove_inputs': ['u_gust'],
'rom_method': ['Krylov'],
'rom_method_settings': {'Krylov': rom_settings}},
'rigid_body_motion': False}}
ws.config['AsymptoticStability'] = {'print_info': True,
'folder': self.route_test_dir + '/output/',
'velocity_analysis': [160, 180, 20]}
ws.config['LinDynamicSim'] = {'dt': ws.dt,
'n_tsteps': ws.n_tstep,
'sys_id': 'LinearAeroelastic',
'postprocessors': ['BeamPlot', 'AerogridPlot'],
'postprocessors_settings': {'AerogridPlot': {
'u_inf': ws.u_inf,
'folder': self.route_test_dir + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on',
'minus_m_star': 0},
'BeamPlot': {'folder': ws.route + '/output/',
'include_rbm': 'on',
'include_applied_forces': 'on'}}}
ws.config['FrequencyResponse'] = {'compute_fom': 'on',
'quick_plot': 'off',
'folder': self.route_test_dir + '/output/',
'frequency_unit': 'k',
'frequency_bounds': [0.0001, 1.0],
}
ws.config.write()
        self.data = sharpy.sharpy_main.main(['', ws.route + '/' + ws.case_name + '.sharpy'])
def run_rom_stable(self):
ssrom = self.data.linear.linear_system.uvlm.rom['Krylov'].ssrom
eigs_rom = np.linalg.eigvals(ssrom.A)
        assert all(np.abs(eigs_rom) <= 1.), 'UVLM Krylov ROM is unstable - flutter speed may not be correct. ' \
                                            'Change ROM settings to achieve stability'
print('ROM is stable')
def run_flutter(self):
flutter_ref_speed = 166 # at current discretisation
u_inf = self.data.linear.stability['velocity_results']['u_inf']
eval_real = self.data.linear.stability['velocity_results']['evals_real']
eval_imag = self.data.linear.stability['velocity_results']['evals_imag']
# Flutter onset
ind_zero_real = np.where(eval_real >= 0)[0][0]
assert ind_zero_real > 0, 'Flutter speed not below 165.00 m/s'
flutter_speed = 0.5 * (u_inf[ind_zero_real] + u_inf[ind_zero_real - 1])
flutter_frequency = np.sqrt(eval_real[ind_zero_real] ** 2 + eval_imag[ind_zero_real] ** 2)
print('Flutter speed = %.1f m/s' % flutter_speed)
print('Flutter frequency = %.2f rad/s' % flutter_frequency)
assert np.abs(
flutter_speed - flutter_ref_speed) / flutter_ref_speed < 1e-2, ' Flutter speed error greater than ' \
'1 percent'
print('Test Complete')
def test_flutter(self):
self.setup()
self.run_rom_stable()
self.run_flutter()
def tearDown(self):
import shutil
folders = ['cases', 'figures', 'output']
for folder in folders:
shutil.rmtree(self.route_test_dir + '/' + folder)
if __name__ == '__main__':
unittest.main()
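# --- Illustrative sketch (not upstream code) of the flutter-onset
# interpolation used in run_flutter(): the onset speed is taken as the
# midpoint between the last stable and first unstable velocities.
def _flutter_onset(u_inf, eval_real):
    idx = np.where(np.asarray(eval_real) >= 0)[0][0]
    return 0.5 * (u_inf[idx] + u_inf[idx - 1])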
| 46.65
| 118
| 0.406752
|
7949e6c6d66e40b8dbb8c03e6b01e4be940f4e2a
| 19,120
|
py
|
Python
|
qface/idl/domain.py
|
Gagi2k/qface
|
5069fdc5f406495590a83a799ecbe30009436f1d
|
[
"MIT"
] | 38
|
2016-11-30T10:36:56.000Z
|
2022-02-19T14:58:00.000Z
|
qface/idl/domain.py
|
Gagi2k/qface
|
5069fdc5f406495590a83a799ecbe30009436f1d
|
[
"MIT"
] | 70
|
2016-11-30T09:12:58.000Z
|
2022-01-27T12:52:41.000Z
|
qface/idl/domain.py
|
Gagi2k/qface
|
5069fdc5f406495590a83a799ecbe30009436f1d
|
[
"MIT"
] | 23
|
2016-12-07T15:23:15.000Z
|
2021-06-07T08:32:40.000Z
|
# Copyright (c) Pelagicore AB 2016
'''The domain module contains an object hierarchy which resembles the
QFace grammar as a domain model. It is created from the QFace file and is
the main input for the code generation templates.
.. note:: Changes on this API will result into broken templates
.. code-block:: text
System
+- Module
+- Import
+- Interface
+- Property
+- Operation
+- Event
+- Struct (has attributes)
+- Enum (has values)
.. note::
When the API talks about an order list, the order is by appearance
in the QFace file.
'''
from collections import OrderedDict, ChainMap
import click
import logging
log = logging.getLogger(__name__)
class System(object):
"""The root entity which consist of modules"""
def __init__(self):
log.debug('System()')
self._moduleMap = OrderedDict() # type: dict[str, Module]
def __unicode__(self):
return 'system'
def __repr__(self):
return '<System>'
@property
def modules(self):
'''returns ordered list of module symbols'''
return self._moduleMap.values()
def lookup(self, name: str):
'''lookup a symbol by fully qualified name.'''
# <module>
if name in self._moduleMap:
return self._moduleMap[name]
# <module>.<Symbol>
(module_name, type_name, fragment_name) = self.split_typename(name)
if not module_name and type_name:
click.secho('not able to lookup symbol: {0}'.format(name), fg='red')
return None
module = self._moduleMap[module_name]
return module.lookup(type_name, fragment_name)
@staticmethod
def split_typename(name):
parts = name.rsplit('#', 1)
fragment_name = None
module_name = None
type_name = None
if len(parts) == 2:
fragment_name = parts[1]
name = parts[0]
parts = name.rsplit('.', 1)
if len(parts) == 1:
type_name = parts[0]
elif len(parts) == 2:
module_name = parts[0]
type_name = parts[1]
return (module_name, type_name, fragment_name)
def toJson(self):
o = OrderedDict()
o['modules'] = [o.toJson() for o in self.modules]
return o
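# --- Illustrative sketch (assumption, not upstream code): a Module registers
# itself with its System on construction, so a fresh hierarchy can be queried
# by fully qualified name right away. Module is defined further below.
def _example_lookup():
    system = System()
    Module('com.example', system)
    return system.lookup('com.example')  # -> the Module instance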
class NamedElement(object):
def __init__(self, name, module: 'Module'):
self.name = name
"""symbol name"""
self.module = module
"""module the symbol belongs to"""
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def __repr__(self):
return '<{0} name={1}>'.format(type(self), self.name)
@property
def qualified_name(self):
'''return the fully qualified name (`<module>.<name>`)'''
if self.module == self:
return self.module.name
else:
if "." not in self.name:
return '{0}.{1}'.format(self.module.name, self.name)
else:
# We have a fully qualified reference, just return it
return self.name
def toJson(self):
o = OrderedDict()
if self.name:
o['name'] = self.name
return o
class Symbol(NamedElement):
"""A symbol represents a base class for names elements"""
def __init__(self, name: str, module: 'Module'):
super().__init__(name, module)
self.comment = ''
"""comment which appeared in QFace right before symbol"""
        self._tags = dict()
        self.value = None
        """a value attached to the symbol"""
        self._contentMap = ChainMap()
        self._dependencies = set()
        self.type = TypeSymbol('', self)
        """ the associated type information """
        self.kind = self.__class__.__name__.lower()
@property
def system(self):
'''returns reference to system'''
return self.module._system
@property
def tags(self):
return self._tags
def add_tag(self, tag):
""" add a tag to the tag list """
if tag not in self._tags:
self._tags[tag] = dict()
def add_attribute(self, tag, name, value):
""" add an attribute (nam, value pair) to the named tag """
self.add_tag(tag)
d = self._tags[tag]
d[name] = value
def tag(self, name):
""" return tag by name """
return self._tags[name]
def attribute(self, tag, name):
""" return attribute by tag and attribute name """
if tag in self._tags and name in self._tags[tag]:
return self._tags[tag][name]
@property
def contents(self):
""" return general list of symbol contents """
return self._contentMap.values()
@property
def dependencies(self):
if not self._dependencies:
self._dependencies = [x.type for x in self.contents]
return self._dependencies
def toJson(self):
o = super().toJson()
if self.type.is_valid:
o['type'] = self.type.toJson()
return o
class TypeSymbol(NamedElement):
"""Defines a type in the system"""
def __init__(self, name: str, parent: NamedElement):
super().__init__(name, parent.module)
log.debug('TypeSymbol()')
self.parent = parent
""" the parent symbol of this type """
self.is_void = False # type:bool
""" if type represents the void type """
self.is_primitive = False # type:bool
""" if type represents a primitive type """
self.is_complex = False # type:bool
""" if type represents a complex type """
self.is_list = False # type:bool
""" if type represents a list of nested types """
self.is_map = False # type:bool
""" if type represents a map of nested types. A key type is not defined """
self.is_model = False # type:bool
""" if type represents a model of nested types """
self.nested = None
"""nested type if symbol is list or model"""
self.__reference = None
self.__is_resolved = False
@property
def is_valid(self):
'''checks if type is a valid type'''
return (self.is_primitive and self.name) \
or (self.is_complex and self.name) \
or (self.is_list and self.nested) \
or (self.is_map and self.nested) \
or (self.is_model and self.nested)
@property
def is_bool(self):
'''checks if type is primitive and bool'''
return self.is_primitive and self.name == 'bool'
@property
def is_int(self):
'''checks if type is primitive and int'''
return self.is_primitive and self.name == 'int'
@property
def is_real(self):
'''checks if type is primitive and real'''
return self.is_primitive and self.name == 'real'
@property
def is_string(self):
'''checks if type is primitive and string'''
return self.is_primitive and self.name == 'string'
@property
def is_var(self):
'''checks if type is primitive and var'''
return self.is_primitive and self.name == 'var'
@property
def is_enumeration(self):
'''checks if type is complex and instance of type Enum'''
return self.is_complex and isinstance(self.reference, Enum)
@property
def is_enum(self):
'''checks if type is an enumeration and reference is enum'''
return self.is_enumeration and self.reference.is_enum
@property
def is_flag(self):
'''checks if type is an enumeration and reference is flag '''
return self.is_enumeration and self.reference.is_flag
@property
def is_struct(self):
'''checks if type is complex and struct'''
return self.is_complex and isinstance(self.reference, Struct)
@property
def is_interface(self):
'''checks if type is interface'''
return self.is_complex and isinstance(self.reference, Interface)
@property
def reference(self):
"""returns the symbol reference of the type name"""
if not self.__is_resolved:
self._resolve()
return self.__reference
def _resolve(self):
"""resolve the type symbol from name by doing a lookup"""
self.__is_resolved = True
if self.is_complex:
type = self.nested if self.nested else self
type.__reference = self.module.lookup(type.name)
@property
def type(self):
""" return the type information. In this case: self """
return self
def toJson(self):
o = super().toJson()
if self.is_void:
o['void'] = self.is_void
if self.is_primitive:
o['primitive'] = self.is_primitive
if self.is_complex:
o['complex'] = self.is_complex
if self.is_list:
o['list'] = self.is_list
if self.is_map:
o['map'] = self.is_map
if self.is_model:
o['model'] = self.is_model
if self.nested:
o['nested'] = self.nested.toJson()
return o
class Module(Symbol):
"""Module is a namespace for types, e.g. interfaces, enums, structs"""
def __init__(self, name: str, system: System):
"""init"""
super().__init__(name, self)
log.debug('Module()')
self.version = '1.0'
self._system = system
self._system._moduleMap[name] = self
self._interfaceMap = OrderedDict() # type: dict[str, Interface]
self._structMap = OrderedDict() # type: dict[str, Struct]
self._enumMap = OrderedDict() # type: dict[str, Enum]
self._contentMap = ChainMap(self._interfaceMap, self._structMap, self._enumMap)
self._importMap = OrderedDict() # type: dict[str, Module]
@property
def interfaces(self):
'''returns ordered list of interface symbols'''
return self._interfaceMap.values()
@property
def structs(self):
'''returns ordered list of struct symbols'''
return self._structMap.values()
@property
def enums(self):
'''returns ordered list of enum symbols'''
return self._enumMap.values()
@property
def imports(self):
'''returns ordered list of import symbols'''
return self._importMap.values()
def checkType(self, type: str):
if type.is_primitive:
return True
(module_name, type_name, fragment_name) = System.split_typename(type.name)
if module_name and module_name not in self._importMap:
return False
return True
@property
def name_parts(self):
'''return module name splitted by '.' in parts'''
return self.name.split('.')
@property
def majorVersion(self):
""" returns the major version number of the version information """
return self.version.split('.')[0]
@property
def minorVersion(self):
""" returns the minor version number of the version information """
return self.version.split('.')[1]
@property
def module_name(self):
""" returns the last part of the module uri """
return self.name.split('.')[-1]
def lookup(self, name: str, fragment: str = None):
'''lookup a symbol by name. If symbol is not local
it will be looked up system wide'''
if name in self._contentMap:
symbol = self._contentMap[name]
if fragment:
return symbol._contentMap[fragment]
return symbol
return self.system.lookup(name)
def toJson(self):
o = super().toJson()
o['version'] = self.version
o['interfaces'] = [s.toJson() for s in self.interfaces]
o['structs'] = [s.toJson() for s in self.structs]
o['enums'] = [s.toJson() for s in self.enums]
return o
class Interface(Symbol):
"""A interface is an object with operations, properties and signals"""
def __init__(self, name: str, module: Module):
super().__init__(name, module)
log.debug('Interface()')
self.module._interfaceMap[name] = self
self._propertyMap = OrderedDict() # type: dict[str, Property]
self._operationMap = OrderedDict() # type: dict[str, Operation]
self._signalMap = OrderedDict() # type: dict[str, Signal]
self._contentMap = ChainMap(self._propertyMap, self._operationMap, self._signalMap)
self._extends = None
@property
def properties(self):
'''returns ordered list of properties'''
return self._propertyMap.values()
@property
def operations(self):
'''returns ordered list of operations'''
return self._operationMap.values()
@property
def signals(self):
'''returns ordered list of signals'''
return self._signalMap.values()
@property
def extends(self):
''' returns the symbol defined by the extends interface attribute '''
return self.module.lookup(self._extends)
def toJson(self):
o = super().toJson()
o['properties'] = [s.toJson() for s in self.properties]
o['operations'] = [s.toJson() for s in self.operations]
o['signals'] = [s.toJson() for s in self.signals]
return o
class Operation(Symbol):
"""An operation inside a interface"""
def __init__(self, name: str, interface: Interface):
super().__init__(name, interface.module)
log.debug('Operation()')
self.interface = interface
""" the interface the operation is part of """
self.interface._operationMap[name] = self
        self._parameterMap = self._contentMap = OrderedDict() # type: dict[str, Parameter]
self.is_const = False # type: bool
"""reflects is the operation was declared as const operation"""
@property
def qualified_name(self):
'''return the fully qualified name (`<module>.<interface>#<operation>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.interface.name, self.name)
@property
def parameters(self):
'''returns ordered list of parameters'''
return self._parameterMap.values()
def toJson(self):
o = super().toJson()
o['parameters'] = [s.toJson() for s in self.parameters]
o['type'] = self.type.toJson()
return o
class Signal(Symbol):
"""A signal inside an interface"""
def __init__(self, name: str, interface: Interface):
super().__init__(name, interface.module)
log.debug('Signal()')
self.interface = interface
self.interface._signalMap[name] = self
        self._parameterMap = self._contentMap = OrderedDict() # type: dict[str, Parameter]
@property
def qualified_name(self):
        '''return the fully qualified name (`<module>.<interface>#<signal>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.interface.name, self.name)
@property
def parameters(self):
'''returns ordered list of parameters'''
return self._parameterMap.values()
def toJson(self):
o = super().toJson()
o['parameters'] = [s.toJson() for s in self.parameters]
return o
class Parameter(Symbol):
"""An operation parameter"""
def __init__(self, name: str, operation: Operation):
super().__init__(name, operation.module)
log.debug('Parameter()')
self.operation = operation
self.operation._parameterMap[name] = self
class Property(Symbol):
"""A typed property inside a interface"""
def __init__(self, name: str, interface: Interface):
super().__init__(name, interface.module)
log.debug('Property()')
self.interface = interface
self.interface._propertyMap[name] = self
self.readonly = False
self.const = False
@property
def is_model(self):
''' true if type is a model '''
return self.type.is_model
@property
def is_primitive_model(self):
''' true if type is a model of nested primitive types '''
return self.type.is_model and self.type.nested.is_primitive
@property
def is_complex_model(self):
''' true if type is a model of nested complex types '''
return self.type.is_model and self.type.nested.is_complex
@property
def qualified_name(self):
'''return the fully qualified name (`<module>.<interface>#<property>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.interface.name, self.name)
@property
    def writeable(self):
        '''true if the property is neither readonly nor const'''
        return not self.readonly and not self.const
def toJson(self):
o = super().toJson()
if self.readonly:
o['readonly'] = True
if self.const:
o['const'] = True
return o
class Struct(Symbol):
"""Represents a data container"""
def __init__(self, name: str, module: Module):
super().__init__(name, module)
log.debug('Struct()')
self.module._structMap[name] = self
self._fieldMap = self._contentMap = OrderedDict()
@property
def fields(self):
        '''returns ordered list of fields'''
return self._fieldMap.values()
def toJson(self):
o = super().toJson()
o['fields'] = [s.toJson() for s in self.fields]
return o
class Field(Symbol):
"""A member in a struct"""
def __init__(self, name: str, struct: Struct):
super().__init__(name, struct.module)
log.debug('Field()')
self.struct = struct # type:Struct
self.struct._fieldMap[name] = self
@property
def qualified_name(self):
'''return the fully qualified name (`<module>.<struct>#<field>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.struct.name, self.name)
class Enum(Symbol):
"""An enum (flag) inside a module"""
def __init__(self, name: str, module: Module):
super().__init__(name, module)
log.debug('Enum()')
self.is_enum = True
self.is_flag = False
self.module._enumMap[name] = self
        self._memberMap = self._contentMap = OrderedDict() # type: dict[str, EnumMember]
@property
def members(self):
'''returns ordered list of members'''
return self._memberMap.values()
def toJson(self):
o = super().toJson()
if self.is_enum:
o['enum'] = self.is_enum
if self.is_flag:
o['flag'] = self.is_flag
o['members'] = [s.toJson() for s in self.members]
return o
class EnumMember(Symbol):
"""A enum value"""
def __init__(self, name: str, enum: Enum):
super().__init__(name, enum.module)
log.debug('EnumMember()')
self.enum = enum
self.enum._memberMap[name] = self
self.value = 0
    @property
    def qualified_name(self):
'''return the fully qualified name (`<module>.<enum>#<member>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.enum.name, self.name)
def toJson(self):
o = super().toJson()
o['value'] = self.value
return o
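if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): it assumes System() takes no
    # arguments and that Symbol.toJson(), defined earlier in this module,
    # returns a plain dict.
    system = System()  # assumed no-arg constructor
    module = Module('org.example', system)
    enum = Enum('Color', module)
    member = EnumMember('Red', enum)
    member.value = 1
    print(module.toJson())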
| 31.344262
| 91
| 0.59864
|
7949e7b97b49f6f33ffd53efa087359927e2e82f
| 2,139
|
py
|
Python
|
2020/11/2/solution.py
|
twsh/Advent-of-Code
|
69f394f6740a32825bc847ca18d91f53fbcccb10
|
[
"MIT"
] | null | null | null |
2020/11/2/solution.py
|
twsh/Advent-of-Code
|
69f394f6740a32825bc847ca18d91f53fbcccb10
|
[
"MIT"
] | null | null | null |
2020/11/2/solution.py
|
twsh/Advent-of-Code
|
69f394f6740a32825bc847ca18d91f53fbcccb10
|
[
"MIT"
] | null | null | null |
def parse_map(in_file):
    """Read the seat layout into a dict mapping (x, y) to its character."""
with open(in_file) as f:
lines = f.read().splitlines()
width = len(lines[0])
height = len(lines)
points = {}
for x in range(width):
for y in range(height):
points[(x, y)] = lines[y][x]
return points
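# For example (illustrative), a 2x2 input of "L." on the first line and ".#"
# on the second parses to:
#   {(0, 0): 'L', (1, 0): '.', (0, 1): '.', (1, 1): '#'}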
# (x, y) offsets for the eight viewing directions.
DELTAS = {
    "right": (1, 0),
    "left": (-1, 0),
    "down": (0, 1),
    "up": (0, -1),
    "upright": (1, -1),
    "upleft": (-1, -1),
    "downright": (1, 1),
    "downleft": (-1, 1),
}
def look(seat, grid, direction):
    """Return the first seat ('#' or 'L') visible from seat in direction, or None."""
    (x, y) = seat
    (dx, dy) = DELTAS[direction]
    w = x + dx
    z = y + dy
    try:
        if grid[(w, z)] in ("#", "L"):
            return grid[(w, z)]
        else:
            return look((w, z), grid, direction)
    except KeyError:
        return None
def check_seat(seat, grid):
    """Count occupied seats visible from seat in the eight directions."""
    visible = 0
directions = [
"up",
"down",
"left",
"right",
"upleft",
"downleft",
"upright",
"downright",
]
for direction in directions:
seen = look(seat, grid, direction)
if seen == "#":
visible += 1
return visible
def count_occupied(grid):
    """Count the occupied ('#') positions in the grid."""
total = 0
for seat in grid:
if grid[seat] == "#":
total += 1
return total
def solve(in_file):
    """Apply the part-two seating rules until the layout stabilizes."""
grid = parse_map(in_file)
unstable = True
updates = {}
while unstable:
for seat in grid:
if grid[seat] == "L" and check_seat(seat, grid) == 0:
updates[seat] = "#"
elif grid[seat] == "#" and check_seat(seat, grid) >= 5:
updates[seat] = "L"
if updates:
for update in updates:
grid[update] = updates[update]
updates = {}
else:
unstable = False
return count_occupied(grid)
# print(solve("sample.txt")) # 26
print(solve("input.txt")) # 2045
| 22.28125
| 67
| 0.462366
|
7949e7ea48d128d1f4f5d097e15dbcc77c909a21
| 42,413
|
py
|
Python
|
tests/test_exh.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | 6
|
2015-12-15T04:09:08.000Z
|
2020-02-21T01:40:57.000Z
|
tests/test_exh.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | null | null | null |
tests/test_exh.py
|
pmacosta/putil
|
416cea52df8221981727e25d133e9b4e3f464798
|
[
"MIT"
] | 2
|
2016-01-21T23:29:17.000Z
|
2020-02-21T01:41:05.000Z
|
# test_exh.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,C0302,C0413,E0611,F0401,R0201,R0204,R0903,R0912
# pylint: disable=R0914,R0915,W0108,W0122,W0123,W0212,W0612,W0631,W0640
# Standard library imports
from __future__ import print_function
import collections
import copy
import os
import re
import sys
from itertools import product
if sys.hexversion >= 0x03000000:
import unittest.mock as mock
# PyPI imports
import pytest
if sys.hexversion < 0x03000000:
import mock
# Putil imports
import putil.eng
import putil.exh
import putil.misc
import putil.pcontracts
from putil.test import AE, AI, AROPROP, CLDICTS, GET_EXMSG
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
SUPPORT_DIR = os.path.join(TEST_DIR, 'support')
sys.path.append(SUPPORT_DIR)
import exh_support_module_1
import exh_support_module_2
if sys.hexversion > 0x03000000:
def exec_function(source, filename, global_map):
""" A wrapper around exec() """
exec(compile(source, filename, 'exec'), global_map)
else:
# From https://stackoverflow.com/questions/12809234/
# is-it-possible-to-call-exec-so-that-its-compatible-with-
# both-python-3-and-pytho
# "OK, this is pretty gross. In Py2, exec was a statement, but that will
# be a syntax error if we try to put it in a Py3 file, even if it isn't
# executed. So hide it inside an evaluated string literal instead."
eval(compile("""\
def exec_function(source, filename, global_map):
exec compile(source, filename, "exec") in global_map
""",
'<exec_function>', 'exec'
))
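# Illustrative check of the shim above (not part of the test suite): on both
# Python 2 and 3, running a source string through exec_function populates the
# supplied global map, e.g.
#   gmap = {}
#   exec_function("answer = 42", "<example>", gmap)
#   assert gmap["answer"] == 42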
###
# Global variables
###
CTNAME = 'tests.test_exh.TestExHandle'
PENG_FNAME = sys.modules['putil.eng'].__file__
###
# Helper functions
###
sarg = lambda msg: 'Argument `{0}` is not valid'.format(msg)
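# For example, sarg('exname') evaluates to 'Argument `exname` is not valid'.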
###
# Test functions
###
def test_add():
""" Test add function behavior """
obj = putil.exh.addex
# Exceptions
AI(obj, 'extype', 5, 'Message')
AI(obj, 'exmsg', RuntimeError, 2.0)
AI(obj, 'condition', RuntimeError, 'Message', 1)
AI(obj, 'edata', RuntimeError, 'Message', False, 5)
inst1 = obj(IOError, 'First exception')
inst2 = obj(TypeError, 'My exception')
inst3 = obj(RuntimeError, 'Third exception')
inst2(False)
AE(inst2, TypeError, 'My exception', True)
inst4 = obj(IOError, 'Invalid *[name]*')
inst4(False)
AE(inst4, IOError, 'Invalid arg', True, [{'field':'name', 'value':'arg'}])
obj(TypeError, 'My exception', False)
with pytest.raises(TypeError) as excinfo:
obj(TypeError, 'My exception', True)
assert GET_EXMSG(excinfo) == 'My exception'
edata = [{'field':'name', 'value':'arg'}]
obj(IOError, 'Invalid *[name]*', False, edata)
with pytest.raises(IOError) as excinfo:
obj(IOError, 'Invalid *[name]*', True, edata)
assert GET_EXMSG(excinfo) == 'Invalid arg'
def test_add_ai():
""" Test add_ai function behavior """
obj = putil.exh.addai
# Exceptions
AI(obj, 'argname', 5)
AI(obj, 'condition', 'name', 1)
inst1 = obj('value')
inst2 = obj('fname')
inst3 = obj('kwarg')
inst2(False)
AE(inst2, RuntimeError, 'Argument `fname` is not valid', True)
obj('arg', False)
with pytest.raises(RuntimeError) as excinfo:
obj('arg', True)
assert GET_EXMSG(excinfo) == 'Argument `arg` is not valid'
def test_star_exh_obj():
""" Test [get|set|del]_exh_obj() function behavior """
AI(putil.exh.set_exh_obj, 'obj', obj=5)
# set_exh_obj function
exobj = putil.exh.ExHandle()
putil.exh.set_exh_obj(exobj)
assert id(putil.exh.get_exh_obj()) == id(exobj)
# del_exh_obj function
putil.exh.del_exh_obj()
assert putil.exh.get_exh_obj() is None
# Test that nothing happens if del_exh_obj is called when there
# is no global object handler set
putil.exh.del_exh_obj()
new_exh_obj = putil.exh.get_or_create_exh_obj()
assert id(new_exh_obj) != id(exobj)
assert not new_exh_obj._full_cname
# If there is a global exception handler the arguments passed
# should not have any effect
assert id(putil.exh.get_or_create_exh_obj(True)) == id(new_exh_obj)
assert not new_exh_obj._full_cname
# Test passed arguments are correctly assigned
putil.exh.del_exh_obj()
new_exh_obj = putil.exh.get_or_create_exh_obj(True)
assert new_exh_obj._full_cname
putil.exh.del_exh_obj()
# get_or_create_exh_obj function
obj = putil.exh.get_or_create_exh_obj
AI(obj, 'full_cname', full_cname=5)
AI(obj, 'exclude', exclude=5)
AI(obj, 'callables_fname', callables_fname=True)
args = {'callables_fname':'_not_a_file_'}
msg = 'File _not_a_file_ could not be found'
AE(obj, OSError, msg, **args)
# exclude parameter
pobj = putil.pinspect.Callables([PENG_FNAME])
new_exh_obj = putil.exh.get_or_create_exh_obj(exclude=['putil.eng'])
new_exh_obj._exclude = ['putil.eng']
putil.exh.del_exh_obj()
# callables_fname parameter
with putil.misc.TmpFile() as fname:
pobj.save(fname)
new_exh_obj = putil.exh.get_or_create_exh_obj(callables_fname=fname)
assert pobj == new_exh_obj._callables_obj
putil.exh.del_exh_obj()
@pytest.mark.parametrize(
'arg, ref', [
(RuntimeError, 'RuntimeError'),
(OSError, 'OSError')
]
)
def test_ex_type_str(arg, ref):
""" test _ex_type_str() function behavior """
assert putil.exh._ex_type_str(arg) == ref
class TestExHandle(object):
""" Tests for ExHandle class """
def test_init(self):
""" Test constructor behavior """
# Exceptions
obj = putil.exh.ExHandle
AI(obj, 'full_cname', full_cname=5)
AI(obj, 'exclude', exclude=5)
AI(obj, 'exclude', exclude=['p', 'a', 5, 'c'])
AI(obj, 'callables_fname', callables_fname=True)
arg = {'exclude':['sys', '_not_a_module_']}
msg = 'Source for module _not_a_module_ could not be found'
AE(obj, ValueError, msg, **arg)
arg = {'callables_fname':'_not_an_existing_file_'}
msg = 'File _not_an_existing_file_ could not be found'
# Functionality
exobj = putil.exh.ExHandle()
assert not exobj._full_cname
assert exobj._exclude is None
assert exobj._exclude_list == []
exobj = putil.exh.ExHandle(False, [])
assert not exobj._full_cname
assert exobj._exclude == []
assert exobj._exclude_list == []
exobj = putil.exh.ExHandle(True, None)
assert exobj._full_cname
assert exobj._exclude is None
assert exobj._exclude_list == []
exobj = putil.exh.ExHandle(exclude=['putil.exh'])
assert exobj._exclude == ['putil.exh']
assert exobj._exclude_list == [
sys.modules['putil.exh'].__file__.replace('.pyc', '.py')
]
pobj = putil.pinspect.Callables([PENG_FNAME])
with putil.misc.TmpFile() as fname:
pobj.save(fname)
exobj = putil.exh.ExHandle(callables_fname=fname)
assert pobj == exobj._callables_obj
@pytest.mark.parametrize(
'args, extype, exname', [
((5, RuntimeError, 'Message'), RuntimeError, sarg('exname')),
(('34', RuntimeError, 'Message'), RuntimeError, sarg('exname')),
(('exception', 5, 'Message'), RuntimeError, sarg('extype')),
(('exception', RuntimeError, True), RuntimeError, sarg('exmsg'))
]
)
@pytest.mark.parametrize('full_cname', [True, False])
def test_add_exception_exceptions(self, full_cname, args, extype, exname):
""" Test add_exception() method exceptions """
obj = putil.exh.ExHandle(full_cname).add_exception
AE(obj, extype, exname, *args)
# These should not raise an exception
obj = putil.exh.ExHandle(full_cname)
obj.add_exception(exname='name1', extype=RuntimeError, exmsg='desc1')
obj.add_exception(exname='name2', extype=TypeError, exmsg='desc2')
def test_add_exception(self):
""" Test add_exception() function behavior """
# pylint: disable=E0602,W0613
# Functions that are going to be traced
def func1():
aobj('first_exception', TypeError, dmsg('first'))
print("Hello")
def prop_decorator(func):
return func
@putil.pcontracts.contract(text=str)
@prop_decorator
def func2(text):
aobj('second_exception', ValueError, dmsg('second'))
aobj('third_exception', OSError, dmsg('third'))
print(text)
class Class1(object):
def __init__(self, exobj):
self._value = None
self._aobj = exobj.add_exception
@property
def value3(self):
self._aobj('getter_exception', TypeError, pmsg('Get'))
return self._value
@value3.setter
@putil.pcontracts.contract(value=int)
def value3(self, value):
self._aobj('setter_exception', TypeError, pmsg('Set'))
self._value = value
@value3.deleter
def value3(self):
self._aobj('deleter_exception', TypeError, pmsg('Delete'))
print('Cannot delete attribute')
def _get_value4_int(self):
self._aobj('dummy_exception', OSError, 'Bypass exception')
return self._value
def _get_value4(self):
return self._get_value4_int()
value4 = property(_get_value4)
def func7():
aobj(emsg(7), TypeError, mmsg(7))
def func8():
aobj(emsg(8), TypeError, mmsg(8))
def func9():
aobj(emsg(9), TypeError, mmsg(9))
def func10():
aobj(emsg(10), TypeError, mmsg(10))
def func11():
aobj(emsg(11), TypeError, mmsg(11))
def func12():
aobj(emsg(12), TypeError, mmsg(12))
def func13():
aobj(emsg(13), TypeError, mmsg(13))
def func14():
aobj(emsg(14), TypeError, mmsg(14))
# Add a function via exec
iftxt = (
"def func15(exobj):"
" aobj(emsg(15), TypeError, mmsg(15))"
)
gmap = locals()
exec_function(iftxt, '<exec_function>', gmap)
func15 = gmap['func15']
# Helper functions
dmsg = lambda msg: 'This is the {0} exception'.format(msg)
emsg = lambda num: 'total_exception_{0}'.format(num)
mmsg = lambda num: 'Total exception #{0}'.format(num)
pmsg = lambda msg: '{0} function exception'.format(msg)
root = CTNAME+'.test_add_exception'
# Assertions
exobj = putil.exh.ExHandle(
full_cname=True, exclude=['_pytest', 'tests.test_exh']
)
assert exobj.exceptions_db == []
# Generate all possible combinations of full_cname and exclude options
# full_cname exclude
combinations = product([True, False], [None, ['_pytest', 'execnet']])
for full_cname, exclude in combinations:
exobj = putil.exh.ExHandle(full_cname=full_cname, exclude=exclude)
aobj = exobj.add_exception
# Trace functions
dobj = Class1(exobj)
dobj.value3 = 5
print(dobj.value3)
del dobj.value3
cdb = exobj._ex_dict
func1()
func2("world")
func7()
func8()
func9()
func10()
func11()
func12()
func13()
func14()
func15(exobj)
exh_support_module_1.func16(exobj)
# Define references
cpath = lambda msg: 'Class1.value3({0})'.format(msg)
fkeys = [
'first_exception', 'second_exception', 'third_exception',
'setter_exception', 'getter_exception', 'deleter_exception',
emsg(7), emsg(8), emsg(9), emsg(10), emsg(11), emsg(12),
emsg(13), emsg(14), emsg(15), emsg(16), 'dummy_exception'
]
fnames = [
'func1', 'func2', 'func2',
cpath('setter'), cpath('getter'), cpath('deleter'),
'func7', 'func8', 'func9', 'func10', 'func11', 'func12',
'func13', 'func14', '', '/exh_support_module_1.func16',
'Class1.value4(getter)',
]
if full_cname and exclude:
template = root+'/'+CTNAME+'.'
func_name = 'test_add_exception.'
fnames = (
[template+func_name+item for item in fnames[0:14]]+
[root, root+'/exh_support_module_1.func16']+
                [template+func_name+item for item in fnames[-1:]]
)
fexh = [TypeError, ValueError, OSError]+13*[TypeError]+[OSError]
fdesc = [
dmsg('first'), dmsg('second'), dmsg('third'),
pmsg('Set'), pmsg('Get'), pmsg('Delete'),
mmsg(7), mmsg(8), mmsg(9), mmsg(10), mmsg(11), mmsg(12),
mmsg(13), mmsg(14), mmsg(15), mmsg(16), 'Bypass exception'
]
# Test that exceptions have been added correctly to handler
assert cdb
cdb = exobj._flatten_ex_dict()
for exname in cdb:
erec = cdb[exname]
iobj = zip(fnames, fexh, fdesc)
match_dict = dict(
[(key, value) for key, value in zip(fkeys, iobj)]
)
for key, ttuple in match_dict.items():
if re.compile(r'\d+/'+key).match(exname):
break
else:
raise RuntimeError('Callable not found')
if full_cname and exclude:
assert erec['function'][0] == exobj.encode_call(ttuple[0])
else:
if not full_cname:
assert erec['function'][0] is None
else:
assert re.compile(('.+/{0}'+ttuple[0]).format(root))
assert erec['type'] == ttuple[1]
assert erec['msg'] == ttuple[2]
# Test that function IDs are unique
        mdict = collections.defaultdict(list)
        for exname in cdb:
            func_id = exname.split('/')[0]
            mdict[func_id].append(exname)
        for func_id, exnames in mdict.items():
            if len(exnames) != len(set(exnames)):
                raise RuntimeError('Functions do not have unique IDs')
# Test that exec code gets correctly flagged
frobj = sys._getframe(0)
gcf = exobj._get_callable_full_name
for item in [func15, None]:
assert gcf(frobj, '<module>', item) == 'dynamic'
# Test what happens when top of stack is reached
exobj = putil.exh.ExHandle(full_cname=True, exclude=['_pytest'])
obj = exobj.add_exception
def func_f():
obj(emsg('F'), TypeError, mmsg('F'))
def mock_get_frame(num):
if num < 4:
return frobj
raise ValueError('Top of the stack')
frobj = sys._getframe(0)
cname = 'putil.exh.sys._getframe'
with mock.patch(cname, side_effect=mock_get_frame):
func_f()
ecb = exobj._flatten_ex_dict()
exname = list(ecb.keys())[0]
erec = ecb[exname]
ref = sorted(
{
'function': [root+'/'+root+'/'+root+'/'+root],
'type':TypeError,
'msg':mmsg('F'),
'raised': [False]
}.items()
)
assert re.compile(r'\d+/'+emsg('F')).match(exname)
erec['function'] = [
exobj.decode_call(call) for call in erec['function']
]
assert sorted(erec.items()) == ref
###
# Test property search
###
exobj = putil.exh.ExHandle(
full_cname=True, exclude=['_pytest', 'tests.test_exh']
)
cobj = exh_support_module_2.MyClass(exobj)
cobj.value = 5
cdb = cobj._exhobj._flatten_ex_dict()
assert len(list(cdb.keys())) == 1
key = list(cdb.keys())[0]
item = cdb[key]
assert cobj._exhobj.decode_call(item['function'][0]).endswith(
'exh_support_module_2.MyClass.value(setter)'
)
assert item['msg'] == 'Illegal value'
###
# Test exclude: test without exclusion and with exclusion,
# the function name should be 'None'
###
# Test with function that has a contract decorator
putil.exh.set_exh_obj(
putil.exh.ExHandle(full_cname=True, exclude=['_pytest'])
)
_ = putil.eng.peng(15, 3, False)
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert item['function'][0]
putil.exh.set_exh_obj(
putil.exh.ExHandle(
full_cname=True, exclude=['_pytest', 'putil.eng']
)
)
_ = putil.eng.peng(15, 3, False)
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert not item['function'][0]
# Test with function that has an exception in body
import tests.support.exh_support_module_1
putil.exh.set_exh_obj(
putil.exh.ExHandle(full_cname=True, exclude=['_pytest'])
)
tests.support.exh_support_module_1.simple_exception()
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert item['function'][0]
putil.exh.set_exh_obj(
putil.exh.ExHandle(
full_cname=True,
exclude=['_pytest', 'tests.support.exh_support_module_1']
)
)
tests.support.exh_support_module_1.simple_exception()
cdb = putil.exh.get_exh_obj()._flatten_ex_dict()
for item in cdb.values():
assert not item['function'][0]
def test_raise_exception_if_exceptions(self):
""" Test raise_exception_if method exceptions """
# pylint: disable=W0702
# Helper functions
def check_rec(mname, extype, exmsg):
assert erec['function'].endswith('{0}'.format(mname))
assert erec['type'] == extype
assert erec['msg'] == exmsg
assert erec['raised']
def check_str(exobj, slist, llist):
root = CTNAME+'.test_raise_exception_if_exceptions'
ref = [
'{0}/{0}.func_top/{0}.func_mid/{0}.func_base'.format(root),
'{0}/{0}.func_base'.format(root),
'{0}/{0}.func_mid/{0}.func_base'.format(root)
]
pb = lambda x, nflip: x if nflip else (not x)
alist = ['*' if item else '' for item in llist]
cdb_int = exobj._flatten_ex_dict()
            entry = cdb_int[list(cdb_int.keys())[0]]
assert (
[exobj.decode_call(item) for item in entry['function']] == ref
)
assert entry['raised'] == llist
stxt = str(exobj).split('\n')[3:]
plist = [item.endswith(' [raised]') for item in stxt]
assert all([pb(item, sitem) for item, sitem in zip(plist, slist)])
db = exobj.exceptions_db
exmsg = 'RuntimeError (Invalid condition)'
for num in range(0, 3):
assert db[num]['data'] == exmsg+alist[num]
for num, fname in zip(range(0, 3), ref):
assert db[num]['name'] == fname
# Tests
obj = putil.exh.ExHandle()
def func3(cond1=False, cond2=False, cond3=False, cond4=False):
exobj = putil.exh.ExHandle()
items = (
(RuntimeError, 'This is an exception'),
(OSError, 'This is an exception with a *[fname]* field')
)
for num, (extype, exmsg) in enumerate(items):
exobj.add_exception('my_exception'+str(num+1), extype, exmsg)
exobj.raise_exception_if('my_exception1', cond1, edata=None)
edata = {'field':'fname', 'value':'my_file.txt'}
exobj.raise_exception_if('my_exception2', cond2, edata=edata)
if cond3:
exobj.raise_exception_if('my_exception3', False)
edata = {'field':'not_a_field', 'value':'my_file.txt'}
exobj.raise_exception_if('my_exception2', cond4, edata=edata)
return exobj
robj = obj.raise_exception_if
AI(robj, 'exname', exname=5, condition=False)
AI(robj, 'condition', exname='my_exception', condition=5)
items = [
354,
{'field':'my_field'},
{'field':3, 'value':5},
{'value':5},
[{'field':'my_field1', 'value':5}, {'field':'my_field'}],
[{'field':'my_field1', 'value':5}, {'field':3, 'value':5}],
[{'field':'my_field1', 'value':5}, {'value':5}]
]
for item in items:
AI(robj, 'edata', 'my_exception', False, item)
AE(func3, RuntimeError, 'This is an exception', True, False)
exmsg = 'This is an exception with a my_file.txt field'
AE(func3, OSError, exmsg, cond2=True)
exmsg = 'Exception name my_exception3 not found'
AE(func3, ValueError, exmsg, cond3=True)
exmsg = 'Field not_a_field not in exception message'
AE(func3, RuntimeError, exmsg, cond4=True)
# Test that edata=None works
exobj = func3()
cdb = exobj._flatten_ex_dict()
if not cdb:
pytest.fail('_ex_dict empty')
for exname, erec in cdb.items():
mname = 'test_exh.test_raise_exception.func3'
if exname.endswith('/{0}.my_exception1'.format(mname)):
check_rec(mname, RuntimeError, 'This is an exception')
if exname.endswith('/{0}.my_exception2'.format(mname)):
msg = 'This is an exception with a *[fname]* field'
check_rec(mname, OSError, msg)
exobj = putil.exh.ExHandle(full_cname=True)
def func_base(exobj, cond):
""" Test raised field """
exobj.add_exception(
'multi_path_exception', RuntimeError, 'Invalid condition'
)
exobj.raise_exception_if(
exname='multi_path_exception', condition=cond
)
def func_mid(exobj, cond):
""" Add multi-path to exception object """
func_base(exobj, cond)
def func_top(exobj, cond):
""" Add another multi-path to exception object """
func_mid(exobj, cond)
# Mangle "natural" order to test __str__, which
# sorts the function names
func_top(exobj, False)
func_base(exobj, False)
func_mid(exobj, False)
fpointers = [func_mid, func_top, func_base]
slist = [[False, True, False], [False, True, True], [True, True, True]]
llist = [[False, False, True], [True, False, True], [True, True, True]]
for fpointer, sitem, litem in zip(fpointers, slist, llist):
try:
fpointer(exobj, True)
except:
pass
check_str(exobj, sitem, litem)
def test_exceptions_db(self):
""" Test _exceptions_db property behavior """
for full_cname in [True, False]:
# Functions definitions
def func4(exobj):
exobj.add_exception(
'my_exception1', RuntimeError, 'This is exception #1'
)
def func5(exobj):
exobj.add_exception(
'my_exception2',
ValueError,
'This is exception #2, *[result]*'
)
exobj.add_exception(
'my_exception3', TypeError, 'This is exception #3'
)
exobj = putil.exh.ExHandle(full_cname)
func4(exobj)
func5(exobj)
# Actual tests
# Test that property cannot be deleted
AROPROP(exobj, 'exceptions_db')
# Test contents
tdata_in = exobj.exceptions_db
if (not tdata_in) or (len(tdata_in) != 3):
pytest.fail('Erroneous exceptions database')
tdata_out = list()
regtext1 = r'[\w|\W]+/'+CTNAME+'.test_exceptions_db.func4'
regtext2 = r'[\w|\W]+/'+CTNAME+'.test_exceptions_db.func5'
regtext3 = r'\d+/my_exception[2-3]'
cname = CTNAME+'.test_exceptions_db'
for erec in tdata_in:
name = None
if full_cname:
if re.compile(regtext1).match(erec['name']):
name = '{0}.func4'.format(cname)
elif re.compile(regtext2).match(erec['name']):
name = '{0}.func5'.format(cname)
else:
if re.compile(r'\d+/my_exception1').match(erec['name']):
name = '{0}.func4'.format(cname)
elif re.compile(regtext3).match(erec['name']):
name = '{0}.func5'.format(cname)
if not name:
pytest.fail('Exception not found')
tdata_out.append({'name':name, 'data':erec['data']})
ref = [
{
'name':'{0}.func4'.format(cname),
'data':'RuntimeError (This is exception #1)'
},
{
'name':'{0}.func5'.format(cname),
'data':'ValueError (This is exception #2, *[result]*)'
},
{
'name':'{0}.func5'.format(cname),
'data':'TypeError (This is exception #3)'
}
]
assert CLDICTS(tdata_out, ref)
def test_save_callables(self):
""" Test save_callables method behavior """
obj1 = putil.pinspect.Callables([PENG_FNAME])
with putil.misc.TmpFile() as fname1:
with putil.misc.TmpFile() as fname2:
callables_fname1 = fname1
callables_fname2 = fname2
obj1.save(callables_fname1)
obj2 = putil.exh.ExHandle(callables_fname=callables_fname1)
obj2.save_callables(callables_fname2)
obj3 = putil.pinspect.Callables()
obj3.load(callables_fname2)
assert obj1 == obj3
def test_save_callables_exceptions(self):
""" Test save_callables method exceptions """
obj = putil.exh.ExHandle()
AI(obj.save_callables, 'callables_fname', True)
def test_callables_db(self):
""" Test callables_db property behavior """
# Function definitions
def func6(exobj):
exobj.add_exception(
'my_exception', RuntimeError, 'This is an exception'
)
return exobj
# Actual tests
exobj = func6(putil.exh.ExHandle())
# Actual contents of what is returned should be checked
# in pinspect module
assert exobj.callables_db is not None
# Test that property cannot be deleted
AROPROP(exobj, 'callables_db')
def test_callables_separator(self):
""" Test callables_separator property behavior """
exobj = putil.exh.ExHandle()
# Actual contents of what is returned should be checked in
# pinspect module
assert exobj.callables_separator == '/'
# Test that property cannot be deleted
AROPROP(exobj, 'callables_separator')
def test_str(self):
""" Test __str__ method behavior """
for full_cname in [True, False]:
# Functions definition
def func7(exobj):
exobj.add_exception(
'my_exception7', RuntimeError, 'This is exception #7'
)
exobj.raise_exception_if('my_exception7', False)
def func8(exobj):
exobj.add_exception(
'my_exception8',
ValueError,
'This is exception #8, *[fname]*'
)
exobj.add_exception(
'my_exception9', TypeError, 'This is exception #9'
)
exobj = putil.exh.ExHandle(full_cname)
func7(exobj)
func8(exobj)
# Actual tests
str_in = str(exobj).split('\n\n')
str_out = list()
cname = 'test_exh.TestExHandle.test_str'
for str_element in str_in:
str_list = str_element.split('\n')
if str_list[0].endswith('/my_exception7'):
str_list[0] = (
'Name : {0}.func7/my_exception7'.format(cname)
)
elif str_list[0].endswith('/my_exception8'):
str_list[0] = (
'Name : {0}.func8/my_exception8'.format(cname)
)
elif str_list[0].endswith('/my_exception9'):
str_list[0] = (
'Name : {0}.func8/my_exception9'.format(cname)
)
if str_list[3].endswith('{0}.func7'.format(cname)):
str_list[3] = 'Function: {0}'.format(
'{0}.func7'.format(cname) if full_cname else 'None'
)
elif str_list[3].endswith('{0}.func8'.format(cname)):
str_list[3] = 'Function: {0}'.format(
'{0}.func8'.format(cname) if full_cname else 'None'
)
str_out.append('\n'.join(str_list))
#
str_check = list()
str_check.append(
'Name : '+cname+'.func7/my_exception7\n'
'Type : RuntimeError\n'
'Message : This is exception #7\n'
'Function: {name}'.format(
name='{0}.func7'.format(cname) if full_cname else 'None'
)
)
str_check.append(
'Name : '+cname+'.func8/my_exception8\n'
'Type : ValueError\n'
'Message : This is exception #8, *[fname]*\n'
'Function: {name}'.format(
name='{0}.func8'.format(cname) if full_cname else 'None'
)
)
str_check.append(
'Name : '+cname+'.func8/my_exception9\n'
'Type : TypeError\n'
'Message : This is exception #9\n'
'Function: {name}'.format(
name='{0}.func8'.format(cname) if full_cname else 'None'
)
)
if sorted(str_out) != sorted(str_check):
print('\n\nActual output:\n{text}'.format(
text='\n'.join(sorted(str_out))
)
)
print('\n\nReference output\n{text}'.format(
text='\n'.join(sorted(str_check))
)
)
assert sorted(str_out) == sorted(str_check)
def test_copy(self):
""" Test __copy__ method behavior """
# Functions definition
def funca(exobj):
exobj.add_exception(
'my_exceptionA', RuntimeError, 'This is exception #A'
)
def funcb(exobj):
exobj.add_exception(
'my_exceptionB', ValueError, 'This is exception #B'
)
exobj.add_exception(
'my_exceptionC', TypeError, 'This is exception #C'
)
class Clsc(object):
def __init__(self, exobj):
self._exobj = exobj
self._value = None
def _set_value(self, value):
self._exobj.add_exception(
'my_exceptionD', OSError, 'This is exception #D'
)
self._value = value
value = property(None, _set_value, None, doc='Value property')
source_obj = putil.exh.ExHandle(full_cname=True)
funca(source_obj)
funcb(source_obj)
obj = Clsc(source_obj)
obj.value = 5
# Actual tests
dest_obj = copy.copy(source_obj)
assert source_obj._ex_dict == dest_obj._ex_dict
assert id(source_obj._ex_dict) != id(dest_obj._ex_dict)
assert source_obj._callables_obj == dest_obj._callables_obj
assert id(source_obj._callables_obj) != id(dest_obj._callables_obj)
assert source_obj._clut == dest_obj._clut
assert id(source_obj._clut) != id(dest_obj._clut)
assert source_obj._full_cname == dest_obj._full_cname
assert CLDICTS(source_obj.exceptions_db, dest_obj.exceptions_db)
def test_multiple_paths_to_same_exception(self):
"""
Test that different paths to a single exception definition do not
overwrite each other
"""
def exdef(obj):
obj.add_exception(
'my_exception', RuntimeError, 'This is the exception'
)
def funca(obj):
exdef(obj)
def funcb(obj):
exdef(obj)
exobj = putil.exh.ExHandle(full_cname=True)
funca(exobj)
funcb(exobj)
exdb = sorted(exobj.exceptions_db, key=lambda item: item['name'])
assert len(exdb) == 2
assert exdb[0]['data'] == 'RuntimeError (This is the exception)'
assert exdb[1]['data'] == 'RuntimeError (This is the exception)'
cname = CTNAME+'.test_multiple_paths_to_same_exception'
assert exdb[0]['name'].endswith(
'{0}/{0}.funca/{0}.exdef'.format(cname)
)
assert exdb[1]['name'].endswith(
'{0}/{0}.funcb/{0}.exdef'.format(cname)
)
str_in = putil.misc.flatten_list(
[item.split('\n') for item in str(exobj).split('\n\n')]
)
fstring = cname+'/'+cname+'.func{0}/'+cname+'.exdef'
assert str_in[0].endswith('/my_exception')
assert str_in[1] == 'Type : RuntimeError'
assert str_in[2] == 'Message : This is the exception'
assert str_in[3].startswith('Function: ')
assert (
str_in[3].endswith(fstring.format('a')) or
str_in[3].endswith(fstring.format('b'))
)
assert str_in[4].startswith(' '*10)
assert str_in[4].endswith(
fstring.format(
'a' if str_in[3].endswith(fstring.format('b')) else 'b'
)
)
def test_add(self):
""" Test __add__ method behavior """
scomp = lambda a, b: bool(sorted(a) == sorted(b))
def check_add(sobj):
ex_dict_ref = {
'call1':{'a':5, 'b':6},
'call2':{'a':7, 'b':8},
'call3':{'a':10, 'b':100},
'call4':{'a':200, 'b':300}
}
c_ref = {'id1':5, 'ssid1':10, 'id2':3, 'ssid2':1}
rcdb_ref = {'rc1':5, 'rc2':7, 'rc3':0, 'rc4':-1}
mod_ref = {
'key1':'alpha', 'key2':'beta', 'key3':'pi', 'key4':'gamma'
}
fnames_ref = {'hello':0, 'world':1}
mlist_ref = ['this', 'is', 'a', 'test']
cls_ref = ['once', 'upon', 'a', 'time']
assert scomp(sobj._ex_dict, c_ref)
cobj = sobj._callables_obj
assert scomp(cobj._callables_db, ex_dict_ref)
assert scomp(cobj._reverse_callables_db, rcdb_ref)
assert scomp(cobj._modules_dict, mod_ref)
assert scomp(cobj._fnames, fnames_ref)
assert scomp(cobj._module_names, mlist_ref)
assert scomp(cobj._class_names, cls_ref)
def comp_objs(obj):
ref = {
CTNAME+'.test_add': '0',
CTNAME+'.test_add.func1': '1',
CTNAME+'.test_add.func2': '3',
CTNAME+'.test_add.func3': '2'
}
assert obj._clut == ref
nref = [
'copy_exception_1',
'copy_exception_2',
'copy_exception_3',
'contract:putil.eng.peng.frac_length_0',
'contract:putil.eng.peng.number_0',
'contract:putil.eng.peng.rjust_0',
]
alist = [
item.split(obj._callables_separator)[1]
for item in obj._flatten_ex_dict()
]
assert sorted(alist) == sorted(nref)
for key, value in obj._flatten_ex_dict().items():
name = key.split(obj._callables_separator)[1]
if name == 'copy_exception_1':
assert value['function'] == ['0/1']
elif name == 'copy_exception_2':
assert value['function'] == ['0/3']
elif name == 'copy_exception_3':
assert value['function'] == ['0/2']
else:
assert value['function'] == [None]
# pylint: disable=W0104
obj1 = putil.exh.ExHandle(_copy=True)
obj1._ex_dict = {'id1':5, 'ssid1':10}
obj1._callables_obj = putil.pinspect.Callables()
obj1._callables_obj._callables_db = {
'call1':{'a':5, 'b':6},
'call2':{'a':7, 'b':8}
}
obj1._callables_obj._reverse_callables_db = {'rc1':5, 'rc2':7}
obj1._callables_obj._modules_dict = {'key1':'alpha', 'key2':'beta'}
obj1._callables_obj._fnames = {'hello':0}
obj1._callables_obj._module_names = ['this', 'is']
obj1._callables_obj._class_names = ['once', 'upon']
#
obj2 = putil.exh.ExHandle(_copy=True)
obj2._ex_dict = {'id2':3, 'ssid2':1}
obj2._callables_obj = putil.pinspect.Callables()
obj2._callables_obj._callables_db = {
'call3':{'a':10, 'b':100},
'call4':{'a':200, 'b':300}
}
obj2._callables_obj._reverse_callables_db = {'rc3':0, 'rc4':-1}
obj2._callables_obj._modules_dict = {'key3':'pi', 'key4':'gamma'}
obj2._callables_obj._fnames = {'world':0}
obj2._callables_obj._module_names = ['a', 'test']
obj2._callables_obj._class_names = ['a', 'time']
#
check_add(obj1+obj2)
obj1 += obj2
check_add(obj1)
# Incompatible types
with pytest.raises(TypeError) as excinfo:
obj1+5
msg = 'Unsupported operand type(s) for +: putil.exh.ExHandle and int'
assert GET_EXMSG(excinfo) == msg
# Incompatible types
with pytest.raises(TypeError) as excinfo:
obj1 += 5
assert GET_EXMSG(excinfo) == msg
#
obj2._full_cname = True
with pytest.raises(RuntimeError) as excinfo:
obj1+obj2
msg = 'Incompatible exception handlers'
assert GET_EXMSG(excinfo) == msg
with pytest.raises(RuntimeError) as excinfo:
obj1 += obj2
assert GET_EXMSG(excinfo) == msg
obj2._full_cname = False
obj2._exclude = ['_pytest']
with pytest.raises(RuntimeError) as excinfo:
obj1+obj2
assert GET_EXMSG(excinfo) == msg
with pytest.raises(RuntimeError) as excinfo:
obj1 += obj2
assert GET_EXMSG(excinfo) == msg
obj2._exclude = None
# Test re-mapping of callables look-up table
obj1 = putil.exh.ExHandle(full_cname=True, exclude=['putil.eng'])
obj2 = putil.exh.ExHandle(full_cname=True, exclude=['putil.eng'])
def func1(exhobj):
exhobj.add_exception(
'copy_exception_1', TypeError, 'Copy exception #1'
)
def func2(exhobj):
exhobj.add_exception(
'copy_exception_2', RuntimeError, 'Copy exception #2'
)
def func3(exhobj):
exhobj.add_exception(
'copy_exception_3', ValueError, 'Copy exception #3'
)
putil.exh.del_exh_obj()
putil.exh.get_or_create_exh_obj(
full_cname=True, exclude=['putil.eng']
)
putil.eng.peng(1, 3, False)
exhobj += putil.exh.get_exh_obj()
putil.exh.del_exh_obj()
# Test __add__
func1(obj1)
func3(obj1)
func1(obj2)
func2(obj2)
func3(obj2)
obj1_ref = copy.copy(obj1)
obj2_ref = copy.copy(obj2)
assert obj1_ref == obj1
assert obj2_ref == obj2
obj3 = obj1+obj2
assert obj1_ref == obj1
assert obj2_ref == obj2
comp_objs(obj3)
# Test __iadd__
obj1 = putil.exh.ExHandle(full_cname=True, exclude=['putil.eng'])
obj2 = putil.exh.ExHandle(full_cname=True, exclude=['putil.eng'])
func1(obj1)
func3(obj1)
func1(obj2)
func2(obj2)
func3(obj2)
obj2_ref = copy.copy(obj2)
assert obj2_ref == obj2
obj1 += obj2
assert obj2_ref == obj2
comp_objs(obj1)
def test_eq(self):
""" Test __eq__ method behavior """
putil.exh.get_or_create_exh_obj()
# Trace some exceptions
putil.eng.peng(100, 3, True)
obj1 = putil.exh.get_exh_obj()
obj2 = copy.copy(obj1)
assert obj1 == obj2
assert obj1 != 5
def test_nonzero(self):
""" Test __nonzero__ method behavior """
exhobj = putil.exh.ExHandle()
assert not exhobj
def my_func(exhobj):
exhobj.add_exception('test', RuntimeError, 'Message')
my_func(exhobj)
assert exhobj
| 40.3549
| 79
| 0.544833
|
7949e826c413c242d31fecc441782f399f92ab57
| 63,706
|
bzl
|
Python
|
tools/build_variables.bzl
|
Zhaopudark/pytorch
|
8559d39cf032a9118c470ebe3a5047f89d94fe5c
|
[
"Intel"
] | 1
|
2022-02-05T18:15:29.000Z
|
2022-02-05T18:15:29.000Z
|
tools/build_variables.bzl
|
Zhaopudark/pytorch
|
8559d39cf032a9118c470ebe3a5047f89d94fe5c
|
[
"Intel"
] | null | null | null |
tools/build_variables.bzl
|
Zhaopudark/pytorch
|
8559d39cf032a9118c470ebe3a5047f89d94fe5c
|
[
"Intel"
] | null | null | null |
# In both open-source and fbcode builds, these are generated into
# torch/csrc/{autograd,jit}/generated.i
GENERATED_CPP = [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
"autograd/generated/python_functions_0.cpp",
"autograd/generated/python_functions_1.cpp",
"autograd/generated/python_functions_2.cpp",
"autograd/generated/python_functions_3.cpp",
"autograd/generated/python_functions_4.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_return_types.cpp",
"autograd/generated/python_sparse_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions_0.cpp",
"autograd/generated/python_torch_functions_1.cpp",
"autograd/generated/python_torch_functions_2.cpp",
"autograd/generated/python_variable_methods.cpp",
]
# NVFuser runtime library
libtorch_nvfuser_runtime_sources = [
"torch/csrc/jit/codegen/cuda/runtime/bf16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_sync_atomic.cu",
"torch/csrc/jit/codegen/cuda/runtime/block_sync_default.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/fp16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_sync.cu",
"torch/csrc/jit/codegen/cuda/runtime/helpers.cu",
"torch/csrc/jit/codegen/cuda/runtime/index_utils.cu",
"torch/csrc/jit/codegen/cuda/runtime/random_numbers.cu",
"torch/csrc/jit/codegen/cuda/runtime/tensor.cu",
"torch/csrc/jit/codegen/cuda/runtime/welford.cu",
"torch/csrc/jit/codegen/cuda/runtime/warp.cu",
"aten/src/ATen/cuda/detail/PhiloxCudaStateRaw.cuh",
"aten/src/ATen/cuda/detail/UnpackRaw.cuh",
]
libtorch_nvfuser_generated_headers = ["{}.h".format(name.split("/")[-1].split(".")[0]) for name in libtorch_nvfuser_runtime_sources]
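# For example (illustrative): "torch/csrc/jit/codegen/cuda/runtime/welford.cu"
# yields "welford.h", and "aten/src/ATen/cuda/detail/UnpackRaw.cuh" yields
# "UnpackRaw.h".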
def libtorch_generated_sources(gencode_pattern):
return [gencode_pattern.format(name) for name in [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
]]
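# Illustrative call (the real pattern is supplied by the build rules that
# consume this file; "torch/csrc/{}" below is a hypothetical example):
# libtorch_generated_sources("torch/csrc/{}") expands every entry, so the
# first element becomes "torch/csrc/autograd/generated/Functions.cpp".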
# copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt
jit_core_headers = [
"torch/csrc/utils/memory.h",
"torch/csrc/Export.h",
"torch/csrc/jit/frontend/source_range.h",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.h",
"torch/csrc/jit/serialization/source_range_serialization.h",
"torch/csrc/jit/frontend/lexer.h",
"torch/csrc/jit/frontend/strtod.h",
"torch/csrc/jit/frontend/parser_constants.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
"torch/csrc/jit/frontend/parse_string_literal.h",
"torch/csrc/jit/frontend/schema_type_parser.h",
"torch/csrc/jit/frontend/error_report.h",
"torch/csrc/jit/frontend/tree.h",
"torch/custom_class.h",
"torch/custom_class_detail.h",
"torch/library.h",
]
jit_core_sources = [
"torch/csrc/jit/frontend/error_report.cpp",
"torch/csrc/jit/frontend/function_schema_parser.cpp",
"torch/csrc/jit/frontend/lexer.cpp",
"torch/csrc/jit/frontend/schema_type_parser.cpp",
"torch/csrc/jit/frontend/strtod.cpp",
"torch/csrc/jit/frontend/source_range.cpp",
]
# copied from https://github.com/pytorch/pytorch/blob/0bde610c14b92d351b968a0228df29e92442b1cc/torch/CMakeLists.txt
# There are some common files used in both internal lite-interpreter and full-jit. Making a separate
# list for the shared files.
core_sources_common = [
"torch/csrc/autograd/autograd_meta.cpp",
"torch/csrc/autograd/forward_grad.cpp",
"torch/csrc/jit/frontend/edit_distance.cpp",
"torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp",
"torch/csrc/jit/mobile/type_parser.cpp",
"torch/csrc/jit/operator_upgraders/upgraders_guard.cpp",
"torch/csrc/jit/operator_upgraders/version_map.cpp",
"torch/csrc/jit/runtime/instruction.cpp",
"torch/csrc/jit/runtime/jit_exception.cpp",
"torch/csrc/jit/runtime/operator.cpp",
"torch/csrc/jit/mobile/register_ops_common_utils.cpp",
"torch/csrc/jit/runtime/print_handler.cpp",
"torch/csrc/jit/runtime/slice_indices_adjust.cpp",
"torch/csrc/jit/runtime/register_ops_utils.cpp",
"torch/csrc/jit/runtime/vararg_functions.cpp",
"torch/csrc/jit/mobile/promoted_prim_ops.cpp",
"torch/csrc/jit/mobile/prim_ops_registery.cpp",
"torch/csrc/profiler/util.cpp",
]
torch_unpickler_common = [
"torch/csrc/jit/serialization/import_read.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
libtorch_sources_common = sorted(core_sources_common + torch_unpickler_common)
# The profilers are not needed in the lite interpreter build.
libtorch_profiler_sources = [
"torch/csrc/autograd/profiler_legacy.cpp",
"torch/csrc/autograd/profiler_kineto.cpp",
"torch/csrc/profiler/api.cpp",
"torch/csrc/profiler/kineto_shim.cpp",
"torch/csrc/profiler/nvtx_observer.cpp",
"torch/csrc/monitor/counters.cpp",
"torch/csrc/monitor/events.cpp",
]
libtorch_edge_profiler_sources = libtorch_profiler_sources + [
"torch/csrc/jit/mobile/profiler_edge.cpp",
]
core_trainer_sources = [
"torch/csrc/autograd/anomaly_mode.cpp",
"torch/csrc/autograd/autograd.cpp",
"torch/csrc/autograd/autograd_not_implemented_fallback.cpp",
"torch/csrc/autograd/cpp_hook.cpp",
"torch/csrc/autograd/custom_function.cpp",
"torch/csrc/autograd/engine.cpp",
"torch/csrc/autograd/function.cpp",
"torch/csrc/autograd/function_hook.cpp",
"torch/csrc/autograd/functions/accumulate_grad.cpp",
"torch/csrc/autograd/functions/basic_ops.cpp",
"torch/csrc/autograd/functions/tensor.cpp",
"torch/csrc/autograd/functions/utils.cpp",
"torch/csrc/autograd/input_buffer.cpp",
"torch/csrc/autograd/record_function_ops.cpp",
"torch/csrc/autograd/saved_variable.cpp",
"torch/csrc/autograd/variable.cpp",
"torch/csrc/autograd/utils/warnings.cpp",
"torch/csrc/jit/frontend/name_mangler.cpp",
"torch/csrc/jit/ir/type_hashing.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/type_name_uniquer.cpp",
]
core_sources_full_mobile_no_backend_interface = [
"torch/csrc/jit/api/function_impl.cpp",
"torch/csrc/jit/api/module.cpp",
"torch/csrc/jit/api/object.cpp",
"torch/csrc/jit/backends/backend_debug_handler.cpp",
"torch/csrc/jit/backends/backend_detail.cpp",
"torch/csrc/jit/backends/backend_resolver.cpp",
"torch/csrc/jit/codegen/fuser/codegen.cpp",
"torch/csrc/jit/codegen/fuser/compiler.cpp",
"torch/csrc/jit/codegen/fuser/executor.cpp",
"torch/csrc/jit/codegen/fuser/fallback.cpp",
"torch/csrc/jit/codegen/fuser/interface.cpp",
"torch/csrc/jit/codegen/fuser/kernel_cache.cpp",
"torch/csrc/jit/frontend/builtin_functions.cpp",
"torch/csrc/jit/frontend/versioned_symbols.cpp",
"torch/csrc/jit/frontend/canonicalize_modified_loop.cpp",
"torch/csrc/jit/frontend/convert_to_ssa.cpp",
"torch/csrc/jit/frontend/exit_transforms.cpp",
"torch/csrc/jit/frontend/inline_loop_condition.cpp",
"torch/csrc/jit/frontend/ir_emitter.cpp",
"torch/csrc/jit/frontend/parser.cpp",
"torch/csrc/jit/frontend/schema_matching.cpp",
"torch/csrc/jit/frontend/script_type_parser.cpp",
"torch/csrc/jit/frontend/sugared_value.cpp",
"torch/csrc/jit/frontend/tracer.cpp",
"torch/csrc/jit/ir/alias_analysis.cpp",
"torch/csrc/jit/ir/attributes.cpp",
"torch/csrc/jit/ir/constants.cpp",
"torch/csrc/jit/ir/ir.cpp",
"torch/csrc/jit/ir/irparser.cpp",
"torch/csrc/jit/ir/node_hashing.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/ir/subgraph_matcher.cpp",
"torch/csrc/jit/jit_log.cpp",
"torch/csrc/jit/jit_opt_limit.cpp",
"torch/csrc/jit/mobile/nnc/aot_compiler.cpp",
"torch/csrc/jit/mobile/nnc/backend.cpp",
"torch/csrc/jit/mobile/nnc/context.cpp",
"torch/csrc/jit/mobile/nnc/registry.cpp",
"torch/csrc/jit/operator_upgraders/utils.cpp",
"torch/csrc/jit/operator_upgraders/upgraders.cpp",
"torch/csrc/jit/operator_upgraders/upgraders_entry.cpp",
"torch/csrc/jit/passes/annotate_warns.cpp",
"torch/csrc/jit/passes/bailout_graph.cpp",
"torch/csrc/jit/passes/batch_mm.cpp",
"torch/csrc/jit/passes/canonicalize.cpp",
"torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp",
"torch/csrc/jit/passes/clear_profiling.cpp",
"torch/csrc/jit/passes/clear_undefinedness.cpp",
"torch/csrc/jit/passes/common_subexpression_elimination.cpp",
"torch/csrc/jit/passes/common_expression_hoisting.cpp",
"torch/csrc/jit/passes/concat_opt.cpp",
"torch/csrc/jit/passes/constant_pooling.cpp",
"torch/csrc/jit/passes/constant_propagation.cpp",
"torch/csrc/jit/passes/restore_mutation.cpp",
"torch/csrc/jit/passes/create_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/dead_code_elimination.cpp",
"torch/csrc/jit/passes/eliminate_no_ops.cpp",
"torch/csrc/jit/passes/remove_redundant_profiles.cpp",
"torch/csrc/jit/passes/remove_exceptions.cpp",
"torch/csrc/jit/passes/decompose_ops.cpp",
"torch/csrc/jit/passes/dtype_analysis.cpp",
"torch/csrc/jit/passes/device_type_analysis.cpp",
"torch/csrc/jit/passes/erase_number_types.cpp",
"torch/csrc/jit/passes/fixup_trace_scope_blocks.cpp",
"torch/csrc/jit/passes/freeze_module.cpp",
"torch/csrc/jit/passes/fuse_linear.cpp",
"torch/csrc/jit/passes/fuse_relu.cpp",
"torch/csrc/jit/passes/graph_fuser.cpp",
"torch/csrc/jit/passes/graph_rewrite_helper.cpp",
"torch/csrc/jit/passes/guard_elimination.cpp",
"torch/csrc/jit/passes/hoist_conv_packed_params.cpp",
"torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/inline_forked_closures.cpp",
"torch/csrc/jit/passes/inline_fork_wait.cpp",
"torch/csrc/jit/passes/inliner.cpp",
"torch/csrc/jit/passes/inplace_check.cpp",
"torch/csrc/jit/passes/insert_guards.cpp",
"torch/csrc/jit/passes/lift_closures.cpp",
"torch/csrc/jit/passes/liveness.cpp",
"torch/csrc/jit/passes/loop_unrolling.cpp",
"torch/csrc/jit/passes/lower_grad_of.cpp",
"torch/csrc/jit/passes/lower_tuples.cpp",
"torch/csrc/jit/passes/normalize_ops.cpp",
"torch/csrc/jit/passes/peephole_dict_idioms.cpp",
"torch/csrc/jit/passes/peephole_list_idioms.cpp",
"torch/csrc/jit/passes/value_refinement_utils.cpp",
"torch/csrc/jit/passes/peephole_alias_sensitive.cpp",
"torch/csrc/jit/passes/pass_manager.cpp",
"torch/csrc/jit/passes/peephole.cpp",
"torch/csrc/jit/passes/peephole_non_tensor.cpp",
"torch/csrc/jit/passes/create_functional_graphs.cpp",
"torch/csrc/jit/passes/remove_mutation.cpp",
"torch/csrc/jit/passes/prepack_folding.cpp",
"torch/csrc/jit/passes/fold_conv_bn.cpp",
"torch/csrc/jit/passes/frozen_concat_linear.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion.cpp",
"torch/csrc/jit/passes/frozen_conv_folding.cpp",
"torch/csrc/jit/passes/frozen_linear_transpose.cpp",
"torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp",
"torch/csrc/jit/passes/frozen_graph_optimizations.cpp",
"torch/csrc/jit/passes/remove_expands.cpp",
"torch/csrc/jit/passes/remove_dropout.cpp",
"torch/csrc/jit/passes/requires_grad_analysis.cpp",
"torch/csrc/jit/passes/shape_analysis.cpp",
"torch/csrc/jit/passes/integer_value_refinement.cpp",
"torch/csrc/jit/passes/replacement_of_old_operators.cpp",
"torch/csrc/jit/passes/symbolic_shape_analysis.cpp",
"torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp",
"torch/csrc/jit/passes/specialize_autogradzero.cpp",
"torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp",
"torch/csrc/jit/passes/variadic_ops.cpp",
"torch/csrc/jit/passes/subgraph_rewrite.cpp",
"torch/csrc/jit/passes/tensorexpr_fuser.cpp",
"torch/csrc/jit/passes/utils/memory_dag.cpp",
"torch/csrc/jit/passes/utils/subgraph_utils.cpp",
"torch/csrc/jit/passes/utils/optimization_utils.cpp",
"torch/csrc/jit/passes/utils/op_registry.cpp",
"torch/csrc/jit/passes/xnnpack_rewrite.cpp",
"torch/csrc/jit/passes/vulkan_rewrite.cpp",
"torch/csrc/jit/passes/metal_rewrite.cpp",
"torch/csrc/jit/passes/quantization/helper.cpp",
"torch/csrc/jit/passes/quantization/quantization_type.cpp",
"torch/csrc/jit/passes/quantization/insert_observers.cpp",
"torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp",
"torch/csrc/jit/passes/quantization/dedup_module_uses.cpp",
"torch/csrc/jit/passes/quantization/finalize.cpp",
"torch/csrc/jit/passes/quantization/fusion_passes.cpp",
"torch/csrc/jit/python/update_graph_executor_opt.cpp",
"torch/csrc/jit/runtime/argument_spec.cpp",
"torch/csrc/jit/runtime/autodiff.cpp",
"torch/csrc/jit/runtime/graph_executor.cpp",
"torch/csrc/jit/runtime/interpreter/frame.cpp",
"torch/csrc/jit/runtime/interpreter/preprocess_graph.cpp",
"torch/csrc/jit/runtime/interpreter.cpp",
"torch/csrc/jit/runtime/logging.cpp",
"torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp",
"torch/csrc/jit/runtime/profiling_record.cpp",
"torch/csrc/jit/runtime/script_profile.cpp",
"torch/csrc/jit/runtime/symbolic_script.cpp",
"torch/csrc/jit/runtime/symbolic_shape_registry.cpp",
"torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp",
"torch/csrc/jit/runtime/jit_trace.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/import.cpp",
"torch/csrc/jit/serialization/import_export_helpers.cpp",
"torch/csrc/jit/serialization/import_source.cpp",
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/python_print.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
"torch/csrc/jit/tensorexpr/block_codegen.cpp",
"torch/csrc/jit/tensorexpr/bounds_inference.cpp",
"torch/csrc/jit/tensorexpr/bounds_overlap.cpp",
"torch/csrc/jit/tensorexpr/codegen.cpp",
"torch/csrc/jit/tensorexpr/cpp_codegen.cpp",
"torch/csrc/jit/tensorexpr/eval.cpp",
"torch/csrc/jit/tensorexpr/expr.cpp",
"torch/csrc/jit/tensorexpr/external_functions_registry.cpp",
"torch/csrc/jit/tensorexpr/graph_opt.cpp",
"torch/csrc/jit/tensorexpr/hash_provider.cpp",
"torch/csrc/jit/tensorexpr/intrinsic_symbols.cpp",
"torch/csrc/jit/tensorexpr/ir.cpp",
"torch/csrc/jit/tensorexpr/ir_cloner.cpp",
"torch/csrc/jit/tensorexpr/ir_mutator.cpp",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
"torch/csrc/jit/tensorexpr/ir_simplifier.cpp",
"torch/csrc/jit/tensorexpr/ir_verifier.cpp",
"torch/csrc/jit/tensorexpr/ir_visitor.cpp",
"torch/csrc/jit/tensorexpr/kernel.cpp",
"torch/csrc/jit/tensorexpr/llvm_codegen.cpp",
"torch/csrc/jit/tensorexpr/llvm_jit.cpp",
"torch/csrc/jit/tensorexpr/loopnest.cpp",
"torch/csrc/jit/tensorexpr/loopnest_randomization.cpp",
"torch/csrc/jit/tensorexpr/lowerings.cpp",
"torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp",
"torch/csrc/jit/tensorexpr/operators/conv2d.cpp",
"torch/csrc/jit/tensorexpr/operators/matmul.cpp",
"torch/csrc/jit/tensorexpr/operators/misc.cpp",
"torch/csrc/jit/tensorexpr/operators/norm.cpp",
"torch/csrc/jit/tensorexpr/operators/pointwise.cpp",
"torch/csrc/jit/tensorexpr/operators/quantization.cpp",
"torch/csrc/jit/tensorexpr/operators/reduction.cpp",
"torch/csrc/jit/tensorexpr/operators/softmax.cpp",
"torch/csrc/jit/tensorexpr/reduction.cpp",
"torch/csrc/jit/tensorexpr/registerizer.cpp",
"torch/csrc/jit/tensorexpr/tensor.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
"torch/csrc/jit/tensorexpr/unique_name_manager.cpp",
"torch/csrc/jit/testing/file_check.cpp",
"torch/csrc/jit/testing/hooks_for_testing.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
]
core_sources_full_mobile = core_sources_full_mobile_no_backend_interface + [
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/backends/backend_interface.cpp",
]
core_sources_full = core_sources_full_mobile + [
"torch/csrc/jit/runtime/static/fusion.cpp",
"torch/csrc/jit/runtime/static/impl.cpp",
"torch/csrc/jit/runtime/static/memory_planner.cpp",
"torch/csrc/jit/runtime/static/native_ops.cpp",
"torch/csrc/jit/runtime/static/ops.cpp",
"torch/csrc/jit/runtime/static/passes.cpp",
"torch/csrc/jit/runtime/static/te_wrapper.cpp",
"torch/csrc/jit/tensorexpr/external_functions.cpp",
"torch/csrc/jit/tensorexpr/external_functions_codegen.cpp",
]
lazy_tensor_core_sources = [
"torch/csrc/lazy/backend/backend_device.cpp",
"torch/csrc/lazy/backend/backend_interface.cpp",
"torch/csrc/lazy/backend/lowering_context.cpp",
"torch/csrc/lazy/core/config.cpp",
"torch/csrc/lazy/core/hash.cpp",
"torch/csrc/lazy/core/helpers.cpp",
"torch/csrc/lazy/core/ir.cpp",
"torch/csrc/lazy/core/ir_dump_util.cpp",
"torch/csrc/lazy/core/ir_metadata.cpp",
"torch/csrc/lazy/core/ir_util.cpp",
"torch/csrc/lazy/core/lazy_graph_executor.cpp",
"torch/csrc/lazy/core/lazy_view.cpp",
"torch/csrc/lazy/core/metrics.cpp",
"torch/csrc/lazy/core/multi_wait.cpp",
"torch/csrc/lazy/core/permutation_util.cpp",
"torch/csrc/lazy/core/shape.cpp",
"torch/csrc/lazy/core/tensor.cpp",
"torch/csrc/lazy/core/tensor_impl.cpp",
"torch/csrc/lazy/core/tensor_util.cpp",
"torch/csrc/lazy/core/thread_pool.cpp",
"torch/csrc/lazy/core/view_ops/as_strided.cpp",
"torch/csrc/lazy/core/view_ops/as_strided_view_update.cpp",
"torch/csrc/lazy/core/view_ops/diagonal.cpp",
"torch/csrc/lazy/core/view_ops/diagonal_view_update.cpp",
"torch/csrc/lazy/core/view_ops/narrow.cpp",
"torch/csrc/lazy/core/view_ops/narrow_view_update.cpp",
"torch/csrc/lazy/core/view_ops/permute.cpp",
"torch/csrc/lazy/core/view_ops/resize.cpp",
"torch/csrc/lazy/core/view_ops/select.cpp",
"torch/csrc/lazy/core/view_ops/squeeze.cpp",
"torch/csrc/lazy/core/view_ops/unsqueeze.cpp",
"torch/csrc/lazy/core/view_ops/select_view_update.cpp",
"torch/csrc/lazy/core/view_ops/view.cpp",
"torch/csrc/lazy/ts_backend/config.cpp",
"torch/csrc/lazy/ts_backend/ops/arithmetic_ir_ops.cpp",
"torch/csrc/lazy/ts_backend/ops/cast.cpp",
"torch/csrc/lazy/ts_backend/ops/device_data.cpp",
"torch/csrc/lazy/ts_backend/ops/expand.cpp",
"torch/csrc/lazy/ts_backend/ops/generic.cpp",
"torch/csrc/lazy/ts_backend/ops/scalar.cpp",
"torch/csrc/lazy/ts_backend/ts_node.cpp",
]
libtorch_core_sources = sorted(
core_sources_common +
torch_unpickler_common +
core_sources_full +
core_trainer_sources +
libtorch_profiler_sources +
lazy_tensor_core_sources,
)
# These files are the only ones that are supported on Windows.
libtorch_distributed_base_sources = [
"torch/csrc/distributed/c10d/FileStore.cpp",
"torch/csrc/distributed/c10d/GlooDeviceFactory.cpp",
"torch/csrc/distributed/c10d/ParamCommsUtils.cpp",
"torch/csrc/distributed/c10d/PrefixStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroup.cpp",
"torch/csrc/distributed/c10d/ProcessGroupGloo.cpp",
"torch/csrc/distributed/c10d/ProcessGroupMPI.cpp",
"torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp",
"torch/csrc/distributed/c10d/Store.cpp",
"torch/csrc/distributed/c10d/TCPStore.cpp",
"torch/csrc/distributed/c10d/Utils.cpp",
"torch/csrc/distributed/c10d/comm.cpp",
"torch/csrc/distributed/c10d/default_comm_hooks.cpp",
"torch/csrc/distributed/c10d/exception.cpp",
"torch/csrc/distributed/c10d/logger.cpp",
"torch/csrc/distributed/c10d/reducer.cpp",
"torch/csrc/distributed/c10d/sequence_num.cpp",
"torch/csrc/distributed/c10d/socket.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_distributed_extra_sources = [
"torch/csrc/distributed/autograd/autograd.cpp",
"torch/csrc/distributed/autograd/utils.cpp",
"torch/csrc/distributed/autograd/context/container.cpp",
"torch/csrc/distributed/autograd/context/context.cpp",
"torch/csrc/distributed/autograd/engine/dist_engine.cpp",
"torch/csrc/distributed/autograd/functions/recvrpc_backward.cpp",
"torch/csrc/distributed/autograd/functions/sendrpc_backward.cpp",
"torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/c10d/HashStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
"torch/csrc/distributed/rpc/rref_context.cpp",
"torch/csrc/distributed/rpc/rref_impl.cpp",
"torch/csrc/distributed/rpc/rref_proto.cpp",
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
]
libtorch_distributed_sources = libtorch_distributed_base_sources + libtorch_distributed_extra_sources
jit_sources_full = [
"torch/csrc/jit/codegen/cuda/interface.cpp",
"torch/csrc/jit/passes/lower_graph.cpp",
"torch/csrc/jit/runtime/register_c10_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
"torch/csrc/jit/passes/remove_inplace_ops.cpp",
"torch/csrc/jit/passes/utils/check_alias_annotation.cpp",
"torch/csrc/jit/passes/autocast.cpp",
]
libtorch_core_jit_sources = sorted(jit_sources_full)
torch_mobile_tracer_sources = [
"torch/csrc/jit/mobile/model_tracer/tracer.cpp",
"torch/csrc/jit/mobile/model_tracer/TensorUtils.cpp",
"torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp",
"torch/csrc/jit/mobile/model_tracer/MobileModelRunner.cpp",
"torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp",
"torch/csrc/jit/mobile/model_tracer/KernelDTypeTracer.cpp",
]
torch_mobile_core = [
# backend_debug_info.cpp provides the
# __torch__.torch.classes.backend.BackendDebugInfo class.
# Eventually this should not be needed.
# TODO: Remove this dependency
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/mobile/compatibility/model_compatibility.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/mobile/upgrader_mobile.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
]
libtorch_lite_eager_symbolication = [
"torch/csrc/jit/frontend/source_range.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
# Later we can split serialization and deserialization logic
# to get better separation within the build and only build the relevant parts.
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
# TODO: core_trainer_sources is not necessary for libtorch lite
libtorch_lite_cmake_sources = sorted(
core_trainer_sources +
core_sources_common +
torch_unpickler_common +
torch_mobile_core,
)
libtorch_cmake_sources = libtorch_core_sources + libtorch_core_jit_sources
libtorch_extra_sources = libtorch_core_jit_sources + [
"torch/csrc/autograd/TraceTypeManual.cpp",
"torch/csrc/autograd/VariableTypeManual.cpp",
"torch/csrc/autograd/FunctionsManual.cpp",
"torch/csrc/jit/api/module_save.cpp",
"torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp",
"torch/csrc/jit/mobile/compatibility/backport.cpp",
"torch/csrc/jit/mobile/compatibility/backport_manager.cpp",
"torch/csrc/jit/mobile/compatibility/model_compatibility.cpp",
# To be included for eager symbolication in lite interpreter
# when it is built in libtorch
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/import_data.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/mobile/train/export_data.cpp",
"torch/csrc/jit/mobile/train/optim/sgd.cpp",
"torch/csrc/jit/mobile/train/random.cpp",
"torch/csrc/jit/mobile/train/sequential.cpp",
"torch/csrc/jit/mobile/upgrader_mobile.cpp",
"torch/csrc/jit/serialization/onnx.cpp",
"torch/csrc/jit/serialization/export.cpp",
"torch/csrc/jit/serialization/export_bytecode.cpp",
"torch/csrc/jit/serialization/export_module.cpp",
"torch/csrc/jit/serialization/import_legacy.cpp",
"torch/csrc/utils/byte_order.cpp",
"torch/csrc/utils/out_types.cpp",
]
def libtorch_sources(gencode_pattern = ":generate-code[{}]"):
return libtorch_generated_sources(gencode_pattern) + libtorch_core_sources + libtorch_distributed_sources + libtorch_extra_sources
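# Illustrative note (an assumption, not original build logic): the
# gencode_pattern parameter controls how generated sources are referenced.
# The default ":generate-code[{}]" yields Buck sub-target labels; a
# hypothetical caller could pass a plain path template such as
# "generated/{}" to obtain file paths instead.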
libtorch_cuda_core_sources = [
"torch/csrc/CudaIPCTypes.cpp",
"torch/csrc/cuda/comm.cpp",
"torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp",
"torch/csrc/profiler/cuda.cpp",
"torch/csrc/autograd/functions/comm.cpp",
"torch/csrc/jit/codegen/cuda/arith.cpp",
"torch/csrc/jit/codegen/cuda/compute_at.cpp",
"torch/csrc/jit/codegen/cuda/compute_at_map.cpp",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/dispatch.cpp",
"torch/csrc/jit/codegen/cuda/expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/executor.cpp",
"torch/csrc/jit/codegen/cuda/executor_kernel_arg.cpp",
"torch/csrc/jit/codegen/cuda/executor_launch_params.cpp",
"torch/csrc/jit/codegen/cuda/evaluator_common.cpp",
"torch/csrc/jit/codegen/cuda/executor_utils.cpp",
"torch/csrc/jit/codegen/cuda/fusion.cpp",
"torch/csrc/jit/codegen/cuda/graph_fuser.cpp",
"torch/csrc/jit/codegen/cuda/index_compute.cpp",
"torch/csrc/jit/codegen/cuda/index_reference_replay.cpp",
"torch/csrc/jit/codegen/cuda/instrumentation.cpp",
"torch/csrc/jit/codegen/cuda/ir_base_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_cloner.cpp",
"torch/csrc/jit/codegen/cuda/ir_graphviz.cpp",
"torch/csrc/jit/codegen/cuda/ir_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_iostream.cpp",
"torch/csrc/jit/codegen/cuda/ir_utils.cpp",
"torch/csrc/jit/codegen/cuda/iter_visitor.cpp",
"torch/csrc/jit/codegen/cuda/kernel.cpp",
"torch/csrc/jit/codegen/cuda/kernel_cache.cpp",
"torch/csrc/jit/codegen/cuda/kernel_expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_builder.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_printer.cpp",
"torch/csrc/jit/codegen/cuda/lower_alias_memory.cpp",
"torch/csrc/jit/codegen/cuda/lower_warp_reduce.cpp",
"torch/csrc/jit/codegen/cuda/lower_allocation.cpp",
"torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp",
"torch/csrc/jit/codegen/cuda/lower_index.cpp",
"torch/csrc/jit/codegen/cuda/lower_insert_syncs.cpp",
"torch/csrc/jit/codegen/cuda/lower_loops.cpp",
"torch/csrc/jit/codegen/cuda/lower_magic_zero.cpp",
"torch/csrc/jit/codegen/cuda/lower_misaligned_vectorization.cpp",
"torch/csrc/jit/codegen/cuda/lower_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_shift.cpp",
"torch/csrc/jit/codegen/cuda/lower_thread_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_trivial_reductions.cpp",
"torch/csrc/jit/codegen/cuda/lower_unroll.cpp",
"torch/csrc/jit/codegen/cuda/lower_utils.cpp",
"torch/csrc/jit/codegen/cuda/lower_validation.cpp",
"torch/csrc/jit/codegen/cuda/lower2device.cpp",
"torch/csrc/jit/codegen/cuda/manager.cpp",
"torch/csrc/jit/codegen/cuda/mutator.cpp",
"torch/csrc/jit/codegen/cuda/non_divisible_split.cpp",
"torch/csrc/jit/codegen/cuda/ops/composite.cpp",
"torch/csrc/jit/codegen/cuda/ops/normalization.cpp",
"torch/csrc/jit/codegen/cuda/parallel_dimension_map.cpp",
"torch/csrc/jit/codegen/cuda/parallel_type_bitmap.cpp",
"torch/csrc/jit/codegen/cuda/parser.cpp",
"torch/csrc/jit/codegen/cuda/partial_split_map.cpp",
"torch/csrc/jit/codegen/cuda/partition.cpp",
"torch/csrc/jit/codegen/cuda/predicate_compute.cpp",
"torch/csrc/jit/codegen/cuda/register_interface.cpp",
"torch/csrc/jit/codegen/cuda/root_domain_map.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/pointwise.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/normalization.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/reduction.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/reduction_utils.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/registry.cpp",
"torch/csrc/jit/codegen/cuda/scheduler/utils.cpp",
"torch/csrc/jit/codegen/cuda/type_inference.cpp",
"torch/csrc/jit/codegen/cuda/type_promotion.cpp",
"torch/csrc/jit/codegen/cuda/fusion_segmenter.cpp",
"torch/csrc/jit/codegen/cuda/tensor_view.cpp",
"torch/csrc/jit/codegen/cuda/transform_iter.cpp",
"torch/csrc/jit/codegen/cuda/transform_replay.cpp",
"torch/csrc/jit/codegen/cuda/transform_rfactor.cpp",
"torch/csrc/jit/codegen/cuda/transform_view.cpp",
"torch/csrc/jit/codegen/cuda/type.cpp",
"torch/csrc/jit/codegen/cuda/utils.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion_cuda.cpp",
"torch/csrc/jit/tensorexpr/cuda_codegen.cpp",
"torch/csrc/jit/runtime/register_cuda_ops.cpp",
]
# These files are the only ones that are supported on Windows.
libtorch_cuda_distributed_base_sources = [
"torch/csrc/distributed/c10d/reducer_cuda.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_cuda_distributed_extra_sources = [
"torch/csrc/distributed/c10d/NCCLUtils.cpp",
"torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp",
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/csrc/distributed/c10d/quantization/quantization_gpu.cu",
]
libtorch_cuda_distributed_sources = libtorch_cuda_distributed_base_sources + libtorch_cuda_distributed_extra_sources
libtorch_cuda_sources = libtorch_cuda_core_sources + libtorch_cuda_distributed_sources + [
"torch/csrc/cuda/nccl.cpp",
]
torch_cpp_srcs = [
"torch/csrc/api/src/cuda.cpp", # this just forwards stuff, no real CUDA
"torch/csrc/api/src/data/datasets/mnist.cpp",
"torch/csrc/api/src/data/samplers/distributed.cpp",
"torch/csrc/api/src/data/samplers/random.cpp",
"torch/csrc/api/src/data/samplers/sequential.cpp",
"torch/csrc/api/src/data/samplers/stream.cpp",
"torch/csrc/api/src/enum.cpp",
"torch/csrc/api/src/imethod.cpp",
"torch/csrc/api/src/jit.cpp",
"torch/csrc/api/src/serialize.cpp",
"torch/csrc/api/src/nn/init.cpp",
"torch/csrc/api/src/nn/module.cpp",
"torch/csrc/api/src/nn/modules/_functions.cpp",
"torch/csrc/api/src/nn/modules/activation.cpp",
"torch/csrc/api/src/nn/modules/adaptive.cpp",
"torch/csrc/api/src/nn/modules/batchnorm.cpp",
"torch/csrc/api/src/nn/modules/normalization.cpp",
"torch/csrc/api/src/nn/modules/instancenorm.cpp",
"torch/csrc/api/src/nn/modules/conv.cpp",
"torch/csrc/api/src/nn/modules/dropout.cpp",
"torch/csrc/api/src/nn/modules/distance.cpp",
"torch/csrc/api/src/nn/modules/embedding.cpp",
"torch/csrc/api/src/nn/modules/fold.cpp",
"torch/csrc/api/src/nn/modules/linear.cpp",
"torch/csrc/api/src/nn/modules/loss.cpp",
"torch/csrc/api/src/nn/modules/padding.cpp",
"torch/csrc/api/src/nn/modules/pixelshuffle.cpp",
"torch/csrc/api/src/nn/modules/pooling.cpp",
"torch/csrc/api/src/nn/modules/rnn.cpp",
"torch/csrc/api/src/nn/modules/upsampling.cpp",
"torch/csrc/api/src/nn/modules/transformer.cpp",
"torch/csrc/api/src/nn/modules/container/functional.cpp",
"torch/csrc/api/src/nn/options/activation.cpp",
"torch/csrc/api/src/nn/options/adaptive.cpp",
"torch/csrc/api/src/nn/options/batchnorm.cpp",
"torch/csrc/api/src/nn/options/conv.cpp",
"torch/csrc/api/src/nn/options/dropout.cpp",
"torch/csrc/api/src/nn/options/instancenorm.cpp",
"torch/csrc/api/src/nn/options/linear.cpp",
"torch/csrc/api/src/nn/options/normalization.cpp",
"torch/csrc/api/src/nn/options/embedding.cpp",
"torch/csrc/api/src/nn/options/padding.cpp",
"torch/csrc/api/src/nn/options/pooling.cpp",
"torch/csrc/api/src/nn/options/rnn.cpp",
"torch/csrc/api/src/nn/options/vision.cpp",
"torch/csrc/api/src/nn/options/transformer.cpp",
"torch/csrc/api/src/optim/adagrad.cpp",
"torch/csrc/api/src/optim/adam.cpp",
"torch/csrc/api/src/optim/adamw.cpp",
"torch/csrc/api/src/optim/lbfgs.cpp",
"torch/csrc/api/src/optim/optimizer.cpp",
"torch/csrc/api/src/optim/rmsprop.cpp",
"torch/csrc/api/src/optim/serialize.cpp",
"torch/csrc/api/src/optim/sgd.cpp",
"torch/csrc/api/src/optim/schedulers/lr_scheduler.cpp",
"torch/csrc/api/src/optim/schedulers/step_lr.cpp",
"torch/csrc/api/src/serialize/input-archive.cpp",
"torch/csrc/api/src/serialize/output-archive.cpp",
"torch/csrc/utils/crash_handler.cpp",
]
libtorch_python_cuda_core_sources = [
"torch/csrc/cuda/Event.cpp",
"torch/csrc/cuda/Module.cpp",
"torch/csrc/cuda/python_comm.cpp",
"torch/csrc/cuda/Storage.cpp",
"torch/csrc/cuda/Stream.cpp",
"torch/csrc/cuda/Graph.cpp",
"torch/csrc/cuda/serialization.cpp",
"torch/csrc/cuda/shared/cudart.cpp",
"torch/csrc/cuda/shared/nvtx.cpp",
"torch/csrc/cuda/utils.cpp",
]
libtorch_python_cuda_sources = libtorch_python_cuda_core_sources + [
"torch/csrc/cuda/python_nccl.cpp",
"torch/csrc/cuda/shared/cudnn.cpp",
"torch/csrc/cuda/Tensor.cpp",
]
libtorch_python_core_sources = [
"torch/csrc/DataLoader.cpp",
"torch/csrc/Device.cpp",
"torch/csrc/Dtype.cpp",
"torch/csrc/DynamicTypes.cpp",
"torch/csrc/Exceptions.cpp",
"torch/csrc/Generator.cpp",
"torch/csrc/Layout.cpp",
"torch/csrc/MemoryFormat.cpp",
"torch/csrc/QScheme.cpp",
"torch/csrc/Module.cpp",
"torch/csrc/python_dimname.cpp",
"torch/csrc/Size.cpp",
"torch/csrc/Storage.cpp",
"torch/csrc/Stream.cpp",
"torch/csrc/TypeInfo.cpp",
"torch/csrc/api/src/python/init.cpp",
"torch/csrc/autograd/functions/init.cpp",
"torch/csrc/autograd/init.cpp",
"torch/csrc/autograd/profiler_python.cpp",
"torch/csrc/autograd/python_anomaly_mode.cpp",
"torch/csrc/autograd/python_saved_variable_hooks.cpp",
"torch/csrc/autograd/python_mode.cpp",
"torch/csrc/autograd/python_cpp_function.cpp",
"torch/csrc/autograd/python_engine.cpp",
"torch/csrc/autograd/python_function.cpp",
"torch/csrc/autograd/python_hook.cpp",
"torch/csrc/autograd/python_legacy_variable.cpp",
"torch/csrc/autograd/python_torch_functions_manual.cpp",
"torch/csrc/autograd/python_variable.cpp",
"torch/csrc/autograd/python_variable_indexing.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
"torch/csrc/jit/passes/onnx/cast_all_constant_to_floating.cpp",
"torch/csrc/jit/passes/onnx/eval_peephole.cpp",
"torch/csrc/jit/passes/onnx/constant_fold.cpp",
"torch/csrc/jit/passes/onnx/constant_map.cpp",
"torch/csrc/jit/passes/onnx/eliminate_unused_items.cpp",
"torch/csrc/jit/passes/onnx/fixup_onnx_controlflow.cpp",
"torch/csrc/jit/passes/onnx/list_model_parameters.cpp",
"torch/csrc/jit/passes/onnx/function_substitution.cpp",
"torch/csrc/jit/passes/onnx/helper.cpp",
"torch/csrc/jit/passes/onnx/peephole.cpp",
"torch/csrc/jit/passes/onnx/preprocess_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/prepare_division_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp",
"torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp",
"torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
"torch/csrc/jit/passes/onnx/function_extraction.cpp",
"torch/csrc/jit/python/pybind_utils.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp",
"torch/csrc/jit/python/python_arg_flatten.cpp",
"torch/csrc/jit/python/python_custom_class.cpp",
"torch/csrc/jit/python/python_dict.cpp",
"torch/csrc/jit/python/python_interpreter.cpp",
"torch/csrc/jit/python/python_ir.cpp",
"torch/csrc/jit/python/python_list.cpp",
"torch/csrc/jit/python/python_tracer.cpp",
"torch/csrc/jit/python/script_init.cpp",
"torch/csrc/jit/frontend/concrete_module_type.cpp",
"torch/csrc/jit/frontend/tree_views.cpp",
"torch/csrc/jit/python/python_sugared_value.cpp",
"torch/csrc/jit/python/python_tree_views.cpp",
"torch/csrc/jit/runtime/static/init.cpp",
"torch/csrc/jit/tensorexpr/tensorexpr_init.cpp",
"torch/csrc/monitor/python_init.cpp",
"torch/csrc/multiprocessing/init.cpp",
"torch/csrc/onnx/init.cpp",
"torch/csrc/serialization.cpp",
"torch/csrc/tensor/python_tensor.cpp",
"torch/csrc/utils/init.cpp",
"torch/csrc/utils/throughput_benchmark.cpp",
"torch/csrc/utils.cpp",
"torch/csrc/utils/cuda_lazy_init.cpp",
"torch/csrc/utils/invalid_arguments.cpp",
"torch/csrc/utils/object_ptr.cpp",
"torch/csrc/utils/python_arg_parser.cpp",
"torch/csrc/utils/python_dispatch.cpp",
"torch/csrc/utils/structseq.cpp",
"torch/csrc/utils/tensor_apply.cpp",
"torch/csrc/utils/tensor_dtypes.cpp",
"torch/csrc/utils/tensor_layouts.cpp",
"torch/csrc/utils/tensor_memoryformats.cpp",
"torch/csrc/utils/tensor_qschemes.cpp",
"torch/csrc/utils/tensor_list.cpp",
"torch/csrc/utils/tensor_new.cpp",
"torch/csrc/utils/tensor_numpy.cpp",
"torch/csrc/utils/tensor_types.cpp",
"torch/csrc/utils/disable_torch_function.cpp",
]
libtorch_python_distributed_core_sources = [
"torch/csrc/distributed/c10d/init.cpp",
"torch/csrc/distributed/c10d/python_comm_hook.cpp",
"torch/csrc/distributed/c10d/quantization/quantization.cpp",
]
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",
"torch/csrc/jit/runtime/register_distributed_ops.cpp",
]
def glob_libtorch_python_sources(gencode_pattern = ":generate-code[{}]"):
_libtorch_python_sources = [gencode_pattern.format(name) for name in [
"autograd/generated/python_functions_0.cpp",
"autograd/generated/python_functions_1.cpp",
"autograd/generated/python_functions_2.cpp",
"autograd/generated/python_functions_3.cpp",
"autograd/generated/python_functions_4.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_return_types.cpp",
"autograd/generated/python_sparse_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions_0.cpp",
"autograd/generated/python_torch_functions_1.cpp",
"autograd/generated/python_torch_functions_2.cpp",
"autograd/generated/python_variable_methods.cpp",
]]
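    # With the default gencode_pattern, each name above is formatted into a
    # Buck sub-target label, e.g.
    # ":generate-code[autograd/generated/python_functions_0.cpp]".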
_libtorch_python_sources.extend(libtorch_python_core_sources)
_libtorch_python_sources.extend(libtorch_python_distributed_sources)
return _libtorch_python_sources
aten_cpu_source_non_codegen_list = [
"aten/src/ATen/AccumulateType.cpp",
"aten/src/ATen/BatchedTensorImpl.cpp",
"aten/src/ATen/CPUGeneratorImpl.cpp",
"aten/src/ATen/Context.cpp",
"aten/src/ATen/DLConvertor.cpp",
"aten/src/ATen/EmptyTensor.cpp",
"aten/src/ATen/ExpandUtils.cpp",
"aten/src/ATen/FunctionalInverses.cpp",
"aten/src/ATen/FunctionalStorageImpl.cpp",
"aten/src/ATen/FunctionalTensorWrapper.cpp",
"aten/src/ATen/FunctionalizeFallbackKernel.cpp",
"aten/src/ATen/MemoryOverlap.cpp",
"aten/src/ATen/MapAllocator.cpp",
"aten/src/ATen/NamedTensorUtils.cpp",
"aten/src/ATen/ParallelCommon.cpp",
"aten/src/ATen/ParallelNative.cpp",
"aten/src/ATen/ParallelNativeTBB.cpp",
"aten/src/ATen/ParallelOpenMP.cpp",
"aten/src/ATen/ParallelThreadPoolNative.cpp",
"aten/src/ATen/ScalarOps.cpp",
"aten/src/ATen/SequenceNumber.cpp",
"aten/src/ATen/SparseTensorImpl.cpp",
"aten/src/ATen/SparseCsrTensorImpl.cpp",
"aten/src/ATen/SparseTensorUtils.cpp",
"aten/src/ATen/TensorGeometry.cpp",
"aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorMeta.cpp",
"aten/src/ATen/TensorNames.cpp",
"aten/src/ATen/TensorUtils.cpp",
"aten/src/ATen/ThreadLocalState.cpp",
"aten/src/ATen/FuncTorchTLS.cpp",
"aten/src/ATen/Utils.cpp",
"aten/src/ATen/Version.cpp",
"aten/src/ATen/VmapMode.cpp",
"aten/src/ATen/VmapTransforms.cpp",
"aten/src/ATen/core/BackendSelectFallbackKernel.cpp",
"aten/src/ATen/core/DeprecatedTypeProperties.cpp",
"aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp",
"aten/src/ATen/core/Dict.cpp",
"aten/src/ATen/core/Dimname.cpp",
"aten/src/ATen/core/Formatting.cpp",
"aten/src/ATen/core/Generator.cpp",
"aten/src/ATen/core/List.cpp",
"aten/src/ATen/core/NamedTensor.cpp",
"aten/src/ATen/core/Tensor.cpp",
"aten/src/ATen/core/VariableFallbackKernel.cpp",
"aten/src/ATen/core/VariableHooksInterface.cpp",
"aten/src/ATen/core/Vitals.cpp",
"aten/src/ATen/core/boxing/KernelFunction.cpp",
"aten/src/ATen/core/custom_class.cpp",
"aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp",
"aten/src/ATen/core/dispatch/Dispatcher.cpp",
"aten/src/ATen/core/dispatch/ObservedOperators.cpp",
"aten/src/ATen/core/dispatch/OperatorEntry.cpp",
"aten/src/ATen/core/interned_strings.cpp",
"aten/src/ATen/core/ivalue.cpp",
"aten/src/ATen/core/library.cpp",
"aten/src/ATen/core/op_registration/infer_schema.cpp",
"aten/src/ATen/core/op_registration/op_registration.cpp",
"aten/src/ATen/core/operator_name.cpp",
"aten/src/ATen/core/PythonModeTLS.cpp",
"aten/src/ATen/core/register_symbols.cpp",
"aten/src/ATen/core/class_type.cpp",
"aten/src/ATen/core/type.cpp",
"aten/src/ATen/core/type_factory.cpp",
"aten/src/ATen/core/dynamic_type.cpp",
"aten/src/ATen/core/tensor_type.cpp",
"aten/src/ATen/core/union_type.cpp",
"aten/src/ATen/cpu/FlushDenormal.cpp",
"aten/src/ATen/detail/CPUGuardImpl.cpp",
"aten/src/ATen/detail/CUDAHooksInterface.cpp",
"aten/src/ATen/detail/HIPHooksInterface.cpp",
"aten/src/ATen/detail/ORTHooksInterface.cpp",
"aten/src/ATen/metal/Context.cpp",
"aten/src/ATen/native/AutogradComposite.cpp",
"aten/src/ATen/native/BatchLinearAlgebraKernel.cpp",
"aten/src/ATen/native/DispatchStub.cpp",
"aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/mkl/LinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SparseBlasImpl.cpp",
"aten/src/ATen/native/mkl/SparseCsrLinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SpectralOps.cpp",
"aten/src/ATen/native/mkldnn/BinaryOps.cpp",
"aten/src/ATen/native/mkldnn/Conv.cpp",
"aten/src/ATen/native/mkldnn/Copy.cpp",
"aten/src/ATen/native/mkldnn/Gelu.cpp",
"aten/src/ATen/native/mkldnn/IDeepRegistration.cpp",
"aten/src/ATen/native/mkldnn/Linear.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp",
"aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp",
"aten/src/ATen/native/mkldnn/Normalization.cpp",
"aten/src/ATen/native/mkldnn/Pooling.cpp",
"aten/src/ATen/native/mkldnn/Relu.cpp",
"aten/src/ATen/native/mkldnn/SoftMax.cpp",
"aten/src/ATen/native/mkldnn/TensorFactories.cpp",
"aten/src/ATen/native/mkldnn/TensorShape.cpp",
"aten/src/ATen/native/mkldnn/UnaryOps.cpp",
"aten/src/ATen/native/mkldnn/Utils.cpp",
"aten/src/ATen/native/mkldnn/Matmul.cpp",
"aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/record_function.cpp",
"aten/src/ATen/Dispatch.cpp",
"aten/src/ATen/SavedTensorHooks.cpp",
"aten/src/ATen/vulkan/Context.cpp",
"aten/src/ATen/nnapi/nnapi_bind.cpp",
"aten/src/ATen/nnapi/nnapi_wrapper.cpp",
"aten/src/ATen/nnapi/nnapi_model_loader.cpp",
"aten/src/ATen/native/prim_native_functions.cpp",
]
aten_cpu_source_codegen_list = [
"aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp",
]
# When building the lite interpreter in OSS, "aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp" will go through
# the codegen process. The codegen version of this file, like Activation.cpp.DEFAULT.cpp, will be included
# in ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt. As a result, in aten/src/ATen/CMakeLists.txt,
# only aten_cpu_source_non_codegen_list needs to be added to ${all_cpu_cpp}.
aten_cpu_source_list = sorted(aten_cpu_source_non_codegen_list + aten_cpu_source_codegen_list)
# Same as ${aten_cpu_source_codegen_list}, this list will go through aten codegen, and be included in
# ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt.
aten_native_source_codegen_list = [
"aten/src/ATen/native/cpu/Activation.cpp",
"aten/src/ATen/native/cpu/AvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/BinaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/BlasKernel.cpp",
"aten/src/ATen/native/cpu/CatKernel.cpp",
"aten/src/ATen/native/cpu/ChannelShuffleKernel.cpp",
"aten/src/ATen/native/cpu/ComplexKernel.cpp",
"aten/src/ATen/native/cpu/CopyKernel.cpp",
"aten/src/ATen/native/cpu/CrossKernel.cpp",
"aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp",
"aten/src/ATen/native/cpu/DistanceOpsKernel.cpp",
"aten/src/ATen/native/cpu/DistributionKernels.cpp",
"aten/src/ATen/native/cpu/FillKernel.cpp",
"aten/src/ATen/native/cpu/FunctionOfAMatrixUtilsKernel.cpp",
"aten/src/ATen/native/cpu/GridSamplerKernel.cpp",
"aten/src/ATen/native/cpu/HistogramKernel.cpp",
"aten/src/ATen/native/cpu/IndexKernel.cpp",
"aten/src/ATen/native/cpu/LerpKernel.cpp",
"aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp",
"aten/src/ATen/native/cpu/MaxPooling.cpp",
"aten/src/ATen/native/cpu/MaxPoolKernel.cpp",
"aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp",
"aten/src/ATen/native/cpu/MultinomialKernel.cpp",
"aten/src/ATen/native/cpu/PointwiseOpsKernel.cpp",
"aten/src/ATen/native/cpu/PowKernel.cpp",
"aten/src/ATen/native/cpu/RangeFactoriesKernel.cpp",
"aten/src/ATen/native/cpu/ReduceAllOpsKernel.cpp",
"aten/src/ATen/native/cpu/ReduceOpsKernel.cpp",
"aten/src/ATen/native/cpu/RenormKernel.cpp",
"aten/src/ATen/native/cpu/ScatterGatherKernel.cpp",
"aten/src/ATen/native/cpu/SoftMaxKernel.cpp",
"aten/src/ATen/native/cpu/SortingKernel.cpp",
"aten/src/ATen/native/cpu/StackKernel.cpp",
"aten/src/ATen/native/cpu/SumKernel.cpp",
"aten/src/ATen/native/cpu/TensorCompareKernel.cpp",
"aten/src/ATen/native/cpu/UnaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/Unfold2d.cpp",
"aten/src/ATen/native/cpu/UnfoldBackwardKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp",
"aten/src/ATen/native/cpu/batch_norm_kernel.cpp",
"aten/src/ATen/native/cpu/group_norm_kernel.cpp",
"aten/src/ATen/native/cpu/layer_norm_kernel.cpp",
"aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp",
]
# This aten native source file list will not go through the aten codegen process
aten_native_source_non_codegen_list = [
"aten/src/ATen/native/ao_sparse/library.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp",
"aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp",
"aten/src/ATen/native/quantized/cpu/make_per_tensor_quantized_tensor.cpp",
"aten/src/ATen/native/quantized/cpu/q_adaavgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool3d.cpp",
"aten/src/ATen/native/quantized/cpu/qadd.cpp",
"aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp",
"aten/src/ATen/native/quantized/cpu/qchannel_shuffle.cpp",
"aten/src/ATen/native/quantized/cpu/qclamp.cpp",
"aten/src/ATen/native/quantized/cpu/qconcat.cpp",
"aten/src/ATen/native/quantized/cpu/qconv.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qelu.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qgelu.cpp",
"aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qhardswish.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qmatmul.cpp",
"aten/src/ATen/native/quantized/cpu/qmul.cpp",
"aten/src/ATen/native/quantized/cpu/qnormalization.cpp",
"aten/src/ATen/native/quantized/cpu/qpool.cpp",
"aten/src/ATen/native/quantized/cpu/qreduction.cpp",
"aten/src/ATen/native/quantized/cpu/qrelu.cpp",
"aten/src/ATen/native/quantized/cpu/qsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qsort.cpp",
"aten/src/ATen/native/quantized/cpu/qtanh.cpp",
"aten/src/ATen/native/quantized/cpu/qthreshold.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_bilinear2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest3d.cpp",
"aten/src/ATen/native/quantized/cpu/tensor_operators.cpp",
"aten/src/ATen/native/quantized/Copy.cpp",
"aten/src/ATen/native/quantized/QTensor.cpp",
"aten/src/ATen/native/quantized/TensorCompare.cpp",
"aten/src/ATen/native/quantized/TensorFactories.cpp",
"aten/src/ATen/native/quantized/affine_quantizer.cpp",
"aten/src/ATen/native/quantized/affine_quantizer_base.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_channel_affine.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp",
"aten/src/ATen/native/quantized/library.cpp",
"aten/src/ATen/quantized/QTensorImpl.cpp",
"aten/src/ATen/quantized/Quantizer.cpp",
"aten/src/ATen/native/attention.cpp",
"aten/src/ATen/native/Activation.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling3d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling2d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling3d.cpp",
"aten/src/ATen/native/AffineGridGenerator.cpp",
"aten/src/ATen/native/AveragePool2d.cpp",
"aten/src/ATen/native/AveragePool3d.cpp",
"aten/src/ATen/native/BatchLinearAlgebra.cpp",
"aten/src/ATen/native/Batching.cpp",
"aten/src/ATen/native/BinaryOps.cpp",
"aten/src/ATen/native/Blas.cpp",
"aten/src/ATen/native/BlasKernel.cpp",
"aten/src/ATen/native/Bucketization.cpp",
"aten/src/ATen/native/CPUBlas.cpp",
"aten/src/ATen/native/ChanelShuffle.cpp",
"aten/src/ATen/native/Col2Im.cpp",
"aten/src/ATen/native/ConstantPadNd.cpp",
"aten/src/ATen/native/Convolution.cpp",
"aten/src/ATen/native/ConvolutionMM2d.cpp",
"aten/src/ATen/native/ConvolutionMM3d.cpp",
"aten/src/ATen/native/ConvolutionTBC.cpp",
"aten/src/ATen/native/Copy.cpp",
"aten/src/ATen/native/Correlation.cpp",
"aten/src/ATen/native/CPUFallback.cpp",
"aten/src/ATen/native/Cross.cpp",
"aten/src/ATen/native/DilatedMaxPool2d.cpp",
"aten/src/ATen/native/DilatedMaxPool3d.cpp",
# Referenced by both native and ATen/Version.cpp. Does not reference other native symbols.
# "aten/src/ATen/native/DispatchStub.cpp",
# "aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/native/Distance.cpp",
"aten/src/ATen/native/Distributions.cpp",
"aten/src/ATen/native/Dropout.cpp",
"aten/src/ATen/native/Embedding.cpp",
"aten/src/ATen/native/EmbeddingBag.cpp",
"aten/src/ATen/native/Fill.cpp",
"aten/src/ATen/native/ForeachOpsKernels.cpp",
"aten/src/ATen/native/FractionalMaxPool2d.cpp",
"aten/src/ATen/native/FractionalMaxPool3d.cpp",
"aten/src/ATen/native/FunctionOfAMatrixUtils.cpp",
"aten/src/ATen/native/GatedLinearUnit.cpp",
"aten/src/ATen/native/GridSampler.cpp",
"aten/src/ATen/native/Histogram.cpp",
"aten/src/ATen/native/Im2Col.cpp",
"aten/src/ATen/native/IndexingUtils.cpp",
"aten/src/ATen/native/Integration.cpp",
"aten/src/ATen/native/Itertools.cpp",
"aten/src/ATen/native/LegacyBridge.cpp",
"aten/src/ATen/native/Lerp.cpp",
"aten/src/ATen/native/Linear.cpp",
"aten/src/ATen/native/LinearAlgebra.cpp",
"aten/src/ATen/native/Loss.cpp",
"aten/src/ATen/native/LossCTC.cpp",
"aten/src/ATen/native/LossMultiLabelMargin.cpp",
"aten/src/ATen/native/LossMultiMargin.cpp",
"aten/src/ATen/native/LossNLL.cpp",
"aten/src/ATen/native/LossNLL2d.cpp",
"aten/src/ATen/native/MaxPooling.cpp",
"aten/src/ATen/native/MaxUnpooling.cpp",
"aten/src/ATen/native/Memory.cpp",
"aten/src/ATen/native/MetaTensor.cpp",
"aten/src/ATen/native/NNPACK.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp",
"aten/src/ATen/native/NaiveDilatedConvolution.cpp",
"aten/src/ATen/native/NamedTensor.cpp",
"aten/src/ATen/native/Normalization.cpp",
"aten/src/ATen/native/Onehot.cpp",
"aten/src/ATen/native/PackedSequence.cpp",
"aten/src/ATen/native/PixelShuffle.cpp",
"aten/src/ATen/native/PointwiseOps.cpp",
"aten/src/ATen/native/Pooling.cpp",
"aten/src/ATen/native/Pow.cpp",
"aten/src/ATen/native/QuantizedLinear.cpp",
"aten/src/ATen/native/RNN.cpp",
"aten/src/ATen/native/RangeFactories.cpp",
"aten/src/ATen/native/ReduceAllOps.cpp",
"aten/src/ATen/native/ReduceOps.cpp",
"aten/src/ATen/native/ReflectionPad.cpp",
"aten/src/ATen/native/Repeat.cpp",
"aten/src/ATen/native/ReplicationPadding.cpp",
"aten/src/ATen/native/Resize.cpp",
"aten/src/ATen/native/RowwisePrune.cpp",
"aten/src/ATen/native/SegmentReduce.cpp",
"aten/src/ATen/native/Scalar.cpp",
"aten/src/ATen/native/SobolEngineOps.cpp",
"aten/src/ATen/native/SobolEngineOpsUtils.cpp",
"aten/src/ATen/native/SoftMax.cpp",
"aten/src/ATen/native/Sorting.cpp",
"aten/src/ATen/native/SpectralOps.cpp",
"aten/src/ATen/native/SummaryOps.cpp",
"aten/src/ATen/native/TensorAdvancedIndexing.cpp",
"aten/src/ATen/native/TensorCompare.cpp",
"aten/src/ATen/native/TensorConversions.cpp",
"aten/src/ATen/native/TensorFactories.cpp",
"aten/src/ATen/native/TensorIteratorReduce.cpp",
"aten/src/ATen/native/TensorProperties.cpp",
"aten/src/ATen/native/TensorShape.cpp",
"aten/src/ATen/native/TensorTransformations.cpp",
"aten/src/ATen/native/TestOps.cpp",
"aten/src/ATen/native/TriangularOps.cpp",
"aten/src/ATen/native/TypeProperties.cpp",
"aten/src/ATen/native/UnaryOps.cpp",
"aten/src/ATen/native/Unfold2d.cpp",
"aten/src/ATen/native/Unfold3d.cpp",
"aten/src/ATen/native/UnfoldBackward.cpp",
"aten/src/ATen/native/Unique.cpp",
# Low-level functions that can be directly referenced
# "aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/UpSampleBicubic2d.cpp",
"aten/src/ATen/native/UpSampleBilinear2d.cpp",
"aten/src/ATen/native/UpSampleLinear1d.cpp",
"aten/src/ATen/native/UpSampleNearest1d.cpp",
"aten/src/ATen/native/UpSampleNearest2d.cpp",
"aten/src/ATen/native/UpSampleNearest3d.cpp",
"aten/src/ATen/native/UpSampleTrilinear3d.cpp",
"aten/src/ATen/native/VariableMethodStubs.cpp",
"aten/src/ATen/native/WeightNorm.cpp",
"aten/src/ATen/native/group_norm.cpp",
"aten/src/ATen/native/layer_norm.cpp",
"aten/src/ATen/native/sparse/ParamUtils.cpp",
"aten/src/ATen/native/sparse/SoftMax.cpp",
"aten/src/ATen/native/sparse/SparseBlas.cpp",
"aten/src/ATen/native/sparse/SparseBlasImpl.cpp",
"aten/src/ATen/native/sparse/SparseMatMul.cpp",
"aten/src/ATen/native/sparse/SparseTensor.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensor.cpp",
"aten/src/ATen/native/sparse/SparseTensorMath.cpp",
"aten/src/ATen/native/sparse/SparseUnaryOps.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp",
"aten/src/ATen/native/utils/Factory.cpp",
"aten/src/ATen/native/xnnpack/Activation.cpp",
"aten/src/ATen/native/xnnpack/ChannelShuffle.cpp",
"aten/src/ATen/native/xnnpack/Convolution.cpp",
"aten/src/ATen/native/xnnpack/AveragePooling.cpp",
"aten/src/ATen/native/xnnpack/Init.cpp",
"aten/src/ATen/native/xnnpack/Linear.cpp",
"aten/src/ATen/native/xnnpack/MaxPooling.cpp",
"aten/src/ATen/native/xnnpack/OpContext.cpp",
"aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp",
"aten/src/ATen/native/xnnpack/Shim.cpp",
# Files not in native, but which depend on native symbols
# "aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorIterator.cpp",
"aten/src/ATen/nnapi/nnapi_register.cpp",
]
# 1. Files in ATen/native with a few exceptions
# TODO: move the exceptions to proper locations
# 2. The whole aten native source list includes the lists both with and without the aten codegen process.
aten_native_source_list = sorted(aten_native_source_non_codegen_list + aten_native_source_codegen_list)
# These are the .cpp files that need to go in the torch_cuda_cu library;
# the .cu files can be found via glob.
aten_cuda_cu_source_list = [
"aten/src/ATen/cuda/CUDABlas.cpp",
"aten/src/ATen/cuda/CUDASolver.cpp",
"aten/src/ATen/cuda/CUDASparseBlas.cpp",
"aten/src/ATen/cuda/CublasHandlePool.cpp",
"aten/src/ATen/cuda/CusolverDnHandlePool.cpp",
"aten/src/ATen/native/cuda/Activation.cpp",
"aten/src/ATen/native/cuda/BatchLinearAlgebra.cpp",
"aten/src/ATen/native/cuda/BatchLinearAlgebraLib.cpp",
"aten/src/ATen/native/cuda/Blas.cpp",
"aten/src/ATen/native/cuda/Equal.cpp",
"aten/src/ATen/native/cuda/IndexKernel.cpp",
"aten/src/ATen/native/cuda/ReduceOps.cpp",
"aten/src/ATen/native/cuda/ScanKernels.cpp",
"aten/src/ATen/native/cuda/Sort.cpp",
"aten/src/ATen/native/cuda/Sorting.cpp",
"aten/src/ATen/native/cuda/TensorModeKernel.cpp",
"aten/src/ATen/native/cuda/TensorShapeCUDA.cpp",
"aten/src/ATen/native/cuda/TensorTopK.cpp",
"aten/src/ATen/native/cuda/jit_utils.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlas.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlasImpl.cpp",
"aten/src/ATen/native/sparse/cuda/SparseBlasLegacy.cpp",
"aten/src/ATen/native/sparse/cuda/SparseCUDABlas.cpp",
]
# Files using thrust::sort_by_key need to be linked last
aten_cuda_with_sort_by_key_source_list = [
# empty_cuda is needed by torch_cuda_cpp
"aten/src/ATen/native/cuda/TensorFactories.cu",
]
aten_cuda_cu_with_sort_by_key_source_list = [
"aten/src/ATen/native/cuda/Unique.cu",
]
| avg_line_length: 46.980826 | max_line_length: 134 | alphanum_fraction: 0.738518 |
hexsha: 7949e9e4d35f72be7d201ee700812b575f35f904 | size: 143340 | ext: py | lang: Python
repo_path: virtual/lib/python3.8/site-packages/sqlalchemy/orm/relationships.py
repo_name: Lenus254/personal_blog | repo_head_hexsha: aac38e4b5372c86efa8e24db2e051fef8e5feef8 | licenses: ["Unlicense"]
stars: 4 (2022-02-06T00:54:58.000Z to 2022-02-25T12:44:43.000Z)
issues: 1 (2022-03-12T06:00:21.000Z to 2022-03-12T07:07:55.000Z)
forks: 1 (2022-02-08T13:43:20.000Z to 2022-02-08T13:43:20.000Z)
# orm/relationships.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the ``primaryjoin``
and ``secondaryjoin`` aspects of :func:`_orm.relationship`.
"""
from __future__ import absolute_import
import collections
import re
import weakref
from . import attributes
from .base import _is_mapped_class
from .base import state_str
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
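    For example, a minimal sketch (the ``Building``/``Room`` mapping and
    its columns are hypothetical)::
        # hypothetical custom join condition; columns are assumed
        building = relationship(
            "Building",
            primaryjoin="remote(Building.id) == foreign(Room.building_id)",
        )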
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True}
)
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
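    For example, when no :class:`_schema.ForeignKey` constraint is present
    to infer from (a hypothetical ``Parent``/``Child`` mapping)::
        # hypothetical mapping; Child.parent_id carries no ForeignKey
        parent = relationship(
            "Parent",
            primaryjoin="Parent.id == foreign(Child.parent_id)",
        )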
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(
coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True}
)
@log.class_logger
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = "relationship"
inherit_cache = True
_links_to_entity = True
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=True,
)
_dependency_processor = None
def __init__(
self,
argument,
secondary=None,
primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
overlaps=None,
post_update=False,
cascade=False,
viewonly=False,
lazy="select",
collection_class=None,
passive_deletes=_persistence_only["passive_deletes"],
passive_updates=_persistence_only["passive_updates"],
remote_side=None,
enable_typechecks=_persistence_only["enable_typechecks"],
join_depth=None,
comparator_factory=None,
single_parent=False,
innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=_persistence_only["active_history"],
cascade_backrefs=_persistence_only["cascade_backrefs"],
load_on_pending=False,
bake_queries=True,
_local_remote_pairs=None,
query_class=None,
info=None,
omit_join=None,
sync_backref=None,
_legacy_inactive_history_style=False,
):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`_orm.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`_orm.relationship`
optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`_orm.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`_orm.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need for related classes to be imported into the local module space
before the dependent classes have been declared. It is still required
that the modules in which these related classes appear are imported
anywhere in the application at some point before the related mappings
are actually used, else a lookup error will be raised when the
:func:`_orm.relationship`
attempts to resolve the string reference to the
related class. An example of a string-resolved class is as
follows::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`_orm.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
A mapped class, or actual :class:`_orm.Mapper` instance,
representing
the target of the relationship.
:paramref:`_orm.relationship.argument`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a string name when using Declarative.
.. warning:: Prior to SQLAlchemy 1.3.16, this value is interpreted
using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. versionchanged:: 1.3.16
The string evaluation of the main "argument" no longer accepts an
open ended Python expression, instead only accepting a string
class name or dotted package-qualified name.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
For a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`_schema.Table`.
In less common circumstances, the argument may also be specified
as an :class:`_expression.Alias` construct, or even a
:class:`_expression.Join` construct.
:paramref:`_orm.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`_schema.Table`
that is
present in the :class:`_schema.MetaData`
collection associated with the
parent-mapped :class:`_schema.Table`.
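        For example, a minimal many-to-many sketch (the ``association``
        table and both mappings are hypothetical)::
            # hypothetical association table and mappings
            association_table = Table(
                'association', Base.metadata,
                Column('left_id', ForeignKey('left.id'), primary_key=True),
                Column('right_id', ForeignKey('right.id'), primary_key=True),
            )
            class Parent(Base):
                __tablename__ = 'left'
                id = Column(Integer, primary_key=True)
                children = relationship("Child", secondary=association_table)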
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
The :paramref:`_orm.relationship.secondary` keyword argument is
typically applied in the case where the intermediary
:class:`_schema.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`_orm.relationship.viewonly` flag so that this
:func:`_orm.relationship`
is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`_orm.relationship.secondary`
when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`_orm.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`_orm.relationship.secondary`
works
more effectively when referring to a :class:`_expression.Join`
instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
Indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
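        For example (a minimal, hypothetical mapping)::
            class Parent(Base):
                __tablename__ = 'parent'
                id = Column(Integer, primary_key=True)
                # creates Child.parent automatically
                children = relationship("Child", backref="parent")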
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`_orm.relationship`
configuration when using :paramref:`_orm.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`_orm.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`_orm.relationship.back_populates` to this relationship to
ensure proper functioning.
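        For example (a minimal, hypothetical two-class mapping)::
            class Parent(Base):
                __tablename__ = 'parent'
                id = Column(Integer, primary_key=True)
                children = relationship("Child", back_populates="parent")
            class Child(Base):
                __tablename__ = 'child'
                id = Column(Integer, primary_key=True)
                parent_id = Column(Integer, ForeignKey('parent.id'))
                # must name the complementing attribute explicitly
                parent = relationship("Parent", back_populates="children")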
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`_orm.relationship.backref` - alternative form
of backref specification.
:param overlaps:
A string name or comma-delimited set of names of other relationships
on either this mapper, a descendant mapper, or a target mapper with
which this relationship may write to the same foreign keys upon
persistence. The only effect this has is to eliminate the
warning that this relationship will conflict with another upon
persistence. This is used for relationships that are truly
capable of conflicting with each other on write, where the application
will ensure that no such conflicts actually occur.
.. versionadded:: 1.4
.. seealso::
:ref:`error_qzyx` - usage example
:param bake_queries=True:
Enable :ref:`lambda caching <engine_lambda_caching>` for loader
strategies, if applicable, which adds a performance gain to the
construction of SQL constructs used by loader strategies, in addition
to the usual SQL statement caching used throughout SQLAlchemy. This
parameter currently applies only to the "lazy" and "selectin" loader
strategies. There is generally no reason to set this parameter to
False.
.. versionchanged:: 1.4 Relationship loaders no longer use the
previous "baked query" system of query caching. The "lazy"
and "selectin" loaders make use of the "lambda cache" system
for the construction of SQL constructs,
as well as the usual SQL caching system that is throughout
SQLAlchemy as of the 1.4 series.
:param cascade:
A comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
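        For example, a common configuration for a collection whose members
        should live and die with their parent (hypothetical mapping)::
            children = relationship("Child", cascade="all, delete-orphan")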
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
A boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. deprecated:: 1.4 The
:paramref:`_orm.relationship.cascade_backrefs`
flag will default to False in all cases in SQLAlchemy 2.0.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`_orm.relationship.cascade_backrefs` option is used.
:param collection_class:
A class or callable that returns a new list-holding object that will
be used in place of a plain list for storing elements.
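        For example, to store related items in a ``set`` rather than a
        ``list`` (hypothetical mapping)::
            children = relationship("Child", collection_class=set)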
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
A class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT
keyword degrades the performance of the innermost subquery more
than the presence of duplicate innermost rows would.
.. versionchanged:: 0.9.0 -
:paramref:`_orm.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
Docstring which will be applied to the resulting descriptor.
:param foreign_keys:
A list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`_orm.relationship`
object's :paramref:`_orm.relationship.primaryjoin` condition.
That is, if the :paramref:`_orm.relationship.primaryjoin`
condition of this :func:`_orm.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`_orm.relationship` is ``b.a_id``.
In normal cases, the :paramref:`_orm.relationship.foreign_keys`
parameter is **not required.** :func:`_orm.relationship` will
automatically determine which columns in the
:paramref:`_orm.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`_schema.Column` objects that specify
:class:`_schema.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`_schema.ForeignKeyConstraint` construct.
:paramref:`_orm.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`_orm.relationship`
to consider just those columns specified
here as "foreign".
2. The :class:`_schema.Table` being mapped does not actually have
:class:`_schema.ForeignKey` or
:class:`_schema.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`_orm.relationship.primaryjoin`
argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`_orm.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`_orm.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`_orm.relationship` doesn't raise any exceptions, the
:paramref:`_orm.relationship.foreign_keys` parameter is usually
not needed.
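As an illustrative sketch of case 1 above, where two foreign key
columns reference the same target table (names hypothetical)::

    class Invoice(Base):
        __tablename__ = "invoice"
        id = Column(Integer, primary_key=True)
        billing_address_id = Column(ForeignKey("address.id"))
        shipping_address_id = Column(ForeignKey("address.id"))

        billing_address = relationship(
            "Address", foreign_keys=[billing_address_id]
        )
        shipping_address = relationship(
            "Address", foreign_keys=[shipping_address_id]
        )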
:paramref:`_orm.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`_orm.relationship.primaryjoin`
condition.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
:param innerjoin=False:
When ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have at least one entry.
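A brief sketch, for a many-to-one reference over a non-nullable
local foreign key::

    user = relationship("User", lazy="joined", innerjoin=True)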
The option supports the same "nested" and "unnested" options as
that of :paramref:`_orm.joinedload.innerjoin`. See that flag
for details on nested / unnested behaviors.
.. seealso::
:paramref:`_orm.joinedload.innerjoin` - the option as specified by
loader option, including detail on nesting behavior.
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:param join_depth:
When non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. The default value is
``select``. Values include (a brief usage sketch follows this
list):
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`_orm.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``selectin`` - items should be loaded "eagerly" as the parents
are loaded, using one or more additional SQL statements, which
issues a JOIN to the immediate parent object, specifying primary
key identifiers using an IN clause.
.. versionadded:: 1.2
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``raise`` - lazy loading is disallowed; accessing
the attribute, if its value were not already loaded via eager
loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
This strategy can be used when objects are to be detached from
their attached :class:`.Session` after they are loaded.
.. versionadded:: 1.1
* ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
accessing the attribute, if its value were not already loaded via
eager loading, will raise an
:exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
needs to emit SQL**. If the lazy load can pull the related value
from the identity map or determine that it should be None, the
value is loaded. This strategy can be used when objects will
remain associated with the attached :class:`.Session`, however
additional SELECT statements should be blocked.
.. versionadded:: 1.1
* ``dynamic`` - the attribute will return a pre-configured
:class:`_query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
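As a brief usage sketch, selecting one of the above strategies::

    addresses = relationship("Address", lazy="selectin")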
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on
relationship loader configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:ref:`collections_noload_raiseload` - notes on "noload" and "raise"
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`_orm.relationship.load_on_pending`
flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
Indicates the ordering that should be applied when loading these
items. :paramref:`_orm.relationship.order_by`
is expected to refer to
one of the :class:`_schema.Column`
objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
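For example, a sketch ordering a collection by a column on the
target class (names hypothetical)::

    addresses = relationship(
        "Address", order_by="Address.email_address"
    )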
:paramref:`_orm.relationship.order_by`
may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
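A sketch of the typical configuration, assuming the child table's
foreign key is created with ``ondelete="CASCADE"`` on the database
side::

    children = relationship(
        "Child",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )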
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when the parent
object is deleted and there is no delete or delete-orphan cascade
enabled. This is typically used when a triggering or error raise
scenario is in place on the database side. Note that the foreign
key attributes on in-session child objects will not be changed after
a flush occurs, so this is a very special use-case setting.
Additionally, the "nulling out" will still occur if the child
object is de-associated with the parent.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates the persistence behavior to take when a referenced
primary key value changes in place, indicating that the referencing
foreign key columns will also need their value changed.
When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. When False, the SQLAlchemy
:func:`_orm.relationship`
construct will attempt to emit its own UPDATE statements to
modify related targets. However, note that SQLAlchemy **cannot**
emit an UPDATE for more than one level of cascade. Also,
setting this flag to False is not compatible in the case where
the database is in fact enforcing referential integrity, unless
those constraints are explicitly "deferred", if the target backend
supports it.
It is highly advised that an application which is employing
mutable primary keys keeps ``passive_updates`` set to True,
and instead uses the referential integrity features of the database
itself in order to handle the change efficiently and fully.
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
This indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`_orm.relationship.post_update` to "break" the cycle.
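As an illustrative sketch, assuming a hypothetical ``Widget``
class that has a one-to-many to ``Entry`` rows and also a
``favorite_entry_id`` column referring to a single ``Entry``::

    favorite_entry = relationship(
        "Entry",
        primaryjoin="Widget.favorite_entry_id == Entry.id",
        post_update=True,
    )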
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
A SQL expression that will be used as the primary
join of the child object against the parent object, or in a
many-to-many relationship the join of the parent object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
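As a sketch of a custom condition that narrows the join beyond the
plain foreign key relationship (names hypothetical)::

    current_addresses = relationship(
        "Address",
        primaryjoin="and_(User.id == Address.user_id, "
        "Address.current == True)",
        viewonly=True,
    )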
:paramref:`_orm.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
Used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
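The classic adjacency-list sketch, using hypothetical names::

    class Node(Base):
        __tablename__ = "node"
        id = Column(Integer, primary_key=True)
        parent_id = Column(ForeignKey("node.id"))
        parent = relationship("Node", remote_side=[id])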
:paramref:`_orm.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`_orm.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`_orm.relationship.remote_side`,
typically
when a custom :paramref:`_orm.relationship.primaryjoin` condition
is used.
:param query_class:
A :class:`_query.Query`
subclass that will be used internally by the
``AppenderQuery`` returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`_orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
A SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
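As an illustrative sketch, assuming an ``association``
:class:`_schema.Table` present in the same :class:`_schema.MetaData`::

    children = relationship(
        "Child",
        secondary=association,
        primaryjoin="Parent.id == association.c.parent_id",
        secondaryjoin="Child.id == association.c.child_id",
    )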
:paramref:`_orm.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. warning:: When passed as a Python-evaluable string, the
argument is interpreted using Python's ``eval()`` function.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
See :ref:`declarative_relationship_eval` for details on
declarative evaluation of :func:`_orm.relationship` arguments.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
When True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`_orm.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`_orm.relationship` construct itself will raise an informative
error when this option is required.
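A sketch of the many-to-one case that requires this flag::

    preference = relationship(
        "Preference",
        cascade="all, delete-orphan",
        single_parent=True,
    )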
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`_orm.relationship.single_parent`
flag may be appropriate.
:param uselist:
A boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`_orm.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`_orm.relationship.uselist`
to
False.
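For example, a sketch of the bi-directional one-to-one case::

    child = relationship(
        "Child", uselist=False, back_populates="parent"
    )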
The :paramref:`_orm.relationship.uselist`
flag is also available on an
existing :func:`_orm.relationship`
construct as a read-only attribute,
which can be used to determine if this :func:`_orm.relationship`
deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`_orm.relationship.uselist` flag is needed.
:param viewonly=False:
When set to ``True``, the relationship is used only for loading
objects, and not for any persistence operation. A
:func:`_orm.relationship` which specifies
:paramref:`_orm.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`_orm.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`_expression.cast`. The
:paramref:`_orm.relationship.viewonly`
flag is also of general use when defining any kind of
:func:`_orm.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
When using the :paramref:`_orm.relationship.viewonly` flag in
conjunction with backrefs, the originating relationship for a
particular state change will not produce state changes within the
viewonly relationship. This is the behavior implied by
:paramref:`_orm.relationship.sync_backref` being set to False.
.. versionchanged:: 1.3.17 - the
:paramref:`_orm.relationship.sync_backref` flag is set to False
when using viewonly in conjunction with backrefs.
.. seealso::
:paramref:`_orm.relationship.sync_backref`
:param sync_backref:
A boolean that enables the events used to synchronize the in-Python
attributes when this relationship is target of either
:paramref:`_orm.relationship.backref` or
:paramref:`_orm.relationship.back_populates`.
Defaults to ``None``, which indicates that an automatic value should
be selected based on the value of the
:paramref:`_orm.relationship.viewonly` flag. When left at its
default, changes in state will be back-populated only if neither
side of the relationship is viewonly.
.. versionadded:: 1.3.17
.. versionchanged:: 1.4 - A relationship that specifies
:paramref:`_orm.relationship.viewonly` automatically implies
that :paramref:`_orm.relationship.sync_backref` is ``False``.
.. seealso::
:paramref:`_orm.relationship.viewonly`
:param omit_join:
Allows manual control over the "selectin" automatic join
optimization. Set to ``False`` to disable the "omit join" feature
added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
optimization in place.
.. note:: This flag may only be set to ``False``. It is not
necessary to set it to ``True`` as the "omit_join" optimization is
automatically detected; if it is not detected, then the
optimization is not supported.
.. versionchanged:: 1.3.11 setting ``omit_join`` to True will now
emit a warning as this was not the intended use of this flag.
.. versionadded:: 1.3
"""
super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self._legacy_inactive_history_style = _legacy_inactive_history_style
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.bake_queries = bake_queries
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property = set()
if overlaps:
self._overlaps = set(re.split(r"\s*,\s*", overlaps))
else:
self._overlaps = ()
if cascade is not False:
self.cascade = cascade
elif self.viewonly:
self.cascade = "none"
else:
self.cascade = "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
def _warn_for_persistence_only_flags(self, **kw):
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
_extra_criteria = ()
def __init__(
self,
prop,
parentmapper,
adapt_to_entity=None,
of_type=None,
extra_criteria=(),
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
self._extra_criteria = extra_criteria
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
self.property,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
@util.memoized_property
def entity(self):
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
# this is a relatively recent change made for
# 1.4.27 as part of #7244.
# TODO: shouldn't _of_type be inspected up front when received?
if self._of_type is not None:
return inspect(self._of_type)
else:
return self.property.entity
@util.memoized_property
def mapper(self):
"""The target :class:`_orm.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type_entity = inspect(self._of_type)
else:
of_type_entity = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_entity=of_type_entity,
alias_secondary=True,
extra_criteria=self._extra_criteria,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=cls,
extra_criteria=self._extra_criteria,
)
def and_(self, *other):
"""Add AND criteria.
See :meth:`.PropComparator.and_` for an example.
.. versionadded:: 1.4
"""
return RelationshipProperty.Comparator(
self.property,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=self._of_type,
extra_criteria=self._extra_criteria + other,
)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`_orm.relationship`-based attributes at this time.
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, "_of_type", None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable._anonymous_fromclause()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
criterion is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(criterion)
if secondary is not None:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest, secondary)
.correlate_except(dest, secondary)
)
else:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest)
.correlate_except(dest)
)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT (EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id))
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`_orm.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`_orm.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`_orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`_query.Query.outerjoin`
as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
kwargs may be ignored by this operator but are required for API
conformance.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.property._optimized_compare(
other, adapt_source=self.adapter
)
if self.property.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(local_col, state, remote_col):
dict_ = state.dict
return sql.bindparam(
local_col.key,
type_=local_col.type,
unique=True,
callable_=self.property._get_attr_w_warn_on_none(
self.property.mapper, state, dict_, remote_col
),
)
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`_expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`_expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
self.prop.parent._check_configure()
return self.prop
def _with_parent(self, instance, alias_secondary=True, from_entity=None):
assert instance is not None
adapt_source = None
if from_entity is not None:
insp = inspect(from_entity)
if insp.is_aliased_class:
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state,
value_is_parent=False,
adapt_source=None,
alias_secondary=True,
):
if state is not None:
try:
state = inspect(state)
except sa_exc.NoInspectionAvailable:
state = None
if state is None or not getattr(state, "is_instance", False):
raise sa_exc.ArgumentError(
"Mapped instance expected for relationship "
"comparison to object. Classes, queries and other "
"SQL elements are not accepted in this context; for "
"comparison with a subquery, "
"use %s.has(**criteria)." % self
)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(
self.secondary._anonymous_fromclause()
).traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't do
# the comparison correctly for that so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
def _go():
last_known = to_return = state._last_known_values[prop.key]
existing_is_available = last_known is not attributes.NO_VALUE
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=attributes.PASSIVE_OFF
if state.persistent
else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
)
if current_value is attributes.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is attributes.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
impl = source_state.get_impl(self.key)
instances_iterable = impl.get_collection(source_state, source_dict)
# if this is a CollectionAttributeImpl, then empty should
# be False, otherwise "self.key in source_dict" should not be
# True
assert not instances_iterable.empty if impl.collection else True
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances_iterable:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, dest_list, _adapt=False
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self, state, dict_, key, passive=attributes.PASSIVE_OFF
):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, "get_collection"):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self):
if self.viewonly:
return False
else:
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(rel_a, rel_b):
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if (
rel_a.viewonly
and not rel_b.viewonly
and rel_b.sync_backref is not False
):
rel_b.sync_backref = False
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
if not isinstance(other, RelationshipProperty):
raise sa_exc.InvalidRequestError(
"back_populates on relationship '%s' refers to attribute '%s' "
"that is not a relationship. The back_populates parameter "
"should refer to the name of a relationship on the target "
"class." % (self, other)
)
# viewonly and sync_backref cases
# 1. self.viewonly==True and other.sync_backref==True -> error
# 2. self.viewonly==True and other.viewonly==False and
# other.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(self, other)
# 3. other.viewonly==True and self.sync_backref==True -> error
# 4. other.viewonly==True and self.viewonly==False and
# self.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(other, self)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
@util.memoized_property
@util.preload_module("sqlalchemy.orm.mapper")
def entity(self):
"""Return the target mapped entity, which is an inspect() of the
class or aliased class that is referred towards.
"""
mapperlib = util.preloaded.orm_mapper
if isinstance(self.argument, util.string_types):
argument = self._clsregistry_resolve_name(self.argument)()
elif callable(self.argument) and not isinstance(
self.argument, (type, mapperlib.Mapper)
):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
return mapperlib.class_mapper(argument, configure=False)
try:
entity = inspect(argument)
except sa_exc.NoInspectionAvailable:
pass
else:
if hasattr(entity, "mapper"):
return entity
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument))
)
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
return self.entity.mapper
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_registry_dependencies()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _setup_registry_dependencies(self):
self.parent.mapper.registry._set_depends_on(
self.entity.mapper.registry
)
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
attr_value = getattr(self, attr)
if isinstance(attr_value, util.string_types):
setattr(
self,
attr,
self._clsregistry_resolve_arg(
attr_value, favor_tables=attr == "secondary"
)(),
)
elif callable(attr_value) and not _is_mapped_class(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
val = getattr(self, attr)
if val is not None:
setattr(
self,
attr,
_orm_deannotate(
coercions.expect(
roles.ColumnArgumentRole, val, argname=attr
)
),
)
if self.secondary is not None and _is_mapped_class(self.secondary):
raise sa_exc.ArgumentError(
"secondary argument %s passed to to relationship() %s must "
"be a Table object or other FROM clause; can't send a mapped "
"class directly as rows in 'secondary' are persisted "
"independently of a class that is mapped "
"to that same table." % (self.secondary, self)
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = tuple(
coercions.expect(
roles.ColumnArgumentRole, x, argname="order_by"
)
for x in util.to_list(self.order_by)
)
self._user_defined_foreign_keys = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="foreign_keys"
)
for x in util.to_column_set(self._user_defined_foreign_keys)
)
self.remote_side = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="remote_side"
)
for x in util.to_column_set(self.remote_side)
)
self.target = self.entity.persist_selectable
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
@property
def _clsregistry_resolve_arg(self):
return self._clsregistry_resolvers[1]
@property
def _clsregistry_resolve_name(self):
return self._clsregistry_resolvers[0]
@util.memoized_property
@util.preload_module("sqlalchemy.orm.clsregistry")
def _clsregistry_resolvers(self):
_resolver = util.preloaded.orm_clsregistry._resolver
return _resolver(self.parent.class_, self)
@util.preload_module("sqlalchemy.orm.mapper")
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
mapperlib = util.preloaded.orm_mapper
if self.parent.non_primary and not mapperlib.class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade):
self._set_cascade(cascade)
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if self.viewonly:
non_viewonly = set(cascade).difference(
CascadeOptions._viewonly_cascades
)
if non_viewonly:
raise sa_exc.ArgumentError(
'Cascade settings "%s" apply to persistence operations '
"and should not be combined with a viewonly=True "
"relationship." % (", ".join(sorted(non_viewonly)))
)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
def _check_cascade_settings(self, cascade):
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"For %(direction)s relationship %(rel)s, delete-orphan "
"cascade is normally "
'configured only on the "one" side of a one-to-many '
"relationship, "
'and not on the "many" side of a many-to-one or many-to-many '
"relationship. "
"To force this relationship to allow a particular "
'"%(relatedcls)s" object to be referred towards by only '
'a single "%(clsname)s" object at a time via the '
"%(rel)s relationship, which "
"would allow "
"delete-orphan cascade to take place in this direction, set "
"the single_parent=True flag."
% {
"rel": self,
"direction": "many-to-one"
if self.direction is MANYTOONE
else "many-to-many",
"clsname": self.parent.class_.__name__,
"relatedcls": self.mapper.class_.__name__,
},
code="bbf0",
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _persists_for(self, mapper):
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if (
self.secondary is not None
and self.secondary.c.contains_column(c)
):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`_orm.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
kwargs.setdefault("sync_backref", self.sync_backref)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
pj,
sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs
)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
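    # Illustrative sketch (hypothetical Parent/Child models) of the two forms
    # of 'backref' unpacked above: a plain string yields default options for
    # the generated reverse relationship, while a backref() construct -- which
    # is just a (key, kwargs) tuple -- forwards extra keyword arguments to it.
    #
    #     children = relationship("Child", backref="parent")
    #     children = relationship(
    #         "Child", backref=backref("parent", lazy="joined")
    #     )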
@util.preload_module("sqlalchemy.orm.dependency")
def _post_init(self):
dependency = util.preloaded.orm_dependency
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = (
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic=False,
source_selectable=None,
dest_selectable=None,
of_type_entity=None,
alias_secondary=False,
extra_criteria=(),
):
aliased = False
if alias_secondary and self.secondary is not None:
aliased = True
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
if of_type_entity:
dest_mapper = of_type_entity.mapper
if dest_selectable is None:
dest_selectable = of_type_entity.selectable
aliased = True
else:
dest_mapper = self.mapper
if dest_selectable is None:
dest_selectable = self.entity.selectable
if self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable._anonymous_fromclause()
aliased = True
elif (
dest_selectable is not self.mapper._with_polymorphic_selectable
or self.mapper.with_polymorphic
):
aliased = True
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (
source_selectable is not None
and (
source_selectable
is not self.parent._with_polymorphic_selectable
or source_selectable._is_subquery
)
)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable,
dest_selectable,
aliased,
single_crit,
extra_criteria,
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
clone = None # remove gc cycles
return element
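# Illustrative sketch (standalone, hypothetical Core tables) of what
# _annotate_columns() does: every ColumnClause inside the expression is
# cloned with the given annotations merged into its _annotations dict. The
# public foreign() and remote() helpers apply the same mechanism with the
# {"foreign": True} / {"remote": True} annotations respectively.
#
#     expr = parent.c.id == child.c.parent_id
#     expr = _annotate_columns(expr, {"remote": True})
#     # both column references inside expr now carry {"remote": True}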
class JoinCondition(object):
def __init__(
self,
parent_persist_selectable,
child_persist_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self):
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
We'd want to remove "parentmapper" also, but apparently there's
an exotic use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity", "proxy_key")
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity", "proxy_key")
)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
if self.primaryjoin is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
except sa_exc.NoForeignKeysError as nfe:
if self.secondary is not None:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
),
from_=nfe,
)
else:
util.raise_(
sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
),
from_=nfe,
)
except sa_exc.AmbiguousForeignKeysError as afe:
if self.secondary is not None:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary)
),
from_=afe,
)
else:
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop
),
from_=afe,
)
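    # Illustrative sketch (hypothetical declarative models) of a layout that
    # hits the AmbiguousForeignKeysError branch above, plus the usual fix:
    # with two foreign keys from child to parent, the intended columns must
    # be named via 'foreign_keys'.
    #
    #     class Child(Base):
    #         parent_id = Column(ForeignKey("parent.id"))
    #         guardian_id = Column(ForeignKey("parent.id"))
    #
    #     class Parent(Base):
    #         children = relationship(
    #             "Child", foreign_keys="[Child.parent_id]"
    #         )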
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = dict(element._annotations)
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = dict(element._annotations)
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
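    # Illustrative sketch (standalone Core tables; 'm' is assumed to be a
    # MetaData()) of the Column.references() test used by is_foreign() above:
    # references() is True when a ForeignKey on the column points at the
    # other column.
    #
    #     parent = Table("parent", m, Column("id", Integer, primary_key=True))
    #     child = Table(
    #         "child", m,
    #         Column("id", Integer, primary_key=True),
    #         Column("parent_id", ForeignKey("parent.id")),
    #     )
    #     child.c.parent_id.references(parent.c.id)  # True
    #     parent.c.id.references(child.c.parent_id)  # False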
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
# use set() to avoid generating ``__eq__()`` expressions
# against each element
if element in set(remote_side):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
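    # Illustrative sketch (classic adjacency list, hypothetical Node model)
    # of the remote_side argument consumed above: for a self-referential
    # relationship, remote_side declares which column is the "remote" one,
    # which also fixes the direction.
    #
    #     class Node(Base):
    #         id = Column(Integer, primary_key=True)
    #         parent_id = Column(Integer, ForeignKey("node.id"))
    #         parent = relationship("Node", remote_side=[id])
    #
    # Marking Node.id as remote makes 'parent' a many-to-one towards the
    # parent row; omitting remote_side would yield the one-to-many default.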
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
        tables in common, though it is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _annotate_parentmapper(self):
if self.prop is None:
return
def parentmappers_(elem):
if "remote" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.mapper})
elif "local" in elem._annotations:
return elem._annotate({"parentmapper": self.prop.parent})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
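    # Illustrative sketch (hypothetical materialized-path mapping) of the
    # viewonly=True escape hatch named in the error text above: a join
    # condition with no plain equality on a foreign key column cannot be
    # synchronized, but can still be loaded if the columns are marked with
    # foreign()/remote() and persistence is disabled.
    #
    #     descendants = relationship(
    #         "Node",
    #         primaryjoin=remote(foreign(Node.path)).like(Node.path + "/%"),
    #         viewonly=True,
    #     )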
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set(
[
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
]
)
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
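    # Illustrative sketch (hypothetical Parent/Child models) of how foreign
    # key placement decides direction above: a foreign key on the target
    # table gives ONETOMANY, while a foreign key on the parent's own table
    # gives MANYTOONE.
    #
    #     class Child(Base):
    #         parent_id = Column(ForeignKey("parent.id"))
    #         parent = relationship("Parent")     # FK is local: MANYTOONE
    #
    #     class Parent(Base):
    #         children = relationship("Child")    # FK is on target: ONETOMANY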
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
_track_overlapping_sync_targets = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self):
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
            # considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
not pr.mapper._dispose_called
and pr not in self.prop._reverse_property
and pr.key not in self.prop._overlaps
and self.prop.key not in pr._overlaps
# note: the "__*" symbol is used internally by
# SQLAlchemy as a general means of suppressing the
# overlaps warning for some extension cases, however
# this is not currently
# a publicly supported symbol and may change at
# any time.
and "__*" not in self.prop._overlaps
and "__*" not in pr._overlaps
and not self.prop.parent.is_sibling(pr.parent)
and not self.prop.mapper.is_sibling(pr.mapper)
and not self.prop.parent.is_sibling(pr.mapper)
and not self.prop.mapper.is_sibling(pr.parent)
and (
self.prop.key != pr.key
or not self.prop.parent.common_parent(pr.parent)
)
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"If this is not the intention, consider if these "
"relationships should be linked with "
"back_populates, or if viewonly=True should be "
"applied to one or more if they are read-only. "
"For the less common case that foreign key "
"constraints are partially overlapping, the "
"orm.foreign() "
"annotation can be used to isolate the columns that "
"should be written towards. To silence this "
"warning, add the parameter 'overlaps=\"%s\"' to the "
"'%s' relationship."
% (
self.prop,
from_,
to_,
", ".join(
sorted(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
)
),
",".join(sorted(pr.key for pr, fr in other_props)),
self.prop,
),
code="qzyx",
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
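    # Illustrative sketch (hypothetical relationships) of the 'overlaps'
    # parameter suggested by the warning above, for the case where two
    # relationships intentionally write the same foreign key column:
    #
    #     current_member = relationship("Member", overlaps="all_members")
    #     all_members = relationship(
    #         "Member", overlaps="current_member", viewonly=True
    #     )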
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set(
[
col
for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
]
)
def join_targets(
self,
source_selectable,
dest_selectable,
aliased,
single_crit=None,
extra_criteria=(),
):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if extra_criteria:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)
else:
primaryjoin = primaryjoin & sql.and_(*extra_criteria)
if aliased:
if secondary is not None:
secondary = secondary._anonymous_fromclause(flat=True)
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and "local" in col._annotations)
or reverse_direction
and (
(has_secondary and col in lookup)
or (not has_secondary and "remote" in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True
)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
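    # Illustrative sketch (hypothetical one-to-many, columns parent.id /
    # child.parent_id) of the transformation performed above: "local"
    # columns of the primaryjoin are replaced by anonymous bind parameters
    # that the lazy loader later fills in from the parent row.
    #
    #     primaryjoin:      parent.id == child.parent_id
    #     lazywhere:        :param_1 == child.parent_id
    #     bind_to_col:      {"param_1": parent.id}
    #     equated_columns:  {child.parent_id: parent.id}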
class _ColInAnnotations(object):
"""Serializable object that tests for a name in c._annotations."""
__slots__ = ("name",)
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
| 38.845528
| 79
| 0.579413
|
7949e9fa4b0fdd2d2c6caf9583dd4670734d5fa9
| 3,024
|
py
|
Python
|
compare_gan/src/gans/ops.py
|
hwalsuklee/compare_gan
|
d6c5976ba2b9d19dbff42f439c20b48565d10235
|
[
"Apache-2.0"
] | 2
|
2018-10-03T09:52:44.000Z
|
2022-03-03T03:00:23.000Z
|
compare_gan/src/gans/ops.py
|
hwalsuklee/compare_gan
|
d6c5976ba2b9d19dbff42f439c20b48565d10235
|
[
"Apache-2.0"
] | null | null | null |
compare_gan/src/gans/ops.py
|
hwalsuklee/compare_gan
|
d6c5976ba2b9d19dbff42f439c20b48565d10235
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
import scipy.misc
import tensorflow as tf
def check_folder(log_dir):
if not tf.gfile.IsDirectory(log_dir):
tf.gfile.MakeDirs(log_dir)
return log_dir
def save_images(images, image_path):
with tf.gfile.Open(image_path, "wb") as f:
scipy.misc.imsave(f, images * 255.0)
def gaussian(batch_size, n_dim, mean=0., var=1.):
  # Note: np.random.normal takes a standard deviation as its second argument,
  # so the 'var' parameter is used as a std-dev here despite its name.
  return np.random.normal(mean, var, (batch_size, n_dim)).astype(np.float32)
def lrelu(input_, leak=0.2, name="lrelu"):
return tf.maximum(input_, leak * input_, name=name)
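# Quick numeric sketch (plain Python, hypothetical values) of why the
# tf.maximum form above is a leaky ReLU for 0 <= leak < 1:
#
#     max(2.0, 0.2 * 2.0)   == 2.0   # positive inputs pass through
#     max(-2.0, 0.2 * -2.0) == -0.4  # negative inputs are scaled by leak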
def linear(input_,
output_size,
scope=None,
stddev=0.02,
bias_start=0.0):
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable(
"Matrix", [shape[1], output_size],
tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable(
"bias", [output_size], initializer=tf.constant_initializer(bias_start))
return tf.matmul(input_, matrix) + bias
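# Usage sketch (TF1 graph mode assumed; tensor names are hypothetical):
# projecting a [batch, 128] activation down to 64 units with the helper
# above, followed by the leaky ReLU.
#
#     x = tf.placeholder(tf.float32, [None, 128])
#     h = lrelu(linear(x, 64, scope="fc1"))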
def batch_norm(input_, is_training, scope):
return tf.contrib.layers.batch_norm(
input_,
decay=0.999,
epsilon=0.001,
updates_collections=None,
scale=True,
fused=False,
is_training=is_training,
scope=scope)
def conv2d(input_, output_dim, k_h, k_w, d_h, d_w, stddev=0.02, name="conv2d"):
with tf.variable_scope(name):
w = tf.get_variable(
"w", [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding="SAME")
biases = tf.get_variable(
"biases", [output_dim], initializer=tf.constant_initializer(0.0))
return tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
def deconv2d(
input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"):
with tf.variable_scope(name):
w = tf.get_variable(
"w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(
input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable(
"biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0))
return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
| 32.516129
| 79
| 0.689153
|
7949eb93941a7a360b1a0c0636271378a03b6570
| 435
|
py
|
Python
|
src/Backend/query_DT_candidates.py
|
uhh-lt/cam-hci
|
dd402322bb37470963c5cc46765592f952e2c3f0
|
[
"MIT"
] | null | null | null |
src/Backend/query_DT_candidates.py
|
uhh-lt/cam-hci
|
dd402322bb37470963c5cc46765592f952e2c3f0
|
[
"MIT"
] | 42
|
2018-02-23T14:54:35.000Z
|
2018-03-09T10:50:56.000Z
|
src/Backend/query_DT_candidates.py
|
uhh-lt/cam-hci
|
dd402322bb37470963c5cc46765592f952e2c3f0
|
[
"MIT"
] | null | null | null |
from elasticsearch import Elasticsearch
ES_HOST = {"host": "localhost", "port": 9200}
INDEX_NAME = 'dt-index'
es = Elasticsearch(hosts=[ES_HOST], timeout=300)
def get_all_similarities(comparison_object):
# print(comparison_object)
res = es.search(index=INDEX_NAME, size=10000, body={"query": {"match": {"first": comparison_object}}})
return list(set([hit['_source']['second'].lower() for hit in res['hits']['hits']]))
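# Usage sketch (assumes a local Elasticsearch instance with 'dt-index'
# populated; the comparison object is hypothetical):
#
#     candidates = get_all_similarities("python")
#     # -> de-duplicated, lowercased 'second' terms from matching documents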
| 31.071429
| 106
| 0.698851
|
7949ed6258a82d36cfa6f81a12d715f3313d6b56
| 6,074
|
py
|
Python
|
popbl_servicesapp/flask_app/delivery/application/routes.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
popbl_servicesapp/flask_app/delivery/application/routes.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | 1
|
2021-06-02T00:57:11.000Z
|
2021-06-02T00:57:11.000Z
|
popbl_servicesapp/flask_app/delivery/application/routes.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
from flask import request, jsonify, abort
from flask import current_app as app
from .models import Delivery
from werkzeug.exceptions import NotFound, InternalServerError, BadRequest, UnsupportedMediaType
import traceback
from . import Session
from .BLConsul import BLConsul
from .config import Config
import requests
from .delivery_logic import DeliveryLogic
bl_consul = BLConsul.get_instance()
logic = DeliveryLogic()
# Get Service from consul by name and return ##########################################################################
@app.route('/{}/<string:external_service_name>'.format(Config.SERVICE_NAME))
def external_service_response(external_service_name):
service = bl_consul.get_service(external_service_name)
service['Name'] = external_service_name
print(""*50)
print(external_service_name)
if service is None:
ret_message = "The service does not exist or there is no healthy replica"
status_code = 404
else:
ret_message, status_code = call_external_service(service)
return ret_message, status_code
@app.route('/{}/kv'.format(Config.SERVICE_NAME))
def key_values():
kv = bl_consul.get_key_value_items()
return jsonify(kv), 200
@app.route('/{}/catalog'.format(Config.SERVICE_NAME))
def get_catalog():
catalog = bl_consul.get_service_catalog()
return jsonify(catalog), 200
@app.route('/{}/replicas'.format(Config.SERVICE_NAME))
def get_replicas():
replicas = bl_consul.get_service_replicas()
return jsonify(replicas), 200
def call_external_service(service):
url = "http://{host}:{port}/{path}".format(
host=service['Address'],
port=service['Port'],
path=service['Name']
)
response = requests.get(url)
if response:
ret_message = jsonify({
"caller": Config.SERVICE_NAME,
"callerURL": "{}:{}".format(Config.IP, Config.PORT),
"answerer": service['Name'],
"answererURL": "{}:{}".format(service['Address'], service['Port']),
"response": response.text,
"status_code": response.status_code
})
status_code = response.status_code
else:
ret_message = "Could not get message"
status_code = 500
return ret_message, status_code
@app.route('/delivery/health', methods=['HEAD', 'GET'])
def health_check():
# print("Health check on delivery")
return "OK", 200
# Create a delivery object holding the reference that ties it to an order,
# together with its status and description.
# The status starts out as "in process"; the description starts out blank.
@app.route('/delivery/create_delivery', methods=['POST'])
def create_delivery():
session = Session()
status = False
if request.headers['Content-Type'] != 'application/json':
abort(UnsupportedMediaType.code)
content = request.json
print("Contenido del request:\n")
print(content)
try:
temdel = Delivery(
ref=content['orderId'],
status=content['status'],
description=content['description']
)
        session.add(temdel)
        session.commit()
        status = True
    except KeyError:
        session.rollback()
        session.close()
        abort(BadRequest.code)
    session.close()
    print(status)
    if not status:
        return {"success": False}
    else:
        return {"success": True}
# Update the status value of a delivery, looked up by its reference
@app.route('/delivery/update_delivery', methods=['POST'])
def update_delivery():
session = Session()
status = False
if request.headers['Content-Type'] != 'application/json':
abort(UnsupportedMediaType.code)
content = request.json
print("Contenido del request:\n")
print(content)
deli = session.query(Delivery).filter_by(ref=content['orderId']).first()
print("Contenido de la query")
print(deli)
if deli:
        # Update the record
deli.status = content['status']
session.add(deli)
session.commit()
status = True
else:
status = False
    session.close()
    print(status)
    if not status:
        return {"success": False}
    else:
        return {"success": True}
# The user adds to one of the referenced orders the description needed for
# the delivery to be carried out.
# The order must exist in the database and have its status set to finished or ready.
@app.route('/delivery/info_delivery', methods=['POST'])
def info_delivery():
session = Session()
status = False
if request.headers['Content-Type'] != 'application/json':
abort(UnsupportedMediaType.code)
content = request.json
print("Contenido del request:\n")
print(content)
deli = session.query(Delivery).filter_by(ref=content['orderId']).first()
print("Contenido de la query")
print(deli)
if deli:
        # Update the record
deli.description = content['description']
session.add(deli)
session.commit()
status = True
else:
status = False
    session.close()
    print(status)
    if not status:
        return {"success": False}
    else:
        return {"success": True}
# Error Handling #######################################################################################################
@app.errorhandler(UnsupportedMediaType)
def unsupported_media_type_handler(e):
return get_jsonified_error(e)
@app.errorhandler(BadRequest)
def bad_request_handler(e):
return get_jsonified_error(e)
@app.errorhandler(NotFound)
def resource_not_found_handler(e):
return get_jsonified_error(e)
@app.errorhandler(InternalServerError)
def server_error_handler(e):
return get_jsonified_error(e)
def get_jsonified_error(e):
traceback.print_tb(e.__traceback__)
return jsonify({"error_code": e.code, "error_message": e.description}), e.code
| 27.73516
| 120
| 0.651465
|
7949ee2058ba85e5da1d839fef8119f0f84ff0b0
| 1,172
|
py
|
Python
|
quiz_master/quiz_app/tests/models/test_QuizModel.py
|
DiyanKalaydzhiev23/quiz_master
|
d8624a59d5bfc52c13ede72b024d1178c17c1780
|
[
"MIT"
] | null | null | null |
quiz_master/quiz_app/tests/models/test_QuizModel.py
|
DiyanKalaydzhiev23/quiz_master
|
d8624a59d5bfc52c13ede72b024d1178c17c1780
|
[
"MIT"
] | null | null | null |
quiz_master/quiz_app/tests/models/test_QuizModel.py
|
DiyanKalaydzhiev23/quiz_master
|
d8624a59d5bfc52c13ede72b024d1178c17c1780
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
from quiz_master.quiz_app.models import Quiz
UserModel = get_user_model()
class QuizModelTests(TestCase):
def setUp(self):
self.user = UserModel(
username='Meto',
password="metilab",
email='di@metilab.com',
)
self.user.full_clean()
self.user.save()
def test_quiz_model_create__with_valid_data__expect_success(self):
quiz_instance = Quiz(
category="biology",
name="phagocytes",
author=self.user,
)
quiz_instance.full_clean()
quiz_instance.save()
self.assertIsNotNone(quiz_instance.pk)
def test_quiz_model__with_invalid_name__expect_fail(self):
name = 'n' * 151
quiz_instance = Quiz(
category="biology",
name=name,
author=self.user,
)
with self.assertRaises(ValidationError) as context:
quiz_instance.full_clean()
quiz_instance.save()
self.assertIsNotNone(context.exception)
| 24.93617
| 70
| 0.62884
|