id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8114545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CloudbusTotalOdItem(object):
    """Alipay API model for one cloud-bus total origin-destination (OD) record.

    Fields are exposed as properties and (de)serialized to the plain-dict
    wire format via :meth:`to_alipay_dict` / :meth:`from_alipay_dict`.
    """

    # Serialization order matters: it fixes dict key order in to_alipay_dict.
    _FIELDS = ('code', 'message', 'totalod', 'weekend_od', 'workday_od')

    def __init__(self):
        self._code = None
        self._message = None
        self._totalod = None
        self._weekend_od = None
        self._workday_od = None

    @property
    def code(self):
        """Result code of the query."""
        return self._code

    @code.setter
    def code(self, value):
        self._code = value

    @property
    def message(self):
        """Human-readable result message."""
        return self._message

    @message.setter
    def message(self, value):
        self._message = value

    @property
    def totalod(self):
        """Total OD count over the whole period."""
        return self._totalod

    @totalod.setter
    def totalod(self, value):
        self._totalod = value

    @property
    def weekend_od(self):
        """OD count restricted to weekends."""
        return self._weekend_od

    @weekend_od.setter
    def weekend_od(self, value):
        self._weekend_od = value

    @property
    def workday_od(self):
        """OD count restricted to workdays."""
        return self._workday_od

    @workday_od.setter
    def workday_od(self, value):
        self._workday_od = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the API call."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Unset (None) and falsy values are omitted, matching the
                # alipay SDK convention.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a CloudbusTotalOdItem from a response dict.

        Returns None when *d* is empty or None.
        """
        if not d:
            return None
        o = CloudbusTotalOdItem()
        for field in CloudbusTotalOdItem._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| StarcoderdataPython |
# Interactive menu calculator (prompts are in Portuguese).
# Reads two numbers, then loops over a 5-option menu until the user picks 5.
n = float(input('Digite o 1º numero: '))
n1 = float(input('Digite o 2º numero: '))
n2 = 0  # last chosen menu option; 5 terminates the loop
while n2 != 5:
    print(''' [1] somar
    [2] Multiplicar
    [3] Maior
    [4] Novos numeros
    [5] Sair do programa''')
    n2 = int(input('Sua opção: '))
    if n2 == 1:
        # Option 1: sum of the two current numbers.
        n3 = n + n1
        print(f'A soma entre {n} + {n1} é igual a {n3}')
    elif n2 == 2:
        # Option 2: product of the two current numbers.
        n4 = n * n1
        print(f'O produto de {n} x {n1} é igual a {n4}')
    elif n2 == 3:
        # Option 3: report the larger number (ties report the second one).
        if n > n1:
            print(f'O maior numero digitado foi {n}')
        else:
            print(f'O maior numero digitado foi {n1}')
    elif n2 == 4:
        # Option 4: replace both operands with freshly entered values.
        n7 = float(input('Digite o 1º numero: '))
        n8 = float(input('Digite o 2º numero: '))
        n = n7
        n1 = n8
    elif n2 == 5:
        print('FINALIZANDO o programa!')
    else:
        # Any other option: warn and show the menu again.
        print('Opção invalida por favor digite novamente:')
1959209 | <filename>friendly-asyncio/6-aiohttp-server.py<gh_stars>10-100
import asyncio
import random
from aiohttp import web
# aiohttp demo server: GET /randint/<number> returns a random int in
# [0, number] as plain text.
routes = web.RouteTableDef()


@routes.get(r"/randint/{number:\d+}")
async def randint(request):
    """Handle GET /randint/{number}: respond with one random integer.

    The route regex already restricts the segment to digits; the fallback
    guards against values int() still cannot handle cleanly.
    """
    try:
        number = int(request.match_info["number"])
    except Exception:
        number = 20  # default upper bound when parsing fails
    await asyncio.sleep(0.1)  # thinking
    value = random.randint(0, number)
    return web.Response(text=f"{value}\n")


app = web.Application()
app.add_routes(routes)
web.run_app(app)  # blocks, serving on the default host/port
| StarcoderdataPython |
11217137 | <reponame>mdw771/tomosim
# -*- coding: utf-8 -*-
"""
This script works for foam phantom.
Plot snr_intrinsic and fidelity against truncation ratio.
Will read already-generated data. Use foam_eff_ratio if not exists.
"""
import numpy as np
import glob
import dxchange
import matplotlib.pyplot as plt
import tomopy
import matplotlib
from project import *
from simulator import *
from sinogram import *
from instrument import *
from sample import *
if __name__ == '__main__':

    # Geometry of the padded sinogram (pixels).
    pad_length = 1024
    sino_width = 2048
    half_sino_width = 1024
    scanned_sino_width = 2048 + 1024
    # Number-of-scan values to evaluate for each acquisition scheme.
    n_scan_local_ls = np.arange(1, 14, dtype='int')
    n_scan_tomosaic_ls = np.arange(1, 14, dtype='int')
    ovlp_rate_tomosaic = 0.2
    mask_ratio_local = 0.8

    # Accumulators: truncation ratio, SSIM fidelity and intrinsic-SNR
    # statistics for the local (RMT) and tomosaic (PSMT) schemes.
    trunc_ratio_local_ls = []
    fidelity_local_ls = []
    variance_local_ls = []
    variance_local_interior_ls = []
    snr_local_ls = []
    trunc_ratio_tomosaic_ls = []
    fidelity_tomosaic_ls = []
    variance_tomosaic_ls = []
    variance_tomosaic_interior_ls = []
    snr_tomosaic_ls = []

    # create reference recon (full-field gridrec reconstruction used as the
    # fidelity baseline); reuse it from disk when available.
    # NOTE(review): `os` is not imported here directly — presumably supplied
    # by one of the star imports above; confirm.
    if os.path.exists(os.path.join('data', 'ref_recon.tiff')):
        ref_recon = dxchange.read_tiff(os.path.join('data', 'ref_recon.tiff'))
    else:
        sino = dxchange.read_tiff(os.path.join('data', 'foam_sino_pad.tiff'))
        sino = -np.log(sino)  # absorption -> line integrals
        sino = sino[:, np.newaxis, :]
        theta = tomopy.angles(sino.shape[0])
        ref_recon = tomopy.recon(sino, theta, center=pad_length+half_sino_width, algorithm='gridrec')
        dxchange.write_tiff(ref_recon, 'data/ref_recon', overwrite=True)
    ref_recon = np.squeeze(ref_recon)

    try:
        # NOTE(review): this unconditional raise skips the cached-data branch
        # below and always forces recomputation — looks like a debug leftover;
        # remove it to re-enable loading precomputed arrays.
        raise ValueError
        trunc_ratio_local_ls = np.load(os.path.join('data', 'trunc_ratio_local_ls.npy'))
        fidelity_local_ls = np.load(os.path.join('data', 'fidelity_local_ls.npy'))
        variance_local_ls = np.load(os.path.join('data', 'variance_local_ls.npy'))
        variance_local_interior_ls = np.load(os.path.join('data', 'variance_local_interior_ls.npy'))
        trunc_ratio_tomosaic_ls = np.load(os.path.join('data', 'trunc_ratio_tomosaic_ls.npy'))
        fidelity_tomosaic_ls = np.load(os.path.join('data', 'fidelity_tomosaic_ls.npy'))
        variance_tomosaic_ls = np.load(os.path.join('data', 'variance_tomosaic_ls.npy'))
        variance_tomosaic_interior_ls = np.load(os.path.join('data', 'variance_tomosaic_interior_ls.npy'))
    except:
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt etc.;
        # `except (IOError, OSError, ValueError):` would be safer.
        # Recompute metrics for the tomosaic (PSMT) reconstructions.
        for n_scan in n_scan_tomosaic_ls:
            print('NSCAN (tomosaic): {:d}'.format(n_scan))
            fov = get_fov(n_scan, scanned_sino_width, mask_ratio_local)
            half_fov = int(fov / 2)
            trunc = float(fov) / scanned_sino_width
            trunc_ratio_tomosaic_ls.append(trunc)
            dirname = 'foam_nscan_{:d}'.format(n_scan)
            recon = np.squeeze(
                dxchange.read_tiff(os.path.join('data', 'foam_eff_ratio', dirname, 'recon_tomosaic_1x.tiff')))
            fid = ssim(recon, ref_recon, mask_ratio=0.4, terms='cs')
            varc = snr_intrinsic(recon, mask_ratio=0.4)
            fidelity_tomosaic_ls.append(fid)
            variance_tomosaic_ls.append(varc)
            # Interior SNR: average intrinsic SNR over each FOV-sized tile
            # centered on the scan grid positions.
            stage_begin = ((sino_width + pad_length * 2) - scanned_sino_width) / 2
            stage_end = (sino_width + pad_length * 2) - stage_begin
            stage_list = np.linspace(half_fov + stage_begin, stage_end - half_fov, n_scan)
            stage_list = stage_list.astype('int')
            center_list = [(y, x) for y in stage_list for x in stage_list]
            temp = []
            for y, x in center_list:
                img = recon[y - half_fov:y - half_fov + fov, x - half_fov:x - half_fov + fov]
                temp.append(snr_intrinsic(img, mask_ratio=0.4))
            variance_tomosaic_interior_ls.append(np.mean(temp))
        # Recompute metrics for the local (RMT) reconstructions (same
        # procedure, different recon files).
        for n_scan in n_scan_local_ls:
            print('NSCAN (local): {:d}'.format(n_scan))
            fov = get_fov(n_scan, scanned_sino_width, mask_ratio_local)
            half_fov = int(fov / 2)
            trunc = float(fov) / scanned_sino_width
            trunc_ratio_local_ls.append(trunc)
            dirname = 'foam_nscan_{:d}'.format(n_scan)
            recon = np.squeeze(dxchange.read_tiff(os.path.join('data', 'foam_eff_ratio', dirname, 'recon_local_1x.tiff')))
            fid = ssim(recon, ref_recon, mask_ratio=0.4, terms='cs')
            varc = snr_intrinsic(recon, mask_ratio=0.4)
            fidelity_local_ls.append(fid)
            variance_local_ls.append(varc)
            stage_begin = ((sino_width + pad_length * 2) - scanned_sino_width) / 2
            stage_end = (sino_width + pad_length * 2) - stage_begin
            stage_list = np.linspace(half_fov+stage_begin, stage_end-half_fov, n_scan)
            stage_list = stage_list.astype('int')
            center_list = [(y, x) for y in stage_list for x in stage_list]
            temp = []
            for y, x in center_list:
                img = recon[y-half_fov:y-half_fov+fov, x-half_fov:x-half_fov+fov]
                temp.append(snr_intrinsic(img, mask_ratio=0.4))
            variance_local_interior_ls.append(np.mean(temp))
        # NOTE(review): only the local-scheme lists are converted to arrays;
        # the tomosaic lists are saved as plain lists — confirm intended.
        fidelity_local_ls = np.array(fidelity_local_ls)
        variance_local_ls = np.array(variance_local_ls)
        variance_local_interior_ls = np.array(variance_local_interior_ls)
        trunc_ratio_local_ls = np.array(trunc_ratio_local_ls)
        # save
        np.save(os.path.join('data', 'trunc_ratio_local_ls'), trunc_ratio_local_ls)
        np.save(os.path.join('data', 'fidelity_local_ls'), fidelity_local_ls)
        np.save(os.path.join('data', 'variance_local_ls'), variance_local_ls)
        np.save(os.path.join('data', 'variance_local_interior_ls'), variance_local_interior_ls)
        np.save(os.path.join('data', 'trunc_ratio_tomosaic_ls'), trunc_ratio_tomosaic_ls)
        np.save(os.path.join('data', 'fidelity_tomosaic_ls'), fidelity_tomosaic_ls)
        np.save(os.path.join('data', 'variance_tomosaic_ls'), variance_tomosaic_ls)
        np.save(os.path.join('data', 'variance_tomosaic_interior_ls'), variance_tomosaic_interior_ls)

    # Report all collected metrics.
    print('===========================')
    print('Local:')
    print('TR: ', trunc_ratio_local_ls)
    print('Fidelity: ', fidelity_local_ls)
    print('Variance: ', variance_local_ls)
    print('Interior snr_intrinsic: ', variance_local_interior_ls)
    print('Tomosaic:')
    print('TR: ', trunc_ratio_tomosaic_ls)
    print('Fidelity: ', fidelity_tomosaic_ls)
    print('Variance: ', variance_tomosaic_ls)
    print('Interior snr_intrinsic: ', variance_tomosaic_interior_ls)
    print('===========================')

    # Plot fidelity and SNR against truncation ratio and save PDFs.
    matplotlib.rcParams['pdf.fonttype'] = 'truetype'
    fontProperties = {'family': 'serif', 'serif': ['Times New Roman'], 'weight': 'normal', 'size': 9}
    plt.rc('font', **fontProperties)

    fig = plt.figure()
    plt.plot(trunc_ratio_local_ls, fidelity_local_ls, marker='o', label='RMT fidelity')
    plt.plot(trunc_ratio_tomosaic_ls, fidelity_tomosaic_ls, marker='x', label='PSMT fidelity')
    plt.xlabel('Truncation ratio')
    plt.ylabel('Reconstruction fidelity (dB)')
    plt.legend()
    plt.savefig(os.path.join('data', 'fidelity_trunc.pdf'), format='pdf')

    fig2 = plt.figure()
    plt.plot(trunc_ratio_local_ls, variance_local_ls, marker='o', label='RMT global snr_intrinsic')
    plt.plot(trunc_ratio_tomosaic_ls, variance_tomosaic_ls, marker='x', label='PSMT global snr_intrinsic')
    plt.plot(trunc_ratio_local_ls, variance_local_interior_ls, marker='o', label='RMT interior snr_intrinsic')
    plt.plot(trunc_ratio_tomosaic_ls, variance_tomosaic_interior_ls, marker='x', label='PSMT interior snr_intrinsic')
    plt.legend()
    plt.savefig(os.path.join('data', 'variance_trunc.pdf'), format='pdf')
    plt.show()
| StarcoderdataPython |
12839151 | from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class EC2NetworkInterface(ModelElement):
    """Model element wrapping an AWS::EC2::NetworkInterface resource."""

    def __init__(self, cfn_model):
        """Initialize with empty collections, to be filled by the parser.

        :param cfn_model: parsed CloudFormation model this element belongs to
        """
        ModelElement.__init__(self, cfn_model)
        self.resource_type = 'AWS::EC2::NetworkInterface'
        # Raw template collections (names mirror the CloudFormation schema).
        self.groupSet = []
        self.ipv6Addresses = []
        self.privateIpAddresses = []
        self.tags = []
        # Resolved security-group objects, attached in a later pass.
        self.security_groups = []
| StarcoderdataPython |
1788767 | "Unit test for the game-board class"
import unittest
from .board import *
def place_stone(board, color, x, y):
    """Test helper: drop a stone of *color* on *board* at row *x*, column *y*."""
    board[x,y] = color
class TestBoard(unittest.TestCase):
    """Tests for Board construction, move validation, logging and win detection."""

    def test_creation(self):
        # A fresh board has the requested shape and every cell empty.
        width = 20
        height = 40
        board = Board(height, width)
        self.assertEqual(board.shape, (height,width))
        for i in range(height):
            for j in range(width):
                # empty refers to "no stone laid" and should be defined in the module ``board``
                self.assertEqual(board[i,j], empty)

    def test_reset(self):
        # reset() clears all stones, the log, and hands the turn back to white.
        width = 20
        height = 40
        board = Board(height, width)
        place_stone(board, white, 5, 5)
        place_stone(board, black, 4, 5)
        place_stone(board, white, 4, 3)
        self.assertEqual(board.in_turn, black)
        self.assertFalse( (board.board == np.zeros([height, width]) ).all() )
        board.reset()
        self.assertEqual(board.in_turn, white)
        self.assertEqual(board.shape, (height,width))
        for i in range(height):
            for j in range(width):
                # empty refers to "no stone laid" and should be defined in the module ``board``
                self.assertEqual(board[i,j], empty)

    def test_lay_stone(self):
        # Moves out of turn or onto an occupied cell raise InvalidMoveError.
        width = height= 20
        board = Board(width, height)
        # try "place a black stone at 5,5" --> white starts therefore expect error
        self.assertRaisesRegexp(InvalidMoveError, 'White is in turn', place_stone, board, black, 5, 5)
        # "place a white stone at 5,5" should be OK
        place_stone(board, white, 5, 5)
        # "place another white stone" is an invalid move
        self.assertRaisesRegexp(InvalidMoveError, 'Black is in turn', place_stone, board, white, 5, 4)
        # place black stone at 5,5 is invalid since 5,5 is already occupied
        self.assertRaisesRegexp(InvalidMoveError, r'Position \(5, 5\) is already taken', place_stone, board, white, 5, 5)

    def test_log(self):
        # The move log records positions in play order and is cleared by reset().
        width = height= 20
        board = Board(width, height)
        self.assertEqual(board.log, [])
        place_stone(board, white, 5, 5)
        self.assertEqual(board.log, [(5, 5)])
        place_stone(board, black, 1, 19)
        self.assertEqual(board.log, [(5, 5), (1, 19)])
        place_stone(board, white, 2, 8)
        self.assertEqual(board.log, [(5, 5), (1, 19), (2, 8)])
        board.reset()
        self.assertEqual(board.log, [])

    def test_full(self):
        # full() flips to True exactly when the last cell is filled.
        width = height= 4
        board = Board(height, width)
        in_turn = white
        for i in range(width):
            for j in range(height):
                board[i,j] = in_turn
                # Alternate colors so every move is legal.
                if in_turn == white:
                    in_turn = black
                else:
                    in_turn = white
                if not (i,j) == (width-1, height-1):
                    self.assertFalse(board.full())
                else:
                    self.assertTrue(board.full())
        self.assertTrue(board.full())

    def test_winner(self):
        # Build a vertical run of five white stones in column 1 and check
        # that winner() reports white plus the winning positions.
        width = height= 10
        board = Board(width, height)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, white, 1,2)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, black, 0,2)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, white, 1,3)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, black, 0,3)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, white, 1,4)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, black, 0,4)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, white, 1,5)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, black, 0,5)
        self.assertEqual(board.winner(), (None, []))
        place_stone(board, white, 1,6)
        self.assertEqual(board.winner()[0], white)
        self.assertEqual(board.winner()[1], [(1,2), (1,3), (1,4), (1,5), (1,6)])
class TestGetLine(unittest.TestCase):
    """Tests for the Board line-extraction helpers (row/column/diagonals)."""

    def setUp(self):
        # Every extracted line is expected to have length 5.
        self.target_shape = (5,)
        width = 7
        height = 7
        self.board = Board(width=width, height=height)
        # make row
        place_stone(self.board, white, 1,2)
        place_stone(self.board, black, 1,3)
        place_stone(self.board, white, 1,4)
        place_stone(self.board, black, 1,5)
        place_stone(self.board, white, 1,6)
        # make column
        place_stone(self.board, black, 2,6)
        place_stone(self.board, white, 3,6)
        place_stone(self.board, black, 4,6)
        place_stone(self.board, white, 5,6)
        # leave (6,6) empty
        # make diagonal upleft to lowright
        place_stone(self.board, black, 0,0)
        place_stone(self.board, white, 1,1)
        place_stone(self.board, black, 2,2)
        place_stone(self.board, white, 3,3)
        place_stone(self.board, black, 4,4)
        # make diagonal lowleft to upright
        place_stone(self.board, white, 5,0)
        # leave (4,1) empty
        place_stone(self.board, black, 3,2)
        place_stone(self.board, white, 2,3)
        # (1,4) is already white from "make column"

    def test_get_column(self):
        # Five cells downward from (2,6), including the trailing empty cell.
        column, positions = self.board.get_column(2,6)
        target_positions = [(2,6), (3,6), (4,6), (5,6), (6,6)]
        self.assertEqual(column.shape, self.target_shape)
        np.testing.assert_equal(column, np.array([black,white,black,white,empty]))
        self.assertEqual(positions, target_positions)

    def test_get_row(self):
        # Five cells rightward from (1,2).
        row, positions = self.board.get_row(1,2)
        target_positions = [(1,2), (1,3), (1,4), (1,5), (1,6)]
        self.assertEqual(row.shape, self.target_shape)
        np.testing.assert_equal(row, np.array([white,black,white,black,white]))
        self.assertEqual(positions, target_positions)

    def test_get_diagonal_upleft_to_lowright(self):
        # Five cells down-right from the origin.
        diagonal, positions = self.board.get_diagonal_upleft_to_lowright(0,0)
        target_positions = [(0,0), (1,1), (2,2), (3,3), (4,4)]
        self.assertEqual(diagonal.shape, self.target_shape)
        np.testing.assert_equal(diagonal, np.array([black,white,black,white,black]))
        self.assertEqual(positions, target_positions)

    def test_diagonal_lowleft_to_upright(self):
        # Five cells up-right from (5,0), including the empty (4,1).
        diagonal, positions = self.board.get_diagonal_lowleft_to_upright(5,0)
        target_positions = [(5,0), (4,1), (3,2), (2,3), (1,4)]
        self.assertEqual(diagonal.shape, self.target_shape)
        np.testing.assert_equal(diagonal, np.array([white,empty,black,white,white]))
        self.assertEqual(positions, target_positions)
        # no negative Y-index? Starting at (3,0) the diagonal would leave the
        # board, which must raise rather than wrap around.
        width = 7
        height = 7
        self.board = Board(width=width, height=height)
        place_stone(self.board, white, 3,0)
        place_stone(self.board, black, 2,1)
        place_stone(self.board, white, 1,2)
        place_stone(self.board, black, 0,3)
        place_stone(self.board, white, -1,4)
        self.assertRaises(IndexError, self.board.get_diagonal_lowleft_to_upright, 3,0)
        # reach upmost row? A diagonal ending exactly on row 0 is still valid.
        width = 7
        height = 7
        self.board = Board(width=width, height=height)
        place_stone(self.board, white, 4,0)
        place_stone(self.board, black, 3,1)
        place_stone(self.board, white, 2,2)
        place_stone(self.board, black, 1,3)
        place_stone(self.board, white, 0,4)
        line, positions = self.board.get_diagonal_lowleft_to_upright(4,0)
        np.testing.assert_equal(line, [white, black, white, black, white])
        np.testing.assert_equal(positions, [(4,0), (3,1), (2,2), (1,3), (0,4)])
| StarcoderdataPython |
9650606 | import sys
from asyncio import AbstractEventLoop, get_event_loop_policy
from typing import List, Optional, Union
from unittest.mock import MagicMock, Mock, patch
if sys.version_info >= (3, 8):
from unittest.mock import AsyncMock
else:
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs):
return super(AsyncMock, self).__call__(*args, **kwargs)
from nest_asyncio import apply
from pytest import fixture
apply()
@fixture(scope='session')
def event_loop() -> AbstractEventLoop:
    """Provide a single fresh event loop for the whole pytest session."""
    return get_event_loop_policy().new_event_loop()
def amock(target: Union[str, object], attributes: Optional[List[str]] = None) -> Mock:
target_async_mock = AsyncMock()
if not attributes:
patch(
target=f'{target.__module__}.{target.__name__}' if isinstance(target, object) else target, # type: ignore
side_effect=target_async_mock,
)
return target_async_mock
for attribute in attributes:
attribute_async_mock = AsyncMock()
patch.object(
target=target,
attribute=attribute,
side_effect=attribute_async_mock,
)
target_async_mock[attribute] = attribute_async_mock
return target_async_mock
| StarcoderdataPython |
5195872 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
test.t_utils.test_file
~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import os
import tempfile
from unittest import TestCase, skipUnless, skipIf
import sys
from magrathea.utils.file import open_file, File
class TestMagratheaUtilsFile(TestCase):
    """
    Unit tests for :py:mod:`magrathea.utils.file`
    """

    @skipUnless(hasattr(TestCase, 'assertWarns'), "TestCase.assertWarns not available")
    def test_01a(self):
        """
        Test Case 01a:
        Try opening an existing file using the :py:func:`magrathea.utils.file.open_file` function.
        Test is passed if a deprecation warning is raised.
        """
        # Create a real temp file so open_file has something to open.
        fp, name = tempfile.mkstemp()
        os.write(fp, b"test string")
        os.close(fp)
        with self.assertWarns(DeprecationWarning):
            fd = open_file(name, 'r')
        fd.close()
        os.unlink(name)

    def test_01b(self):
        """
        Test Case 01b:
        Try opening an existing file using the :py:func:`magrathea.utils.file.open_file` function.
        Test is passed if content can be read from file object and meets expectation.
        """
        fp, name = tempfile.mkstemp()
        os.write(fp, b"test string")
        os.close(fp)
        fd = open_file(name, 'r')
        result = fd.read()
        fd.close()
        os.unlink(name)
        self.assertEqual(result, "test string")

    def test_02(self):
        """
        Test Case 02:
        Try getting an instance of :py:class:`magrathea.utils.file.File`.
        Test is passed if instance proves being an instance of :py:class:`magrathea.utils.file.File`.
        """
        obj = File()
        self.assertIsInstance(obj, File)

    def test_03(self):
        """
        Test Case 03:
        Test :py:meth:`magrathea.utils.file.File._check_access` with an existing path (this file).
        Test is passed if return value is True.
        """
        self.assertTrue(File._check_access(__file__, os.R_OK))

    @skipIf(os.path.exists('/foobarstuffdirdoesntexist'), 'Rubbish path existing on your system. Clean up!')
    def test_04(self):
        """
        Test Case 04:
        Test :py:meth:`magrathea.utils.file.File._check_access` with a non existing path.
        Test is passed if return value is False.
        """
        self.assertFalse(File._check_access('/foobarstuffdirdoesntexist', os.R_OK))

    @skipUnless(os.path.exists('/usr/sbin'), '/usr/sbin does not exist on this system')
    def test_05(self):
        """
        Test Case 05:
        Test :py:meth:`magrathea.utils.file.File._check_access` with an existing, but forbidden path.
        Test is passed if return value is False.
        """
        # Assumes the test suite does not run as root (root can write /usr/sbin).
        self.assertFalse(File._check_access('/usr/sbin', os.W_OK))

    def test_06(self):
        """
        Test Case 06:
        Test :py:meth:`magrathea.utils.file.File._check_file_exists` with an existing file (this file).
        Test is passed if return value is True.
        """
        self.assertTrue(File._check_file_exists(__file__))

    @skipIf(os.path.exists('/foobarstuffdirdoesntexist'), 'Rubbish path existing on your system. Clean up!')
    def test_07(self):
        """
        Test Case 07:
        Test :py:meth:`magrathea.utils.file.File._check_file_exists` with a non-existing file.
        Test is passed if return value is True.
        """
        self.assertFalse(File._check_file_exists('/foobarstuffdirdoesntexist'))

    @skipUnless(os.path.exists('/usr/sbin'), '/usr/sbin does not exist on this system')
    def test_08(self):
        """
        Test Case 08:
        Test :py:meth:`magrathea.utils.file.File._check_file_exists` with an existing fs object not being a file.
        Test is passed if return value is False.
        """
        self.assertFalse(File._check_file_exists('/usr/sbin'))

    def test_09(self):
        """
        Test Case 09:
        Test :py:meth:`magrathea.utils.file.File._check_dir_exists` with an existing directory (this file's parent).
        Test is passed if return value is True.
        """
        self.assertTrue(File._check_dir_exists(os.path.dirname(__file__)))

    @skipIf(os.path.exists('/foobarstuffdirdoesntexist'), 'Rubbish path existing on your system. Clean up!')
    def test_10(self):
        """
        Test Case 10:
        Test :py:meth:`magrathea.utils.file.File._check_dir_exists` with a non-existing directory.
        Test is passed if return value is False.
        """
        self.assertFalse(File._check_dir_exists('/foobarstuffdirdoesntexist'))

    def test_11(self):
        """
        Test Case 11:
        Test :py:meth:`magrathea.utils.file.File._check_dir_exists` with an existing fs object not being a directory.
        Test is passed if return value is False.
        """
        self.assertFalse(File._check_dir_exists(__file__))
| StarcoderdataPython |
6636133 | <gh_stars>0
from django.contrib import admin
from .models import Link, Node
# Expose the graph models in the Django admin with default ModelAdmin options.
admin.site.register(Node)
admin.site.register(Link)
| StarcoderdataPython |
4940868 | <reponame>LikimiaD/HackerRank<gh_stars>0
from itertools import product
print(*product(map(int, input().split(' ')), map(int, input().split(' ')))) | StarcoderdataPython |
9645722 | # purchase/urls.py
from django.urls import path
from . import views
app_name = "purchase"
urlpatterns = [
path('', views.PurchaseView.as_view(), name='home'),
path('invoice/<int:invoice_id>', views.InvoiceView.as_view(), name='invoice'),
]
| StarcoderdataPython |
16756 | from typing import Tuple
import torch
class RunningMeanStd:
    """
    Running mean/variance tracker using the parallel (batched) update rule.

    :param epsilon: Small pseudo-count preventing division by zero
    :param shape: Shape of the tracked statistics
    :type epsilon: float
    :type shape: Tuple
    """

    def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
        # Statistics are kept in float64 for numerical stability.
        self.mean = torch.zeros(shape).double()
        self.var = torch.ones(shape).double()
        self.count = epsilon

    def update(self, batch: torch.Tensor):
        """Fold one batch of samples (dim 0 = samples) into the statistics."""
        b_mean = torch.mean(batch, axis=0)
        b_var = torch.var(batch, axis=0)
        b_count = batch.shape[0]

        merged_count = self.count + b_count
        shift = b_mean - self.mean

        # Combined second moment of the two sample sets.
        m2 = (
            self.var * self.count
            + b_var * b_count
            + (shift ** 2) * self.count * b_count / merged_count
        )

        self.mean = self.mean + shift * b_count / merged_count
        self.var = m2 / (merged_count - 1)
        self.count = merged_count
| StarcoderdataPython |
1608179 | # <NAME>
# GUI class
from Tkinter import *
import TTT_game
import TTT_AI
#####################
"""The Class to Create an App."""
class TTT(object):
def __init__(self, master):
''' Initializes the variables for the game and tkinter window.
Pre: The Tkinter master is supplied.
Post: Game and window initialized. '''
self.turn = "O"
## Adding menu bar
self.themenu = Menu(master)
self.file_label_menu = Menu(self.themenu, tearoff=0)
# Submenu
self.file_new_submenu = Menu(self.file_label_menu)
self.file_new_submenu.add_command(label="Human vs. Human",command = self.new_human)
# Difficulty submenus
self.new_submenu_ai_first_submenu = Menu(self.file_new_submenu)
self.new_submenu_ai_first_submenu.add_command(label="Easy",command = self.new_ai_first_easy)
self.new_submenu_ai_first_submenu.add_command(label="Medium",command = self.new_ai_first_medium)
self.new_submenu_ai_first_submenu.add_command(label="Hard",command = self.new_ai_first_hard)
self.file_new_submenu.add_cascade(label="Computer vs. Human",menu=self.new_submenu_ai_first_submenu)
self.new_submenu_ai_submenu = Menu(self.file_new_submenu)
self.new_submenu_ai_submenu.add_command(label="Easy",command=self.new_ai_easy)
self.new_submenu_ai_submenu.add_command(label="Medium",command=self.new_ai_medium)
self.new_submenu_ai_submenu.add_command(label="Hard",command=self.new_ai_hard)
self.file_new_submenu.add_cascade(label="Human vs Computer",menu=self.new_submenu_ai_submenu)
self.file_label_menu.add_cascade(label="New Game",menu=self.file_new_submenu)
self.file_label_menu.add_separator()
self.file_label_menu.add_command(label="Exit",command=master.quit)
self.themenu.add_cascade(label="File",menu=self.file_label_menu)
master.config(menu=self.themenu)
# Frames used in window.
buttonframe = Frame(master)
buttonframe.pack()
frame = Frame(master)
frame.pack(side=TOP,fill=BOTH,expand=True)
# Label in button frame
self.label_message = StringVar()
Label(buttonframe,textvariable=self.label_message,font=("serif",16)).pack()
# Canvas for drawing
self.width = 400
self.height = 400
self.x_offset = 0
self.y_offset = 0
self.canvas = Canvas(frame,width=self.width,height=self.height,relief=SUNKEN)
self.canvas.grid()
self.canvas.pack(side=TOP,fill=BOTH,expand=True)
# Setting event responses
self.canvas.bind("<Configure>",self.draw_board)
self.canvas.bind("<Button-1>",self.WhichSquare)
# Initializes Game.
self.g = TTT_game.game()
self.label_message.set(self.g.message)
# AI is false
self.use_ai = False
# AI Button wrapper functions
def new_ai_first_easy(self):
self.new_ai_first(0)
def new_ai_first_medium(self):
self.new_ai_first(1)
def new_ai_first_hard(self):
self.new_ai_first(2)
def new_ai_easy(self):
self.new_ai(0)
def new_ai_medium(self):
self.new_ai(1)
def new_ai_hard(self):
self.new_ai(2)
def new_ai_first(self,difficulty):
self.new_ai(difficulty)
ai_move = self.a.next_move(self.g.board,self.g.turn)
self.g.TakeTurn(ai_move)
self.drawshape(ai_move,"X")
self.label_message.set(self.g.message)
def new_ai(self,difficulty):
self.new()
self.use_ai = True
self.a = TTT_AI.AI(difficulty)
def new_human(self):
self.new();
self.use_ai = False
def new(self):
'''Creates new game.
Pre: None
Post: Draws new grid, and initiates new grid class.'''
self.label_message.set("")
self.canvas.delete("all")
self.g.newGame()
self.creategrid()
self.label_message.set(self.g.message)
def draw_board(self,event):
'''Draws the tic tac toe board.
Pre: The event passed to the widget, when board needs redrawn.
Post: Board is drawn on window.'''
# Adjusting values
self.width = event.width
self.height = event.height
if self.width > self.height: # Keeping square
self.x_offset = (self.width - self.height) / 2
self.y_offset = 0
self.width = self.height
else:
self.y_offset = (self.height - self.width) / 2
self.x_offset = 0
self.height = self.width
# Redrawing
self.canvas.delete(ALL)
self.creategrid()
if (self.g.turn_number > 0):
self.place_pieces()
if (self.g.gameover == True):
self.draw_end_symbol()
def creategrid(self):
'''Draws the tictactow grid.
Pre: uses class's canvas object.
Post: draws lines on the canvas.'''
## Drawling Background
self.canvas.create_rectangle(self.x_offset,self.y_offset,self.x_offset+self.width,self.y_offset+self.height,fill="#C0C0C0")
## Drawling Lines
length = self.width / 3
# Line 1
x0 = length + self.x_offset
y0 = self.y_offset
x1 = x0
y1 = self.y_offset + self.height
self.canvas.create_line(x0,y0,x1,y1,width=4.0)
# Line 2
x0 = 2*length + self.x_offset
y0 = self.y_offset
x1 = x0
y1 = self.y_offset + self.height
self.canvas.create_line(x0,y0,x1,y1,width=4.0)
# Line 3
x0 = self.x_offset
y0 = self.y_offset + length
x1 = self.x_offset + self.width
y1 = y0
self.canvas.create_line(x0,y0,x1,y1,width=4.0)
# Line 4
x0 = self.x_offset
y0 = self.y_offset + 2*length
x1 = self.x_offset + self.width
y1 = y0
self.canvas.create_line(x0,y0,x1,y1,width=4.0)
def place_pieces(self):
'''Draws the pieces on the board when necessary.
Pre: Uses class's canvas and game objects.
Post: Current pieces in game are are board. '''
for i in range(0,9,1):
piece = self.g.board[i]
if piece != ".":
self.drawshape(i,piece)
def WhichSquare(self,event):
'''Determines which square has been clicked.
Pre: x and y are the coordinates of the click.
Post: initiates the takeTurn method from game class.'''
## Checking which square was clicked
# Initial values
x = event.x
y = event.y
coor = "out"
first_x = self.x_offset + self.width/3
second_x = self.x_offset + (2*self.width)/3
third_x = self.x_offset + self.width
first_y = self.y_offset + self.height/3
second_y = self.y_offset + (2*self.height)/3
third_y = self.y_offset + self.height
# Determing square by coordinates
if (x > self.x_offset) and (y > self.y_offset):
if y < first_y:
if x < first_x:
coor = 0 # Top left square.
elif x < second_x:
coor = 1 # Top middle square.
elif x < third_x:
coor = 2 # Top Right square.
elif y < second_y:
if x < first_x:
coor = 3 # Middle left square.
elif x < second_x:
coor = 4 # Center Square.
elif x < third_x:
coor = 5 # Middle rigth square.
elif y < third_y:
if x < first_x:
coor = 6 # Bottom left square.
elif x < second_x:
coor = 7 # Bottom Middle square.
elif x < third_x:
coor = 8 # Bottom Right Square.
# Entering move in game
if coor != "out":
if self.g.gameover == False:
current_sym = self.g.turn
if (self.g.TakeTurn(coor)): # Begins the turn in game class.
self.drawshape(coor,current_sym) # Draws shape if valid move
self.g.CheckEnd() # Checks to see if game is over
self.label_message.set(self.g.message)
# Checking whether to draw winning symbol
if self.g.gameover == True:
self.draw_end_symbol()
# If using AI
elif self.use_ai == True:
current_sym = self.g.turn
ai_move = self.a.next_move(self.g.board, self.g.turn)
if (self.g.TakeTurn(ai_move)): # Checking move
self.drawshape(ai_move,current_sym)
self.g.CheckEnd()
self.label_message.set(self.g.message)
# Checking whether to draw winning symbol
if self.g.gameover == True:
self.draw_end_symbol()
else:
print("AI gave bad move.")
exit()
else: # Invalid Move
self.label_message.set(self.g.message)
else:
self.label_message.set("Game Over, Hit New")
def draw_end_symbol(self):
'''Draws the symbol incidicating a window.
Pre: Uses the game class end_symbol variable.
Post: Symbol is drawn. '''
if self.g.end_symbol == -1: # Tie
x0 = self.x_offset + self.width/6
y0 = self.y_offset + (self.height)/6
x1 = self.x_offset + (5*self.width)/6
y1 = self.y_offset + (5*self.height)/6
self.canvas.create_arc(x0,y0,x1,y1,style=ARC,width=8.0,start=30.0,extent=300.0,dash=(16,8),outline="#009ACD")
else:
if self.g.end_symbol == 0: # Across top
x0 = self.x_offset + self.width/12
y0 = self.y_offset + self.height/6
x1 = self.x_offset + (11*self.width)/12
y1 = y0
elif self.g.end_symbol == 1: # Down left
x0 = self.x_offset + self.width/6
y0 = self.y_offset + self.height/12
x1 = x0
y1 = self.y_offset + (11*self.height)/12
elif self.g.end_symbol == 2: # Diagonal from top left
x0 = self.x_offset + self.width/12
y0 = self.y_offset + self.height/12
x1 = self.x_offset + (11*self.width)/12
y1 = self.y_offset + (11*self.height)/12
elif self.g.end_symbol == 3: # Diagonal from bottom left
x0 = self.x_offset + self.width/12
y0 = self.y_offset + (11*self.height)/12
x1 = self.x_offset + (11*self.width)/12
y1 = self.y_offset + self.height/12
elif self.g.end_symbol == 4: # Down right
x0 = self.x_offset + (5*self.width)/6
y0 = self.y_offset + self.height/12
x1 = x0
y1 = self.y_offset + (11*self.width)/12
elif self.g.end_symbol == 5: # Across bottom
x0 = self.x_offset + self.width/12
y0 = self.y_offset + (5*self.height)/6
x1 = self.x_offset + (11*self.width)/12
y1 = y0
elif self.g.end_symbol == 6: # Down middle
x0 = self.x_offset + self.width/2
y0 = self.y_offset + self.height/12
x1 = x0
y1 = self.y_offset + (11*self.height)/12
elif self.g.end_symbol == 7: # Across middle
x0 = self.x_offset + self.width/12
y0 = self.y_offset + self.height/2
x1 = self.x_offset + (11*self.width)/12
y1 = y0
# Drawing the line over the winner
self.canvas.create_line(x0,y0,x1,y1,width=8.0,dash=(16,8),fill="#009ACD")
def shape_coordinates(self, x, y):
    '''Find the bounding box for either a circle or cross.

    Pre: The center x,y as arguments; also uses the current value of
         self.width to size the box.
    Post: Returns (x0, y0, x1, y1) for a square box centred on (x, y)
          whose half-side is one eighth of the board width.'''
    half_side = self.width / 8
    return (x - half_side, y - half_side, x + half_side, y + half_side)
def circle(self, x, y):
    '''Create a Circle (the "O" symbol) on the board.

    Pre: (x, y) is the centre of the symbol.
    Post: An O is drawn on the canvas at the given centre.'''
    box = self.shape_coordinates(x, y)
    self.canvas.create_oval(*box, outline="#33DD00", width=4.0)
def cross(self, x, y):
    '''Create a Cross (the "X" symbol) on the board.

    Pre: (x, y) is the middle of the "X" on the board.
    Post: An X is drawn on the canvas at the given centre.'''
    x0, y0, x1, y1 = self.shape_coordinates(x, y)
    # Two diagonals of the bounding box form the X.
    for ax, ay, bx, by in ((x0, y0, x1, y1), (x0, y1, x1, y0)):
        self.canvas.create_line(ax, ay, bx, by, fill="#FF3300", width=4.0)
def drawshape(self, coor, Turn):
    '''Draw the current player's symbol in board cell ``coor``.

    Pre: coor is a cell index 0-8 (row-major, top-left first); Turn is
         "O" for the circle player, anything else draws a cross.
    Post: The symbol is drawn centred in the cell; out-of-range indices
          draw nothing.
    '''
    # BUG FIX: the original computed the y coordinate of the middle and
    # bottom rows from self.width instead of self.height, which placed
    # symbols incorrectly on non-square boards (the top row already used
    # self.height, so this also restores internal consistency).
    centers = {
        0: (self.width/6, self.height/6),          # Top Left
        1: (self.width/2, self.height/6),          # Top Middle
        2: ((5*self.width)/6, self.height/6),      # Top Right
        3: (self.width/6, self.height/2),          # Middle Left
        4: (self.width/2, self.height/2),          # Center
        5: ((5*self.width)/6, self.height/2),      # Middle Right
        6: (self.width/6, (5*self.height)/6),      # Bottom Left
        7: (self.width/2, (5*self.height)/6),      # Bottom Middle
        8: ((5*self.width)/6, (5*self.height)/6),  # Bottom Right
    }
    if coor not in centers:
        return
    x, y = centers[coor]
    x += self.x_offset
    y += self.y_offset
    # Deciding between circle and cross.
    if Turn == "O":
        self.circle(x, y)
    else:
        self.cross(x, y)
| StarcoderdataPython |
5158800 | <reponame>ericjwhitney/pyavia
#!/usr/bin/env python3
# Examples of stress concentration factor along the bore of straight or
# countersunk holes. Reproduces results of NASA-TP-3192 Figure 4, 7(a) and
# 7(b)
# Written by: <NAME> Last updated: 9 April 2020
import numpy as np
import matplotlib.pyplot as plt
from pyavia.struct import kt_hole3d

# ----------------------------------------------------------------------------
# Straight-shank ('ss') holes: b/t = 1.0 means the straight portion spans
# the full thickness.  Tension SCF along the bore for several r/t.
bt, rw = 1.0, 1 / 5
rt, zt = [2.5, 1.5, 1.0, 0.5, 0.25], np.linspace(-0.5, +0.5, 100)
scf_ss = np.zeros((len(rt), len(zt)))
labels = []
for i, rt_i in enumerate(rt):
    for j, zt_j in enumerate(zt):
        scf_ss[i, j] = kt_hole3d(rt_i, bt, zt_j, rw, 'tension')
    labels.append(f"$r/t = {rt_i:.2f}$")

plt.figure(1)  # NASA-TP-3192 Figure 4.
for y in scf_ss:
    plt.plot(zt, y)
plt.xlim((0.0, 0.5))
plt.ylim((2.6, 3.4))
plt.xlabel('$z/t$')
plt.ylabel('$K_t$')
# BUG FIX: the titles of figures 1 and 2 were swapped.  This figure shows
# the straight-shank (plain) case computed into scf_ss above.
plt.title('Tension SCF Along Bore - Plain')
plt.legend(labels)
plt.grid()

# ----------------------------------------------------------------------------
# Countersunk ('cs') holes: sweep countersink depth ratio b/t at fixed r/t.
rt, rw = 2.0, 1 / 7.5
bt = np.linspace(0.0, 0.75, 4)
scf_cs_t = np.zeros((len(bt), len(zt)))
scf_cs_b = np.zeros((len(bt), len(zt)))
labels = []
for i, bt_i in enumerate(bt):
    for j, zt_j in enumerate(zt):
        scf_cs_t[i, j] = kt_hole3d(rt, bt_i, zt_j, rw, 'tension')
        scf_cs_b[i, j] = kt_hole3d(rt, bt_i, zt_j, rw, 'bending')
    labels.append(f"$b/t = {bt_i:.2f}$")

plt.figure(2)  # NASA-TP-3192 Figure 7(a).
for y in scf_cs_t:
    plt.plot(zt, y)
plt.xlim((-0.5, 0.5))
plt.ylim((1.5, 4.5))
plt.xlabel('$z/t$')
plt.ylabel('$K_t$')
plt.title('Tension SCF Along Bore - Countersunk')  # BUG FIX: was '- Plain'.
plt.legend(labels)
plt.grid()

plt.figure(3)  # NASA-TP-3192 Figure 7(b).
for y in scf_cs_b:
    plt.plot(zt, y)
plt.xlim((-0.5, 0.5))
plt.ylim((-3, 3))
plt.xlabel('$z/t$')
plt.ylabel('$K_b$')
plt.title('Bending SCF Along Bore - Countersunk')
plt.legend(labels)
plt.grid()

plt.show()
| StarcoderdataPython |
44966 | import json
import os
from eg import config
from eg import substitute
from eg import util
from mock import Mock
from mock import patch
# Fixture files used by the squeeze tests: the same example file before and
# after blank-line squeezing.
PATH_UNSQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_unsqueezed.md'
)
PATH_SQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_squeezed.md'
)


def _create_config(
    examples_dir=None,
    custom_dir=None,
    color_config=None,
    use_color=True,
    pager_cmd=None,
    editor_cmd=None,
    squeeze=False,
    subs=None
):
    """
    Create a config.Config object with default values for expediency in
    testing.

    Any keyword not supplied is passed through as its default (mostly None),
    so individual tests only need to specify the fields they care about.
    """
    return config.Config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        editor_cmd=editor_cmd,
        squeeze=squeeze,
        subs=subs
    )
@patch('os.walk')
def test_get_file_paths_for_program_with_single(mock_walk):
    """A single matching example file in a flat directory is found."""
    examples_dir = '/Users/tyrion'
    program = 'cp'
    example_file = program + util.EXAMPLE_FILE_SUFFIX
    # Only the '<program>.md' entry should be picked up, not cp.txt or
    # unrelated .md files.
    mock_walk.return_value = [
        [examples_dir, [], [example_file, 'cp.txt', 'other_file.md']],
    ]

    found = util.get_file_paths_for_program(program, examples_dir)

    assert found == ['/Users/tyrion/cp.md']
    mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_nested(mock_walk):
    """Matching example files are collected from nested directories."""
    program = 'cp'
    examples_dir = '/Users/tyrion'
    program_file = 'cp.md'
    # Simulated os.walk output: (dirpath, dirnames, filenames) triples for
    # the root and three nested directories, each containing cp.md plus
    # decoy files that must be ignored.
    mock_walk.return_value = [
        [
            examples_dir,
            ['dirA', 'dirB'],
            [program_file, 'cp.txt', 'other_file.md'],
        ],
        [
            examples_dir + '/dirA',
            ['dirA-child'],
            [program_file, 'bad.md'],
        ],
        [
            examples_dir + '/dirA/dirA-child',
            [],
            ['bad.md', program_file, 'wtf.md'],
        ],
        [
            examples_dir + '/dirB',
            [],
            ['foo.md', program_file],
        ],
    ]
    # Expected results preserve walk order.
    expected = [
        '/Users/tyrion/cp.md',
        '/Users/tyrion/dirA/cp.md',
        '/Users/tyrion/dirA/dirA-child/cp.md',
        '/Users/tyrion/dirB/cp.md',
    ]
    actual = util.get_file_paths_for_program(program, examples_dir)
    assert actual == expected
    mock_walk.assert_called_once_with(examples_dir)


@patch('os.walk')
def test_get_file_paths_for_program_with_none(mock_walk):
    """An empty directory tree yields an empty list of paths."""
    expected = []
    mock_walk.return_value = []
    actual = util.get_file_paths_for_program('cp', '/Users/tyrion')
    assert actual == expected
    mock_walk.assert_called_once_with('/Users/tyrion')


@patch('os.walk')
def test_get_file_paths_for_program_with_no_dir(mock_walk):
    """A None directory is handled gracefully (no walk, empty result)."""
    assert util.get_file_paths_for_program('cp', None) == []
# NOTE: @patch decorators inject mocks bottom-up, so the decorator closest
# to the function provides the first mock parameter.
@patch('eg.util.page_string')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_resolved_program')
def test_handle_program_no_entries(
    mock_resolve_program,
    mock_get_contents,
    mock_format,
    mock_page_string,
):
    """
    We should do the right thing if there are no entries for a given program.
    """
    program = 'cp'
    test_config = _create_config()
    mock_resolve_program.return_value = program
    util.handle_program(program, test_config)
    mock_resolve_program.assert_called_once_with(
        program,
        test_config
    )
    # We should have aborted and not called any of the
    # other methods.
    assert mock_get_contents.call_count == 0
    assert mock_format.call_count == 0
    assert mock_page_string.call_count == 0


@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.
    """
    program = 'mv'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of mv.md.'
    formatted_contents = 'and I am the formatted contents of mv.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
    custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']

    # Side effect that returns the right path list depending on which
    # directory handle_program asks about, and validates its arguments.
    def return_correct_path(*args, **kwargs):
        program_param = args[0]
        dir_param = args[1]
        if program_param != program:
            raise NameError('expected ' + program + ', got ' + program_param)
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)

    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect=return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = program
    util.handle_program(program, test_config)
    mock_resolve.assert_called_once_with(
        program,
        test_config
    )
    mock_get_paths.assert_any_call(
        program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        program,
        custom_dir,
    )
    # Custom paths must come first so user examples take precedence.
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        custom_paths[1],
        default_paths[0],
        default_paths[1],
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )


@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.  Here the requested program is an alias, so all lookups must
    use the resolved name.
    """
    alias_for_program = 'link'
    resolved_program = 'ln'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of ln.md.'
    formatted_contents = 'and I am the formatted contents of ln.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/ln.md']
    custom_paths = ['test-custom-dir/ln.md']

    # Paths must be requested with the *resolved* program, never the alias.
    def return_correct_path(*args, **kwargs):
        program_param = args[0]
        dir_param = args[1]
        if program_param != resolved_program:
            raise NameError(
                'expected ' +
                resolved_program +
                ', got ' +
                program_param
            )
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)

    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect = return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = resolved_program
    util.handle_program(
        alias_for_program,
        test_config
    )
    mock_resolve.assert_called_once_with(
        alias_for_program,
        test_config
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        custom_dir,
    )
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        default_paths[0]
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )
def test_get_list_of_all_supported_commands(tmpdir):
    """
    The supported-commands list merges default and custom example dirs,
    marking entries found in both with '*', custom-only with '+', and
    showing aliases as 'alias -> target'.
    """
    dir_example = tmpdir.mkdir('examples')
    dir_custom = tmpdir.mkdir('custom')
    config = _create_config(
        examples_dir=str(dir_example),
        custom_dir=str(dir_custom),
    )
    expected = [
        'a-only-default',
        'b-both *',
        'c-only-custom +',
        'd-only-custom-nested +',
        'e-only-default-nested',
        'f-default-custom-nested',
        'g-both-different-levels *',
        't-a-only-default-alias -> a-only-default',
        'u-b-both-alias -> b-both *',
        'v-c-only-custom-alias -> c-only-custom +'
    ]
    aliases = {
        't-a-only-default-alias': 'a-only-default',
        'u-b-both-alias': 'b-both',
        'v-c-only-custom-alias': 'c-only-custom'
    }
    # Make the directory structure we expect.
    dir_example_nested = dir_example.mkdir('default-nested')
    dir_custom_nested = dir_custom.mkdir('custom-nested')
    dir_example.join('a-only-default.md').write('foo')
    dir_example.join('b-both.md').write('foo')
    dir_custom.join('b-both.md').write('foo')
    dir_custom.join('c-only-custom.md').write('foo')
    dir_custom_nested.join('d-only-custom-nested.md').write('foo')
    dir_example_nested.join('e-only-default-nested.md').write('foo')
    dir_example_nested.join('f-default-custom-nested.md').write('foo')
    dir_example.join('g-both-different-levels.md').write('foo')
    dir_custom_nested.join('g-both-different-levels.md').write('foo')
    # Use the 'with' context manager rather than the @decorator, because the
    # tmpdir fixture doesn't play nice with the decorator.
    with patch('eg.util.get_alias_dict') as mock_get_alias:
        mock_get_alias.return_value = aliases
        actual = util.get_list_of_all_supported_commands(config)
    assert actual == expected
    mock_get_alias.assert_called_once_with(config)


def test_list_supported_programs_fails_gracefully_if_no_dirs():
    """With neither examples nor custom dir configured, the list is empty."""
    test_config = _create_config()
    actual = util.get_list_of_all_supported_commands(test_config)
    target = []
    assert actual == target
def test_calls_pipepager_if_not_less():
    """
    We're special casing less a bit, as it is the default value, so if a custom
    command has been set that is NOT less, we should call pipepager straight
    away.
    """
    _helper_assert_about_pager('page me plz', 'cat', False)


def test_calls_fallback_pager_if_none():
    """
    If pager_cmd is None, we should just use the fallback pager.
    """
    _helper_assert_about_pager('page me plz', None, True)


def test_calls_pipepager_if_less():
    """
    We should call pipepager if we ask to use less and less is installed on the
    machine.
    """
    _helper_assert_about_pager('a fancy value to page', 'less -R', False)


def test_calls_fallback_if_cmd_is_flag_string():
    """
    We are using a flag string to indicate if we should use the fallback pager.
    """
    _helper_assert_about_pager(
        'page via fallback',
        util.FLAG_FALLBACK,
        True
    )


# The patch decorators supply the trailing `pipepager` and `default_pager`
# parameters (bottom-up), so callers pass only the first three arguments.
@patch('pydoc.pager')
@patch('pydoc.pipepager')
def _helper_assert_about_pager(
    str_to_page,
    pager_cmd,
    use_fallback,
    pipepager,
    default_pager,
):
    """
    Help with asserting about pager.
    str_to_page: what you're paging
    pager_cmd: the string you're passing to pipepager (or None)
    use_fallback: false if we should actually use pydoc.pipepager, true if we
        instead are going to fallback to pydoc.pager
    """
    util.page_string(str_to_page, pager_cmd)
    if use_fallback:
        default_pager.assert_called_once_with(str_to_page)
        assert pipepager.call_count == 0
    else:
        assert default_pager.call_count == 0
        pipepager.assert_called_once_with(
            str_to_page,
            cmd=pager_cmd
        )
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_not_less(pipepager_mock):
    """
    Do not fail when user hits ctrl-c while in pager.
    """
    interrupted = False
    try:
        util.page_string('page me plz', 'cat')
    except KeyboardInterrupt:
        interrupted = True
    assert not interrupted, 'Should not have got this far'
    pipepager_mock.assert_called_once_with('page me plz', cmd='cat')


@patch('eg.util.pydoc.pager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
    """
    Do not fail when user hits ctrl-c while in pipepager.
    """
    interrupted = False
    try:
        util.page_string('page me plz', None)
    except KeyboardInterrupt:
        interrupted = True
    assert not interrupted, 'Should not have got this far'
    pager_mock.assert_called_once_with('page me plz')
def test_get_contents_from_files_handles_none():
    """
    Empty string if no files.
    """
    _helper_assert_file_contents(
        [],
        ''
    )


def test_get_contents_from_files_handles_one():
    """A single file's contents are returned unchanged."""
    file_infos = [
        {
            'path': 'test/path',
            'contents': 'contents of file'
        }
    ]
    combined_contents = 'contents of file'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )


def test_get_contents_from_files_handles_multiple():
    """Contents of multiple files are concatenated in argument order."""
    file_infos = [
        {
            'path': 'path/1',
            'contents': 'foo\n'
        },
        {
            'path': 'path/2/foo',
            'contents': 'bar\n'
        },
        {
            'path': 'another/path',
            'contents': 'baz'
        }
    ]
    combined_contents = 'foo\nbar\nbaz'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )


# The patch decorator supplies the trailing get_contents_mock parameter.
@patch('eg.util._get_contents_of_file')
def _helper_assert_file_contents(
    file_infos,
    target_contents,
    get_contents_mock,
):
    """
    Helper method to assert things about the get_contents_from_files method.
    Does not actually hit the disk.
    file_infos: array of { path, contents } dicts representing files. Array so
        that we can assert proper order calling
    target_contents: the final combined contents that should be returned by the
        get_contents_from_files method.
    """
    # This method will be used by the mock framework to return the right file
    # contents based on the file name.
    def return_file_contents(*args, **kwargs):
        for file_info in file_infos:
            if file_info['path'] == args[0]:
                return file_info['contents']
        raise TypeError('did not find path in test obj')

    get_contents_mock.side_effect = return_file_contents
    paths = [el['path'] for el in file_infos]
    actual = util.get_contents_from_files(*paths)
    assert actual == target_contents
# The three patch decorators supply the trailing sub_method, squeeze_method
# and color_method parameters (bottom-up order).
@patch('eg.util.get_colorized_contents')
@patch('eg.util.get_squeezed_contents')
@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
    starting_contents,
    use_color,
    color_config,
    squeeze,
    subs,
    colorized_contents,
    squeezed_contents,
    subbed_contents,
    formatted_result,
    sub_method,
    squeeze_method,
    color_method,
):
    """
    Helper method to assist in asserting things about the
    get_formatted_contents method.
    starting_contents: the starting string that we are working with
    use_color: True if we should use color
    color_config: the color config to be passed to get_colorized_contents
    squeeze: True if we should squeeze
    subs: the list of Substitutions that we should pass to
        get_substituted_contents
    colorized_contents: the result of get_colorized_contents
    squeezed_contents: the result of get_squeezed_contents
    subbed_contents: the result of get_substituted_contents
    formatted_result: the final, formatted string that should be returned
    """
    sub_method.return_value = subbed_contents
    squeeze_method.return_value = squeezed_contents
    color_method.return_value = colorized_contents
    actual = util.get_formatted_contents(
        starting_contents,
        use_color,
        color_config,
        squeeze,
        subs
    )
    # We'll update the contents as they get formatted to make sure
    # we pass the right thing to the various methods.
    contents_thus_far = starting_contents
    if use_color:
        color_method.assert_called_once_with(
            contents_thus_far,
            color_config
        )
        contents_thus_far = colorized_contents
    else:
        assert color_method.call_count == 0
    if squeeze:
        squeeze_method.assert_called_once_with(contents_thus_far)
        contents_thus_far = squeezed_contents
    else:
        assert squeeze_method.call_count == 0
    if subs:
        sub_method.assert_called_once_with(
            contents_thus_far,
            subs
        )
        contents_thus_far = subbed_contents
    else:
        assert sub_method.call_count == 0
    assert actual == formatted_result


def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
    """
    We should invoke none of the formatter methods if the flags are false and
    subs is not truthy.
    """
    starting_contents = 'this is where we start'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        None,
        'this was colored',
        'this was squeezed',
        'these contents were subbed',
        starting_contents
    )


def test_get_formatted_contents_calls_colorize_if_use_color():
    """
    Colorize the contents if use_color = True.
    """
    starting_contents = 'this is where we start'
    colorized_contents = 'COLORIZED: this is where we start'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        False,
        None,
        colorized_contents,
        'this was squeezed',
        'these contents were subbed',
        colorized_contents
    )


def test_get_formatted_contents_squeezes():
    """If squeeze, we need to squeeze."""
    starting_contents = 'this is where we start'
    squeezed_contents = 'this is the result of a squeezing'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        True,
        None,
        'this was colored',
        squeezed_contents,
        'these contents were subbed',
        squeezed_contents
    )


def test_get_formatted_contents_subsitutes():
    """If subs is truthy, get_substituted contents should be called."""
    starting_contents = 'this is where we start'
    subbed_contents = 'substituted like a teacher'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )


def test_perform_all_formatting():
    """
    When use_color, squeeze, and subs are all truthy, all the formatting
    should be applied in that order.
    """
    starting_contents = 'the starting point for grand formatting'
    subbed_contents = 'subbed is the last thing called so should be the result'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        True,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )
def _get_file_as_string(path):
"""Get the contents of the file as a string."""
with open(path, 'r') as f:
data = f.read()
return data
def test_get_squeezed_contents_correctly_squeezes():
    """
    Our squeeze method should follow our convention, which is to remove the
    blank line between a description and an example, to keep two blank lines
    between sections, and otherwise have only single blank lines.
    """
    unsqueezed = _get_file_as_string(PATH_UNSQUEEZED_FILE)
    # the target squeezed output is a reference implementation in
    # pwd_squeezed.md.
    target = _get_file_as_string(PATH_SQUEEZED_FILE)
    actual = util.get_squeezed_contents(unsqueezed)
    assert actual == target


def test_get_substituted_contents_handles_empty_subs():
    """Nothing should be formatted if there are no substitutions."""
    raw_contents = 'this should not be subbed'
    actual = util.get_substituted_contents(raw_contents, [])
    assert actual == raw_contents


def test_get_substituted_contents_substitutes_calls_correct_methods():
    """
    The get_substituted_contents method calls things in the correct order.

    Each substitution must receive the output of the previous one, and the
    final substitution's result is returned.
    """
    sub_one = Mock(auto_spec=substitute.Substitution)
    sub_one_result = 'result of sub one'
    sub_one.apply_and_get_result.return_value = sub_one_result
    sub_two = Mock(auto_spec=substitute.Substitution)
    sub_two_result = 'result of sub two'
    sub_two.apply_and_get_result.return_value = sub_two_result
    starting_contents = 'the string we should be substituting into'
    target = sub_two_result
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(starting_contents, subs)
    sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
    sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
    assert actual == target


def test_get_substituted_contents_substitutes_correctly():
    """
    Basic test to make sure Substitutions can get applied correctly.
    """
    sub_one = substitute.Substitution('foo', 'bar', False)
    sub_two = substitute.Substitution('bar\n\n', 'baz\n', True)
    start = 'foo\n\n something else\n\n bar\n\n'
    target = 'baz\n something else\n\n baz\n'
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(start, subs)
    assert actual == target


@patch('eg.color.EgColorizer')
def test_get_colorized_contents_calls_methods(patched_colorizer_class):
    """
    We should call the correct methods on the EgColorizer objects when we color
    a file.
    """
    raw_contents = 'these are uncolored contents'
    colored_contents = 'COLORED: ' + raw_contents
    color_config = 'some color config'
    # The actual instance created by these calls is stored at return_value.
    colorizer_instance = patched_colorizer_class.return_value
    colorizer_instance.colorize_text.return_value = colored_contents
    actual = util.get_colorized_contents(raw_contents, color_config)
    assert actual == colored_contents
    colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
# The patch decorator supplies the trailing mock_dict parameter.
@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
    program,
    resolved_program,
    config_obj,
    alias_dict,
    mock_dict,
):
    """
    program: the program to resolve as an alias
    resolved_program: the expected result of the resolution.
    config_obj: the config_obj to use to resolve the alias path
    alias_dict: the dict of aliases to be returned
    """
    mock_dict.return_value = alias_dict
    actual = util.get_resolved_program(program, config_obj)
    assert actual == resolved_program
    mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
    """
    A program that is not an alias should return itself.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'a config'
    # BUG FIX: this test previously resolved 'link' -> 'ln', which exercises
    # the *alias* path and contradicts the test name.  'cp' is absent from
    # alias_dict, so it must resolve to itself.
    _helper_assert_get_resolved_program('cp', 'cp', config_obj, alias_dict)
def test_get_resolved_program_is_alias():
    """
    A program that is an alias should return the resolved value.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'some new config'
    # BUG FIX: this test previously resolved 'cp' -> 'cp', which is not an
    # alias and contradicts the test name.  'link' is in alias_dict and must
    # resolve to 'ln'.
    _helper_assert_get_resolved_program('link', 'ln', config_obj, alias_dict)
def test_get_alias_dict_returns_contents_of_correct_file():
    """
    get_alias_dict should read data from the file at the default path.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/alias/file'
    alias_dict_str = json.dumps(alias_dict)
    _helper_assert_get_alias_dict(
        alias_dict_str,
        alias_dict,
        config_obj,
        alias_file_path,
        True
    )


def test_get_alias_dict_fails_gracefully_if_not_file():
    """
    Since users can specify a directory for examples that might not contain the
    aliases file, we want to fail gracefully if the file doesn't exist.
    """
    contents_of_alias_dict_file = 'should never be reached'
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/the/alias/file'
    _helper_assert_get_alias_dict(
        contents_of_alias_dict_file,
        {},
        config_obj,
        alias_file_path,
        False
    )


# The patch decorators supply the trailing three mock parameters (bottom-up).
@patch('eg.util._get_contents_of_file')
@patch('eg.util._get_alias_file_path')
@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
    contents_of_alias_dict_file,
    target_alias_dict,
    config_obj,
    alias_file_path,
    alias_file_path_is_file,
    mock_is_file,
    mock_get_alias_file_path,
    mock_get_contents,
):
    """
    contents_of_alias_dict_file: the string contents of the file storing the
        dictionary of aliases
    target_alias_dict: the target result of get_alias_dict
    config_obj: the Config object
    alias_file_path: the path to be returned by _get_alias_file_path
    alias_file_path_is_file: True if the alias path is a file, else False
    """
    mock_is_file.return_value = alias_file_path_is_file
    mock_get_alias_file_path.return_value = alias_file_path
    mock_get_contents.return_value = contents_of_alias_dict_file
    actual = util.get_alias_dict(config_obj)
    assert actual == target_alias_dict
    mock_get_alias_file_path.assert_called_once_with(config_obj)
    mock_is_file.assert_called_once_with(alias_file_path)
    if alias_file_path_is_file:
        mock_get_contents.assert_called_once_with(alias_file_path)
    else:
        # When the file is missing, we must not try to read it at all.
        assert mock_get_contents.call_count == 0


@patch('os.path.join')
def test_get_alias_file_path(mock_join):
    """
    _get_alias_file_path should just join the example dir and the alias file
    name, to make sure we look in the right place for the file.
    """
    config_obj = _create_config(
        examples_dir='handy/dandy/examples/dir',
    )
    join_result = 'joined path'
    mock_join.return_value = join_result
    actual = util._get_alias_file_path(config_obj)
    assert actual == join_result
    mock_join.assert_called_once_with(
        config_obj.examples_dir,
        util.ALIAS_FILE_NAME
    )
def test_is_example_file_true_if_has_suffix():
    """
    Should be true if the file name ends in EXAMPLE_FILE_SUFFIX.
    """
    file_name = 'find.md'
    actual = util._is_example_file(file_name)
    # Idiom fix: `actual == True` is the non-idiomatic comparison; assert
    # identity with the True singleton instead.
    assert actual is True
def test_is_example_file_false_if_not_suffix():
    """
    Should be false if the file does not end in EXAMPLE_FILE_SUFFIX.
    """
    # BUG FIX: the test was named ..._true_if_not_suffix while asserting a
    # False result; renamed so the name matches the asserted behavior.
    # (Test functions have no callers, so the rename is safe.)
    file_name = 'aliases.json'
    actual = util._is_example_file(file_name)
    assert actual is False
def test_can_parse_alias_file():
    """
    Make sure aliases.json file can be parsed.
    This is to make sure an edit doesn't accidentally corrupt it.
    """
    # We'll have to hardcode this.
    alias_file_path = os.path.join(
        config.DEFAULT_EXAMPLES_DIR,
        util.ALIAS_FILE_NAME
    )
    alias_file_contents = util._get_contents_of_file(alias_file_path)
    alias_dict = json.loads(alias_file_contents)
    # We'll check that link goes to ln, as we know that one will be present.
    assert alias_dict['link'] == 'ln'


# The patch decorators supply the five mock parameters (bottom-up).
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should resolve aliases, get the custom file path, and call subprocess.
    """
    program = 'du'
    resolved_program = 'alias for du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = ['path/to/custom/du.md', 'foo.md']
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    # Only the first existing path should be opened in the editor.
    mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
    assert mock_inform.call_count == 0


@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_creates_file_if_none_exist(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """With no existing files, a new <resolved-program>.md is opened."""
    program = 'du'
    resolved_program = 'alias-for-du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = []
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    mock_call.assert_called_once_with(
        [config.editor_cmd, 'path/to/custom/alias-for-du.md'])
    assert mock_inform.call_count == 0


@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should inform the user if they are trying to edit with no custom dir.
    This should be true if it is not set and if the path does not exist.
    """
    program = 'awk'
    # First with no custom dir set.
    config = _create_config(editor_cmd='vi -e')
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 1
    # And now with it set but a nonexistent path.
    config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
    mock_exists.return_value = False
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 2
    # In neither case should we resolve, look up paths, or spawn an editor.
    assert mock_call.call_count == 0
    assert mock_get_paths.call_count == 0
    assert mock_get_program.call_count == 0
| StarcoderdataPython |
1990628 | from fastapi import APIRouter
from app.schemas import UserCreate, UserUpdate
from app.models import User
# All user endpoints live under the /user prefix and share the "user" tag
# in the generated OpenAPI docs.
user_router = APIRouter(
    prefix="/user",
    tags=["user"],
    responses={404: {"description": "Not found"}},
)
@user_router.get("/by_id")
async def user_find_by_id(id: int):
    """Look up a user by primary key.

    Returns the user if found, otherwise an error payload.
    NOTE(review): the error is returned with HTTP 200, not 404 -- confirm
    whether clients rely on that before changing it.
    """
    found = await User.find_by_id(id=id)
    if not found:
        return { 'error': 'User not found' }
    return found
@user_router.get("/by_email")
async def user_find_by_email(email: str):
    """Look up a user by email address.

    Returns the user if found, otherwise an error payload (with HTTP 200).
    """
    found = await User.find_by_email(email=email)
    if not found:
        return { 'error': 'User not found' }
    return found
@user_router.post("/")
async def user_create(user_body: UserCreate):
    """Create a new user unless the email address is already registered.

    Fields left as None in the request body are omitted so model defaults
    apply.
    """
    existing = await User.find_by_email(email=user_body.email)
    if existing:
        return { 'error': 'User already exists' }
    payload = { k: v for (k, v) in user_body.dict().items() if v is not None }
    return await User.create(**payload)
@user_router.put("/")
async def user_update(user_body: UserUpdate):
    """Apply the non-null fields of the payload to an existing user.

    The id field selects the record and is excluded from the update set.
    """
    changes = {
        k: v
        for (k, v) in user_body.dict(exclude={'id'}).items()
        if v is not None
    }
    updated = await User.update(user_body.id, **changes)
    if not updated:
        return { 'error': 'User not updated' }
    return updated
11250634 | <filename>resources/models/commons/errors/computersRouteErrors.py<gh_stars>0
#encoding utf-8
#__author__ = <NAME>, <EMAIL>
#Python3
__author__ = '<NAME>'
from datetime import datetime
from resources.models.commons.database_manager import Database
from resources.models.commons.mysql_manager import Gera_query
class ErrorController():
    """Logs an API error to a flat file and the database, then re-raises it."""

    def __init__(self, error, route, method, applicant):
        # Keep the original exception object so it can be propagated after
        # logging; the string form is what gets written to the log targets.
        self._original_error = error
        self.error = str(error)
        self.route = route
        self.method = method
        self.applicant = applicant
        self.log_register()

    def log_register(self):
        """Append the error to log.log and persist it via the api_logs table.

        Always raises: the original error after successful logging, or the
        secondary failure if logging itself fails (after noting it in log.log).
        """
        try:
            with open('resources/models/commons/errors/log.log', 'a') as log:
                log.write(f'Route: {self.route}\t\tMethod:{self.method}\t\tApplicant:{self.applicant}\t\tError:{self.error}\t\tWhen:{str(datetime.now())}\n')
            query = Gera_query().inserir_na_tabela('api_logs', ['type_id', 'route', 'method', 'applicant','body'], ['1', f'"{self.route}"', f'"{self.method}"', f'"{self.applicant}"', self.error])
            Database().commit_without_return(query)
        except Exception as e:
            with open('resources/models/commons/errors/log.log', 'a') as log:
                log.write(f'Route: LogRegister\t\t\t\t\tMethod:\t\t\tApplicant:{self.applicant}\t\tError:{e}\t\tWhen:{str(datetime.now())}\n')
            raise e
        # BUG FIX: the original did `raise self.error`, which raises a str and
        # always fails with "TypeError: exceptions must derive from
        # BaseException". Re-raise the original exception object instead,
        # wrapping non-exception values.
        if isinstance(self._original_error, BaseException):
            raise self._original_error
        raise Exception(self.error)
| StarcoderdataPython |
6582382 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 11:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the station app: Box, Service, Stand, Station.

    Auto-generated by Django 1.9.1; do not edit by hand except to resolve
    conflicts. The Box->Station FK is added in a trailing AddField because
    Station is created after Box.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Box',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=255, verbose_name='Address')),
            ],
        ),
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True, verbose_name='Name')),
                ('min_cost', models.PositiveIntegerField(verbose_name='Min. cost')),
                ('max_cost', models.PositiveIntegerField(verbose_name='Max. cost')),
                ('min_duration', models.DurationField(verbose_name='Min. duration')),
                ('max_duration', models.DurationField(verbose_name='Max. duration')),
            ],
        ),
        migrations.CreateModel(
            name='Stand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
                ('box', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='station.Box')),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
            ],
        ),
        # Added last because Station does not exist yet when Box is created.
        migrations.AddField(
            model_name='box',
            name='station',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='station.Station'),
        ),
    ]
| StarcoderdataPython |
4866004 | <gh_stars>1-10
'''
This program plots the lengths of source input and target pairs.
The intention is for one to use this to help determine bucket sizes.
Maybe in the future I will implement a clustering algorithm to autonomously find
bucket sizes
'''
import os
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import sys
import numpy as np
import re
from prepare_data import get_id2line, get_conversations, gather_dataset
num_bins = 50  # number of histogram bins used by plotHistoLengths
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")  # punctuation kept as separate tokens
def basic_tokenizer(sentence):
    """Very basic tokenizer: split the sentence into a list of tokens.

    Splits on whitespace first, then on the punctuation captured by
    _WORD_SPLIT; empty fragments are dropped.
    """
    tokens = []
    for fragment in sentence.strip().split():
        tokens.extend(tok for tok in re.split(_WORD_SPLIT, fragment) if tok)
    return tokens
def main():
    # NOTE: Python 2 file (print statements). Builds the (question, answer)
    # corpus from the raw conversation data and plots the token-length
    # distributions that guide the choice of bucket sizes.
    id2line = get_id2line()
    print '>> gathered id2line dictionary.\n'
    convs = get_conversations()
    print '>> gathered conversations.\n'
    questions, answers = gather_dataset(convs,id2line)

    # Token count per line on each side of the pair.
    target_lengths = [ len(basic_tokenizer(line)) for line in answers]
    source_lengths = [ len(basic_tokenizer(line)) for line in questions]
    #if FLAGS.plot_histograms:
    plotHistoLengths("target lengths", target_lengths)
    plotHistoLengths("source_lengths", source_lengths)
    plotScatterLengths("target vs source length", "source length", "target length", source_lengths, target_lengths)
def plotScatterLengths(title, x_title, y_title, x_lengths, y_lengths):
    """Scatter-plot paired sequence lengths, with both axes clamped to [0, 200]."""
    plt.scatter(x_lengths, y_lengths)
    for labeler, text in ((plt.title, title), (plt.xlabel, x_title), (plt.ylabel, y_title)):
        labeler(text)
    plt.ylim(0, 200)
    plt.xlim(0, 200)
    plt.show()
def plotHistoLengths(title, lengths):
    """Histogram of sequence lengths with an overlaid normal density curve."""
    # BUG FIX: mu and sigma were swapped (mu was np.std, sigma was np.mean),
    # so the overlaid normal curve was centred and scaled incorrectly.
    mu = np.mean(lengths)
    sigma = np.std(lengths)
    x = np.array(lengths)
    n, bins, patches = plt.hist(x, num_bins, facecolor='green', alpha=0.5)
    y = mlab.normpdf(bins, mu, sigma)
    plt.plot(bins, y, 'r--')
    plt.title(title)
    plt.xlabel("Length")
    plt.ylabel("Number of Sequences")
    plt.xlim(0,80)
    plt.show()
if __name__=="__main__":
    # Script entry point: plot the length statistics for bucket selection.
    main()
| StarcoderdataPython |
3457897 | <filename>supervisely/visualize/src/ui/input_data.py
import os
import supervisely_lib as sly
import sly_globals as g
from sly_visualize_progress import get_progress_cb, reset_progress, init_progress
import cv2
import ffmpeg
progress_index = 1
object_ann_info = None
def init(data, state):
    """Populate the initial UI data/state for the input-project step."""
    info = g.project_info
    data["projectId"] = info.id
    data["projectName"] = info.name
    data["projectItemsCount"] = info.items_count if info.items_count else 0
    data["projectPreviewUrl"] = g.api.image.preview_url(info.reference_image_url, 100, 100)
    init_progress("InputProject", data)
    data["done1"] = False
    # Validation-related state starts empty; the user fills it in later steps.
    state.update({
        "collapsed1": False,
        "validationTeamId": None,
        "validationWorkspaceId": None,
        "validationProjectId": None,
        "validationDatasets": [],
        "validationAllDatasets": True,
    })
    data['videosData'] = []
def check_rotation(path_video_file):
    """Return the cv2 rotation code needed to upright the video, or None.

    Reads the 'rotate' tag from the video's ffmpeg metadata; recordings often
    store orientation there instead of rotating the frames themselves.
    """
    # meta-data of the video file in form of a dictionary
    meta_dict = ffmpeg.probe(path_video_file)
    rotateCode = None
    # BUG FIX: the original used a bare `except: pass`, silently swallowing
    # every error (including KeyboardInterrupt). Only a missing/malformed
    # 'rotate' tag is expected here, so catch exactly those failures.
    try:
        rotation = int(meta_dict['streams'][0]['tags']['rotate'])
    except (KeyError, IndexError, TypeError, ValueError):
        return None
    if rotation == 90:
        rotateCode = cv2.ROTATE_90_CLOCKWISE
    elif rotation == 180:
        rotateCode = cv2.ROTATE_180
    elif rotation == 270:
        rotateCode = cv2.ROTATE_90_COUNTERCLOCKWISE
    return rotateCode
def correct_rotation(frame, rotateCode):
    """Rotate a single video frame by the given cv2 rotation code."""
    rotated = cv2.rotate(frame, rotateCode)
    return rotated
def videos_to_frames(project_path, videos_data):
    """Split every .mp4 under project_path into JPEG frames.

    For each video a '<project>_<dataset>_<video>' directory is created under
    g.converted_dir and a descriptor dict is appended to videos_data.
    """
    videos_paths = g.get_files_paths(project_path, '.mp4')
    for video_index, video_path in enumerate(videos_paths):
        # Path layout assumed: .../<project_id>/<ds_name>/<subdir>/<video>.mp4
        # -- TODO confirm against sly.download_video_project output.
        project_id = video_path.split('/')[-4]
        ds_name = video_path.split('/')[-3]
        video_name = (video_path.split('/')[-1]).split('.mp4')[0]
        output_path = os.path.join(g.converted_dir, f'{project_id}_{ds_name}_{video_name}')
        os.makedirs(output_path, exist_ok=True)
        vidcap = cv2.VideoCapture(video_path)
        try:
            success, image = vidcap.read()
            count = 0
            rotateCode = check_rotation(video_path)
            while success:
                if rotateCode is not None:
                    image = correct_rotation(image, rotateCode)
                cv2.imwrite(f"{output_path}/frame{count:06d}.jpg", image)  # save frame as JPEG file
                success, image = vidcap.read()
                count += 1
            fps = vidcap.get(cv2.CAP_PROP_FPS)
        finally:
            # BUG FIX: the capture handle was never released, leaking OS
            # resources when many videos are processed in one run.
            vidcap.release()
        videos_data.append(
            {'index': video_index, 'path': output_path,
             'fps': fps, 'origin_path': video_path}
        )
@g.my_app.callback("download_projects_handler")
@sly.timeit
@g.my_app.ignore_errors_and_show_dialog_window()
def download_projects_handler(api: sly.api, task_id, context, state, app_logger):
    # Supervisely app callback: download and convert the configured project.
    download_projects([g.project_id])
def download_projects(project_ids):
    """Download each video project, split its videos into frames, and publish
    the resulting video metadata to the app UI state."""
    download_progress = get_progress_cb('InputProject', "Download project", len(project_ids))
    videos_data = []
    for project_id in project_ids:
        # NOTE: the original wrapped this body in `except Exception as e:
        # raise e`, which is a no-op; exceptions simply propagate.
        project_dir = os.path.join(g.projects_dir, f'{project_id}')
        # Start from a clean per-project directory.
        if not sly.fs.dir_exists(project_dir):
            sly.fs.mkdir(project_dir)
        else:
            sly.fs.clean_dir(project_dir)
        sly.download_video_project(g.api, project_id, project_dir, log_progress=True)
        videos_to_frames(project_dir, videos_data)
        download_progress(1)
    reset_progress('InputProject')
    # Unlock step 2 of the UI wizard and hand over the converted videos.
    fields = [
        {"field": f"data.videosData", "payload": videos_data},
        {"field": f"data.done1", "payload": True},
        {"field": f"state.collapsed2", "payload": False},
        {"field": f"state.disabled2", "payload": False},
        {"field": f"state.activeStep", "payload": 2},
    ]
    g.api.app.set_field(g.task_id, "data.scrollIntoView", f"step{2}")
    g.api.app.set_fields(g.task_id, fields)
| StarcoderdataPython |
8171343 | <gh_stars>100-1000
""" Exploration policy for permutation invariant environments
"""
from ..base_classes import Policy
import itertools
import random
import copy
import numpy as np
class LongerExplorationPolicy(Policy):
    r"""Simple alternative to :math:`\epsilon`-greedy that can explore more
    efficiently for a broad class of realistic problems.

    Parameters
    -----------
    epsilon : float
        Proportion of random steps
    length : int
        Length of the exploration sequences that will be considered
    """
    def __init__(self, learning_algo, n_actions, random_state, epsilon, length=10):
        Policy.__init__(self, learning_algo, n_actions, random_state)
        self._epsilon = epsilon        # exploration probability mass
        self._l = length               # length of each exploration sequence
        self._count_down = -1          # index of next queued action; -1 = no sequence active
        self._action_sequence = []     # pending exploration actions (consumed back-to-front)

    def action(self, state, mode=None, *args, **kwargs):
        """Return (action, V): the next queued exploration action, or the
        greedy best action (V is 0 for exploratory steps).

        A new length-l sequence starts with probability
        epsilon / (1 + (l-1)*(1-epsilon)) -- presumably chosen so the overall
        fraction of exploratory steps is epsilon; confirm against the paper.
        """
        if self._count_down >= 0:
            # Take the next exploration action in the sequence
            V = 0
            action = self._action_sequence[self._count_down]
            self._count_down -= 1
        else:
            if self.random_state.rand() < self._epsilon/((1+(self._l-1)*(1-self._epsilon))):
                # Take a random action and build an exploration sequence for the next steps
                self._count_down = self._l - 1
                self._action_sequence = self.sampleUniformActionSequence()
                action = self._action_sequence[self._count_down]
                V = 0
                self._count_down -= 1
            else:
                # Simply act greedily with respect to what is currently believed to be the best action
                action, V = self.bestAction(state, mode, args, kwargs)

        return np.array(action), V

    def setEpsilon(self, e):
        """ Set the epsilon
        """
        self._epsilon = e

    def epsilon(self):
        """ Get the epsilon
        """
        return self._epsilon

    def sampleUniformActionSequence(self):
        """Sample an action sequence of length self._l such that unordered
        sequences are equiprobable; the returned sequence is shuffled."""
        if ( isinstance(self.n_actions,int)):
            """ Sample an action sequence of length self._l, where the unordered sequences have uniform probabilities"""
            actions_list = range(self.n_actions)
        else:
            """For N exploration steps, the goal is to have actions such that their sum spans quite uniformly
            the whole range of possibilities. Among those possibilities, random choice/order of actions. """
            possible_actions=[]
            # Add for all actions N random element between min and max
            N=3
            for i,a in enumerate(self.n_actions):
                possible_actions.append([])
                for j in range(N):
                    possible_actions[i].append( self.random_state.uniform(self.n_actions[i][0],self.n_actions[i][1]) )
            actions_list = list(itertools.product(*possible_actions))

        sequences_with_replacement = list(itertools.combinations_with_replacement(actions_list, self._l))
        index_pick = self.random_state.randint(0, len(sequences_with_replacement))
        sequence = list(sequences_with_replacement[index_pick])
        self.random_state.shuffle(sequence)

        return sequence
| StarcoderdataPython |
11302286 | import FWCore.ParameterSet.Config as cms
from RecoParticleFlow.PFClusterProducer.particleFlowClusterECAL_cff import *
# Out-of-time (OOT) ECAL PF clustering: clone the in-time configuration and
# point it at the uncorrected OOT cluster collection.
particleFlowClusterOOTECAL = particleFlowClusterECAL.clone()
particleFlowClusterOOTECAL.inputECAL = cms.InputTag("particleFlowClusterOOTECALUncorrected")

from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy
# For the run2 80X-legacy miniAOD workflow, read the reduced ECAL rechit
# collections instead of the defaults (NOTE: matches that era's inputs --
# confirm against the era configuration).
run2_miniAOD_80XLegacy.toModify(
    particleFlowClusterOOTECAL.energyCorrector,
    recHitsEBLabel = "reducedEcalRecHitsEB",
    recHitsEELabel = "reducedEcalRecHitsEE"
)
| StarcoderdataPython |
8029071 | <reponame>Shaikh-Nabeel/HackerRankAlgorithms
"""
You are given a list of N people who are attending ACM-ICPC World Finals. Each of them are either well versed in a topic
or they are not. Find out the maximum number of topics a 2-person team can know. And also find out how many teams can
know that maximum number of topics.
Input Format
The first line contains two integers N and M separated by a single space, where N represents the number of people, and M
represents the number of topics. N lines follow.
Each line contains a binary string of length M. In this string, 1 indicates that the ith person knows a particular
topic, and 0 indicates that the ith person does not know the topic. Here, 1 <= i <= 2, and it denotes one of the persons
in the team
"""
__author__ = 'Danyang'
class Solution(object):
    """ACM-ICPC Team: best 2-person team by number of known topics.

    NOTE: Python 2 module (xrange is used elsewhere in the file as well).
    """

    def solve(self, cipher):
        """Brute-force every pair of people; O(N^2 * M) via bitwise OR.

        :param cipher: (N, M, ppl) where ppl[i] is an int bitmask of topics
        :return: "max_topics\ncount_of_best_teams"
        """
        N, M, ppl = cipher
        best = 0
        best_teams = 0
        for first in xrange(N):
            for second in xrange(first + 1, N):
                known = self.common_topics(M, ppl[first], ppl[second])
                if known > best:
                    best = known
                    best_teams = 1
                elif known == best:
                    best_teams += 1
        return "%d\n%d" % (best, best_teams)

    def common_topics(self, M, a, b):
        """Count topics known by at least one of the two masks (popcount of a|b).

        M is unused but kept for interface compatibility with callers.
        """
        return bin(a | b).count("1")
if __name__ == "__main__":
    import sys

    # Read input: first line "N M", then N binary strings (one per person),
    # parsed base-2 so each person's topics become an int bitmask.
    f = open("1.in", "r")
    # f = sys.stdin
    N, M = map(lambda x: int(x), f.readline().strip().split(" "))
    ppl = []
    for i in xrange(N):
        ppl.append(int(f.readline().strip(), 2))

    cipher = [N, M, ppl]

    # Python 2: trailing comma suppresses the extra newline.
    s = "%s\n" % (Solution().solve(cipher))
    print s,
| StarcoderdataPython |
211015 | import operator
from functools import reduce
from django.shortcuts import get_object_or_404, render
from django.db.models import Q
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .forms import RoomPostForm
from .models import Room, RoomImage
from accounts.models import Profile
from django.contrib.auth.decorators import login_required
def index(request):
    """Render the landing page with the four most recently updated rooms."""
    recent = Room.objects.order_by('-last_updated')[:4]
    context = {'latest_posts_list': recent}
    return render(request, 'home/index.html', context)
def browse_rooms(request):
    """Render a paginated listing of all rooms (2 per page)."""
    rooms_list = Room.objects.all()
    paginator = Paginator(rooms_list, 2)
    page = request.GET.get('page')
    try:
        rooms = paginator.page(page)
    except PageNotAnInteger:
        rooms = paginator.page(1)
    except EmptyPage:
        # BUG FIX: this branch assigned to `page` instead of `rooms`, leaving
        # `rooms` unbound and crashing with NameError on out-of-range pages;
        # clamp to the last page instead.
        rooms = paginator.page(paginator.num_pages)
    return render(request, 'home/browse.html', {'page': page, 'rooms': rooms})
def detail(request, room_id):
    """Render the detail page for one room; 404 if the id doesn't exist."""
    room = get_object_or_404(Room, pk=room_id)
    images = list(room.roomimage_set.all())
    return render(request, 'home/detail.html', {'room': room, 'roomImages': images})
def search(request):
    """Search, filter, sort and paginate room listings.

    GET parameters:
      q      -- free-text query matched against name/address/description
      sort   -- 'last' (default order), 'l2h'/'h2l' (cost asc/desc)
      filter -- '~'-separated 'key=value' pairs: prh/prl (price bounds),
                gen (genders), par (garages), rms (rooms), type (property type)
      page   -- page number
    """
    RESULTS_PER_PAGE = 10
    result_list = Room.objects.order_by('-last_updated')
    query_dict = request.GET
    context = {}
    search = True

    def _reset_filter_flags():
        # Mark every filter checkbox as unticked; ticked ones are set below.
        for prefix, options in (('gen', 'mfo'), ('par', '1234'),
                                ('rms', '1234'), ('type', 'acth')):
            for option in options:
                context[prefix + '_' + option] = 'false'

    if 'sort' in query_dict:
        sort = context['sort'] = query_dict['sort']
        if sort == 'last':
            pass
        elif sort == 'l2h':
            result_list = result_list.order_by('cost')
        elif sort == 'h2l':
            result_list = result_list.order_by('-cost')
        else:
            sort = 'none'
    else:
        context['sort'] = 'none'

    # BUG FIX: the original compared with `query_dict['q'] is not ""` -- an
    # identity comparison against a literal (SyntaxWarning on modern Python,
    # implementation-dependent result); use equality instead.
    if 'q' in query_dict and query_dict['q'] != "":
        query_q = context['query'] = query_dict['q']
        result_list = result_list.filter(
            Q(property_name__icontains=query_q) |
            Q(address__icontains=query_q) |
            Q(description__icontains=query_q)
        )
    else:
        search = False

    if 'filter' in query_dict:
        filters = context['filters'] = query_dict['filter']
        try:
            filter_dict = dict(item.split("=") for item in filters.split("~"))
            # Price range: prh = upper bound, prl = lower bound.
            if 'prh' in filter_dict:
                context['prh'] = filter_dict['prh']
                result_list = result_list.filter(cost__lte=context['prh'])
            else:
                context['prh'] = '99999'
            if 'prl' in filter_dict:
                context['prl'] = filter_dict['prl']
                result_list = result_list.filter(cost__gte=context['prl'])
            else:
                context['prl'] = '0'
            _reset_filter_flags()
            # Gender of the poster.
            if 'gen' in filter_dict:
                context['gen'] = filter_dict['gen']
                s = list(context['gen'])
                for i in s:
                    if i in {'m', 'f', 'o'}:
                        context['gen_' + i] = 'true'
                result_list = result_list.filter(reduce(
                    operator.or_, (Q(creator_gender__iexact=key) for key in s)))
            # Number of garages.
            if 'par' in filter_dict:
                context['par'] = filter_dict['par']
                s = list(str(context['par']))
                for i in s:
                    if i in {'1', '2', '3', '4'}:
                        context['par_' + i] = 'true'
                result_list = result_list.filter(reduce(
                    operator.or_, (Q(garages__iexact=key) for key in s)))
            # Number of rooms.
            if 'rms' in filter_dict:
                context['rms'] = filter_dict['rms']
                s = list(context['rms'])
                for i in s:
                    if i in {'1', '2', '3', '4'}:
                        context['rms_' + i] = 'true'
                result_list = result_list.filter(reduce(
                    operator.or_, (Q(number_of_rooms__iexact=key) for key in s)))
            # Property type (single-letter codes; presumably apartment/condo/
            # townhouse/house -- confirm against the template).
            if 'type' in filter_dict:
                context['type'] = filter_dict['type']
                s = list(context['type'])
                for i in s:
                    if i in {'a', 'c', 't', 'h'}:
                        context['type_' + i] = 'true'
                result_list = result_list.filter(reduce(
                    operator.or_, (Q(property_type__iexact=key) for key in s)))
        except ValueError:
            # Malformed filter string (a pair without '='): ignore filters,
            # matching the original best-effort behaviour.
            pass
    else:
        context['prh'] = '99999'
        context['prl'] = '0'
        _reset_filter_flags()
        filters = None

    paginator = Paginator(result_list, RESULTS_PER_PAGE)
    page = request.GET.get('page')
    try:
        rooms = paginator.page(page)
    except PageNotAnInteger:
        rooms = paginator.page(1)
    except EmptyPage:
        # BUG FIX: this branch assigned to `page`, leaving `rooms` unbound
        # (NameError below); clamp to the last page instead.
        rooms = paginator.page(paginator.num_pages)

    # 'sf' flags which of search ('s') and filters ('f') were active.
    context['sf'] = 's' if search else ''
    context['sf'] += 'f' if filters is not None else ''
    context['rooms'] = rooms
    context['num_results'] = len(result_list)
    return render(request, 'home/search.html', context)
@login_required()
def create(request):
    """Create a new room listing for the logged-in user.

    GET renders an empty form; POST validates it, requires 1-6 uploaded
    images, stamps the post with the creator's profile data, and redirects
    to the new room's detail page.
    """
    if request.method == 'POST':
        form = RoomPostForm(request.POST, request.FILES)
        if form.is_valid():
            print('valid')  # NOTE(review): debug print left in place
            # Reject the submission unless between 1 and 6 images were uploaded.
            if len(request.FILES.getlist('images')) > 6 or len(request.FILES.getlist('images')) == 0:
                error_message = "Please enter anywhere between 1 and 6 images"
                return render(request, 'home/create.html', {'form': form, 'error_message': error_message})
            new_room = form.save(commit=False)
            # Host is shown as "Firstname L" (last-name initial only).
            new_room.host_name = (request.user.first_name + ' ' + request.user.last_name[:1])
            # concatenate all form address inputs into one consistently formatted address
            address = (request.POST['address1'] + ', ')
            address += (request.POST['city'] + ', ON, ' + request.POST['postalCode'])
            new_room.address = address
            new_room.creator_id = request.user.pk
            # Copy the creator's profile details onto the listing.
            user = request.user
            profile = Profile.objects.get(user=user)
            new_room.creator_gender = profile.gender
            new_room.creator_major = profile.major
            new_room.creator_year = profile.year
            new_room.creator_email = user.email
            new_room.creator_phone = request.POST['creator_phone']
            new_room.save()
            # for each image uploaded, save it with its parent room being the room we just created
            for i in request.FILES.getlist('images'):
                image = RoomImage(room=new_room, image=i)
                image.save()
            return HttpResponseRedirect(reverse(detail, args=(new_room.pk,)))
        else:
            print('invalid')  # NOTE(review): debug print left in place
    else:
        form = RoomPostForm()
    return render(request, 'home/create.html', {'form': form})
| StarcoderdataPython |
6592327 | <reponame>Cladett/rlman
from dVRL_simulator.vrep.vrepObject import vrepObject
import numpy as np
from numpy.linalg import inv
class camera(vrepObject):
    """Wrapper around the scene's vision sensor; frames come back as numpy arrays."""

    def __init__(self, clientID, rgb=True):
        super(camera, self).__init__(clientID)
        self.camera_handle = self.getHandle('Vision_Sensor')
        self.rgb = rgb
        # First read with initialize=True (errors ignored) -- presumably
        # primes the remote API streaming for this sensor; confirm.
        self.getVisionSensorImage(
            self.camera_handle, self.rgb, ignoreError=True, initialize=True)

    def getImage(self, ignoreError=False):
        """Grab one frame: (H, W, 3) uint8 if rgb, else (H, W) uint8."""
        data, resolution = self.getVisionSensorImage(
            self.camera_handle, self.rgb, ignoreError=ignoreError, initialize=False)
        height, width = resolution[1], resolution[0]
        shape = (height, width, 3) if self.rgb else (height, width)
        return np.array(data, dtype=np.uint8).reshape(shape)
class table(vrepObject):
    """Handle to the customizable table's top in the scene."""

    def __init__(self, clientID):
        super(table, self).__init__(clientID)
        self.table_top_handle = self.getHandle('customizableTable_tableTop')

    def getPose(self, relative_handle, ignoreError=False, initialize=False):
        """Return the table-top pose relative to relative_handle."""
        return self.getPoseAtHandle(
            self.table_top_handle, relative_handle, ignoreError, initialize)
class target(vrepObject):
    """Goal target dummy attached to a PSM arm, plus a top dummy used to
    verify the arm's orientation adjustment."""

    def __init__(self, clientID, psm_number):
        super(target, self).__init__(clientID)
        self.target_handle = self.getHandle('Target_PSM{}'.format(psm_number))
        self.target_top = self.getHandle('Target_top')
        self.getPosition(-1, ignoreError=True, initialize=True)

    def setPosition(self, pos, relative_handle, ignoreError=False):
        """Move the target to pos with the fixed quaternion [1, 0, 0, 1]."""
        self.setPoseAtHandle(self.target_handle, relative_handle, pos, [1, 0, 0, 1], ignoreError)

    def getPosition(self, relative_handle, ignoreError=False, initialize=False):
        """Return (position, quaternion) of the target dummy.

        The top-target pose is also queried with the result discarded --
        presumably to prime remote-API streaming for that handle; confirm
        before removing the extra call.
        """
        pos, quat = self.getPoseAtHandle(self.target_handle, relative_handle, ignoreError, initialize)
        self.getPoseAtHandle(self.target_top, relative_handle, ignoreError, initialize)
        return pos, quat

    def getPositionTopTarget(self, relative_handle, ignoreError=False, initialize=False):
        """Return the position of the top target dummy."""
        pos_target_top, _ = self.getPoseAtHandle(self.target_top, relative_handle, ignoreError, initialize)
        return pos_target_top

    def setPose(self, pos, quat, relative_handle, ignoreError=False):
        """Set the full pose (position + quaternion) of the target dummy."""
        self.setPoseAtHandle(self.target_handle, relative_handle, pos, quat, ignoreError)
class rail(vrepObject):
    """The rail object: pose control, grasp state, and achieved-goal dummies."""

    def __init__(self, clientID):
        super(rail, self).__init__(clientID)
        self.rail_handle = self.getHandle('rail')
        self.dummy_rail_handle = self.getHandle('rail_Dummy')  # single grasp dummy
        self.rail_achieved_top = self.getHandle('rail_Achieved_t')
        self.rail_achieved_bottom = self.getHandle('rail_Achieved_b')
        self.rail_achieved_central = self.getHandle('rail_Achieved_goal')

    def setPose(self, pos, quat, relative_handle, ignoreError=False):
        """Place the rail so that its grasp dummy ends up at (pos, quat).

        Returns (grasp_dummy_handle, rail_position_actually_set).
        """
        b_T_d = self.posquat2Matrix(pos, quat)
        # Fixed transform from the grasp-dummy frame to the rail frame
        # (coordinates of "Dummy5" in the scene).
        d_T_r = np.array([[1, 0, 0, 0], [0, 0, 1, -0.0075],
                          [0, -1, 0, -0.004], [0, 0, 0, 1]])
        pos, quat = self.matrix2posquat(np.dot(b_T_d, d_T_r))
        self.setPoseAtHandle(self.rail_handle, relative_handle,
                             pos, quat, ignoreError)
        return self.dummy_rail_handle, pos

    def getPose(self, dummy_rail_handle, relative_handle, ignoreError=False, initialize=False):
        """Return (position, quaternion) of the given rail dummy."""
        return self.getPoseAtHandle(dummy_rail_handle, relative_handle, ignoreError, initialize)

    def getPositionAchievedGoals(self, relative_handle, ignoreError=False, initialize=False):
        """Positions of the central, top and bottom achieved-goal dummies."""
        pos_achieved_central, _ = self.getPoseAtHandle(self.rail_achieved_central, relative_handle, ignoreError, initialize)
        pos_achieved_top, _ = self.getPoseAtHandle(self.rail_achieved_top, relative_handle, ignoreError, initialize)
        pos_achieved_bottom, _ = self.getPoseAtHandle(self.rail_achieved_bottom, relative_handle, ignoreError, initialize)
        return pos_achieved_central, pos_achieved_top, pos_achieved_bottom

    def getVel(self, ignoreError=False, initialize=False):
        """Velocity of the rail's grasp dummy."""
        return self.getVelocityAtHandle(self.dummy_rail_handle, ignoreError, initialize)

    def removeGrasped(self, ignoreError=False):
        """Detach the rail from whatever object currently holds it.

        BUG FIX: the original referenced `self.self.rail_handle`, which raised
        AttributeError whenever this method was called.
        """
        self.setParent(self.rail_handle, -1, True, ignoreError)

    def isGrasped(self, ignoreError=False, initialize=False):
        """True if the rail currently has a parent object (i.e. is grasped)."""
        return not (-1 == self.getParent(self.rail_handle, ignoreError, initialize))
class targetK(vrepObject):
    """Kidney target: respondable parent, visual mesh, convex shell and the
    surface/central dummies used as reach goals."""

    def __init__(self, clientID):
        super(targetK, self).__init__(clientID)
        # Respondable parent shape, visual kidney mesh, and the convex shell
        # used for collision checking.
        self.k_res_handle = self.getHandle('Kidney_respondable')
        self.k_handle = self.getHandle('Kidney')
        self.convex = self.getHandle('Convex')
        # Surface dummies: t = top (towards positive axis / adrenal gland),
        # b = bottom.
        self.k_dh_0t = self.getHandle('Kidney_Dummy_0t')
        self.k_dh_0b = self.getHandle('Kidney_Dummy_0b')
        self.k_dh_1t = self.getHandle('Kidney_Dummy_1t')
        self.k_dh_1b = self.getHandle('Kidney_Dummy_1b')
        self.k_dh_2t = self.getHandle('Kidney_Dummy_2t')
        self.k_dh_2b = self.getHandle('Kidney_Dummy_2b')
        self.k_dh_3t = self.getHandle('Kidney_Dummy_3t')
        self.k_dh_3b = self.getHandle('Kidney_Dummy_3b')
        self.k_dh_4t = self.getHandle('Kidney_Dummy_4t')
        self.k_dh_4b = self.getHandle('Kidney_Dummy_4b')
        # Central dummies reached by the rail's central dummy below it.
        self.k_dh_c0 = self.getHandle('Kidney_Dummy_c0')
        self.k_dh_c1 = self.getHandle('Kidney_Dummy_c1')
        self.k_dh_c2 = self.getHandle('Kidney_Dummy_c2')
        self.k_dh_c3 = self.getHandle('Kidney_Dummy_c3')
        self.k_dh_c4 = self.getHandle('Kidney_Dummy_c4')
        self.k_orientation_ctrl = self.getHandle('Kidney_orientation_ctrl')
        # Query every pose once relative to the base frame (-1); without this
        # the remote API "fails to get position and orientation" later.
        self.getPosition(-1, ignoreError=True, initialize=True)

    def setPose(self, pos, quat, relative_handle, ignoreError=False):
        """Set the pose of the respondable parent; children shapes follow."""
        self.setPoseAtHandle(self.k_res_handle, relative_handle, pos, quat, ignoreError)

    def getPosition(self, relative_handle, ignoreError=False, initialize=False):
        """Query the pose of every kidney handle; the results are discarded.

        Presumably this primes remote-API streaming for all handles (see
        __init__); confirm before removing any of the queries.
        """
        for handle in (self.k_res_handle, self.k_handle, self.convex,
                       self.k_dh_0t, self.k_dh_0b,
                       self.k_dh_1t, self.k_dh_1b,
                       self.k_dh_2t, self.k_dh_2b,
                       self.k_dh_3t, self.k_dh_3b,
                       self.k_dh_4t, self.k_dh_4b,
                       self.k_dh_c0, self.k_dh_c1, self.k_dh_c2,
                       self.k_dh_c3, self.k_dh_c4,
                       self.k_orientation_ctrl):
            self.getPoseAtHandle(handle, relative_handle, ignoreError, initialize)

    def getPositionGoal(self, relative_handle, randomize,
                        ignoreError=False, initialize=False):
        """Choose the goal dummy and return its position.

        With randomize True one of the 5 central dummies is sampled uniformly,
        otherwise dummy 2 is used; the choice is kept in self.dummy_number for
        getPositionGoalTopBottom.
        """
        self.dummy_number = np.random.randint(0, 5) if randomize else 2
        central_handles = (self.k_dh_c0, self.k_dh_c1, self.k_dh_c2,
                           self.k_dh_c3, self.k_dh_c4)
        pos_c, _ = self.getPoseAtHandle(central_handles[self.dummy_number],
                                        relative_handle, ignoreError, initialize)
        return pos_c

    def getPositionGoalTopBottom(self, relative_handle, ignoreError=False, initialize=False):
        """Return (top, bottom) surface-dummy positions for the goal picked by
        getPositionGoal; the two are front-facing dummies on opposite sides."""
        pairs = ((self.k_dh_0t, self.k_dh_0b),
                 (self.k_dh_1t, self.k_dh_1b),
                 (self.k_dh_2t, self.k_dh_2b),
                 (self.k_dh_3t, self.k_dh_3b),
                 (self.k_dh_4t, self.k_dh_4b))
        top_handle, bottom_handle = pairs[self.dummy_number]
        pos_t, _ = self.getPoseAtHandle(top_handle, relative_handle, ignoreError, initialize)
        pos_b, _ = self.getPoseAtHandle(bottom_handle, relative_handle, ignoreError, initialize)
        return pos_t, pos_b

    def getOrientationGoals(self, relative_handle, ignoreError=False, initialize=False):
        """Quaternion of Kidney_orientation_ctrl, used to compute the
        orientation error between the end-effector and the target."""
        _, quat = self.getPoseAtHandle(self.k_orientation_ctrl, relative_handle, ignoreError, initialize)
        return quat
# Checking the collision between robot arm and table
class collisionCheck(vrepObject):
    """Collision monitors between the rail / robot tool tip and the kidney.

    Only the four collision pairs active in the current scene are registered;
    the remaining pairs from earlier scene versions (table top, cylinder,
    tool-tip jaws) are intentionally left out.
    """

    def __init__(self, clientID, psm_number):
        super(collisionCheck, self).__init__(clientID)
        # Rail vs kidney mesh / cuboid / convex shell.
        self.collision_Kidney_Rail = self.getCollisionHandle('Collision_Kidney_Rail')
        self.collision_Cuboid_Rail = self.getCollisionHandle('Collision_Cuboid_Rail')
        self.collision_Convex_Rail = self.getCollisionHandle('Collision_Convex_Rail')
        # Tool-tip body vs convex shell.
        self.collision_Convex_TT_body = self.getCollisionHandle('Collision_Convex_TT_body')
        # Initialize each registered collision pair (errors ignored on the
        # first read, matching the other scene-object wrappers).
        for handle in (self.collision_Kidney_Rail,
                       self.collision_Cuboid_Rail,
                       self.collision_Convex_Rail,
                       self.collision_Convex_TT_body):
            super(collisionCheck, self).checkCollision(handle, ignoreError=True, initialize=True)

    def KidneyCollision(self, ignoreError=False):
        """Return four booleans, True where the pair is currently colliding:
        (kidney-rail, cuboid-rail, convex-rail, convex-tooltip-body)."""
        check = super(collisionCheck, self).checkCollision
        return (check(self.collision_Kidney_Rail, ignoreError),
                check(self.collision_Cuboid_Rail, ignoreError),
                check(self.collision_Convex_Rail, ignoreError),
                check(self.collision_Convex_TT_body, ignoreError))
| StarcoderdataPython |
190045 | <reponame>Aditya-aot/ION
from django import forms
from django.forms import ModelForm
from .models import stock_port , crypto_port
class stock_port_form(ModelForm):
    """Form for creating/editing a ``stock_port`` entry.

    All three fields render as plain text inputs with a "write here"
    placeholder hint.
    """
    # Fix: the attribute was previously misspelled "placholder"; browsers
    # silently ignore unknown attributes, so no hint was ever shown.
    name = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    price = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    quantity = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))

    class Meta:
        model = stock_port
        fields = [
            'name',
            'price',
            'quantity',
        ]
from bs4 import BeautifulSoup
import pandas as pd
import requests
import re
# Module-level scrape: fetch coinmarketcap's front page at import time and
# collect coin names for use as <select> choices in `crypto_port_form` below.
# NOTE(review): this runs on every import; a network failure or a change in
# the site's CSS class names will break import of this module. Consider
# moving it into a cached helper with error handling.
# NOTE(review): `pd` and `re` are imported but unused in this view; `tables`
# below is also unused -- verify before removing.
url = 'https://coinmarketcap.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text,'html.parser')
name_list=[]
names = soup.find_all("div", {"class": "sc-16r8icm-0 sc-1teo54s-1 dNOTPP"})
for name in names :
    # print(name.text)
    # Django choice tuples: (value, display) -- both set to the coin name.
    name_list.append((name.text , name.text))
# keep only entries 9..18; presumably earlier matches are non-coin page
# elements -- TODO confirm against the current page markup
name_list= (name_list[9:19])
tables = soup.find_all("div", {"class": "sc-131di3y-0 cLgOOr"})
class crypto_port_form(ModelForm):
    """Form for creating/editing a ``crypto_port`` entry.

    ``name`` is a dropdown populated from the module-level ``name_list``
    scraped at import time; ``price`` and ``quantity`` are text inputs.
    """
    name = forms.CharField(label='Select Crypto ', widget=(forms.Select(choices=name_list)))
    # Fix: the attribute was previously misspelled "placholder"; browsers
    # silently ignore unknown attributes, so no hint was ever shown.
    price = forms.CharField(label='Price', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    quantity = forms.CharField(label='Quantity', widget=forms.TextInput(attrs={"placeholder": "write here"}))

    class Meta:
        model = crypto_port
        fields = [
            'name',
            'price',
            'quantity',
        ]
| StarcoderdataPython |
12846038 | <gh_stars>0
import numpy as np
from ..core.derivative import Derivative
class AsianCallOption(Derivative):
    """Arithmetic-average Asian call option.

    Pays ``max(mean(path) - K, 0)`` discounted at rate ``r`` over ``T``.
    """

    def __init__(self, S, K, T, r, sigma, steps, **kwargs):
        super().__init__(S_0=S, T=T, r=r, sigma=sigma, steps=steps, **kwargs)
        # Keep raw contract parameters for payoff().
        self.S, self.K = S, K
        self.T, self.r, self.sigma = T, r, sigma

    def payoff(self, underlyingAssetPath, **kwargs):
        """Discounted payoff for one simulated path of the underlying."""
        average = underlyingAssetPath.mean()
        discount = np.exp(-self.r * self.T)
        return discount * max(average - self.K, 0)
class AsianPutOption(Derivative):
    """Arithmetic-average Asian put option.

    Pays ``max(K - mean(path), 0)`` discounted at rate ``r`` over ``T``.
    """

    def __init__(self, S, K, T, r, sigma, steps, **kwargs):
        super().__init__(S_0=S, T=T, r=r, sigma=sigma, steps=steps, **kwargs)
        # Keep raw contract parameters for payoff().
        self.S, self.K = S, K
        self.T, self.r, self.sigma = T, r, sigma

    def payoff(self, underlyingAssetPath, **kwargs):
        """Discounted payoff for one simulated path of the underlying."""
        average = underlyingAssetPath.mean()
        discount = np.exp(-self.r * self.T)
        return discount * max(self.K - average, 0)
| StarcoderdataPython |
11360124 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- <NAME>
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
import numpy as np
import math
import warnings
from types import FunctionType
from copy import deepcopy
from .filter_bank import (morlet_1d, gauss_1d, periodize_filter_fourier,
compute_temporal_support, compute_temporal_width,
compute_minimum_required_length,
calibrate_scattering_filters, energy_norm_filterbank_fr,
get_max_dyadic_subsampling)
from .utils import compute_minimum_support_to_pad
from ..frontend.base_frontend import ScatteringBase
class _FrequencyScatteringBase(ScatteringBase):
"""Attribute object for TimeFrequencyScatteringBase1D for frequential
scattering part of JTFS.
"""
# note: defaults are set in `wavespin.scattering1d.frontend.base_frontend.py`,
# `DEFAULT_KWARGS`
    def __init__(self, N_frs, J_fr=None, Q_fr=None, F=None, max_order_fr=None,
                 average_fr=None, aligned=None, oversampling_fr=None,
                 sampling_filters_fr=None, out_type=None,
                 out_3D=None, max_pad_factor_fr=None,
                 pad_mode_fr=None, analytic_fr=None,
                 max_noncqt_fr=None, normalize_fr=None, F_kind=None,
                 r_psi_fr=None, _n_psi1_f=None, backend=None):
        """Store configuration, then run the full frequential build pipeline.

        The sequence of `build`/`create_*`/`compute_*` calls at the end is
        order-sensitive: each step consumes attributes set by the previous
        (see `compute_padding_fr` docs for the pipeline description).
        """
        super(_FrequencyScatteringBase, self).__init__()
        self.N_frs = N_frs
        self.J_fr = J_fr
        self.Q_fr = Q_fr
        self.F = F
        self.max_order_fr = max_order_fr
        self.average_fr = average_fr
        self.aligned = aligned
        self.max_noncqt_fr = max_noncqt_fr
        self.oversampling_fr = oversampling_fr
        self.sampling_filters_fr = sampling_filters_fr
        self.sampling_psi_fr = None  # set in build()
        self.sampling_phi_fr = None  # set in build()
        self.out_type = out_type
        self.out_3D = out_3D
        self.max_pad_factor_fr = max_pad_factor_fr
        self.pad_mode_fr = pad_mode_fr
        self.analytic_fr = analytic_fr
        self.normalize_fr = normalize_fr
        self.F_kind = F_kind
        self.r_psi_fr = r_psi_fr
        self._n_psi1_f = _n_psi1_f
        self.backend = backend

        self.build()
        self.create_init_psi_filters()
        self.compute_unpadding_fr()
        self.compute_stride_fr()
        self.compute_scale_and_stride_logic()
        self.compute_padding_fr()
        self.create_psi_filters()
        # re-run in validation mode: asserts the built filterbank matches the
        # stride/scale params predicted before padding was computed
        self.compute_scale_and_stride_logic(for_validation=True)
        self.create_phi_filters()
        self.adjust_padding_and_filters()
# TODO remove `Tx`
# TODO sigma0=.13
# TODO dflt anal true
# TODO important implementation and documentation flaws
def build(self):
"""Mainly handles input arguments. For a description of the complete
build pipeline, see `compute_padding_fr`.
"""
self.sigma0 = 0.1
self.alpha = 4.
self.P_max = 5
self.eps = 1e-7
self.criterion_amplitude = 1e-3
self.sigma_max_to_min_max_ratio = 1.2
# `N_frs` used in scattering, == realized `psi2_f`s
self.N_frs_realized = [s for s in self.N_frs if s > 0]
# longest & shortest obtainable frequency row w.r.t. which we
# calibrate filters
self.N_frs_max = max(self.N_frs_realized)
self.N_frs_min = min(self.N_frs_realized)
# above is for `psi_t *` pairs, below is actual max, which
# occurs for `phi_t *` pairs
self.N_frs_max_all = self._n_psi1_f
# compute corresponding scales
self.N_fr_scales = [(math.ceil(math.log2(s)) if s != 0 else -1)
for s in self.N_frs]
self.N_fr_scales_max = max(self.N_fr_scales)
# smallest scale is also smallest possible maximum padding
# (cannot be overridden by `max_pad_factor_fr`)
self.N_fr_scales_min = min(s for s in self.N_fr_scales if s != -1)
# scale differences
self.scale_diffs = [(self.N_fr_scales_max - N_fr_scale
if N_fr_scale != -1 else -1)
for N_fr_scale in self.N_fr_scales]
# make unique variants
self.N_fr_scales_unique = np.unique([N_fr_scale
for N_fr_scale in self.N_fr_scales
if N_fr_scale != -1])
self.scale_diffs_unique = np.unique([scale_diff
for scale_diff in self.scale_diffs
if scale_diff != -1])
# store number of unique scales
self.n_scales_fr = len(self.scale_diffs_unique)
# ensure 2**J_fr <= nextpow2(N_frs_max)
if self.J_fr is None:
# default to `max - 2` if possible, but no less than `3`,
# and no more than `max`
self.J_fr = min(max(self.N_fr_scales_max - 2, 3),
self.N_fr_scales_max)
elif self.J_fr > self.N_fr_scales_max:
raise ValueError(("2**J_fr cannot exceed maximum number of frequency "
"rows (rounded up to pow2) in joint scattering "
"(got {} > {})".format(
2**(self.J_fr), 2**self.N_fr_scales_max)))
# check F or set default
if self.F == 'global':
self.F = 2**self.N_fr_scales_max
elif self.F > 2**self.N_fr_scales_max:
raise ValueError("The temporal support F of the low-pass filter "
"cannot exceed maximum number of frequency rows "
"(rounded up to pow2) in joint scattering "
"(got {} > {})".format(
self.F, 2**self.N_fr_scales_max))
self.log2_F = math.floor(math.log2(self.F))
self.average_fr_global_phi = bool(self.F == 2**self.N_fr_scales_max)
self.average_fr_global = bool(self.average_fr_global_phi and
self.average_fr)
# handle F_kind
if self.F_kind == 'decimate':
from ..toolkit import Decimate
self.decimate = Decimate(backend=self.backend, sign_correction='abs')
elif self.F_kind == 'average':
self.decimate = None
else:
raise ValueError("`F_kind` must be 'average' or 'decimate', got %s" % (
self.F_kind))
# restrict `J_pad_frs_max` (and `J_pad_frs_max_init`) if specified by user
if isinstance(self.max_pad_factor_fr, int):
self.max_pad_factor_fr = {scale_diff: self.max_pad_factor_fr
for scale_diff in self.scale_diffs_unique}
elif isinstance(self.max_pad_factor_fr, (list, tuple)):
_max_pad_factor_fr = {}
for i, scale_diff in enumerate(self.scale_diffs_unique):
if i > len(self.max_pad_factor_fr) - 1:
_max_pad_factor_fr[scale_diff] = self.max_pad_factor_fr[-1]
else:
_max_pad_factor_fr[scale_diff] = self.max_pad_factor_fr[i]
self.max_pad_factor_fr = _max_pad_factor_fr
# guarantee that `J_pad_fr > J_pad_frs_max_init` cannot occur
if max(self.max_pad_factor_fr.values()) > self.max_pad_factor_fr[0]:
J_pad_frs_max = max(s + p for s, p in
zip(self.N_fr_scales_unique[::-1],
self.max_pad_factor_fr.values()))
first_max_pf_min = J_pad_frs_max - self.N_fr_scales_max
# ensures `J_pad_frs[0] >= J_pad_frs[1:]`
self.max_pad_factor_fr[0] = first_max_pf_min
elif self.max_pad_factor_fr is None:
pass
else:
raise ValueError("`max_pad_factor_fr` mus be int>0, "
"list/tuple[int>0], or None (got %s)" % str(
self.max_pad_factor_fr))
self.unrestricted_pad_fr = bool(self.max_pad_factor_fr is None)
# validate `max_pad_factor_fr`
# 1/2**J < 1/Np2up so impossible to create wavelet without padding
if (self.J_fr == self.N_fr_scales_max and
self.max_pad_factor_fr is not None and
self.max_pad_factor_fr[0] == 0):
raise ValueError("`max_pad_factor_fr` can't be 0 if "
"J_fr == log2(nextpow2(N_frs_max)). Got, "
"respectively, %s\n%s\n%s" % (
self.mad_pad_factor_fr, self.J_fr,
self.N_fr_scales_max))
# validate `out_3D`
if self.out_3D and not self.average_fr:
raise ValueError("`out_3D=True` requires `average_fr=True`. "
"`F=1` with `average_fr=True` will yield coeffs "
"close to unaveraged.")
# check `pad_mode_fr`, set `pad_fn_fr`
supported = ('conj-reflect-zero', 'zero')
if isinstance(self.pad_mode_fr, FunctionType):
fn = self.pad_mode_fr
def pad_fn_fr(x, pad_fr, scf, B):
return fn(x, pad_fr, scf, B)
self.pad_mode_fr = 'custom'
elif self.pad_mode_fr not in supported:
raise ValueError(("unsupported `pad_mode_fr` '{}';\nmust be a "
"function, or string, one of: {}").format(
self.pad_mode_fr, ', '.join(supported)))
else:
pad_fn_fr = None # handled in `core`
self.pad_fn_fr = pad_fn_fr
# unpack `sampling_` args
if isinstance(self.sampling_filters_fr, tuple):
self.sampling_psi_fr, self.sampling_phi_fr = self.sampling_filters_fr
if self.sampling_phi_fr == 'exclude':
# if user explicitly passed 'exclude' for `_phi`
warnings.warn("`sampling_phi_fr = 'exclude'` has no effect, "
"will use 'resample' instead.")
self.sampling_phi_fr = 'resample'
else:
self.sampling_psi_fr = self.sampling_phi_fr = self.sampling_filters_fr
if self.sampling_phi_fr == 'exclude':
self.sampling_phi_fr = 'resample'
self.sampling_filters_fr = (self.sampling_psi_fr, self.sampling_phi_fr)
# validate `sampling_*` args
psi_supported = ('resample', 'recalibrate', 'exclude')
phi_supported = ('resample', 'recalibrate')
if self.sampling_psi_fr not in psi_supported:
raise ValueError(("unsupported `sampling_psi_fr` ({}), must be one "
"of: {}").format(self.sampling_psi_fr,
', '.join(psi_supported)))
elif self.sampling_phi_fr not in phi_supported:
raise ValueError(("unsupported `sampling_phi_fr` ({}), must be one "
"of: {}").format(self.sampling_phi_fr,
', '.join(phi_supported)))
elif self.sampling_phi_fr == 'recalibrate' and self.average_fr_global_phi:
raise ValueError("`F='global'` && `sampling_phi_fr='recalibrate'` "
"is unsupported.")
elif self.sampling_phi_fr == 'recalibrate' and self.aligned:
raise ValueError("`aligned=True` && `sampling_phi_fr='recalibrate'` "
"is unsupported.")
# compute maximum amount of padding.
# we do this at max possible `N_fr` per each dyadic scale to guarantee
# pad uniqueness across scales; see `compute_padding_fr` docs
(self.J_pad_frs_max_init, self.min_to_pad_fr_max, self._pad_fr_phi,
self._pad_fr_psi) = self._compute_J_pad_fr(2**self.N_fr_scales_max,
Q=(self.Q_fr, 0))
# track internally for edge case testing
N_fr_scale_fo = math.ceil(math.log2(self._n_psi1_f))
self._J_pad_fr_fo, *_ = self._compute_J_pad_fr(2**N_fr_scale_fo,
Q=(self.Q_fr, 0))
# warn of edge case; see `_J_pad_fr_fo` in docs (base_frontend)
if (self.max_pad_factor_fr is not None and
self.max_pad_factor_fr[0] == 0):
if self.J_pad_frs_max_init < self._J_pad_fr_fo:
warnings.warn(("Edge case: due to `max_pad_factor_fr=0`, "
"the `phi_t * phi_f` pair cannot be padded, "
"and instead some first-order coefficients "
"will be excluded from computation."))
    def create_phi_filters(self):
        """See `filter_bank.phi_fr_factory`.

        Builds the frequential lowpass filterbank `phi_f_fr` for the realized
        paddings in `J_pad_frs`.

        NOTE(review): `phi_fr_factory` is not among this file's visible
        imports -- presumably defined or imported elsewhere in the module;
        verify.
        """
        self.phi_f_fr = phi_fr_factory(
            self.J_pad_frs_max_init, self.J_pad_frs, self.F, self.log2_F,
            **self.get_params('unrestricted_pad_fr', 'pad_mode_fr',
                              'sampling_phi_fr', 'average_fr',
                              'average_fr_global_phi', 'aligned',
                              'criterion_amplitude', 'normalize_fr', 'sigma0',
                              'P_max', 'eps'))
    def create_psi_filters(self):
        """See `filter_bank.psi_fr_factory`.

        Builds the "up" and "down" (spin) frequential wavelet filterbanks and
        their `psi_ids`, validates the filterbank size, then (optionally)
        enforces strict (anti-)analyticity by zeroing half the spectrum.
        """
        (self.psi1_f_fr_up, self.psi1_f_fr_dn, self.psi_ids
         ) = psi_fr_factory(
            self.psi_fr_params, self.N_fr_scales_unique, self.N_fr_scales_max,
            self.J_pad_frs, **self.get_params(
                'sampling_psi_fr', 'scale_diff_max_to_build', 'normalize_fr',
                'criterion_amplitude', 'sigma0', 'P_max', 'eps'))

        # cannot do energy norm with 3 filters, and generally filterbank
        # isn't well-behaved
        n_psi_frs = len(self.psi1_f_fr_up[0])
        if n_psi_frs <= 3:
            raise Exception(("configuration yielded %s wavelets for frequential "
                             "scattering, need a minimum of 4; try increasing "
                             "J, Q, J_fr, or Q_fr." % n_psi_frs))

        # analyticity
        if self.analytic_fr:
            psi_fs_all = (self.psi1_f_fr_up, self.psi1_f_fr_dn)
            for s1_fr, psi_fs in enumerate(psi_fs_all):
                for psi_id in psi_fs:
                    # non-int keys are meta fields ('xi', 'j', ...); skip them
                    if not isinstance(psi_id, int):
                        continue
                    for n1_fr in range(len(psi_fs[psi_id])):
                        pf = psi_fs[psi_id][n1_fr]
                        M = len(pf)
                        if s1_fr == 0:
                            pf[M//2 + 1:] = 0  # analytic, zero negatives
                        else:
                            pf[:M//2] = 0  # anti-analytic, zero positives
                        pf[M//2] /= 2      # halve Nyquist
def adjust_padding_and_filters(self):
# realized minimum & maximum
self.J_pad_frs_min = min(self.J_pad_frs.values())
self.J_pad_frs_max = max(self.J_pad_frs.values())
if not self.unrestricted_pad_fr:
# adjust phi_fr
pad_diff_max_realized = self.J_pad_frs_max_init - self.J_pad_frs_min
log2_F_phi_diffs = [k for k in self.phi_f_fr if isinstance(k, int)]
for log2_F_phi_diff in log2_F_phi_diffs:
for pad_diff in self.phi_f_fr[log2_F_phi_diff]:
if pad_diff > pad_diff_max_realized:
del self.phi_f_fr[log2_F_phi_diff][pad_diff]
# shouldn't completely empty a scale
assert len(self.phi_f_fr[log2_F_phi_diff]) != 0
# energy norm
if 'energy' in self.normalize_fr:
energy_norm_filterbank_fr(self.psi1_f_fr_up, self.psi1_f_fr_dn,
self.phi_f_fr, self.J_fr, self.log2_F,
self.sampling_psi_fr)
    def create_init_psi_filters(self):
        """Build a reference ("init") down filterbank at maximum padding.

        Sampled at length `2**J_pad_frs_max_init`; only its params and meta
        are consumed (by `_compute_psi_fr_params`), after which the attribute
        is deleted.
        """
        T = 1  # for computing `sigma_low`, unused
        (_, xi1_frs, sigma1_frs, j1_frs, is_cqt1_frs, *_
         ) = calibrate_scattering_filters(self.J_fr, self.Q_fr, T=T,
                                          r_psi=self.r_psi_fr, sigma0=self.sigma0,
                                          alpha=self.alpha,
                                          J_pad=self.J_pad_frs_max_init)

        # instantiate filter
        psi1_f_fr_dn = {}
        scale_diff = -1  # since this filterbank's removed later
        psi1_f_fr_dn[scale_diff] = []
        # initialize meta
        for field in ('width', 'xi', 'sigma', 'j', 'is_cqt'):
            psi1_f_fr_dn[field] = {scale_diff: []}

        N_fr_scale = self.N_fr_scales_max
        for n1_fr in range(len(j1_frs)):
            #### Compute wavelet #############################################
            # fetch wavelet params, sample wavelet
            xi, sigma = xi1_frs[n1_fr], sigma1_frs[n1_fr]
            padded_len = 2**self.J_pad_frs_max_init
            # expand dim to multiply along freq like (2, 32, 4) * (32, 1)
            psi = morlet_1d(padded_len, xi, sigma, normalize=self.normalize_fr,
                            P_max=self.P_max, eps=self.eps)[:, None]
            psi1_f_fr_dn[scale_diff].append(psi)

            # embed meta
            width = 2*compute_temporal_width(
                psi, N=2**N_fr_scale, sigma0=self.sigma0,
                criterion_amplitude=self.criterion_amplitude)
            psi1_f_fr_dn['width' ][scale_diff].append(width)
            psi1_f_fr_dn['xi'    ][scale_diff].append(xi)
            psi1_f_fr_dn['sigma' ][scale_diff].append(sigma)
            psi1_f_fr_dn['j'     ][scale_diff].append(j1_frs[n1_fr])
            psi1_f_fr_dn['is_cqt'][scale_diff].append(is_cqt1_frs[n1_fr])

        self.psi1_f_fr_dn_init = psi1_f_fr_dn
    def _compute_psi_fr_params(self):
        """Derive per-`scale_diff` wavelet params (`xi`, `sigma`, `j`,
        `is_cqt`) from the init filterbank, per `sampling_psi_fr`:

          - 'resample': reuse the init params at every scale.
          - 'exclude': keep only wavelets whose 'width' fits `2**N_fr_scale`.
          - 'recalibrate': recompute params via `_recalibrate_psi_fr`.

        Deletes `psi1_f_fr_dn_init` when done, then computes
        `J_pad_frs_min_limit_due_to_psi`.
        """
        psis = self.psi1_f_fr_dn_init
        params_init = {field: [p for n1_fr, p in enumerate(psis[field][-1])]
                       for field in ('xi', 'sigma', 'j', 'is_cqt')}

        if self.sampling_psi_fr in ('resample', 'exclude'):
            params = {}
            for N_fr_scale in self.N_fr_scales[::-1]:
                if N_fr_scale == -1:
                    continue
                scale_diff = self.N_fr_scales_max - N_fr_scale
                if self.sampling_psi_fr == 'resample' or scale_diff == 0:
                    # all `sampling_psi_fr` should agree on `N_fr_scales_max`;
                    # 'exclude' may omit some filters if `not unrestricted_pad_fr`
                    # per insufficient padding amplifying 'width'
                    params[scale_diff] = deepcopy(params_init)
                elif self.sampling_psi_fr == 'exclude':
                    params[scale_diff] = {field: [] for field in params_init}
                    for n1_fr in range(len(params_init['j'])):
                        width = psis['width'][-1][n1_fr]
                        if width > 2**N_fr_scale:
                            # subsequent `width` are only greater
                            break
                        for field in params_init:
                            params[scale_diff][field].append(
                                params_init[field][n1_fr])
        elif self.sampling_psi_fr == 'recalibrate':
            N = 2**self.J_pad_frs_max_init
            max_original_width = max(psis['width'][-1])
            (xi1_frs_new, sigma1_frs_new, j1_frs_new, is_cqt1_frs_new,
             self.scale_diff_max_recalibrate) = _recalibrate_psi_fr(
                 max_original_width,
                 *[params_init[field] for field in params_init],
                 N, self.alpha, self.N_fr_scales_max, self.N_fr_scales_unique,
                 self.sigma_max_to_min_max_ratio)
            # pack as `params[scale_diff][field][n1_fr]`
            params = {}
            for scale_diff in j1_frs_new:
                params[scale_diff] = {field: [] for field in params_init}
                for field, p in zip(params_init, (xi1_frs_new, sigma1_frs_new,
                                                  j1_frs_new, is_cqt1_frs_new)):
                    params[scale_diff][field] = p[scale_diff]

        # ensure no empty scales
        for scale_diff in params:
            assert all(len(params[scale_diff][field]) > 0
                       for field in params[scale_diff]
                       ), (params, self.sampling_filters_fr)

        self.psi_fr_params = params
        if self.sampling_psi_fr != 'recalibrate':
            self.scale_diff_max_recalibrate = None

        # remove unused
        # (reassigning first drops the large filter arrays before the
        # attribute itself is deleted)
        self.psi1_f_fr_dn_init = {}
        del self.psi1_f_fr_dn_init

        # compute needed quantity
        self._compute_J_pad_frs_min_limit_due_to_psi()
    def _compute_J_pad_frs_min_limit_due_to_psi(self):
        """
        `J_pad_frs_min_limit_due_to_psi` determined by:

          [1] 'resample' and unrestricted_pad_fr: smallest padding such that
              all wavelets still fully decay (reach `criterion_amplitude`)
          [2] 'exclude': smallest padding is padding that occurs at
              smallest `N_fr_scale` (i.e. largest `scale_diff`) that computes a
              filterbank (i.e. at least one wavelet with 'width' > 2**N_fr_scale);
              i.e. we reuse that padding for lesser `N_fr_scale`
          [3] 'recalibrate': we reuse same as 'exclude', except now determined
              by `scale_diff_max_recalibrate` returned by
              `filter_bank_jtfs._recalibrate_psi_fr`
          [4] all: smallest padding such that all wavelets are wavelets
              (i.e. not a pure tone, one DFT bin, which throws ValueError)
              however we only exit the program if this occurs for non-'resample',
              as 'exclude' & 'recalibrate' must automatically satisfy this
              per shrinking support with smaller `N_fr_scale`
          not [5] phi: phi computes such that it's available at all given
              paddings, even if it distorts severely. However, worst case
              distortion is `scale == pad`, i.e. `log2_F_phi == J_pad_fr`,
              guaranteed by `J_pad_fr = max(, total_conv_stride_over_U1)`.
              Note phi construction can never fail via ValueError in [4],
              but it can become a plain global average against intent. We agree
              to potential distortions with `max_pad_factor_fr != None`,
              while still promising "no extreme distortions" (i.e. ValueError);
              here it's uncertain what's "extreme", as even a fully decayed phi
              with `log2_F_phi == N_fr_scale` will approximate a direct
              global averaging.

        [1] and [4] directly set `J_pad_frs_min_limit_due_to_psi`.
        [2] and [3] set `scale_diff_max_to_build`, which in turn
        sets `J_pad_frs_min_limit_due_to_psi` in `compute_padding_fr()`.
        """
        params = self.psi_fr_params
        scale_diff_max_to_set_pad_min = None
        pad_diff_max = None

        if self.sampling_psi_fr in ('exclude', 'recalibrate'):
            # find the largest scale_diff with a built filterbank; cases [2], [3]
            scale_diff_prev = 0
            for N_fr_scale in self.N_fr_scales_unique[::-1]:
                scale_diff = self.N_fr_scales_max - N_fr_scale
                if scale_diff not in params:
                    scale_diff_max_to_set_pad_min = scale_diff_prev
                    break
                scale_diff_prev = scale_diff
            if self.sampling_psi_fr == 'recalibrate':
                a = scale_diff_max_to_set_pad_min
                b = self.scale_diff_max_recalibrate
                assert a == b, (a, b)
        elif self.sampling_psi_fr == 'resample':
            # 'resample''s `else` is also applicable to 'exclude' & 'recalibrate',
            # but it's expected to hold automatically - and if doesn't, will
            # raise Exception in `filter_bank.psi_fr_factory`

            # unpack params
            xi1_frs, sigma1_frs, j1_frs, is_cqt1_frs = [
                params[0][field] for field in ('xi', 'sigma', 'j', 'is_cqt')]

            if self.unrestricted_pad_fr:
                # in this case filter temporal behavior is preserved across all
                # lengths, so we must restrict lowest length such that longest
                # filter still decays; case [1]
                pad_diff = 0
                while True:
                    # `scale_diff = 0` <=> `J_pad_fr = J_pad_frs_max_init` here
                    J_pad_fr = self.J_pad_frs_max_init - pad_diff
                    psi_longest = morlet_1d(2**J_pad_fr, xi1_frs[-1],
                                            sigma1_frs[-1], P_max=self.P_max,
                                            normalize=self.normalize_fr,
                                            eps=self.eps)[:, None]
                    psi_longest_support = 2*compute_temporal_support(
                        psi_longest.T,
                        criterion_amplitude=self.criterion_amplitude)
                    if psi_longest_support == len(psi_longest):
                        # in zero padding we cut padding in half, which distorts
                        # the wavelet but negligibly relative to the
                        # scattering scale
                        if pad_diff == 0:
                            if self.pad_mode_fr != 'zero':
                                raise Exception(
                                    "got `pad_diff_max == 0` with"
                                    "`pad_mode_fr != 'zero'`, meaning "
                                    "`J_pad_frs_max_init` computed incorrectly.")
                            pad_diff_max = 0
                        else:
                            pad_diff_max = pad_diff - 1
                        break
                    elif len(psi_longest) == 2**self.N_fr_scales_min:
                        # smaller pad length is impossible
                        break
                    pad_diff += 1
            else:
                # this `else` isn't exclusive with the `if`
                # (i.e. in case `unrestricted_pad_fr==True`), but if `if` holds,
                # so will `else` (former's more restrictive); case [4]
                pad_diff = 0
                while True:
                    # in submaximal padding, a non-last wavelet may have longer
                    # support, so check all wavelets (still won't be near Nyquist
                    # but simplify logic)
                    for n1_fr in range(len(xi1_frs)):
                        #### Compute wavelet #################################
                        # fetch wavelet params, sample wavelet
                        xi, sigma = xi1_frs[n1_fr], sigma1_frs[n1_fr]
                        J_pad_fr = self.J_pad_frs_max_init - pad_diff
                        try:
                            _ = morlet_1d(2**J_pad_fr, xi, sigma,
                                          normalize=self.normalize_fr,
                                          P_max=self.P_max, eps=self.eps)
                        except ValueError as e:
                            # any other ValueError is a genuine failure
                            if "division" not in str(e):
                                raise e
                            pad_diff_max = max(pad_diff - 1, 0)
                            break
                    if pad_diff_max is not None:
                        break
                    pad_diff += 1

        # only one must be set
        assert not (pad_diff_max is not None and
                    scale_diff_max_to_set_pad_min is not None)

        if pad_diff_max is not None:
            self.J_pad_frs_min_limit_due_to_psi = (self.J_pad_frs_max_init -
                                                   pad_diff_max)
            self.scale_diff_max_to_build = None
        elif scale_diff_max_to_set_pad_min is not None:
            self.J_pad_frs_min_limit_due_to_psi = None  # None for now
            self.scale_diff_max_to_build = (
                scale_diff_max_to_set_pad_min)
        else:
            # no limits (i.e. naturally computed J_pad_frs_min is correct)
            self.J_pad_frs_min_limit_due_to_psi = None
            self.scale_diff_max_to_build = None
    def compute_stride_fr(self):
        """See "Compute logic: stride, padding" in `core`.

        Computes `total_conv_stride_over_U1s` (spinned pairs) and
        `total_conv_stride_over_U1s_phi` (phi_f pairs) per `scale_diff`,
        plus `unpad_len_common_at_max_fr_stride` for `out_3D`.
        """
        self._compute_psi_fr_params()

        if self.out_3D:
            stride_at_max_fr = self._get_stride(
                scale_diff=0, N_fr_scale=self.N_fr_scales_max)[0]
            self.unpad_len_common_at_max_fr_stride = (
                self.ind_end_fr_max[stride_at_max_fr] -
                self.ind_start_fr_max[stride_at_max_fr])
        else:
            self.unpad_len_common_at_max_fr_stride = None

        # spinned stride -- main stride ######################################
        self.total_conv_stride_over_U1s = {}
        for N_fr_scale in self.N_fr_scales_unique[::-1]:
            scale_diff = self.N_fr_scales_max - N_fr_scale
            s = self._get_stride(scale_diff, N_fr_scale,
                                 self.unpad_len_common_at_max_fr_stride)
            self.total_conv_stride_over_U1s[scale_diff] = s

        # now for phi_f pairs ################################################
        self.total_conv_stride_over_U1s_phi = {}
        for scale_diff in self.total_conv_stride_over_U1s:
            if self.average_fr or not self.aligned:
                if self.aligned or self.sampling_phi_fr == 'resample':
                    s = self.log2_F
                else:
                    s = min(self.log2_F,
                            max(self.total_conv_stride_over_U1s[scale_diff]))
            else:
                s = 0
            self.total_conv_stride_over_U1s_phi[scale_diff] = s

        # clarity assertions #################################################
        # phi stride <= spinned stride
        if self.average_fr:
            for scale_diff in self.total_conv_stride_over_U1s:
                s_spinned = max(self.total_conv_stride_over_U1s[scale_diff])
                s_phi = self.total_conv_stride_over_U1s_phi[scale_diff]
                # must hold to not potentially require padding phi pairs
                # separately per J_pad_fr >= total_conv_stride_over_U1
                # (or padding spinned more just for sake of `phi_f` pairs).
                # no choice with `average_fr=False`, via e.g. `log2_F > J_fr`
                assert s_phi <= s_spinned, (s_phi, s_spinned)

        # for out_3D, stride is same on per-`scale_diff` basis, and spinned and
        # phi strides must match for scale_diff==0 (for `stride_ref` in unpadding)
        if self.out_3D:
            for scale_diff in self.total_conv_stride_over_U1s:
                assert len(set(self.total_conv_stride_over_U1s[scale_diff])
                           ) == 1, self.total_conv_stride_over_U1s
            s0 = self.total_conv_stride_over_U1s[0][0]
            s1 = self.total_conv_stride_over_U1s_phi[0]
            assert s0 == s1, (self.total_conv_stride_over_U1s,
                              self.total_conv_stride_over_U1s_phi)
            assert s0 <= self.log2_F
    def _get_stride(self, scale_diff, N_fr_scale,
                    unpad_len_common_at_max_fr_stride=None):
        """Return the per-`n1_fr` total conv stride over U1 for one
        `scale_diff`, as a list of length `n_n1_frs`, per the
        `average_fr`/`aligned`/`sampling_phi_fr`/`out_3D` configuration.
        """
        assert N_fr_scale != -1
        # prefetch params ################################################
        if scale_diff not in self.psi_fr_params:
            # shouldn't occur otherwise
            assert self.sampling_psi_fr in ('exclude', 'recalibrate'), (
                scale_diff, self.psi_fr_params)
            # don't need to adjust `N_fr_scale`, since in the one place it's
            # used, it must refer to the actual scale, while the point of
            # `scale_diff` is to refer to the actual filterbank, which
            # this `scale_diff_ref` will do
            scale_diff_ref = max(self.psi_fr_params)
            # clarity assertion
            assert scale_diff_ref == self.scale_diff_max_to_build
        else:
            scale_diff_ref = scale_diff
        j1_frs_scale = self.psi_fr_params[scale_diff_ref]['j']
        n_n1_frs = len(j1_frs_scale)
        assert n_n1_frs > 0, (scale_diff_ref, self.psi_fr_params)

        # handle edge cases (cont'd) #####################################
        # resample_psi = bool(sampling_psi_fr in ('resample', 'exclude'))
        resample_phi = bool(self.sampling_phi_fr == 'resample')
        if (unpad_len_common_at_max_fr_stride is None and
                (self.out_3D and not self.aligned and not resample_phi)):
            return [self.log2_F] * n_n1_frs

        # get stride #####################################################
        if self.average_fr:
            if self.aligned:
                s = self.log2_F
                assert resample_phi
            else:
                if resample_phi:
                    if self.out_3D:
                        s = self.log2_F
                    else:
                        s = []
                        for n1_fr in range(n_n1_frs):
                            max_filter_stride = max(self.log2_F,
                                                    j1_frs_scale[n1_fr])
                            _s = max_filter_stride
                            s.append(_s)
                else:
                    if self.out_3D:
                        # avoid N_fr dependence
                        N_fr_max_at_scale = 2**N_fr_scale
                        # except at max scale case
                        N_fr = min(N_fr_max_at_scale, self.N_frs_max)
                        min_stride_to_unpad_like_max = math.ceil(math.log2(
                            N_fr / unpad_len_common_at_max_fr_stride))
                        s = min_stride_to_unpad_like_max
                    else:
                        s = []
                        for n1_fr in range(n_n1_frs):
                            # min: nonzero unpad
                            # max: not oversampled
                            _s = max(min(self.log2_F, N_fr_scale),
                                     j1_frs_scale[n1_fr])
                            nonzero_unpad_but_not_oversampled = _s
                            s.append(nonzero_unpad_but_not_oversampled)
        else:
            if self.aligned:
                s = 0
            else:
                s = j1_frs_scale
                assert not self.out_3D
        # normalize scalar strides to per-n1_fr lists
        if not isinstance(s, list):
            s = [s] * n_n1_frs
        assert len(s) == n_n1_frs, (len(s), n_n1_frs)
        return s
    def compute_scale_and_stride_logic(self, for_validation=False):
        """Compute (or, with `for_validation=True`, re-derive and verify)
        `n1_fr_subsamples`, `log2_F_phis`, `log2_F_phi_diffs`.

        These are computed from `psi_fr_params` *before* the filterbank is
        built (they're needed for padding); validation mode recomputes them
        from the built filters' meta and asserts agreement.
        """
        if not for_validation:
            self.n1_fr_subsamples, self.log2_F_phis, self.log2_F_phi_diffs = (
                self._compute_scale_and_stride_logic(self.psi_fr_params))
            return

        # ensure params match the actual filterbank's ########################
        # this ensures `psi_fr_factory` built as predicted, i.e. didn't change
        # `n1_fr_subsamples`. These params compute before the filterbank because
        # they're needed to compute padding, which is needed for the filterbank.

        # unpack from the filters
        psi_fr_params = {}
        # ensure filters built every scale_diff requested by psi_fr_params
        for scale_diff in self.psi_fr_params:
            psi_fr_params[scale_diff] = {}
            psi_id = self.psi_ids[scale_diff]
            for field in self.psi_fr_params[scale_diff]:
                psi_fr_params[scale_diff][
                    field] = self.psi1_f_fr_up[field][psi_id]

        # NOTE(review): only `n1_fr_subsamples` is compared below; the
        # recomputed `log2_F_phis`/`log2_F_phi_diffs` are unused -- confirm
        # whether they should also be validated.
        n1_fr_subsamples, log2_F_phis, log2_F_phi_diffs = (
            self._compute_scale_and_stride_logic(psi_fr_params))
        for pair in n1_fr_subsamples:
            for scale_diff in self.n1_fr_subsamples[pair]:
                a = n1_fr_subsamples[pair][scale_diff]
                b = self.n1_fr_subsamples[pair][scale_diff]
                assert a == b, (a, b)
    def _compute_scale_and_stride_logic(self, psi_fr_params):
        """Compute, per pair ('spinned', 'phi') and per `scale_diff`:
        subsampling before the frequential lowpass (`n1_fr_subsamples`),
        and the lowpass scale actually used (`log2_F_phis`, with
        `log2_F_phi_diffs = log2_F - log2_F_phi`).
        """
        n1_fr_subsamples = {}
        log2_F_phis = {}
        log2_F_phi_diffs = {}

        # spinned ############################################################
        n1_fr_subsamples['spinned'] = {}
        log2_F_phis['spinned'] = {}
        log2_F_phi_diffs['spinned'] = {}
        for scale_diff in self.scale_diffs_unique:
            n1_fr_subsamples['spinned'][scale_diff] = []
            log2_F_phis['spinned'][scale_diff] = []
            log2_F_phi_diffs['spinned'][scale_diff] = []

            # fall back to the largest built filterbank if this scale_diff
            # has no filterbank of its own ('exclude'/'recalibrate')
            scale_diff_ref = (scale_diff if scale_diff in psi_fr_params else
                              max(psi_fr_params))
            for n1_fr, j1_fr in enumerate(psi_fr_params[scale_diff_ref]['j']):
                total_conv_stride_over_U1 = self.total_conv_stride_over_U1s[
                    scale_diff][n1_fr]
                # n1_fr_subsample & log2_F_phi
                if self.average_fr and not self.average_fr_global:
                    if self.sampling_phi_fr == 'resample':
                        log2_F_phi_diff = 0
                    elif self.sampling_phi_fr == 'recalibrate':
                        log2_F_phi_diff = max(self.log2_F -
                                              total_conv_stride_over_U1, 0)
                    log2_F_phi = self.log2_F - log2_F_phi_diff

                    # Maximum permitted subsampling before conv w/ `phi_f_fr`.
                    # This avoids distorting `phi` (aliasing), or for
                    # 'recalibrate', requesting one that doesn't exist.
                    max_subsample_before_phi_fr = log2_F_phi
                    sub_adj = min(j1_fr, total_conv_stride_over_U1,
                                  max_subsample_before_phi_fr)
                else:
                    log2_F_phi, log2_F_phi_diff = None, None
                    sub_adj = (j1_fr if self.average_fr_global else
                               min(j1_fr, total_conv_stride_over_U1))
                n1_fr_subsamples['spinned'][scale_diff].append(sub_adj)
                log2_F_phis['spinned'][scale_diff].append(log2_F_phi)
                log2_F_phi_diffs['spinned'][scale_diff].append(log2_F_phi_diff)

        # phi ################################################################
        n1_fr_subsamples['phi'] = {}
        log2_F_phis['phi'] = {}
        log2_F_phi_diffs['phi'] = {}
        if self.average_fr_global_phi:
            # not accounted; compute at runtime
            pass
        else:
            for scale_diff in self.scale_diffs_unique:
                total_conv_stride_over_U1_phi = (
                    self.total_conv_stride_over_U1s_phi[scale_diff])
                n1_fr_subsample = total_conv_stride_over_U1_phi
                log2_F_phi = (self.log2_F
                              if (not self.average_fr and self.aligned) else
                              total_conv_stride_over_U1_phi)
                log2_F_phi_diff = self.log2_F - log2_F_phi

                n1_fr_subsamples['phi'][scale_diff] = n1_fr_subsample
                log2_F_phis['phi'][scale_diff] = log2_F_phi
                log2_F_phi_diffs['phi'][scale_diff] = log2_F_phi_diff
        return n1_fr_subsamples, log2_F_phis, log2_F_phi_diffs
def compute_unpadding_fr(self):
"""See `help(scf.compute_padding_fr)`."""
self.ind_start_fr, self.ind_end_fr = [], []
for n2, N_fr in enumerate(self.N_frs):
if N_fr != 0:
_ind_start, _ind_end = self._compute_unpadding_params(N_fr)
else:
_ind_start, _ind_end = [], []
self.ind_start_fr.append(_ind_start)
self.ind_end_fr.append(_ind_end)
# compute out_3D params
self.ind_start_fr_max, self.ind_end_fr_max = [], []
for sub in range(self.log2_F + 1):
start_max, end_max = [max(idxs[n2][sub]
for n2 in range(len(self.N_frs))
if len(idxs[n2]) != 0)
for idxs in
(self.ind_start_fr, self.ind_end_fr)]
self.ind_start_fr_max.append(start_max)
self.ind_end_fr_max.append(end_max)
def _compute_unpadding_params(self, N_fr):
# compute unpad indices for all possible subsamplings
ind_start, ind_end = [0], [N_fr]
for j in range(1, max(self.J_fr, self.log2_F) + 1):
ind_start.append(0)
ind_end.append(math.ceil(ind_end[-1] / 2))
return ind_start, ind_end
    def compute_padding_fr(self):
        """Built around stride. The pipeline is as follows:
          1. Compute `J_pad_frs_max_init`, which is max padding under
             "standard" scattering configuration (all 'resample').
          2. Sample frequential filterbank at `2**J_pad_frs_max_init`,
             store in `psi1_f_fr_dn_init`. (`scf.create_init_psi_filters`)
          3. Compute `psi_fr_params` from `psi1_f_fr_dn_init`, in accords
             with `sampling_psi_fr`. (`scf._compute_psi_fr_params`)
          4. Compute `J_pad_frs_min_limit_due_to_psi` to restrict `J_pad_frs_min`
             as filterbank quality assurance.
             (`scf._compute_J_pad_frs_min_limit_due_to_psi`)
        Entering this method, for each `N_fr_scale`,
          - Compute minimum required padding to avoid boundary effects (which
            also ensures all wavelets fully decay), `min_to_pad_bound_effs`
          - Accounting for `max_pad_factor_fr`, we get `pad_boundeffs`
          - Accounting for `out_3D`, we get `pad_3D`, which overrides
            `pad_boundeffs`
          - Accounting for stride overrides all above quantities
          - Accounting for `J_pad_frs_min_limit_due_to_psi` overrides all above
            quantities
        We then assert that the resulting `J_pad_frs` is non-increasing, which
        subsequent methods assume to avoid complications.
        Padding (also stride) is computed on per-`2**N_fr_scale` basis rather than
        per-`N_fr` as latter greatly complicates implementation for little gain.
        Different `N_fr` within same `N_fr_scale` can yield different `J_pad_fr`,
        which requires additional filter indexing to track
        Relevant attributes
        -------------------
          - `pad_left_fr, ind_start_fr`: always zero since we right-pad
          - `pad_right_fr`: computed to avoid boundary effects *for each* `N_frs`.
          - `ind_end_fr`: computed to undo `pad_right_fr`
          - `ind_end_fr_max`: maximum unpad index across all `n2` for a given
            subsampling factor. E.g.:
            ::
                n2 = (0, 1, 2)
                J_fr = 3 --> j_fr = (0, 1, 2, 3)
                ind_end_fr = [[32, 16, 8, 4],
                              [29, 14, 7, 3],
                              [33, 16, 8, 4]]
                ind_end_fr_max = [33, 16, 8, 4]
            Ensures same unpadded freq length for `out_3D=True` without losing
            information. Unused for `out_3D=False`.
        """
        # maps `scale_diff -> log2(padded length)`; built largest scale first
        self.J_pad_frs = {}
        for N_fr_scale in self.N_fr_scales_unique[::-1]:
            # check for reuse
            scale_diff = self.N_fr_scales_max - N_fr_scale
            if (self.scale_diff_max_to_build is not None and
                    scale_diff > self.scale_diff_max_to_build):
                # account for `scale_diff_max_to_build`
                # reuse max `scale_diff`'s
                self.J_pad_frs[scale_diff] = self.J_pad_frs[
                    self.scale_diff_max_to_build]
                if self.J_pad_frs_min_limit_due_to_psi is None:
                    self.J_pad_frs_min_limit_due_to_psi = self.J_pad_frs[
                        scale_diff]
                continue
            # compute padding ################################################
            # compute pad for bound effs
            min_to_pad_bound_effs = self._get_min_to_pad_bound_effs(
                N_fr_scale, scale_diff)
            if self.unrestricted_pad_fr:
                pad_boundeffs = min_to_pad_bound_effs
            else:
                # user-capped: at most `2**max_pad_factor_fr` times input scale
                pad_boundeffs = min(min_to_pad_bound_effs,
                                    N_fr_scale +
                                    self.max_pad_factor_fr[scale_diff])
            # account for out_3D
            if not self.out_3D:
                J_pad_fr = pad_boundeffs
            else:
                # smallest `J_pad_fr` such that
                #   J_pad_fr / s >= unpad_len_common_at_max_fr_stride;
                #   s = 2**total_conv_stride_over_U1s[n2][0]
                # i.e.
                #   J_pad_fr >= unpad_len_common_at_max_fr_stride * 2**s  [*1]
                #
                # for `aligned and out_3D`, this will always end up being
                # `J_pad_frs_max`, since `s = log2_F` always, and `J_pad_fr`
                # in [*1] is forced to the max.
                #
                # the `[0]` isn't special, all indices are same for a given `n2`,
                # due to `out_3D`
                pad_3D = math.ceil(math.log2(
                    self.unpad_len_common_at_max_fr_stride *
                    2**self.total_conv_stride_over_U1s[scale_diff][0]))
                # `pad_3D` overrides `max_pad_factor_fr`
                J_pad_fr = max(pad_boundeffs, pad_3D)
                # but not `min_to_pad_bound_effs`
                # this would save a small amount of compute but require
                # implementing re-padding
                # J_pad_fr = min(J_pad_fr, min_to_pad_bound_effs)
            # account for stride
            # (padded length must admit the largest total stride of any pair)
            s_spinned = max(self.total_conv_stride_over_U1s[scale_diff])
            s_phi = self.total_conv_stride_over_U1s_phi[scale_diff]
            min_to_pad_stride = max(s_spinned, s_phi)
            J_pad_fr = max(J_pad_fr, min_to_pad_stride)
            # account for phi 'resample'
            if self.sampling_phi_fr == 'resample':
                # This isn't necessary but would require handling phi construction
                # with subsampling > phi's length, or doing global averaging for
                # log2_F_phi > J_pad_fr, both doable but currently not done.
                # This is automatically satisfied by `max(, stride)` except in the
                # `not average_fr and aligned` case
                J_pad_fr = max(J_pad_fr, self.log2_F)
            # account for `J_pad_frs_min_limit_due_to_psi`
            if self.J_pad_frs_min_limit_due_to_psi is not None:
                J_pad_fr = max(J_pad_fr, self.J_pad_frs_min_limit_due_to_psi)
            # insert
            self.J_pad_frs[scale_diff] = J_pad_fr
        # ensure integer type (non-numpy to not confuse backends)
        for k, v in self.J_pad_frs.items():
            self.J_pad_frs[k] = int(v)
        # validate padding computed so far, as `psi_fr_factory` relies on it
        self._assert_nonincreasing_J_pad_frs()
        # compute related params #############################################
        # per-`n2` left/right pad amounts; `-1` marks skipped (zero) `N_fr`
        self.pad_left_fr, self.pad_right_fr = [], []
        for n2, N_fr in enumerate(self.N_frs):
            if N_fr != 0:
                scale_diff = self.N_fr_scales_max - math.ceil(math.log2(N_fr))
                J_pad_fr = self.J_pad_frs[scale_diff]
                (_pad_left, _pad_right
                 ) = self._compute_padding_params(J_pad_fr, N_fr)
            else:
                _pad_left, _pad_right = -1, -1
            self.pad_left_fr.append(_pad_left)
            self.pad_right_fr.append(_pad_right)
        # compute `scale_diff_max_to_build`
        if self.scale_diff_max_to_build is None:
            # clarity assertion
            assert (self.sampling_psi_fr == 'resample' or
                    (self.sampling_psi_fr == 'exclude' and
                     max(self.scale_diffs) in self.psi_fr_params) or
                    (self.sampling_psi_fr == 'recalibrate' and
                     self.scale_diff_max_recalibrate is None)
                    ), (self.sampling_psi_fr, self.scale_diffs,
                        list(self.psi_fr_params),
                        self.scale_diff_max_recalibrate)
            if self.sampling_psi_fr == 'resample':
                # scale before the first scale to drop below minimum
                # is the limiting scale
                J_pad_frs_min = min(self.J_pad_frs.values())
                for scale_diff, J_pad_fr in self.J_pad_frs.items():
                    if J_pad_fr == J_pad_frs_min:
                        self.scale_diff_max_to_build = scale_diff
                        break
                # clear `psi_fr_params` of scales we won't build
                # (snapshot keys first; we delete while iterating)
                scale_diffs = list(self.psi_fr_params)
                for scale_diff in scale_diffs:
                    if scale_diff > self.scale_diff_max_to_build:
                        del self.psi_fr_params[scale_diff]
    def _get_min_to_pad_bound_effs(self, N_fr_scale, scale_diff):
        """Minimum log2 padded length for `N_fr_scale` to avoid boundary
        effects, accounting for both `psi` and `phi` temporal supports.

        Returns `ceil(log2(2**N_fr_scale + 2*min_to_pad))`, where `min_to_pad`
        is the larger of the psi- and phi-required paddings.
        """
        common_kw = dict(normalize=self.normalize_fr, P_max=self.P_max,
                         eps=self.eps)
        ca = dict(criterion_amplitude=self.criterion_amplitude)
        # psi ################################################################
        if self.sampling_psi_fr == 'resample':
            # note, for `average_fr=False`, `min_to_pad_phi` can be `0` for
            # spinned pairs, but this may necessitate two FFTs on |U1 * psi2|, one
            # on larger padded (for the phi_f pairs) and other on its trimming.
            # Not a concern for `J_fr >= log2_F and sampling_psi_fr == 'resample'`
            min_to_pad_psi = self._pad_fr_psi
        elif self.sampling_psi_fr in ('exclude', 'recalibrate'):
            # fetch params
            # (`[-1]` is presumably the widest / largest-support wavelet of
            #  this scale's filterbank, i.e. the worst case — TODO confirm)
            xis, sigmas = [self.psi_fr_params[scale_diff][field]
                           for field in ('xi', 'sigma')]
            psi_fn = lambda N: morlet_1d(N, xis[-1], sigmas[-1], **common_kw)
            N_min_psi = compute_minimum_required_length(
                psi_fn, N_init=2**N_fr_scale, **ca)
            min_to_pad_psi = compute_temporal_support(
                psi_fn(N_min_psi)[None], **ca)
            if self.pad_mode_fr == 'zero':
                # NOTE(review): halved for zero-padding — presumably because
                # zero-pad tolerates half the decay length; confirm
                min_to_pad_psi //= 2
        # phi ################################################################
        if self.sampling_phi_fr == 'resample':
            min_to_pad_phi = self._pad_fr_phi
        elif self.sampling_phi_fr == 'recalibrate':
            spinned_diffs = self.log2_F_phi_diffs['spinned'][scale_diff]
            phi_diff = self.log2_F_phi_diffs['phi'][scale_diff]
            if None in spinned_diffs:
                assert not self.average_fr
                log2_F_phi_diff = phi_diff
            else:
                # lower -> greater log2_F -> greater pad, take worst case
                log2_F_phi_diff = min(min(spinned_diffs), phi_diff)
            sigma_low = self.sigma0 / self.F
            sigma_low_F = sigma_low * 2**log2_F_phi_diff
            phi_fn = lambda N: gauss_1d(N, sigma_low_F, **common_kw)
            N_min_phi = compute_minimum_required_length(
                phi_fn, N_init=2**N_fr_scale, **ca)
            min_to_pad_phi = compute_temporal_support(
                phi_fn(N_min_phi)[None], **ca)
            if self.pad_mode_fr == 'zero':
                min_to_pad_phi //= 2
        # final ##############################################################
        min_to_pad = max(min_to_pad_phi, min_to_pad_psi)
        min_to_pad_bound_effs = math.ceil(math.log2(2**N_fr_scale + 2*min_to_pad))
        return min_to_pad_bound_effs
def _compute_padding_params(self, J_pad, N_fr):
pad_left = 0
pad_right = 2**J_pad - pad_left - N_fr
# sanity check
pad_diff = self.J_pad_frs_max_init - J_pad
assert pad_diff >= 0, "%s > %s | %s" % (
J_pad, self.J_pad_frs_max_init, N_fr)
# return
return pad_left, pad_right
    def _compute_J_pad_fr(self, N_fr, Q):
        """Depends on `N_frs`, `max_pad_factor_fr`, `sampling_phi_fr`,
        `sampling_psi_fr`, and common filterbank params.
        `min_to_pad` is computed for both `phi` and `psi` in case latter has
        greater time-domain support (stored as `_pad_fr_phi` and `_pad_fr_psi`).
          - 'resample': will use original `_pad_fr_phi` and/or `_pad_fr_psi`
          - 'recalibrate' / 'exclude': will divide by difference in dyadic scale,
            e.g. `_pad_fr_phi / 2`.

        Parameters
        ----------
        N_fr : int
            Frequential input length (number of `n1` rows).
        Q : int
            Quality factor forwarded to `compute_minimum_support_to_pad`.

        Returns
        -------
        (J_pad, min_to_pad, pad_phi, pad_psi1) : (int, int, int, int)
            `2**J_pad` is the padded length; the rest are the raw padding
            quantities from `compute_minimum_support_to_pad` (with phi's
            contribution zeroed under `average_fr_global_phi`).
        """
        min_to_pad, pad_phi, pad_psi1, _ = compute_minimum_support_to_pad(
            N_fr, self.J_fr, Q, self.F, pad_mode=self.pad_mode_fr,
            normalize=self.normalize_fr, r_psi=self.r_psi_fr,
            **self.get_params('sigma0', 'alpha', 'P_max', 'eps',
                              'criterion_amplitude'))
        if self.average_fr_global_phi:
            min_to_pad = pad_psi1  # ignore phi's padding
            pad_phi = 0
        J_pad_ideal = math.ceil(np.log2(N_fr + 2 * min_to_pad))
        # adjust per `max_pad_factor_fr` and warn if needed
        # must do this to determine `xi_min` later. if "ideal pad" amount is
        # of interest, it should be another variable
        if not self.unrestricted_pad_fr:
            N_fr_scale = math.ceil(math.log2(N_fr))
            scale_diff = self.N_fr_scales_max - N_fr_scale
            # edge case: `phi_t` pair (_J_pad_fr_fo)
            if scale_diff < 0:
                mx = math.ceil(math.log2(self._n_psi1_f))
                assert mx > self.N_fr_scales_max, (mx, self.N_fr_scales_max)
                scale_diff = 0  # reuse `max_pad_factor_fr` for max spinned scale
            J_pad = min(J_pad_ideal,
                        N_fr_scale + self.max_pad_factor_fr[scale_diff])
            # warn when the cap trims more than one dyadic scale off ideal pad
            if J_pad_ideal - J_pad > 1:
                extent_txt = ' severe' if J_pad_ideal - J_pad > 2 else ''
                warnings.warn(("Insufficient frequential padding, will yield"
                               "{} boundary effects and filter distortion; "
                               "recommended higher `max_pad_factor_fr` or lower "
                               "`J_fr` or `F`.").format(extent_txt))
        else:
            J_pad = J_pad_ideal
        return J_pad, min_to_pad, pad_phi, pad_psi1
def get_params(self, *args):
return {k: getattr(self, k) for k in args}
def _assert_nonincreasing_J_pad_frs(self):
prev_pad = 999
for pad in self.J_pad_frs.values():
if pad > prev_pad:
raise Exception("w yielded padding that's "
"greater for lesser `N_fr_scale`; this is "
"likely to yield incorrect or undefined behavior."
"\nJ_pad_frs=%s" % self.J_pad_frs)
prev_pad = pad
# additionally assert we didn't exceed J_pad_frs_max_init
assert all(p <= self.J_pad_frs_max_init for p in self.J_pad_frs.values()
), (self.J_pad_frs, self.J_pad_frs_max_init)
# filterbank builders ########################################################
def psi_fr_factory(psi_fr_params, N_fr_scales_unique, N_fr_scales_max, J_pad_frs,
                   sampling_psi_fr='resample', scale_diff_max_to_build=None,
                   normalize_fr='l1', criterion_amplitude=1e-3, sigma0=0.1,
                   P_max=5, eps=1e-7):
    """
    Builds in Fourier the Morlet filters used for the scattering transform.
    Each single filter is provided as a dictionary with the following keys:
    * 'xi': central frequency
    * 'sigma': frequential width
    * 'j': subsampling factor from 0 to `J_fr` (or potentially less if
      `sampling_psi_fr != 'resample'`).
    * 'width': temporal width (interval of temporal invariance, i.e. its "T")
    * 'support': temporal support (interval outside which wavelet is ~0)
    Parameters
    ----------
    psi_fr_params : dict[int:dict[str:list]]
        Filterbank parameters, structured as
            scale_diff: {field: [*values]}
        e.g.
            {0: {'xi': [0.4, 0.2, 0.1],
                 'sigma': [.1, .05, .025],
                 ...},
             1: {...}, ...}
        Alongside `J_pad_frs`, will determine `psi_ids`.
    N_fr_scales_unique : list[int]
        Used for iterating over `scale_diff`, and a sanity check.
    N_fr_scales_max : int
        Used to compute `scale_diff` from `N_fr_scale` from `N_fr_scales_unique`.
    J_pad_frs : dict[int: int]
        Used to compute `padded_len`s, the lengths at which `morlet_1d`
        is sampled.
    sampling_psi_fr : str['resample', 'recalibrate', 'exclude']
        Used for a sanity check in case of 'exclude'.
        See `help(TimeFrequencyScattering1D)`.
        In terms of effect on maximum `j` per `n1_fr`:
          - 'resample': no variation (by design, all temporal properties are
            preserved, including subsampling factor).
          - 'recalibrate': `j1_fr_max` is (likely) lesser with greater
            `scale_diff` (by design, temporal width is tailored to
            `2**N_fr_scale`). The limit, however, is set by
            `sigma_max_to_min_max_ratio` (see its docs).
          - 'exclude': approximately same as 'recalibrate'. By design, excludes
            temporal widths above `2**N_fr_scale`, which is likely to reduce
            `j1_fr_max` with greater `scale_diff`.
            - It's "approximately" same because center frequencies and
              widths are different; depending on how loose our alias tolerance
              (`alpha`) is, they may be exactly the same.
    scale_diff_max_to_build : int
        Controls max built `scale_diff`, and is used for a sanity check.
    normalize_fr, criterion_amplitude, sigma0, P_max, eps:
        Parameters for building `morlet_1d` or its meta.
    Returns
    -------
    psi1_f_fr_up : dict[int: list[tensor[float]],
                        str: dict[int: list[int/float]]]
        Contains band-pass filters of frequential scattering with "up" spin,
        and their meta - keyed by `meta` fields, and `psi_id`:
            {psi_id: [psi_arr0, psi_arr1, ...],
             meta0: {psi_id0: [...], ...}}
        This differs from `Scattering1D.psi1_f`. See `psi_id` below.
        These filters are not subsampled, as they do not receive subsampled
        inputs (but their *outputs*, i.e. convolutions, can be subsampled).
        Difference in filter length is never a time-domain subsampling, but is
        rather *trimming* (`sampling_psi_fr='resample'`), not being included at
        all ('exclude'), or a change in `xi` and `sigma` to match `N_fr_scale`
        ('recalibrate').
        Different filterbank lengths are indexed by `psi_id` (see `psi_id` below).
        Example: `J_fr = 2`, lists hold permitted subsampling factors for
        respective filters (i.e. for after convolving):
          - 'resample':
                0: [2, 1, 0]  # `psi_id=0: [psis[-1], psis[-2], psis[-3]]`
                1: [2, 1, 0]
                2: [2, 1, 0]
          - 'recalibrate':
                0: [2, 1, 0]
                1: [1, 1, 0]
                2: [0, 0, 0]
          - 'exclude':
                0: [2, 1, 0]
                1: [1, 0]
                2: [0]
    psi1_f_fr_dn : dict[int: list[tensor[float]],
                        str: dict[int: list[int/float]]]
        Same as `psi1_f_fr_up` but with "down" spin (anti-analytic, whereas "up"
        is analytic wavelet).
    psi_ids : dict[int: int]
        See `psi_id` below.
    psi_id
    ------
    Indexes frequential filterbanks, returning list of filters. Is a function of
    `scale_diff`, that maps to `params, J_pad_fr`:
        `(xis, sigmas), J_pad_fr = psi_ids_fn(scale_diff)`
    The idea is, we may desire different filterbanks for different `N_fr_scale`s.
    We cannot index them through
      - `J_pad_fr`, since different `scale_diff` may yield same `J_pad_fr`.
      - `scale_diff`, since different `scale_diff` may have the same
        `params, J_pad_fr`, which yields duplication.
      - `params`, since same `params` may apply to different `scale_diff`.
    `psi_id` resolves these conflicts.
      - Higher `psi_id` always corresponds to higher `scale_diff`.
      - One `psi_id` may correspond to multiple `scale_diff`, but never
        multiple `params, J_pad_fr`.
      - Same `params` *or* `J_pad_fr` may correspond to one `psi_id`, but never
        both.
    Logic summary, in pseudocode:
    ::
        psi_ids = {0: 0}
        for scale_diff in scale_diffs:
            params, J_pad_fr = compute_stuff()
            if (params, J_pad_fr) not in built_params_and_J_pad_frs:
                psi_ids[scale_diff] = max(psi_ids.values()) + 1
            else:
                # this `else` may also execute per other conditionals
                psi_ids[scale_diff] = max(psi_ids.values())
    """
    # compute the spectral parameters of the filters
    xi1_frs, sigma1_frs, j1_frs, is_cqt1_frs = [
        {scale_diff: psi_fr_params[scale_diff][field]
         for scale_diff in psi_fr_params}
        for field in ('xi', 'sigma', 'j', 'is_cqt')]
    ###########################################################################
    def repeat_last_built_id(scale_diff, scale_diff_last_built):
        # alias `scale_diff` to the most recently built filterbank
        psi_ids[scale_diff] = psi_ids[scale_diff_last_built]
    # instantiate the dictionaries which will contain the filters
    psi1_f_fr_dn, psi1_f_fr_up = {}, {}
    # needed to handle different `N_fr_scale` having same `J_pad_fr` & params,
    # and different params having same `J_pad_fr`. Hence,
    #     params, J_pad_fr = psi_id(N_fr_scale)
    psi_ids = {}
    params_built = []
    scale_diff_last_built = -1
    for N_fr_scale in N_fr_scales_unique[::-1]:
        scale_diff = N_fr_scales_max - N_fr_scale
        first_scale = bool(scale_diff == 0)
        # ensure we compute at valid `N_fr_scale`
        if scale_diff in psi_ids:
            # already built
            continue
        elif first_scale:
            # always built
            pass
        elif N_fr_scale == -1:
            # invalid scale
            continue
        elif scale_diff not in j1_frs:
            # universal cue to reuse last built filterbank.
            # varied `padded_len` as func of `J_pad_frs` & `scale_diff` (hence
            # reuse not sufficing) is ruled out by later assertion, `pads_built`.
            # Frontend also assures this in `compute_padding_fr`, and/or via
            # `scale_diff_max_to_build`
            repeat_last_built_id(scale_diff, scale_diff_last_built)
            continue
        elif (scale_diff_max_to_build is not None and
              scale_diff > scale_diff_max_to_build):
            assert scale_diff not in j1_frs, j1_frs
            # ensure `scale_diff` didn't exceed a global maximum.
            # subsequent `scale_diff` are only greater, so
            # we could `break`, but still need to `repeat_last_built_id`
            repeat_last_built_id(scale_diff, scale_diff_last_built)
            continue
        # extract params to iterate
        n_psi = len(j1_frs[scale_diff])
        params = []
        for n1_fr in range(n_psi):
            xi    = xi1_frs[   scale_diff][n1_fr]
            sigma = sigma1_frs[scale_diff][n1_fr]
            padded_len = 2**J_pad_frs[scale_diff]  # repeat for all n1_fr
            params.append((xi, sigma, padded_len))
        # if already built, point to it and don't rebuild
        if params in params_built:
            repeat_last_built_id(scale_diff, scale_diff_last_built)
            continue
        # build wavelets #####################################################
        psis_up = []
        for n1_fr in range(n_psi):
            # ensure we compute at valid `N_fr_scale`, `n1_fr`
            if first_scale:
                # always built
                pass
            elif (sampling_psi_fr == 'exclude' and
                  # this means the wavelet (sampled at J_pad_frs_max_init)
                  # exceeded max permitted width at this scale,
                  # i.e. `width > 2**N_fr_scale`
                  (scale_diff not in j1_frs or
                   n1_fr > len(j1_frs[scale_diff]) - 1)):
                # subsequent `scale_diff` are only greater, and
                # hence `len(j1_frs[scale_diff])` only lesser
                # above conditional shouldn't be possible to satisfy but is
                # kept for clarity
                raise Exception("impossible iteration")
                break  # would happen if condition was met; kept for clarity
            #### Compute wavelet #############################################
            # fetch wavelet params, sample wavelet
            xi, sigma, padded_len = params[n1_fr]
            # expand dim to multiply along freq like (2, 32, 4) * (32, 1)
            psi = morlet_1d(padded_len, xi, sigma, normalize=normalize_fr,
                            P_max=P_max, eps=eps)[:, None]
            psis_up.append(psi)
        # if all `n1_fr` built, register & append to filterbank ##############
        if first_scale:
            psi_ids[0] = 0
        else:
            psi_ids[scale_diff] = psi_ids[scale_diff_last_built] + 1
        params_built.append(params)
        scale_diff_last_built = scale_diff
        # append to filterbank
        psi_id = psi_ids[scale_diff]
        psi1_f_fr_up[psi_id] = psis_up
        # compute spin down by time-reversing spin up in frequency domain
        psi1_f_fr_dn[psi_id] = [time_reverse_fr(p) for p in psis_up]
    ##########################################################################
    # ensure every unique `N_fr_scale` has a filterbank
    n_scales = len(N_fr_scales_unique)
    assert len(psi_ids) == n_scales, (psi_ids, N_fr_scales_unique)
    # validate `scale_diff_max_to_build`
    if scale_diff_max_to_build is not None:
        assert scale_diff_last_built <= scale_diff_max_to_build
    # guaranteed by "J_pad_frs is non-increasing", which was already asserted
    # in `compute_padding_fr()` (base_frontend), but include here for clarity;
    # much of above logic assumes this
    pads_built = [math.log2(params[0][2]) for params in params_built]
    assert min(pads_built) == J_pad_frs[scale_diff_last_built], (
        pads_built, J_pad_frs)
    # assert "higher psi_id -> lower scale"
    # since `params, J_pad_fr = psi_ids_fn(scale_diff)`,
    # and `params` and `J_pad_fr` are always same for same `scale_diff`,
    # then `params` or `J_pad_fr` (and hence `psi_id`) only change if
    # `scale_diff` changes. Namely,
    #   - if `psi_id` changed, then either `params` or `J_pad_fr` changed
    #     (which can only happen if `scale_diff` changed),
    #   - if `scale_diff` changed, then `psi_id` doesn't necessarily change,
    #     since neither of `params` or `J_pad_fr` necessarily change.
    # Thus, "psi_id changed => scale_diff changed", but not conversely.
    prev_scale_diff, prev_psi_id = -1, -1
    # (fixed: previously iterated `for scale_diff in psi_ids:` without
    #  rebinding `psi_id`, comparing a stale `psi_id` left over from the
    #  build loop above — which made this check a no-op after the first
    #  iteration. Each entry's own `psi_id` must be compared.)
    for scale_diff, psi_id in psi_ids.items():
        if psi_id == prev_psi_id:
            # only check against changing `psi_id`, but still track `scale_diff`
            prev_scale_diff = scale_diff
            continue
        assert scale_diff > prev_scale_diff, (scale_diff, prev_scale_diff)
        prev_scale_diff, prev_psi_id = scale_diff, psi_id
    # instantiate for-later params and reusable kwargs
    ca = dict(criterion_amplitude=criterion_amplitude)
    s0ca = dict(criterion_amplitude=criterion_amplitude, sigma0=sigma0)
    # Embed meta information within the filters
    for psi_f in (psi1_f_fr_dn, psi1_f_fr_up):
        for field in ('xi', 'sigma', 'j', 'is_cqt', 'support', 'width'):
            if field not in psi_f:
                psi_f[field] = {}
            for scale_diff, psi_id in psi_ids.items():
                if psi_id in psi_f[field]:
                    continue
                psi_f[field][psi_id] = []
                for n1_fr in range(len(psi_f[psi_id])):
                    if field == 'support':
                        p = 2 * compute_temporal_support(
                            psi_f[psi_id][n1_fr], **ca)
                    elif field == 'width':
                        N_fr_scale = N_fr_scales_max - scale_diff
                        p = 2 * compute_temporal_width(
                            psi_f[psi_id][n1_fr], N=2**N_fr_scale, **s0ca)
                    else:
                        p = psi_fr_params[scale_diff][field][n1_fr]
                    psi_f[field][psi_id].append(p)
    # return results
    return psi1_f_fr_up, psi1_f_fr_dn, psi_ids
def phi_fr_factory(J_pad_frs_max_init, J_pad_frs, F, log2_F, unrestricted_pad_fr,
                   pad_mode_fr, sampling_phi_fr='resample', average_fr=None,
                   average_fr_global_phi=None, aligned=None,
                   criterion_amplitude=1e-3, normalize_fr='l1', sigma0=0.1,
                   P_max=5, eps=1e-7):
    """
    Builds in Fourier the lowpass Gabor filters used for JTFS.
    Every filter is provided as a dictionary with the following keys:
    * 'xi': central frequency, always 0 for low-pass filters.
    * 'sigma': frequency-domain width, as passed to the function being sampled
    * 'j': subsampling factor from 0 to `log2_F` (or potentially less if
      `sampling_phi_fr = 'recalibrate'`).
    * 'width': temporal width (scale; interval of imposed invariance)
    * 'support': temporal support (duration of decay)
    Parameters
    ----------
    J_pad_frs_max_init : int
        `2**J_pad_frs_max_init` is the largest length of the filters.
    J_pad_frs : dict[int: int]
        Lengths at which to sample `gauss_1d`. For 'recalibrate', also
        controls time-domain widths (see "Build logic").
    F : int
        Scale of invariance (in linear units). Controls `sigma` of `phi`
        via `sigma0 / F`. For 'recalibrate', `log2_F_phi_diff` means
        `sigma0 / F / 2**log2_F_phi_diff` (see "Build logic").
    log2_F : int
        Scale of invariance (log2(prevpow2(F))). Controls maximum dyadic scale
        and subsampling factor for all `phi`.
    unrestricted_pad_fr : bool
        Used for a quality check; `True` ensures `phi` decays sufficiently
        (but not necessarily fully if `pad_mode_fr=='zero'`; see code).
        Including steps outside this function, `max_pad_factor_fr != None`
        may weaken such filter-distortion guarantees.
    pad_mode_fr : str
        Used for a quality check.
    sampling_phi_fr : str['resample', 'recalibrate']
        See "Build logic" below.
    average_fr : bool
        Used for a sanity check.
    average_fr_global_phi : bool
        Used for a quality check.
    aligned : bool
        Used for a sanity check.
    criterion_amplitude : float
        Used to compute `phi` meta.
    sigma0 : float
        Together with `F`, determines width (sigma) of `phi`: `sigma = sigma0/F`.
    normalize_fr : str
        `gauss_1d` parameter `normalize`.
    P_max, eps : float, float
        `gauss_1d` parameters.
    Returns
    -------
    phi_f_fr : dict[int: dict[int: list[tensor[float]]],
                    str: dict[int: dict[int: list[int]], float]]
        Contains the low-pass filter at all possible lengths, scales of
        invariance, and subsampling factors:
            phi_f_fr[invariance_scale][input_length][input_subsampling]
              <= e.g. =>
            phi_f_fr[~log2_F][~J_pad_fr_max][n1_fr_subsample]
        and corresponding meta:
            phi_f_fr['support'][log2_F_phi_diff][pad_diff][n1_fr_subsample]
            phi_f_fr['sigma'][log2_F_phi_diff']  # doesn't vary w/ other params
        This differs from `Scattering1D.phi_f`. See "Build logic" for details.
    Build logic
    -----------
    We build `phi` for every possible input length (`2**J_pad_fr`), input
    subsampling factor (`n1_fr_subsample`), and ('recalibrate' only) scale
    of invariance. Structured as
        `phi_f_fr[log2_F_phi_diff][pad_diff][sub]`
    `log2_F_phi_diff == log2_F - log2_F_phi`. Hence,
        higher `log2_F_phi_diff`
          <=>
        greater *contraction* (time-domain) of original phi
          <=>
        lower `log2_F_phi`, lower permitted max subsampling
    Higher `pad_diff` is a greater *trimming* (time-domain) of the corresponding
    lowpass.
      - 'resample': `log2_F_phi_diff == 0`, always.
      - 'recalibrate': `log2_F_phi_diff` spans from `min(log2_F, J_pad_fr)` to
        `log2_F`. Not all of these will be used, but we compute every possible
        combination to avoid figuring out which will be.
    'resample' enforces global scale of invariance (`==F` for all coefficients).
    'recalibrate' "follows" the scale of `psi`, as controlled by
    `total_conv_stride_over_U1`, averaging less for finer filterbanks.
    """
    # compute the spectral parameters of the filters
    sigma_low = sigma0 / F
    N_init = 2**J_pad_frs_max_init
    zero_stride_globally = bool(not average_fr and aligned)
    def compute_all_subsamplings(phi_f_fr, pad_diff, log2_F_phi, log2_F_phi_diff):
        # periodize (i.e. fold) in Fourier == subsample in time; one entry
        # per subsampling factor `sub` in `1..log2_F_phi`
        for sub in range(1, 1 + log2_F_phi):
            phi_f_fr[log2_F_phi_diff][pad_diff].append(
                periodize_filter_fourier(phi_f_fr[log2_F_phi_diff][pad_diff][0],
                                         nperiods=2**sub))
    # initial lowpass
    phi_f_fr = {0: {}}
    # expand dim to multiply along freq like (2, 32, 4) * (32, 1)
    phi_f_fr[0][0] = [gauss_1d(N_init, sigma_low, P_max=P_max, eps=eps)[:, None]]
    compute_all_subsamplings(phi_f_fr, pad_diff=0, log2_F_phi=log2_F,
                             log2_F_phi_diff=0)
    # reusable
    common_kw = dict(normalize=normalize_fr, P_max=P_max, eps=eps)
    # lowpass filters at all possible input lengths ##########################
    pads_iterated = []
    for J_pad_fr in list(J_pad_frs.values())[::-1]:
        if J_pad_fr == -1:
            continue
        # avoid recomputation
        if J_pad_fr in pads_iterated:
            continue
        pads_iterated.append(J_pad_fr)
        # validate J_pad_fr
        if sampling_phi_fr == 'resample' and not zero_stride_globally:
            # guaranteed by design:
            #   - 'resample': total_conv_stride_over_U1 >= log2_F
            #   - J_pad_fr = max(, total_conv_stride_over_U1)
            # exception is `not average_fr and aligned`, but we force
            # `max(, log2_F)` in frontend
            assert J_pad_fr >= log2_F, (J_pad_fr, log2_F)
        pad_diff = J_pad_frs_max_init - J_pad_fr
        if sampling_phi_fr == 'resample':
            phi_f_fr[0][pad_diff] = [
                gauss_1d(2**J_pad_fr, sigma_low, **common_kw)[:, None]]
            # dedicate separate filters for *subsampled* as opposed to *trimmed*
            # inputs (i.e. `n1_fr_subsample` vs `J_pad_frs_max_init - J_pad_fr`)
            compute_all_subsamplings(phi_f_fr, pad_diff, log2_F_phi=log2_F,
                                     log2_F_phi_diff=0)
        elif sampling_phi_fr == 'recalibrate':
            # These won't differ from plain subsampling but we still
            # build each `log2_F_phi_diff` separately with its own subsampling
            # to avoid excessive bookkeeping.
            # `phi[::factor] == gauss_1d(N // factor, sigma_low * factor)`
            # when not aliased.
            # by design, (J_pad_frs[scale_diff] >=
            #             total_conv_stride_over_U1s[scale_diff])
            max_log2_F_phi = min(log2_F, J_pad_fr)
            min_log2_F_phi_diff = log2_F - max_log2_F_phi
            # `== log2_F` means `log2_F_phi == 0`
            max_log2_F_phi_diff = log2_F
            # not all these filters will be used, and some will be severely
            # under-padded (e.g. log2_F_phi == J_pad_fr), but we compute them
            # anyway to avoid having to determine what will and won't be used
            for log2_F_phi_diff in range(min_log2_F_phi_diff,
                                         max_log2_F_phi_diff + 1):
                log2_F_phi = log2_F - log2_F_phi_diff
                sigma_low_F = sigma_low * 2**log2_F_phi_diff
                if log2_F_phi_diff not in phi_f_fr:
                    phi_f_fr[log2_F_phi_diff] = {}
                if pad_diff in phi_f_fr[log2_F_phi_diff]:
                    # already computed
                    continue
                phi_f_fr[log2_F_phi_diff][pad_diff] = [
                    gauss_1d(2**J_pad_fr, sigma_low_F, **common_kw)[:, None]]
                compute_all_subsamplings(phi_f_fr, pad_diff, log2_F_phi,
                                         log2_F_phi_diff)
        # validate phi
        if (sampling_phi_fr == 'resample' and unrestricted_pad_fr and
                pad_mode_fr != 'zero' and not average_fr_global_phi):
            # This means width is already too great for own length,
            # so lesser length will distort lowpass.
            # This is automatically averted with `max_pad_factor=None`.
            # However, since the zero-padded case takes min_to_pad // 2,
            # this won't hold any longer (but we permit it anyway since
            # the difference is tolerable).
            phi_fr = phi_f_fr[0][pad_diff][0]
            phi_halfsupport = compute_temporal_support(
                phi_fr, criterion_amplitude=criterion_amplitude)
            assert phi_halfsupport < phi_fr.size // 2, (
                phi_halfsupport, phi_fr.size // 2)
    # reorder keys as ascending
    # (rebuild the nested dict with sorted keys; insertion order is the
    #  iteration order downstream)
    if sampling_phi_fr == 'recalibrate':
        _phi_f_fr = phi_f_fr
        phi_f_fr = {}
        log2_F_phi_diffs = sorted(list(_phi_f_fr))
        for log2_F_phi_diff in log2_F_phi_diffs:
            phi_f_fr[log2_F_phi_diff] = {}
            pad_diffs = sorted(list(_phi_f_fr[log2_F_phi_diff]))
            for pad_diff in pad_diffs:
                phi_f_fr[log2_F_phi_diff][pad_diff] = _phi_f_fr[
                    log2_F_phi_diff][pad_diff]
    # embed meta info in filters #############################################
    meta_fields = ('xi', 'sigma', 'j', 'support', 'width')
    for field in meta_fields:
        phi_f_fr[field] = {}
    # only int keys are filter entries; str keys are the meta fields above
    for log2_F_phi_diff in phi_f_fr:
        if not isinstance(log2_F_phi_diff, int):
            continue
        log2_F_phi = log2_F - log2_F_phi_diff
        xi_fr_0 = 0.
        sigma_fr_0 = sigma_low * 2**log2_F_phi_diff
        j_0 = log2_F_phi
        for field in meta_fields:
            phi_f_fr[field][log2_F_phi_diff] = []
        # 'xi'/'sigma'/'j' don't vary with `pad_diff`/`sub`; 'width'/'support'
        # do, hence nested dicts for the latter
        phi_f_fr['xi'     ][log2_F_phi_diff] = xi_fr_0
        phi_f_fr['sigma'  ][log2_F_phi_diff] = sigma_fr_0
        phi_f_fr['j'      ][log2_F_phi_diff] = j_0
        phi_f_fr['width'  ][log2_F_phi_diff] = {}
        phi_f_fr['support'][log2_F_phi_diff] = {}
        for pad_diff in phi_f_fr[log2_F_phi_diff]:
            phi_f_fr['width'  ][log2_F_phi_diff][pad_diff] = []
            phi_f_fr['support'][log2_F_phi_diff][pad_diff] = []
            for sub in range(len(phi_f_fr[log2_F_phi_diff][pad_diff])):
                # should halve with subsequent `sub`, but compute exactly.
                phi = phi_f_fr[log2_F_phi_diff][pad_diff][sub]
                width = compute_temporal_width(
                    phi, N=phi.size, sigma0=sigma0,
                    criterion_amplitude=criterion_amplitude)
                support = 2 * compute_temporal_support(
                    phi, criterion_amplitude=criterion_amplitude)
                phi_f_fr['width'  ][log2_F_phi_diff][pad_diff].append(width)
                phi_f_fr['support'][log2_F_phi_diff][pad_diff].append(support)
    # return results
    return phi_f_fr
def _recalibrate_psi_fr(max_original_width, xi1_frs, sigma1_frs, j1_frs,
                        is_cqt1_frs, N, alpha, N_fr_scales_max,
                        N_fr_scales_unique, sigma_max_to_min_max_ratio):
    """Recompute the frequential filterbank params (xi, sigma, j, is_cqt)
    for every `scale_diff = N_fr_scales_max - N_fr_scale`.

    Returns a 5-tuple: four dicts keyed by `scale_diff` (entry `0` holds the
    original, unmodified lists) plus `scale_diff_max`, the previous
    `scale_diff` reached when recalibration would push `sigma_min` above
    `sigma_max / sigma_max_to_min_max_ratio` (None if that cap is never hit).

    NOTE(review): parameter `N` is not referenced in this body -- presumably
    kept for signature compatibility; confirm at call sites.
    """
    # recalibrate filterbank to each `scale_diff`
    # `scale_diff=0` is the original input length, no change needed
    xi1_frs_new, sigma1_frs_new, j1_frs_new, is_cqt1_frs_new = (
        {0: xi1_frs}, {0: sigma1_frs}, {0: j1_frs}, {0: is_cqt1_frs})
    # set largest scale wavelet's scale as reference, but don't allow exceeding
    # largest input's scale (which is possible)
    recalibrate_scale_ref = min(math.ceil(math.log2(max_original_width)),
                                N_fr_scales_max)
    sigma_max = max(sigma1_frs)
    sigma_min = min(sigma1_frs)
    xi_min = min(xi1_frs)
    # hard cap on how small recalibrated sigmas may get (relative to sigma_max)
    sigma_min_max = sigma_max / sigma_max_to_min_max_ratio
    scale_diff_max = None
    scale_diff_prev = 0
    def set_params(scale_diff, empty=False, reuse_original=False):
        # Initialize this `scale_diff` entry in all four output dicts:
        # either as a fresh empty list, or as a copy of the originals.
        for param in (xi1_frs_new, sigma1_frs_new, j1_frs_new, is_cqt1_frs_new):
            if empty:
                param[scale_diff] = []
            elif reuse_original:
                param[scale_diff] = param[0].copy()
    # walk scales from largest to smallest (ascending `scale_diff`)
    for N_fr_scale in N_fr_scales_unique[::-1]:
        scale_diff = N_fr_scales_max - N_fr_scale
        if scale_diff == 0:
            # leave original unchanged
            continue
        elif max_original_width < 2**N_fr_scale:
            # no need to recalibrate, reuse 'resample' just like 'exclude'
            # psi_fr_factory will skip this via `if params in params_built`
            set_params(scale_diff, reuse_original=True)
            continue
        recalibrate_scale_diff = recalibrate_scale_ref - N_fr_scale
        assert recalibrate_scale_diff != 0
        factor = 2**recalibrate_scale_diff
        # contract largest temporal width of any wavelet by 2**scale_diff,
        # but not above sigma_max/sigma_max_to_min_max_ratio
        new_sigma_min = sigma_min * factor
        if new_sigma_min > sigma_min_max:
            # cap reached: record the last valid scale_diff and stop
            scale_diff_max = scale_diff_prev
            break
        # init for this scale
        set_params(scale_diff, empty=True)
        # halve distance from existing xi_max to .5 (max possible)
        new_xi_max = .5 - (.5 - max(xi1_frs)) / factor
        # new xi_min scale by scale diff
        new_xi_min = xi_min * np.sqrt(factor)
        # logarithmically distribute
        new_xi = np.logspace(np.log10(new_xi_min), np.log10(new_xi_max),
                             len(xi1_frs), endpoint=True)[::-1]
        xi1_frs_new[scale_diff].extend(new_xi)
        new_sigma = np.logspace(np.log10(new_sigma_min),
                                np.log10(sigma_max),
                                len(xi1_frs), endpoint=True)[::-1]
        sigma1_frs_new[scale_diff].extend(new_sigma)
        # derive max dyadic subsampling per (xi, sigma); recalibrated
        # wavelets are no longer CQT-spaced, hence is_cqt=False
        for xi, sigma in zip(new_xi, new_sigma):
            j1_frs_new[scale_diff].append(get_max_dyadic_subsampling(
                xi, sigma, alpha=alpha))
            is_cqt1_frs_new[scale_diff].append(False)
        scale_diff_prev = scale_diff
    return (xi1_frs_new, sigma1_frs_new, j1_frs_new, is_cqt1_frs_new,
            scale_diff_max)
#### helpers #################################################################
def time_reverse_fr(x):
    """Time-reverse in frequency domain by swapping all bins (except dc);
    assumes frequency is along first axis. x(-t) <=> X(-w).
    """
    # Keep bin 0 (dc) fixed; reverse the remaining bins along axis 0.
    return np.concatenate((x[:1], x[1:][::-1]), axis=0)
def _check_runtime_args_jtfs(average, average_fr, out_type, out_3D):
if 'array' in out_type and not average:
raise ValueError("Options `average=False` and `'array' in out_type` "
"are mutually incompatible. "
"Please set out_type='list' or 'dict:list'")
if out_3D and not average_fr:
raise ValueError("`out_3D=True` requires `average_fr=True`.")
supported = ('array', 'list', 'dict:array', 'dict:list')
if out_type not in supported:
raise RuntimeError("`out_type` must be one of: {} (got {})".format(
', '.join(supported), out_type))
def _handle_args_jtfs(out_type):
# subclass's `out_type`
subcls_out_type = out_type.lstrip('dict:')
# Second-order scattering object for the time variable
max_order_tm = 2
return max_order_tm, subcls_out_type
| StarcoderdataPython |
4890080 | <reponame>simonbray/parsec
from __future__ import absolute_import
import os
import sys
import click
import json
from .io import error
from .config import read_global_config, global_config_path, set_global_config_path # noqa, ditto
from .galaxy import get_galaxy_instance, get_toolshed_instance
from parsec import __version__ # noqa, ditto
# Click defaults: options may be set via PARSEC_* env vars, and -h works as a help alias.
CONTEXT_SETTINGS = dict(auto_envvar_prefix='PARSEC', help_option_names=['-h', '--help'])
class Context(object):
    """Shared per-invocation state handed to every parsec command."""

    def __init__(self):
        self.verbose = False
        self.home = os.getcwd()
        # Populated lazily on first access of `global_config`.
        self._global_config = None

    @property
    def global_config(self):
        """The global parsec config, read once and then cached."""
        if self._global_config is None:
            self._global_config = read_global_config()
        return self._global_config

    def log(self, msg, *args):
        """Write a message to stderr, applying %-style args if given."""
        formatted = msg % args if args else msg
        click.echo(formatted, file=sys.stderr)

    def vlog(self, msg, *args):
        """Like `log`, but suppressed unless verbose mode is enabled."""
        if not self.verbose:
            return
        self.log(msg, *args)

    def exit(self, exit_code):
        """Log the exit (verbose only) and terminate with `exit_code`."""
        self.vlog("Exiting parsec with exit code [%d]" % exit_code)
        sys.exit(exit_code)
# Decorator that injects a shared (lazily created) Context into commands.
pass_context = click.make_pass_decorator(Context, ensure=True)
# Absolute path to the bundled `commands/` package directory.
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                          'commands'))
def list_cmds():
    """Return the sorted top-level command names (from cmd_*.py files)."""
    return sorted(
        fname[len("cmd_"):-len(".py")]
        for fname in os.listdir(cmd_folder)
        if fname.startswith('cmd_') and fname.endswith('.py')
    )
def list_subcmds(parent):
    """Return the sorted subcommand names under `commands/<parent>/`."""
    subdir = os.path.join(cmd_folder, parent)
    return sorted(
        fname[:-len(".py")]
        for fname in os.listdir(subdir)
        if fname.endswith('.py') and not fname.startswith('__')
    )
def name_to_command(parent, name):
    """Import the `parsec.commands` module for `name` and return its `cli`.

    Reports via `error` and returns None if the module cannot be imported.
    """
    if sys.version_info[0] == 2:
        # Python 2 `__import__` requires byte-string module names.
        if parent:
            parent = parent.encode('ascii', 'replace')
        name = name.encode('ascii', 'replace')
    if parent:
        mod_name = 'parsec.commands.%s.%s' % (parent, name)
    else:
        mod_name = 'parsec.commands.cmd_' + name
    try:
        mod = __import__(mod_name, None, None, ['cli'])
    except ImportError as e:
        error("Problem loading command %s, exception %s" % (name, e))
        return
    return mod.cli
class ParsecCLI(click.MultiCommand):
    """Click multi-command that resolves parsec subcommands by name."""

    def list_commands(self, ctx):
        # Pre-calculated so the listing works within packaged versions of
        # parsec where the commands directory may not be scannable.
        # Please feel free to fix this?
        return ['config', 'datasets', 'datatypes', 'folders', 'forms',
                'ftpfiles', 'genomes', 'groups', 'histories', 'jobs',
                'libraries', 'quotas', 'roles', 'tool_data', 'tools',
                'users', 'utils', 'visual', 'workflows',
                'toolshed_categories', 'toolshed_repositories',
                'toolshed_tools']

    def get_command(self, ctx, name):
        # Delegate module import + `cli` lookup to the shared resolver.
        return name_to_command(None, name)
@click.command(cls=ParsecCLI, context_settings=CONTEXT_SETTINGS)
@click.version_option(__version__)
@click.option('-v', '--verbose', is_flag=True,
              help='Enables verbose mode.')
@click.option(
    "-g",
    "--galaxy_instance",
    help='Name of instance in %s. This parameter can also be set via the environment variable PARSEC_GALAXY_INSTANCE' % global_config_path(),
    default='__default',
    show_default=True,
    required=True
)
@click.option(
    "--path", "-f",
    help="config file path",
    type=str
)
@pass_context
def parsec(ctx, galaxy_instance, verbose, path=None):
    """Command line wrappers around BioBlend functions. While this sounds
    unexciting, with parsec and jq you can easily build powerful command line
    scripts."""
    # Honor an explicit config file path before loading any instance config.
    if path:
        set_global_config_path(path)
    # The Galaxy and Toolshed lookups are attempted independently: the
    # original note here reads "We abuse this, knowing that calls to one
    # will fail" -- a missing configuration surfaces as TypeError, which
    # is deliberately ignored so the other instance type still works.
    try:
        ctx.gi = get_galaxy_instance(galaxy_instance)
    except TypeError:
        pass
    try:
        ctx.ti = get_toolshed_instance(galaxy_instance)
    except TypeError:
        pass
    ctx.verbose = verbose
def json_loads(data):
    """Load json data, allowing - to represent stdin."""
    if data is None:
        # Preserve historical behavior: no data yields an empty string.
        return ""
    if data == "-":
        return json.load(sys.stdin)
    if os.path.exists(data):
        with open(data, 'r') as handle:
            return json.load(handle)
    # Not a path and not stdin: treat the argument as literal JSON text.
    return json.loads(data)
| StarcoderdataPython |
4938101 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fl_comm_libs/proto/co_proxy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='fl_comm_libs/proto/co_proxy.proto',
package='jdfl',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n!fl_comm_libs/proto/co_proxy.proto\x12\x04jdfl\")\n\x06Status\x12\x0e\n\x06status\x18\x01 \x01(\r\x12\x0f\n\x07\x65rr_msg\x18\x02 \x01(\t\"(\n\x07Request\x12\x0c\n\x04uuid\x18\x01 \x03(\t\x12\x0f\n\x07ip_port\x18\x02 \x01(\t\"6\n\x0bServicePair\x12\x12\n\nlocal_uuid\x18\x01 \x01(\t\x12\x13\n\x0bremote_uuid\x18\x02 \x01(\t\"X\n\x10PairInfoResponse\x12\x1c\n\x06status\x18\x01 \x01(\x0b\x32\x0c.jdfl.Status\x12&\n\x0bservice_map\x18\x02 \x03(\x0b\x32\x11.jdfl.ServicePair2p\n\x0bPairService\x12+\n\x0cRegisterUUID\x12\r.jdfl.Request\x1a\x0c.jdfl.Status\x12\x34\n\x0bGetPairInfo\x12\r.jdfl.Request\x1a\x16.jdfl.PairInfoResponseb\x06proto3')
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='jdfl.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='jdfl.Status.status', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='err_msg', full_name='jdfl.Status.err_msg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=84,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='jdfl.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='jdfl.Request.uuid', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip_port', full_name='jdfl.Request.ip_port', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=126,
)
_SERVICEPAIR = _descriptor.Descriptor(
name='ServicePair',
full_name='jdfl.ServicePair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local_uuid', full_name='jdfl.ServicePair.local_uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remote_uuid', full_name='jdfl.ServicePair.remote_uuid', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=128,
serialized_end=182,
)
_PAIRINFORESPONSE = _descriptor.Descriptor(
name='PairInfoResponse',
full_name='jdfl.PairInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='jdfl.PairInfoResponse.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_map', full_name='jdfl.PairInfoResponse.service_map', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=184,
serialized_end=272,
)
_PAIRINFORESPONSE.fields_by_name['status'].message_type = _STATUS
_PAIRINFORESPONSE.fields_by_name['service_map'].message_type = _SERVICEPAIR
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['ServicePair'] = _SERVICEPAIR
DESCRIPTOR.message_types_by_name['PairInfoResponse'] = _PAIRINFORESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'fl_comm_libs.proto.co_proxy_pb2'
# @@protoc_insertion_point(class_scope:jdfl.Status)
})
_sym_db.RegisterMessage(Status)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _REQUEST,
'__module__' : 'fl_comm_libs.proto.co_proxy_pb2'
# @@protoc_insertion_point(class_scope:jdfl.Request)
})
_sym_db.RegisterMessage(Request)
ServicePair = _reflection.GeneratedProtocolMessageType('ServicePair', (_message.Message,), {
'DESCRIPTOR' : _SERVICEPAIR,
'__module__' : 'fl_comm_libs.proto.co_proxy_pb2'
# @@protoc_insertion_point(class_scope:jdfl.ServicePair)
})
_sym_db.RegisterMessage(ServicePair)
PairInfoResponse = _reflection.GeneratedProtocolMessageType('PairInfoResponse', (_message.Message,), {
'DESCRIPTOR' : _PAIRINFORESPONSE,
'__module__' : 'fl_comm_libs.proto.co_proxy_pb2'
# @@protoc_insertion_point(class_scope:jdfl.PairInfoResponse)
})
_sym_db.RegisterMessage(PairInfoResponse)
_PAIRSERVICE = _descriptor.ServiceDescriptor(
name='PairService',
full_name='jdfl.PairService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=274,
serialized_end=386,
methods=[
_descriptor.MethodDescriptor(
name='RegisterUUID',
full_name='jdfl.PairService.RegisterUUID',
index=0,
containing_service=None,
input_type=_REQUEST,
output_type=_STATUS,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetPairInfo',
full_name='jdfl.PairService.GetPairInfo',
index=1,
containing_service=None,
input_type=_REQUEST,
output_type=_PAIRINFORESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PAIRSERVICE)
DESCRIPTOR.services_by_name['PairService'] = _PAIRSERVICE
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
8114886 | from .is_pytraj_Topology import is_pytraj_Topology
from .extract import extract
from .add import add
from .append_structures import append_structures
from .get import *
from .set import *
from .to_molsysmt_Topology import to_molsysmt_Topology
| StarcoderdataPython |
6440477 | from django.urls import path
from . import views
# URL namespace for this app; reverse routes as 'showcase:home'.
app_name = 'showcase'
# Route table: only the landing page is exposed.
urlpatterns = [
    path('', views.home, name='home'),
]
| StarcoderdataPython |
8040616 | <reponame>pulumi/pulumi-azure-nextgen<filename>sdk/python/pulumi_azure_nextgen/cdn/route.py<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Route']
class Route(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compression_settings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CompressionSettingsArgs']]]]] = None,
custom_domains: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceReferenceArgs']]]]] = None,
enabled_state: Optional[pulumi.Input[Union[str, 'EnabledState']]] = None,
endpoint_name: Optional[pulumi.Input[str]] = None,
forwarding_protocol: Optional[pulumi.Input[Union[str, 'ForwardingProtocol']]] = None,
https_redirect: Optional[pulumi.Input[Union[str, 'HttpsRedirect']]] = None,
link_to_default_domain: Optional[pulumi.Input[Union[str, 'LinkToDefaultDomain']]] = None,
origin_group: Optional[pulumi.Input[pulumi.InputType['ResourceReferenceArgs']]] = None,
origin_path: Optional[pulumi.Input[str]] = None,
patterns_to_match: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
profile_name: Optional[pulumi.Input[str]] = None,
query_string_caching_behavior: Optional[pulumi.Input['AfdQueryStringCachingBehavior']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_name: Optional[pulumi.Input[str]] = None,
rule_sets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceReferenceArgs']]]]] = None,
supported_protocols: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'AFDEndpointProtocols']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Friendly Routes name mapping to the any Routes or secret related information.
API Version: 2020-09-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CompressionSettingsArgs']]]] compression_settings: compression settings.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceReferenceArgs']]]] custom_domains: Domains referenced by this endpoint.
:param pulumi.Input[Union[str, 'EnabledState']] enabled_state: Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
:param pulumi.Input[str] endpoint_name: Name of the endpoint under the profile which is unique globally.
:param pulumi.Input[Union[str, 'ForwardingProtocol']] forwarding_protocol: Protocol this rule will use when forwarding traffic to backends.
:param pulumi.Input[Union[str, 'HttpsRedirect']] https_redirect: Whether to automatically redirect HTTP traffic to HTTPS traffic. Note that this is a easy way to set up this rule and it will be the first rule that gets executed.
:param pulumi.Input[Union[str, 'LinkToDefaultDomain']] link_to_default_domain: whether this route will be linked to the default endpoint domain.
:param pulumi.Input[pulumi.InputType['ResourceReferenceArgs']] origin_group: A reference to the origin group.
:param pulumi.Input[str] origin_path: A directory path on the origin that AzureFrontDoor can use to retrieve content from, e.g. contoso.cloudapp.net/originpath.
:param pulumi.Input[Sequence[pulumi.Input[str]]] patterns_to_match: The route patterns of the rule.
:param pulumi.Input[str] profile_name: Name of the CDN profile which is unique within the resource group.
:param pulumi.Input['AfdQueryStringCachingBehavior'] query_string_caching_behavior: Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[str] route_name: Name of the routing rule.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceReferenceArgs']]]] rule_sets: rule sets referenced by this endpoint.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'AFDEndpointProtocols']]]] supported_protocols: List of supported protocols for this route.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compression_settings'] = compression_settings
__props__['custom_domains'] = custom_domains
__props__['enabled_state'] = enabled_state
if endpoint_name is None and not opts.urn:
raise TypeError("Missing required property 'endpoint_name'")
__props__['endpoint_name'] = endpoint_name
__props__['forwarding_protocol'] = forwarding_protocol
__props__['https_redirect'] = https_redirect
__props__['link_to_default_domain'] = link_to_default_domain
if origin_group is None and not opts.urn:
raise TypeError("Missing required property 'origin_group'")
__props__['origin_group'] = origin_group
__props__['origin_path'] = origin_path
__props__['patterns_to_match'] = patterns_to_match
if profile_name is None and not opts.urn:
raise TypeError("Missing required property 'profile_name'")
__props__['profile_name'] = profile_name
__props__['query_string_caching_behavior'] = query_string_caching_behavior
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_name'] = route_name
__props__['rule_sets'] = rule_sets
__props__['supported_protocols'] = supported_protocols
__props__['deployment_status'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:cdn/latest:Route"), pulumi.Alias(type_="azure-nextgen:cdn/v20200901:Route")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Route, __self__).__init__(
'azure-nextgen:cdn:Route',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Route':
"""
Get an existing Route resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Route(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="compressionSettings")
def compression_settings(self) -> pulumi.Output[Optional[Sequence['outputs.CompressionSettingsResponse']]]:
"""
compression settings.
"""
return pulumi.get(self, "compression_settings")
@property
@pulumi.getter(name="customDomains")
def custom_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceReferenceResponse']]]:
"""
Domains referenced by this endpoint.
"""
return pulumi.get(self, "custom_domains")
@property
@pulumi.getter(name="deploymentStatus")
def deployment_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "deployment_status")
@property
@pulumi.getter(name="enabledState")
def enabled_state(self) -> pulumi.Output[Optional[str]]:
"""
Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "enabled_state")
@property
@pulumi.getter(name="forwardingProtocol")
def forwarding_protocol(self) -> pulumi.Output[Optional[str]]:
"""
Protocol this rule will use when forwarding traffic to backends.
"""
return pulumi.get(self, "forwarding_protocol")
@property
@pulumi.getter(name="httpsRedirect")
def https_redirect(self) -> pulumi.Output[Optional[str]]:
"""
Whether to automatically redirect HTTP traffic to HTTPS traffic. Note that this is a easy way to set up this rule and it will be the first rule that gets executed.
"""
return pulumi.get(self, "https_redirect")
@property
@pulumi.getter(name="linkToDefaultDomain")
def link_to_default_domain(self) -> pulumi.Output[Optional[str]]:
"""
whether this route will be linked to the default endpoint domain.
"""
return pulumi.get(self, "link_to_default_domain")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="originGroup")
def origin_group(self) -> pulumi.Output['outputs.ResourceReferenceResponse']:
"""
A reference to the origin group.
"""
return pulumi.get(self, "origin_group")
@property
@pulumi.getter(name="originPath")
def origin_path(self) -> pulumi.Output[Optional[str]]:
"""
A directory path on the origin that AzureFrontDoor can use to retrieve content from, e.g. contoso.cloudapp.net/originpath.
"""
return pulumi.get(self, "origin_path")
@property
@pulumi.getter(name="patternsToMatch")
def patterns_to_match(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The route patterns of the rule.
"""
return pulumi.get(self, "patterns_to_match")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning status
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="queryStringCachingBehavior")
def query_string_caching_behavior(self) -> pulumi.Output[Optional[str]]:
"""
Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
"""
return pulumi.get(self, "query_string_caching_behavior")
@property
@pulumi.getter(name="ruleSets")
def rule_sets(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceReferenceResponse']]]:
"""
rule sets referenced by this endpoint.
"""
return pulumi.get(self, "rule_sets")
@property
@pulumi.getter(name="supportedProtocols")
def supported_protocols(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of supported protocols for this route.
"""
return pulumi.get(self, "supported_protocols")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
5045257 | import os
from twisted.python import filepath
from twisted.trial import unittest
from .. import database
from ..database import get_db, TARGET_VERSION, dump_db, DBError
class Get(unittest.TestCase):
    """Tests for `get_db`: creation, reopening, bad versions, corruption."""
    def test_create_default(self):
        """A fresh in-memory DB is stamped with TARGET_VERSION."""
        db_url = ":memory:"
        db = get_db(db_url)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
    def test_open_existing_file(self):
        """Creating on disk then reopening yields the same version row."""
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "normal.db")
        db = get_db(fn)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
        # A second open must see the version persisted by the first.
        db2 = get_db(fn)
        rows = db2.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
    def test_open_bad_version(self):
        """An unsupported schema version is rejected with DBError."""
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "old.db")
        db = get_db(fn)
        # Forge an unsupported version number directly in the table.
        db.execute("UPDATE version SET version=999")
        db.commit()
        with self.assertRaises(DBError) as e:
            get_db(fn)
        self.assertIn("Unable to handle db version 999", str(e.exception))
    def test_open_corrupt(self):
        """A file that is not SQLite at all is rejected with DBError."""
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "corrupt.db")
        with open(fn, "wb") as f:
            f.write(b"I am not a database")
        with self.assertRaises(DBError) as e:
            get_db(fn)
        self.assertIn("not a database", str(e.exception))
    def test_failed_create_allows_subsequent_create(self):
        """A failed schema creation must not block creating at the same path."""
        # Break the schema so the first create attempt fails...
        patch = self.patch(database, "get_schema", lambda version: b"this is a broken schema")
        dbfile = filepath.FilePath(self.mktemp())
        self.assertRaises(Exception, lambda: get_db(dbfile.path))
        # ...then restore it; a retry at the same path must now succeed.
        patch.restore()
        get_db(dbfile.path)
    def OFF_test_upgrade(self): # disabled until we add a v2 schema
        basedir = self.mktemp()
        os.mkdir(basedir)
        fn = os.path.join(basedir, "upgrade.db")
        self.assertNotEqual(TARGET_VERSION, 2)
        # create an old-version DB in a file
        db = get_db(fn, 2)
        rows = db.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], 2)
        del db
        # then upgrade the file to the latest version
        dbA = get_db(fn, TARGET_VERSION)
        rows = dbA.execute("SELECT * FROM version").fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]["version"], TARGET_VERSION)
        dbA_text = dump_db(dbA)
        del dbA
        # make sure the upgrades got committed to disk
        dbB = get_db(fn, TARGET_VERSION)
        dbB_text = dump_db(dbB)
        del dbB
        self.assertEqual(dbA_text, dbB_text)
        # The upgraded schema should be equivalent to that of a new DB.
        # However a text dump will differ because ALTER TABLE always appends
        # the new column to the end of a table, whereas our schema puts it
        # somewhere in the middle (wherever it fits naturally). Also ALTER
        # TABLE doesn't include comments.
        if False:
            latest_db = get_db(":memory:", TARGET_VERSION)
            latest_text = dump_db(latest_db)
            with open("up.sql","w") as f: f.write(dbA_text)
            with open("new.sql","w") as f: f.write(latest_text)
            # check with "diff -u _trial_temp/up.sql _trial_temp/new.sql"
            self.assertEqual(dbA_text, latest_text)
class Create(unittest.TestCase):
    """Tests for `database.create_db`."""

    def _fresh_path(self, name):
        # Path to `name` inside a brand-new temporary directory.
        basedir = self.mktemp()
        os.mkdir(basedir)
        return os.path.join(basedir, name)

    def test_memory(self):
        """An in-memory database is created with the full schema."""
        db = database.create_db(":memory:")
        self.assertIn("CREATE TABLE", dump_db(db))

    def test_preexisting(self):
        """create_db refuses to clobber a file that already exists."""
        fn = self._fresh_path("preexisting.db")
        with open(fn, "w"):
            pass
        with self.assertRaises(database.DBAlreadyExists):
            database.create_db(fn)

    def test_create(self):
        """create_db builds a fresh on-disk database with the full schema."""
        fn = self._fresh_path("created.db")
        self.assertIn("CREATE TABLE", dump_db(database.create_db(fn)))
class Open(unittest.TestCase):
    """Tests for `database.open_existing_db`."""

    def _db_path(self):
        # Path for a database file inside a fresh temporary directory.
        basedir = self.mktemp()
        os.mkdir(basedir)
        return os.path.join(basedir, "created.db")

    def test_open(self):
        """A created DB can be reopened and still exposes its schema."""
        fn = self._db_path()
        created = database.create_db(fn)
        self.assertIn("CREATE TABLE", dump_db(created))
        reopened = database.open_existing_db(fn)
        self.assertIn("CREATE TABLE", dump_db(reopened))

    def test_doesnt_exist(self):
        """Opening a nonexistent file raises DBDoesntExist."""
        with self.assertRaises(database.DBDoesntExist):
            database.open_existing_db(self._db_path())
| StarcoderdataPython |
1795201 | <reponame>custa/python-skeleton
import requests
url = 'https://api.github.com/users/octocat'

# Fetch the GitHub user profile; print the raw body on a non-200 status,
# and print any exception (network failure, etc.) instead of crashing.
try:
    response = requests.get(url)
    if response.status_code != 200:
        print("{}".format(response.content))
except Exception as exc:
    print("{}".format(exc))
| StarcoderdataPython |
9684241 | <reponame>mudam/python-erply-api
import json
# JSON-serialized Erply API error payload: a failed `getSalesReport` call
# (responseStatus 'error', errorCode 1016 on field 'dateStart'), zero records.
data = json.dumps({'status': {'recordsTotal': 0, 'request': 'getSalesReport', 'generationTime': 0.13462495803833, 'recordsInResponse': 0, 'requestUnixTime': 1469040297, 'errorCode': 1016, 'errorField': 'dateStart', 'responseStatus': 'error'}, 'records': None})
| StarcoderdataPython |
8079499 |
class VeristandError(Exception):
    """
    The base class for all VeriStand errors.

    Raised directly when a more specific error type cannot be determined;
    every public error class in this module derives from it.
    """
    pass
class TranslateError(VeristandError):
    """Signals that a Python function failed to translate to a VeriStand
    real-time sequence."""

    pass
class UnexpectedError(VeristandError):
    """Signals that the state of the operation cannot be determined."""

    pass
class VeristandNotImplementedError(VeristandError):
    """Signals that the requested functionality is not available yet."""

    def __init__(self):
        """Build the generic error for not-yet-implemented features."""
        self.message = "Not Implemented"
        super(VeristandNotImplementedError, self).__init__(self.message)
class _StopTaskException(Exception):
    """Private exception type used internally by this module."""

    pass
class RunError(VeristandError):
    """Raised at the end of execution if an RT sequence called :any:`generate_error`."""

    def __init__(self, error):
        assert isinstance(error, SequenceError)
        self.error = error

    def get_all_errors(self):
        """Yield every error reported during execution.

        Yields:
            :class:`SequenceError`: each error generated during execution,
            starting with the most recent and following the inner-error chain.
        """
        current = self.error
        while current:
            yield current
            current = current.inner_error

    @classmethod
    def RunErrorFactory(cls, error):
        # Imported lazily to avoid a circular import at module load time.
        from niveristand.clientapi._realtimesequencedefinitionapi.erroraction import ErrorAction
        assert isinstance(error, SequenceError)
        # "Continue" errors mean the sequence finished; anything else aborted it.
        if error.error_action is ErrorAction.ContinueSequenceExecution:
            return RunFailedError(error)
        return RunAbortedError(error)
class RunFailedError(RunError):
    """
    Raised by :any:`run_py_as_rtseq` to report that the sequence failed.

    The sequence itself ran to completion, but :any:`generate_error` was
    called with :any:`ErrorAction.ContinueSequenceExecution`.
    """

    def __init__(self, error):
        super(RunFailedError, self).__init__(error)
class RunAbortedError(RunError):
    """
    Raised by :any:`run_py_as_rtseq` to report that the sequence failed.

    The sequence ran, but :any:`generate_error` was called with
    :any:`ErrorAction.StopSequence` or :any:`ErrorAction.AbortSequence`.
    """

    def __init__(self, error):
        super(RunAbortedError, self).__init__(error)
class SequenceError(VeristandError):
    """Raised by :any:`generate_error` to report a sequence failure."""

    def __init__(self, error_code, message, error_action):
        super(SequenceError, self).__init__(message)
        self.error_code = error_code
        self.error_action = error_action
        self.message = message
        self._inner_error = None

    @property
    def inner_error(self):
        """
        Returns the error generated before the most recent error, if any, or `None`.

        Returns:
            :any:`SequenceError`: the previous error generated by this sequence.

        Real-time sequences report only the last error the sequence generates. If you want to see a list of all the
        inner errors, use :any:`RunError.get_all_errors`.
        """
        return self._inner_error

    @inner_error.setter
    def inner_error(self, value):
        assert isinstance(value, SequenceError) or value is None
        # An inner error may only be attached once.
        assert self._inner_error is None
        self._inner_error = value

    @property
    def is_fatal(self):
        """
        Returns whether or not any error causes the sequence to stop.

        Returns:
            bool: True if the error is :any:`ErrorAction.AbortSequence` or :any:`ErrorAction.StopSequence`, false if
            the error is :any:`ErrorAction.ContinueSequenceExecution`.
        """
        from niveristand.clientapi._realtimesequencedefinitionapi.erroraction import ErrorAction
        # bool() guards against returning None when there is no inner error
        # (the original `a or (b and c)` could evaluate to None).
        return bool(
            self.error_action in (ErrorAction.AbortSequence, ErrorAction.StopSequence)
            or (self._inner_error is not None and self._inner_error.is_fatal)
        )

    @property
    def should_raise(self):
        """
        Determines whether or not this error raises an exception.

        Returns:
            bool: False if the error is :any:`ErrorAction.ContinueSequenceExecution` with an error code of 0. Otherwise,
            this function returns True.
        """
        from niveristand.clientapi._realtimesequencedefinitionapi.erroraction import ErrorAction
        # If the error code was 0 in a Continue error then don't raise.
        # BUGFIX: the original used `self.error_code is 0` (identity), which is
        # implementation-defined for ints and a SyntaxWarning on modern CPython.
        return not (self.error_action is ErrorAction.ContinueSequenceExecution and self.error_code == 0)
| StarcoderdataPython |
12859085 | <filename>serializers_test/avro_avg.py
import avro.schema
import json
import fastavro
# Avro schema for a "Meme" record with an embedded "PostUser" record.
# NOTE(review): for ["null", ...] union fields the default is the *string*
# "null", not a JSON null (Python None) — Avro tooling usually expects None
# here; confirm against the writer's intent before changing the schema.
# NOTE(review): "botom_string" looks like a typo for "bottom_string", but the
# field name is part of the wire schema, so it is left untouched.
SCHEMA = {
    "namespace": "avg_obj",
    "type": "record",
    "name": "Meme",
    "fields": [
        {"name": "user", "type": {
            "type": "record",
            "name": "PostUser",
            "fields": [
                {"name": "user_id", "type": "string"},
                {"name": "first_name", "type": ["null", "string"], "default": "null"},
                {"name": "last_name", "type": ["null", "string"], "default": "null"},
                {"name": "user_type", "type": ["null",
                                               {"type": "enum",
                                                "name": "UserType",
                                                "symbols": ["FREE", "REGULAR", "PREMIUM"]
                                                }], "default": "null"},
            ]}},
        {"name": "title", "type": ["null", "string"], "default": "null"},
        {"name": "content", "type": ["null", "bytes"], "default": "null"},
        {"name": "top_string", "type": ["null", "string"], "default": "null"},
        {"name": "botom_string", "type": ["null", "string"], "default": "null"},
        {"name": "likes", "type": ["null", "long"], "default": 0},
        {"name": "hates", "type": ["null", "long"], "default": 0},
    ]
}
# Validate/normalise the schema once at import time via fastavro.
avro_schema = fastavro.parse_schema(SCHEMA)
| StarcoderdataPython |
11340172 | <reponame>willynpi/django-tutorial-for-programmers<gh_stars>100-1000
import logging
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.shortcuts import redirect, render
from django.views.decorators.http import require_http_methods
from events.forms import EventForm
from .forms import MenuItemFormSet, StoreForm
from .models import Store
logger = logging.getLogger(__name__)
def store_list(request):
    """Render the page listing every store."""
    return render(request, 'stores/store_list.html', {'stores': Store.objects.all()})
def store_detail(request, pk):
    """Render one store's page together with an inline event-creation form."""
    try:
        store = Store.objects.get(pk=pk)
    except Store.DoesNotExist:
        raise Http404
    form = EventForm(initial={'store': store}, submit_title='建立活動')
    form.helper.form_action = reverse('event_create')
    context = {'store': store, 'event_form': form}
    return render(request, 'stores/store_detail.html', context)
def store_create(request):
    """Create a store: GET shows the form, POST validates and saves it."""
    if request.method != 'POST':
        return render(request, 'stores/store_create.html',
                      {'form': StoreForm(submit_title='建立')})
    form = StoreForm(request.POST, submit_title='建立')
    if not form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, 'stores/store_create.html', {'form': form})
    store = form.save(commit=False)
    if request.user.is_authenticated():
        store.owner = request.user
    store.save()
    logger.info('New store {store} created by {user}!'.format(
        store=store, user=request.user
    ))
    return redirect(store.get_absolute_url())
def store_update(request, pk):
    """Edit a store and its menu items through an inline formset."""
    try:
        store = Store.objects.get(pk=pk)
    except Store.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        form = StoreForm(request.POST, instance=store, submit_title='更新')
        formset = MenuItemFormSet(request.POST, instance=store)
        if form.is_valid() and formset.is_valid():
            store = form.save()
            formset.save()
            return redirect(store.get_absolute_url())
    else:
        form = StoreForm(instance=store, submit_title=None)
        form.helper.form_tag = False  # the template renders its own <form> tag
        formset = MenuItemFormSet(instance=store)
    context = {'form': form, 'store': store, 'menu_item_formset': formset}
    return render(request, 'stores/store_update.html', context)
@login_required
@require_http_methods(['POST', 'DELETE'])
def store_delete(request, pk):
    """Delete a store when the requesting user is allowed to."""
    try:
        store = Store.objects.get(pk=pk)
    except Store.DoesNotExist:
        raise Http404
    if not store.can_user_delete(request.user):
        return HttpResponseForbidden()
    store.delete()
    # AJAX callers get an empty 200; regular requests go back to the list.
    return HttpResponse() if request.is_ajax() else redirect('store_list')
| StarcoderdataPython |
3300738 | import errno
import os
import shutil
__author__ = '<NAME>'
__all__ = ['mkdir_p', 'remove_childreen']
def mkdir_p(path):
    '''
    Recursively create every missing directory in a given path.

    Mirrors the shell command ``mkdir -p``: it is a no-op when the target
    directory already exists, but still raises when the path exists and is
    not a directory, or on any other OS error.

    Parameters
    ----------
    path : str
        Path to the final directory to create.

    Returns
    -------
    None
    '''
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow "already exists" only when it really is a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def remove_childreen(path):
    '''
    Delete all the children folders/files inside a directory.

    The directory itself is kept.  Failures on individual entries are
    printed and skipped, making this a best-effort cleanup.

    Parameters
    ----------
    path : str
        Path to the directory to clean up.

    Returns
    -------
    None
    '''
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        try:
            if os.path.isfile(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            print(e)
9677849 | <filename>sdk/python/pulumi_azure_nextgen/vmwarecloudsimple/v20190401/__init__.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .dedicated_cloud_node import *
from .dedicated_cloud_service import *
from .get_dedicated_cloud_node import *
from .get_dedicated_cloud_service import *
from .get_virtual_machine import *
from .virtual_machine import *
from ._inputs import *
from . import outputs
| StarcoderdataPython |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
# NOTE(review): this `app` is immediately shadowed by the one imported from
# `api` below, so these two lines appear to be dead setup code.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/flask1'
# db = SQLAlchemy(app)
# The real application and database objects come from the `api` package.
from api import app, db
# Wire up Flask-Migrate so `python <this file> db <command>` manages migrations.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# from models import *
# class User(db.Model):
#     id = db.Column(db.Integer, primary_key=True)
#     name = db.Column(db.String(128))
if __name__ == '__main__':
    manager.run()
9734058 | """
The :mod:`ramp_database.model` defines the database structure which is used for the
RAMP events.
"""
from .base import * # noqa
from .user import * # noqa
from .fold import * # noqa
from .team import * # noqa
from .score import * # noqa
from .event import * # noqa
from .problem import * # noqa
from .workflow import * # noqa
from .datatype import * # noqa
from .submission import * # noqa
| StarcoderdataPython |
3328063 | <reponame>BatedUrGonnaDie/salty_bot
#! /usr/bin/env python3.7
import modules.apis.api_base as api
class NewbsAPI(api.API):
    """Thin client for the leagueofnewbs.com JSON API."""

    def __init__(self, headers=None, cookies=None):
        base_url = "https://leagueofnewbs.com/api"
        super(NewbsAPI, self).__init__(base_url, headers=headers, cookies=cookies)

    def add_textutil(self, channel, text_type, data, **kwargs):
        """POST a new text-util entry of *text_type* for *channel*."""
        endpoint = "/users/{0}/{1}s".format(channel, text_type)
        return self.post(endpoint, data, **kwargs)

    def get_textutil(self, channel, text_type, limit=1, **kwargs):
        """GET up to *limit* text-util entries of *text_type* for *channel*."""
        endpoint = "/users/{0}/{1}s?limit={2}".format(channel, text_type, limit)
        return self.get(endpoint, **kwargs)

    def get_song(self, channel, **kwargs):
        """GET the song entries for *channel*."""
        endpoint = "/users/{}/songs".format(channel)
        return self.get(endpoint, **kwargs)
| StarcoderdataPython |
8185341 | """
Radial distribution function related describers
"""
from ._rdf import RadialDistributionFunction # noqa
__all__ = ["RadialDistributionFunction"]
| StarcoderdataPython |
279483 | <gh_stars>10-100
import numpy as np
import torch
import torch
from functools import partial
from dataset_specifications.dataset import Dataset
class WmixSet(Dataset):
    """Synthetic regression dataset where y | x is a mixture of shifted Weibulls.

    x has dimension 3*n_comp and packs, per component: an offset, an
    unnormalized weight, and a shape modifier (see the comment block below).
    """

    def __init__(self, n_comp):
        super().__init__()
        # Fixed number of samples drawn for each split.
        self.n_samples = {
            "train": 50000,
            "val": 5000,
            "test": 5000,
        }
        self.n_comp = n_comp
        # 3 parameters per mixture component.
        self.x_dim = 3*n_comp
        self.y_dim = 1
        self.name = "wmix{}".format(self.x_dim)
        # Support of the target variable y.
        self.support = (0.,10.)

    # Mixture of n_comp Weibull distributions
    # x is parameters of mixture, length 3*n*comp
    # x[1:n_comp] are component offsets (added onto samples)
    # x[n_comp:2n_comp] are component weights (unnormalized)
    # x[2n_comp:3n_comp] are shape modifiers for the Weibull components
    # The shape parameter of each component is 1 + shape_modifier
    def sample_ys(self, xs):
        """Draw one y per row of xs by sampling the mixture encoded in that row."""
        comp_offsets= xs[:,:self.n_comp]
        weights = xs[:,self.n_comp:2*self.n_comp]
        comp_shape_mods = xs[:,2*self.n_comp:]

        # Component probabilities (normalized weights)
        comp_probs = weights / np.sum(weights, axis=1, keepdims=True)
        # Sample one component index per row from its categorical distribution.
        comp_i = np.apply_along_axis(
            (lambda probs: np.random.choice(self.n_comp, size=1, p=probs)),
            1, comp_probs) # Axis should be 1, see numpy docs
        # Shape (n, 1)

        # Get values for chosen component (component with sampled index)
        offsets = np.take_along_axis(comp_offsets, comp_i, 1)
        shape_mods = np.take_along_axis(comp_shape_mods, comp_i, 1)

        # Weibull shape is 1 + modifier; scale is the numpy default (1).
        shape_params = 1. + shape_mods
        w_samples = np.random.weibull(shape_params)
        # Shape (n, 1)
        # Offsets are scaled by 5 before being added to the Weibull draw.
        ys = w_samples + 5.*offsets
        return ys

    def sample(self, n):
        """Draw n (x, y) pairs; returns an array of shape (n, x_dim + 1)."""
        # xs is from Uniform(0,1)^(dim(x))
        xs = np.random.uniform(low=0.0, high=1.0, size=(n, self.x_dim))
        ys = self.sample_ys(xs)
        return np.concatenate((xs, ys), axis=1)

    def get_pdf(self, x):
        """Return a closure computing the true conditional density p(y | x)."""
        # Perform pdf computation in pytorch
        if type(x) == torch.Tensor:
            x = x.to("cpu")
        else:
            x = torch.tensor(x)
        comp_offsets = x[:self.n_comp]
        weights = x[self.n_comp:2*self.n_comp]
        comp_shape_mods = x[2*self.n_comp:]

        comp_probs = weights / torch.sum(weights)
        shapes = 1.0 + comp_shape_mods
        scales = torch.ones_like(shapes)
        w_dists = torch.distributions.weibull.Weibull(scales, shapes)

        def pdf(y):
            # Undo the 5*offset shift applied in sample_ys.
            no_offsets = y - 5.0*comp_offsets
            positive = (no_offsets >= 0.0)
            # Only use probability density for positive samples (within support)
            # Pytorch gives positive density even outside support for some reason
            log_probs = w_dists.log_prob(no_offsets)
            filtered_probs = torch.exp(log_probs[positive])
            pd = torch.sum(filtered_probs * comp_probs[positive])
            return pd

        return pdf
# Registry mapping dataset names ("wmix3", "wmix6", ...) to constructors
# with the component count pre-bound.
WMIX_SET_DICT = {}
for _n_comp in (1, 2, 3, 5):
    WMIX_SET_DICT["wmix{}".format(3 * _n_comp)] = partial(WmixSet, _n_comp)
| StarcoderdataPython |
57270 | import torch
import torch.nn as nn
from lightconvpoint.nn.deprecated.module import Module as LCPModule
from lightconvpoint.nn.deprecated.convolutions import FKAConv
from lightconvpoint.nn.deprecated.pooling import max_pool
from lightconvpoint.spatial.deprecated import sampling_quantized, knn, upsample_nearest
from lightconvpoint.utils.functional import batch_gather
class ResidualBlock(LCPModule):
    """Bottleneck residual block: 1x1 conv -> point conv -> 1x1 conv + shortcut.

    When the point convolution reduces the number of points, the shortcut
    branch is max-pooled over the same neighborhoods before being added.
    """

    def __init__(self, in_channels, out_channels, kernel_size, conv_layer, sampling, spatial_search, ratio, neighborhood_size):
        super().__init__()

        # Bottleneck: halve the channel count around the point convolution.
        self.cv0 = nn.Conv1d(in_channels, in_channels//2, 1)
        self.bn0 = nn.BatchNorm1d(in_channels//2)
        self.cv1 = conv_layer(in_channels//2, in_channels//2, kernel_size, bias=False, sampling=sampling,
            spatial_search=spatial_search, ratio=ratio, neighborhood_size=neighborhood_size)
        self.bn1 = nn.BatchNorm1d(in_channels//2)
        self.cv2 = nn.Conv1d(in_channels//2, out_channels, 1)
        self.bn2 = nn.BatchNorm1d(out_channels)
        self.activation = nn.ReLU()
        # Project the residual only when the channel count changes.
        self.shortcut = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
        self.ratio = ratio

    def forward_without_features(self, pos, support_points=None, indices=None):
        # Spatial-only pass: delegate support-point/index computation to cv1.
        return self.cv1(None, pos)

    def forward_with_features(self, x, pos, support_points, indices):
        x_short = x
        x = self.activation(self.bn0(self.cv0(x)))
        x = self.activation(self.bn1(self.cv1(x, pos, support_points, indices)))
        x = self.bn2(self.cv2(x))

        # Downsample the shortcut when cv1 reduced the number of points.
        if x_short.shape[2] != x.shape[2]:
            x_short = max_pool(x_short, indices)
        x_short = self.shortcut(x_short)

        return self.activation(x + x_short)
class FKAConvNetwork(LCPModule):
    """FKAConv backbone: a 4-stage residual encoder with an optional decoder.

    With ``segmentation=True`` the network produces per-point scores via
    skip connections and nearest-neighbor upsampling; otherwise it mean-pools
    the deepest features into a single per-cloud prediction.
    """

    def __init__(self, in_channels, out_channels, segmentation=False, hidden=64, conv_layer=FKAConv ,sampling=sampling_quantized, neighborhood_search=knn):
        super().__init__()

        self.lcp_preprocess = True
        self.segmentation = segmentation

        # Stem convolution at full resolution.
        self.cv0 = conv_layer(in_channels, hidden, 16, sampling=sampling,
            neighborhood_search=neighborhood_search, ratio=1, neighborhood_size=16)
        self.bn0 = nn.BatchNorm1d(hidden)

        # Encoder: each stage halves... (ratio=0.25 keeps a quarter of the
        # points) and doubles the channel count.
        self.resnetb01 = ResidualBlock(hidden, hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
        self.resnetb10 = ResidualBlock(hidden, 2*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
        self.resnetb11 = ResidualBlock(2*hidden, 2*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
        self.resnetb20 = ResidualBlock(2*hidden, 4*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
        self.resnetb21 = ResidualBlock(4*hidden, 4*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
        self.resnetb30 = ResidualBlock(4*hidden, 8*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
        self.resnetb31 = ResidualBlock(8*hidden, 8*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
        self.resnetb40 = ResidualBlock(8*hidden, 16*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
        self.resnetb41 = ResidualBlock(16*hidden, 16*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
        if self.segmentation:
            # Decoder 1x1 convs; input widths include the concatenated skip features.
            self.cv5 = nn.Conv1d(32*hidden, 16 * hidden, 1)
            self.bn5 = nn.BatchNorm1d(16*hidden)
            self.cv3d = nn.Conv1d(24*hidden, 8 * hidden, 1)
            self.bn3d = nn.BatchNorm1d(8 * hidden)
            self.cv2d = nn.Conv1d(12 * hidden, 4 * hidden, 1)
            self.bn2d = nn.BatchNorm1d(4 * hidden)
            self.cv1d = nn.Conv1d(6 * hidden, 2 * hidden, 1)
            self.bn1d = nn.BatchNorm1d(2 * hidden)
            self.cv0d = nn.Conv1d(3 * hidden, hidden, 1)
            self.bn0d = nn.BatchNorm1d(hidden)
            self.fcout = nn.Conv1d(hidden, out_channels, 1)
        else:
            # NOTE(review): 1024 == 16*hidden only when hidden=64 — confirm
            # before using a different `hidden` in classification mode.
            self.fcout = nn.Linear(1024, out_channels)

        self.dropout = nn.Dropout(0.5)
        self.activation = nn.ReLU()

    def forward_without_features(self, pos, support_points=None, indices=None):
        """Precompute support points and neighborhood indices for `pos` only."""
        _, _, ids_conv0 = self.cv0(None, pos)
        _, support1, ids10 = self.resnetb10(None, pos)
        _, _, ids11 = self.resnetb11(None, support1[0])
        _, support2, ids20 = self.resnetb20(None, support1[0])
        _, _, ids21 = self.resnetb21(None, support2[0])
        _, support3, ids30 = self.resnetb30(None, support2[0])
        _, _, ids31 = self.resnetb31(None, support3[0])
        _, support4, ids40 = self.resnetb40(None, support3[0])
        _, _, ids41 = self.resnetb41(None, support4[0])
        support_points = support1 + support2 + support3 + support4
        indices = ids_conv0 + ids10 + ids11 + ids20 + ids21 + ids30 + ids31 + ids40 + ids41
        if self.segmentation:
            # Upsampling index maps for the decoder (coarse -> fine).
            ids3u = upsample_nearest(support4[0], support3[0])
            ids2u = upsample_nearest(support3[0], support2[0])
            ids1u = upsample_nearest(support2[0], support1[0])
            ids0u = upsample_nearest(support1[0], pos)
            indices += [ids3u, ids2u, ids1u, ids0u]
        return None, support_points, indices

    def forward_with_features(self, x, pos, support_points=None, indices=None):
        """Run the actual feature computation given precomputed spatial data."""
        if (support_points is None) or (indices is None):
            _, indices, support_points = self.compute_indices(pos)

        if self.segmentation:
            ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41, ids3u, ids2u, ids1u, ids0u = indices
        else:
            ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41 = indices
        support1, support2, support3, support4 = support_points

        # Encoder.
        x0 = self.activation(self.bn0(self.cv0(x, pos, pos, ids0)))
        x0 = self.resnetb01(x0, pos, pos, ids0)
        x1 = self.resnetb10(x0, pos, support1, ids10)
        x1 = self.resnetb11(x1, support1, support1, ids11)
        x2 = self.resnetb20(x1, support1, support2, ids20)
        x2 = self.resnetb21(x2, support2, support2, ids21)
        x3 = self.resnetb30(x2, support2, support3, ids30)
        x3 = self.resnetb31(x3, support3, support3, ids31)
        x4 = self.resnetb40(x3, support3, support4, ids40)
        x4 = self.resnetb41(x4, support4, support4, ids41)
        if self.segmentation:
            # Broadcast the global max feature and decode with skip connections.
            x5 = x4.max(dim=2, keepdim=True)[0].expand_as(x4)
            x4 = self.activation(self.bn5(self.cv5(torch.cat([x4, x5], dim=1))))
            xout = batch_gather(x4, 2, ids3u)
            xout = self.activation(self.bn3d(self.cv3d(torch.cat([xout, x3], dim=1))))
            xout = batch_gather(xout, 2, ids2u)
            xout = self.activation(self.bn2d(self.cv2d(torch.cat([xout, x2], dim=1))))
            xout = batch_gather(xout, 2, ids1u)
            xout = self.activation(self.bn1d(self.cv1d(torch.cat([xout, x1], dim=1))))
            xout = batch_gather(xout, 2, ids0u)
            xout = self.activation(self.bn0d(self.cv0d(torch.cat([xout, x0], dim=1))))
            xout = self.dropout(xout)
            xout = self.fcout(xout)
        else:
            # Classification head: global average pooling over points.
            xout = x4.mean(dim=2)
            xout = self.dropout(xout)
            xout = self.fcout(xout)
        return xout
4926614 | import uuid
from typing import Dict, List, Union
from superai.data_program.base import DataProgramBase
# TODO: refactor api and add to client mixin
class TaskInstance(DataProgramBase):
    """Client-side handle for a task instance created from a task template.

    Constructing the object immediately registers a task instance with the
    backing service and stores the returned ``id``.
    """

    def __init__(
        self,
        task_template_id: Union[int, float],
        quality=None,
        cost=None,
        latency=None,
        name=None,
        description=None,
        **kwargs,
    ):
        super().__init__()
        self.task_template_id = task_template_id
        self.quality = quality
        self.cost = cost
        self.latency = latency
        # Attach any extra keyword arguments directly as attributes.
        self.__dict__.update(kwargs)
        self.__task_instance_object = self.__create_task_instance(
            task_template_id=task_template_id,
            performance={"quality": quality, "cost": cost, "latency": latency},
            name=name,
            description=description,
        )
        assert "id" in self.__task_instance_object
        self.task_instance_id = self.__task_instance_object["id"]

    def __create_task_instance(
        self, task_template_id: Union[int, float], performance: Dict = None, name: str = None, description: str = None
    ) -> Dict:
        """
        Create a task instance via the API.

        :param task_template_id: template the instance is created from
        :param performance: quality/cost/latency targets
        :param name: human-readable name; autogenerated when omitted
        :param description: optional description
        :return: the created task instance as returned by the API
        """
        body_json = {}
        if performance is not None:
            body_json["performance"] = performance
        # BUGFIX: the original only set a name when `name` was None (silently
        # dropping a caller-supplied name) and called uuid.uuid5() without its
        # required (namespace, name) arguments, which raises TypeError.
        if name is not None:
            body_json["name"] = name
        else:
            body_json["name"] = f"TaskName-{uuid.uuid4()}"
        if description is not None:
            body_json["description"] = description
        uri = f"task_template/{task_template_id}/instance"
        return self._request(uri, method="POST", body_params=body_json, required_api_key=False)

    def process(self, inputs: List[Dict]) -> Dict:
        """
        Submit *inputs* to this task instance for processing.

        :param inputs: list of input payloads
        :return: the API response for the processing request
        """
        body_json = {"inputs": inputs, "job_type": "normal"}
        if self.quality is not None:
            body_json["quality"] = self.quality
        if self.cost is not None:
            body_json["cost"] = self.cost
        if self.latency is not None:
            body_json["latency"] = self.latency
        uri = f"task_instance/{self.task_instance_id}/process"
        return self._request(uri, method="POST", body_params=body_json, required_api_key=False)
| StarcoderdataPython |
9775746 | <gh_stars>0
import numpy as np
import pandas as pd
import statsmodels.api as sm
# import class
import heat.modeling
# TODO create a dummy paraheat object from dummy data for testing purposes
# rng = np.random.default_rng()
# df = pd.DataFrame(rng.integers(0, 100, size=(100, 5)), columns=list('ABCDE'))
# for now use crd data
# NOTE(review): hard-coded absolute paths — these loads only succeed on the
# original author's machine; consider parametrising via config or fixtures.
bike_plow_whigh = pd.read_csv('/Users/lukasgehrke/Documents/temp/chatham/crd_gaze_phys-LOW_work-HIGH_equip-Bike_all_good_s.csv')
tread_plow_whigh = pd.read_csv('/Users/lukasgehrke/Documents/temp/chatham/crd_gaze_phys-LOW_work-HIGH_equip-Treadmill_all_good_s.csv')
# Keep only participant 2's X/Y gaze coordinates.
tmp_p = tread_plow_whigh[tread_plow_whigh['pID'] == 2]
tmp_p = tmp_p[['X', 'Y']]
# TODO paraheat object from dummy data
def test_ttest_per_bin():
    """Smoke-test ttest_per_bin on a two-condition design.

    NOTE(review): `df` is not defined in this module (the dummy-dataframe
    setup above is commented out), so this test raises NameError as-is.
    `ttest_per_bin` is presumably provided by `heat.modeling` — confirm.
    """
    cond1 = "1"
    cond2 = "2"
    # Label the first half of the rows cond1 and the second half cond2.
    c1 = pd.Series([cond1] * int(df.shape[0]/2))
    c2 = pd.Series([cond2] * int(df.shape[0]/2))
    df["conds"] = c1.append(c2, ignore_index=True)

    # make design matrix
    # design = pd.concat([df, c1.append(c2)], axis=0)
    # design = df.insert(5, 'condition', dmatrix, True)
    # design.columns = ['A', 'B', 'C', 'D', 'design']

    res = ttest_per_bin(df, "conds", cond1, cond2)
def test_OLS():
    """fit_OLS returns intercept plus one parameter per regressor.

    NOTE(review): `vars` shadows the builtin of the same name.
    """
    df = sm.datasets.get_rdataset("Guerry", "HistData").data # use crd data for testing !!
    vars = ['Department', 'Lottery', 'Literacy', 'Wealth', 'Region']
    df = df[vars]
    df = df.dropna()
    pars, rsq = fit_OLS(df, 'Lottery ~ Literacy + Wealth')
    assert pars.shape[0] == 3, "intercept plus number of regressors is returned"
def test_RLM():
    """fit_robust_lm returns intercept plus one parameter per regressor.

    NOTE(review): `vars` shadows the builtin of the same name.
    """
    df = sm.datasets.get_rdataset("Guerry", "HistData").data # use crd data for testing !!
    vars = ['Department', 'Lottery', 'Literacy', 'Wealth', 'Region']
    df = df[vars]
    df = df.dropna()
    pars, bse = fit_robust_lm(df, 'Lottery ~ Literacy + Wealth')
    assert pars.shape[0] == 3, "intercept plus number of regressors is returned"
def test_fit_lm_per_bin():
    """Smoke-test fit_lm_per_bin with a random regressor.

    NOTE(review): `crd_1s` and `reg` are not defined anywhere in this module
    (the `reg` construction is commented out), so this test raises NameError
    as written.
    """
    # generate random regressor
    rng = np.random.default_rng()
    # reg = pd.DataFrame(rng.integers(0, 100, size=(crd_1s.shape[0], 1)), columns=['reg'])
    # h.heatmap.statistic.ravel().shape
    crd_1s.reset_index(drop=True, inplace=True)
    data = pd.concat([crd_1s, reg], axis=1)
    pars, bse = fit_lm_per_bin(data, 'pixel ~ reg')
| StarcoderdataPython |
9618556 | """Выбор аэродромов для миссии
В миссии у сторон по 3 фронтовых филда и по 4 тыловых.
Фронтовые (по 3) выбираются по алгоритму:
* Все аэродромы фильтруются по нахождению в прифронтовой зоне
* Полученный список сортируется по общему количеству самолётов
* Из списка берутся первый, последний и случайный аэродром
Тыловые (по 4) выбираются по алгоритму:
* Все аэродромы фильтруются по нахождению вне прифронтовой зоны
* Список делится на 2 по дистанции до ближайшего склада
* Из списка аэродромов каждого склада выбираются: наиболее заполненный и случайный"""
from __future__ import annotations
from random import choice, shuffle
from geometry import remove_too_close
from configs import Main
from model import ManagedAirfield
from utils import compare_float, cmp_to_key
def _front_airfields_comparator(airfield1: ManagedAirfield, airfield2: ManagedAirfield):
    """Compare front-line airfields for selection (ordering by power)."""
    return compare_float(airfield1.power, airfield2.power)
def _rear_airfields_comparator(airfield1: ManagedAirfield, airfield2: ManagedAirfield):
    """Compare rear airfields for selection (ordering by plane count)."""
    return compare_float(airfield1.planes_count, airfield2.planes_count)
class AirfieldsSelector:
    """Selects airfields for the mission depending on their current state."""

    def __init__(self, main: Main):
        self._main = main

    def select_rear(self, influence: list, front_area: list, airfields: list, warehouses: list) -> list:
        """Select the rear airfields: per warehouse, the fullest one plus a random one."""
        country_warehouses = list()
        for warehouse in warehouses:
            if warehouse.is_in_area(influence):
                country_warehouses.append(warehouse)
        # Rear = inside the influence area but not adjacent to the front line.
        rear_airfields = list(x for x in airfields if not x.is_related_to_area(front_area) and x.is_in_area(influence))
        if not rear_airfields:
            raise NameError('Невозможно выбрать тыловой аэродром')
        # Group rear airfields by their closest warehouse.
        warehouse_airfields = dict()
        for airfield in rear_airfields:
            key = airfield.get_closest(country_warehouses).name
            if key not in warehouse_airfields:
                warehouse_airfields[key] = list()
            warehouse_airfields[key].append(airfield)
        result = list()
        for key in warehouse_airfields:
            # Take the airfield with the most planes, then a random one.
            warehouse_airfields[key].sort(
                key=cmp_to_key(_rear_airfields_comparator))
            result.append(warehouse_airfields[key].pop())
            shuffle(warehouse_airfields[key])
            result.append(warehouse_airfields[key].pop())
        return result

    def select_front(self, divisions: list, influence: list, front_area: list, airfields: list) -> list:
        """Select the front-line airfields: strongest, weakest, and one not too close."""
        # TODO: figure out how to make the selection depend on the divisions
        if airfields:
            front = list(x for x in airfields if x.is_related_to_area(front_area) and x.is_in_area(influence))
            # d = {x.name: x.to_dict() for x in front}
            if front:
                result = list()
                front.sort(key=cmp_to_key(_front_airfields_comparator))
                result.append(front.pop())
                front.reverse()
                result.append(front.pop())
                # the closer the first two are to each other, the farther the third must be
                # NOTE(review): due to precedence this is (1e9 / d) + 10000,
                # not 1e9 / (d + 10000) — confirm which was intended.
                distance = 1000000000 / \
                    result[0].distance_to(result[1].x, result[1].z) + 10000
                front = remove_too_close(front, result, distance)
                result.append(choice(front))
                return result
            raise NameError('Невозможно выбрать фронтовые аэродромы')
        raise ValueError()
| StarcoderdataPython |
1713806 | <gh_stars>1-10
# stdlib
import yaml
import os.path
# local module
from .factory_loader import FactoryLoader
__all__ = ['loadyaml']
def loadyaml( path ):
    """
    Load a YAML file at :path: that contains Table and View definitions.
    Returns a <dict> of item-name and item-class definition.

    If you want to import these definitions directly into your namespace, (like a module)
    you would do the following:

        globals().update( loadyaml( <path-to-yaml-file> ))

    If you did not want to do this, you can access the items as the <dict>.  For
    example, if your YAML file contained a Table called MyTable, then you could do
    something like:

        catalog = loadyaml( <path-to-yaml-file> )
        MyTable = catalog['MyTable']
        table = MyTable(dev)
        table.get()
        ...
    """
    # if no extension is given, default to '.yml'
    if os.path.splitext(path)[1] == '': path += '.yml'
    # BUGFIX: the original leaked the file handle; close it deterministically.
    # SECURITY NOTE: yaml.load with the default Loader can construct arbitrary
    # Python objects — only feed it trusted catalog files (or switch to
    # yaml.safe_load if the catalogs use plain tags only).
    with open(path, 'r') as fh:
        document = yaml.load(fh)
    return FactoryLoader().load(document)
11266015 | # -*- coding: utf-8 -*-
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.core.tests.blanks.complaint_blanks import (
# AuctionComplaintResourceTest
create_auction_complaint_invalid,
create_auction_complaint,
patch_auction_complaint,
review_auction_complaint,
get_auction_complaint,
get_auction_complaints,
# InsiderAuctionComplaintDocumentResourceTest
not_found,
create_auction_complaint_document,
put_auction_complaint_document,
patch_auction_complaint_document
)
class AuctionComplaintResourceTestMixin(object):
    """Mixin bundling the shared auction-complaint resource tests.

    Each attribute wraps a blank test function via snitch() so that
    subclasses gain it as a regular test method.
    """
    test_create_auction_complaint_invalid = snitch(create_auction_complaint_invalid)
    test_create_auction_complaint = snitch(create_auction_complaint)
    test_patch_auction_complaint = snitch(patch_auction_complaint)
    test_review_auction_complaint = snitch(review_auction_complaint)
    test_get_auction_complaint = snitch(get_auction_complaint)
    test_get_auction_complaints = snitch(get_auction_complaints)
class InsiderAuctionComplaintDocumentResourceTestMixin(object):
    """Mixin bundling the shared complaint-document tests (wrapped via snitch())."""
    test_not_found = snitch(not_found)
    test_create_auction_complaint_document = snitch(create_auction_complaint_document)
    test_put_auction_complaint_document = snitch(put_auction_complaint_document)
    test_patch_auction_complaint_document = snitch(patch_auction_complaint_document)
| StarcoderdataPython |
12859949 | #
# @lc app=leetcode id=206 lang=python3
#
# [206] Reverse Linked List
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def reverseList(self, head: "ListNode") -> "ListNode":
        """Reverse a singly linked list and return the new head.

        BUGFIX: the annotations are string literals because ListNode is only
        declared in the comment block above; unquoted names raise NameError
        when this module is imported outside the LeetCode judge.
        """
        def iterative(node):
            # Walk the list once, re-pointing each node at its predecessor.
            prev, cur = None, node
            while cur:
                nxt = cur.next
                cur.next = prev
                prev = cur
                cur = nxt
            return prev

        def recursively(node):
            # Alternative solution (O(n) call-stack depth), kept for reference.
            if not node or not node.next:
                return node
            new_head = recursively(node.next)
            node.next.next = node
            node.next = None
            return new_head

        return iterative(head)
| StarcoderdataPython |
3444760 | from django.db import models
# Create your models here.
class Disease(models.Model):
    """A disease entry with its description, medication and source info."""
    # Short code identifying the disease (max 4 characters).
    code = models.CharField(max_length=4)
    name = models.CharField(max_length=120)
    description = models.TextField()
    # Optional free-text medication guidance.
    medication = models.TextField(blank=True)
    source = models.CharField(max_length=120,default="Kementerian Kesehatan")
    # NOTE(review): the default looks like a directory, not an image file — confirm.
    imageUrl = models.CharField(max_length=120,default="/static/images/diseases")
4844955 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tanchao'
import sys
# NOTE: Python 2 syntax (print statement) — this file will not run under Python 3.
# Reads the file named by the first CLI argument, one test case per line.
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        test = test.strip()
        if test:  # skip blank lines (whitespace/newline already stripped)
            print 'todo'
4827398 | <reponame>willhardy/Adjax
# -*- coding: UTF-8 -*-
from utils import get_key, JsonResponse, get_template_include_key, named_key
from django.contrib import messages
from django.core import urlresolvers
from django.template.context import RequestContext
from django.template.loader import render_to_string
from pprint import pformat
from django.http import HttpResponse
from django.conf import settings
from django.template.context import RequestContext
from django.shortcuts import redirect as django_redirect
from django.shortcuts import render_to_response as django_render_to_response
# Site-wide adjax settings, read once at import time (None when unset).
DEFAULT_REDIRECT = getattr(settings, 'ADJAX_DEFAULT_REDIRECT', None)
ADJAX_DEBUG = getattr(settings, 'ADJAX_DEBUG', None)
def get_store(request):
    """ Return the AdjaxStore for this request, creating it on first access. """
    try:
        return request._adjax_store
    except AttributeError:
        store = AdjaxStore(request)
        request._adjax_store = store
        return store
class AdjaxStore(object):
""" This class will help store ajax data collected in views. """
def __init__(self, request):
self.request = request
self.update_data = {}
self.form_data = {}
self.replace_data = {}
self.hide_data = []
self.extra_data = {}
self.redirect_data = None
    @property
    def messages_data(self):
        # Serialise (and thereby consume) Django's queued messages for this request.
        return [{'tags': m.tags, 'content': unicode(m), 'level': m.level} for m in messages.get_messages(self.request)]
def update(self, obj, attributes=None):
""" Make values from a given object available. """
for attr in attributes:
value = getattr(obj, attr)
if callable(value):
value = value()
self.update_data[get_key(obj, attr)] = value
def form(self, form_obj):
""" Validate the given form and send errors to browser. """
if not form_obj.is_valid():
for name, errors in form_obj.errors.items():
if form_obj.prefix:
key = 'id_%s-%s' % (form_obj.prefix, name)
else:
key = 'id_%s' % name
self.form_data[key] = errors
def replace(self, element=None, html=None, name=None, value=None):
""" Replace the given DOM element with the given html.
The DOM element is specified using css identifiers.
Some javascript libraries may have an extended syntax,
which can be used if you don't value portability.
"""
if name is not None:
if value is None:
return TypeError('replace() takes two arguments, "value" parameter missing')
self.replace_data['.'+named_key(name)] = value
else:
if element is None or html is None:
return TypeError('replace() takes two arguments')
self.replace_data[element] = html
def hide(self, element=None, name=None):
""" Hides the given DOM element.
The DOM element is specified using css identifiers.
Some javascript libraries may have an extended syntax,
which can be used if you don't value portability.
"""
if name is not None:
self.hide_data.append('.'+named_key(name))
else:
if element is None:
return TypeError('replace() takes 1 argument, 0 provided.')
self.hide_data.append(element)
def redirect(self, to, *args, **kwargs):
""" Redirect the browser dynamically to another page. """
# If a django object is passed in, use the
if hasattr(to, 'get_absolute_url'):
self.redirect_data = to.get_absolute_url()
return self.response()
try:
self.redirect_data = urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to:
raise
else:
return self.response()
# Finally, fall back and assume it's a URL
self.redirect_data = to
return self.response()
def extra(self, key, value):
""" Send additional information to the browser. """
self.extra_data[key] = value
def render_to_response(self, template_name, dictionary=None, prefix=None, context_instance=None):
""" Update any included templates. """
# Because we have access to the request object, we can use request context
# This is not analogous to render_to_strings interface
if context_instance is None:
context_instance = RequestContext(self.request)
rendered_content = render_to_string(template_name, dictionary, context_instance=context_instance)
dom_element = ".%s" % get_template_include_key(template_name, prefix)
self.replace(dom_element, rendered_content)
def json_response(self, include_messages=False):
""" Return a json response with our ajax data """
return JsonResponse(self._get_response_dict(include_messages))
def _get_response_dict(self, include_messages=False):
elements = [
('extra', self.extra_data),
('forms', self.form_data),
('replace', self.replace_data),
('hide', self.hide_data),
('update', self.update_data),
('redirect', self.redirect_data),
]
if include_messages:
elements.append(('messages', self.messages_data),)
return dict((a,b) for a,b in elements if b)
def pretty_json_response(self, include_messages=False):
""" Returns a pretty string for displaying the json response
to a developer.
"""
return pformat(self._get_response_dict(include_messages))
def response(self, include_messages=False):
""" Renders the response using JSON, if appropriate.
"""
if self.request.is_ajax():
return self.json_response(include_messages)
else:
# Try and redirect somewhere useful
redirect_to = self.redirect_data
if redirect_to is None:
if 'HTTP_REFERER' in self.request.META:
redirect_to = self.request.META['HTTP_REFERER']
elif DEFAULT_REDIRECT:
redirect_to = DEFAULT_REDIRECT
if ADJAX_DEBUG:
debug_template = 'adjax/debug.html'
context = RequestContext(self.request,
{'store': self, 'redirect_to': redirect_to})
return django_render_to_response(debug_template, context_instance=context)
if redirect_to:
return django_redirect(redirect_to)
return HttpResponse()
| StarcoderdataPython |
4807013 | <reponame>Kitware/paraview-visualizer
from paraview import simple
def initialize(server):
    """Wire trame state/controller callbacks that mirror ParaView's active objects."""
    state, ctrl = server.state, server.controller
    @state.change("active_controls")
    def update_active_panel(active_controls, **kwargs):
        # The drawer is only visible while some control panel is selected.
        state.drawer_visibility = active_controls is not None
    @ctrl.add("on_active_proxy_change")
    def update_active_proxies(**kwargs):
        """Copy the ids of ParaView's active view/source/representation into state."""
        if simple is None:
            # paraview.simple unavailable -> clear ids (0 means "no proxy").
            state.active_proxy_source_id = 0
            state.active_proxy_representation_id = 0
            return
        active_view = simple.GetActiveView()
        state.active_proxy_view_id = active_view.GetGlobalIDAsString()
        active_source = simple.GetActiveSource()
        if active_source is None:
            state.active_proxy_source_id = 0
            state.active_proxy_representation_id = 0
        else:
            state.active_proxy_source_id = active_source.GetGlobalIDAsString()
            # The representation depends on both the source and the view.
            rep = simple.GetRepresentation(proxy=active_source, view=active_view)
            state.active_proxy_representation_id = rep.GetGlobalIDAsString()
    # Initialize state values
    update_active_proxies()
| StarcoderdataPython |
9787406 | <reponame>FatiahBalo/python-ds
"""
Bubble Sort worst time complexity occurs when array is reverse sorted - O(n^2)
Best time scenario is when array is already sorted - O(n)
"""
def bubble_sort(array):
    """In-place bubble sort; returns the (sorted) input list.

    Worst case O(n^2) when the input is reverse sorted; this variant always
    performs every pass, so even already-sorted input costs O(n^2).
    """
    length = len(array)
    for done in range(length):
        # After `done` passes, the last `done` slots already hold their
        # final values, so the scan stops short of them.
        for idx in range(length - done - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
    return array
def bubble_sort_optimized(array):
    """
    Bubble sort that stops early once a full pass makes no swaps.

    Best case O(n) for already-sorted input; worst case O(n^2).
    Sorts in place and returns the list, matching bubble_sort above.
    Reference - https://github.com/prabhupant/python-ds/pull/346
    """
    has_swapped = True
    num_of_iterations = 0
    while has_swapped:
        has_swapped = False
        # Each completed pass fixes one more element at the tail, so the
        # scanned prefix shrinks by num_of_iterations.
        for i in range(len(array) - num_of_iterations - 1):
            if array[i] > array[i + 1]:
                array[i], array[i + 1] = array[i + 1], array[i]
                has_swapped = True
        num_of_iterations += 1
    # Bug fix: previously this function returned None, unlike bubble_sort,
    # so the two sorts could not be used interchangeably.
    return array
| StarcoderdataPython |
8087410 | <filename>NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/utils/compat/fractions.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles backports of the standard library's `fractions.py`.
The fractions module in 2.6 does not handle being instantiated using a
float and then calculating an approximate fraction based on that.
This functionality is required by the FITS unit format generator,
since the FITS unit format handles only rational, not decimal point,
powers.
"""
from __future__ import absolute_import
import sys
if sys.version_info[:2] == (2, 6):
from ._fractions_py2 import *
else:
from fractions import *
| StarcoderdataPython |
12860137 | from NERDA.models import NERDA
from NERDA.datasets import get_conll_data, get_dane_data
from transformers import AutoTokenizer
trans = 'bert-base-multilingual-uncased'
tokenizer = AutoTokenizer.from_pretrained(trans, do_lower_case = True)
data = get_dane_data('train')
sents = data.get('sentences')
out = []
for sent in sents:
sent = sents[3595]
tokens = []
for word in sent:
tokens.extend(tokenizer.tokenize(word))
out.append(tokens)
lens = [len(x) for x in out]
max(lens)
sents[3595]
from transformers import AutoTokenizer, AutoModel, AutoConfig
t = 'google/electra-small-discriminator'
cfg = AutoModel.from_pretrained(t)
#trn = get_conll_data('train')
#idx_min = 3110
#idx_max = 3115
#valid = get_conll_data('valid')
#valid['sentences'] = valid['sentences'][idx_min:idx_max+1]
#valid['tags'] = valid['tags'][idx_min:idx_max+1]
#trn['sentences'] = trn['sentences'][idx_min:idx_max+1]
#trn['tags'] = trn['tags'][idx_min:idx_max+1]
# model = NERDA(dataset_training=trn,
# dataset_validation = valid)
#model.train()
#k=0
#trn['sentences'][3111]
#from transformers import AutoTokenizer
#t = AutoTokenizer.from_pretrained('bert-base-multilingual-uncased')
#valid = get_conll_data('valid')
filename = 'en_bert_ml.pkl'
# pickle.dump(model, open(filename, 'wb'))
import pickle
file = open(filename,'rb')
model = pickle.load(file)
test = get_conll_data('test')
model.evaluate_performance(test, batch_size = 10)
#for entry in range(3120,3140):
# print(entry)
# sent = trn['sentences'][entry]
# [t.tokenize(word) for word in sent]
test = get_conll_data('test')
idx_min = 202
idx_max = 202
# valid = get_conll_data('valid')
#valid['sentences'] = valid['sentences'][idx_min:idx_max+1]
#valid['tags'] = valid['tags'][idx_min:idx_max+1]
test['sentences'] = test['sentences'][idx_min:idx_max+1]
test['tags'] = test['tags'][idx_min:idx_max+1]
model.evaluate_performance(test)
# model = NERDA(dataset_training=trn,
# dataset_validation = valid)
#model.train()
#k=0
#trn['sentences'][3111]
#from transformers import AutoTokenizer
#t = AutoTokenizer.from_pretrained('bert-base-multilingual-uncased')
#valid = get_conll_data('valid')
# NOTE(review): this region contained unresolved git merge-conflict markers
# (<<<<<<< HEAD:admin/sandbox.py / ======= / >>>>>>> b5eea08...:sandbox.py),
# which made the whole file a SyntaxError.  Both sides are kept below, in
# order, so the script is at least syntactically valid -- confirm which
# branch was actually intended.
transformer = "google/electra-small-discriminator"
from transformers import AutoTokenizer, AutoModel, AutoConfig
trans = AutoConfig.from_pretrained(transformer)
def tester():
    """Try to download the ELECTRA model; return it (None if the download failed)."""
    model = None  # ensure a defined value when from_pretrained raises
    try:
        model = AutoModel.from_pretrained('google/electra-small-discriminator')
    except Exception:  # narrowed from a bare except; sys.exc_info still reported
        print("Oops!", sys.exc_info()[0], "occurred.")
    return model
# --- incoming branch (b5eea08) ---
from NERDA.datasets import get_dane_data
trn = get_conll_data('train', 5)
valid = get_conll_data('dev', 5)
transformer = 'bert-base-multilingual-uncased',
model = NERDA(transformer = transformer,
              dataset_training = trn,
              dataset_validation = valid)
| StarcoderdataPython |
6601489 | import numpy as np
def multi_dot(*vectors):
    """ Pairwise (outer) product of the given 1-D vectors.

    The vectors are folded left to right: each step forms the product of
    the accumulated array (as a trailing column) with the next vector
    (as a leading row), adding one dimension per vector.

    Args:
        vectors: tuple of numpy.array with len(shape) = 1
    Returns:
        numpy.ndarray
    """
    result = vectors[0]
    for vec in vectors[1:]:
        result = np.dot(np.expand_dims(result, -1), np.expand_dims(vec, 0))
    return result
def multi_dot2(*vectors, flatten=False, reshape=False):
    """ multi_dot with optional post-processing of the result.

    flatten=True collapses the result to 1-D; reshape=True returns a column
    vector.  When both are set, flatten is applied first and the reshape wins.
    """
    result = multi_dot(*vectors)
    if flatten:
        result = result.ravel()
    return result.reshape(-1, 1) if reshape else result
def polar2cartesian(x):
    """ Transform polar coordinates to cartesian.

    Coordinates order:
        x[0] ~ ro  (radius)
        x[1] ~ phi (angle, radians)
    Returns a float64 array [x, y].
    """
    radius, angle = x[0], x[1]
    horizontal = radius * np.cos(angle)
    vertical = radius * np.sin(angle)
    return np.array([horizontal, vertical], dtype=np.float64)
| StarcoderdataPython |
24320 | """Class implementation for the stop_propagation interface.
"""
from apysc._type.variable_name_interface import VariableNameInterface
class StopPropagationInterface(VariableNameInterface):
    """Mixin that lets a JS-backed event object stop event propagation."""
    def stop_propagation(self) -> None:
        """
        Stop event propagation.

        This only appends a `stopPropagation()` call on this instance's
        JavaScript variable to the generated expression stream; nothing
        happens on the Python side beyond emitting that JS.
        """
        import apysc as ap
        # DebugInfo annotates the emitted JS with its Python origin when
        # debug mode is enabled.
        with ap.DebugInfo(
                callable_=self.stop_propagation, locals_=locals(),
                module_name=__name__, class_=StopPropagationInterface):
            expression: str = (
                f'{self.variable_name}.stopPropagation();'
            )
            ap.append_js_expression(expression=expression)
| StarcoderdataPython |
1806853 | <filename>nlpaug/augmenter/spectrogram/spectrogram_augmenter.py
import numpy as np
from nlpaug.util import Method
from nlpaug import Augmenter
class SpectrogramAugmenter(Augmenter):
    """Base augmenter for spectrogram inputs.

    Registers the SPECTROGRAM method with the nlpaug core and keeps the
    augmentation probability ``aug_p`` for subclasses to use.
    """
    def __init__(self, action, name='Spectrogram_Aug', aug_min=1, aug_p=0.3, verbose=0):
        super(SpectrogramAugmenter, self).__init__(
            name=name, method=Method.SPECTROGRAM, action=action, aug_min=aug_min, verbose=verbose)
        # Probability of applying the augmentation (consumed by subclasses).
        self.aug_p = aug_p
    @classmethod
    def clean(cls, data):
        # Spectrogram data needs no normalisation before augmentation.
        return data
    @classmethod
    def is_duplicate(cls, dataset, data):
        """Return True if *data* is element-wise equal to any entry of *dataset*."""
        for d in dataset:
            if np.array_equal(d, data):
                return True
        return False
| StarcoderdataPython |
8194771 | a
asdgsdg
asdg | StarcoderdataPython |
1786725 | import typer
app = typer.Typer()
@app.command()
def hellow(name: str, d: int, state: bool = True):
    """Greet NAME, echoing D; also print the "iq" line unless state is disabled."""
    print(f"halo {name} {d}")
    if state:  # idiomatic truth test (was the anti-idiom `state==True`)
        print(f"iq {d}")
@app.command()
def bye():
    """Print a farewell message."""  # docstring doubles as the Typer help text
    print("bye")
if __name__ == "__main__":
app()
| StarcoderdataPython |
1849981 | <gh_stars>0
import speech_recognition as sr
class AudioUtils:
    """
    author: <NAME>
    This class will provide certain audio functionality, like:
    - doing speech to text
    """
    @staticmethod
    def record_caption():
        """Record one utterance from the default microphone and return the
        lower-cased transcription.

        NOTE(review): recognize_google sends the audio to Google's speech
        API, so this needs network access; it can also raise (e.g. on
        unintelligible audio) -- confirm callers handle that.
        """
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("Say your caption!")
            # listen() blocks until a phrase is captured.
            audio = r.listen(source)
            text = (r.recognize_google(audio)).lower()
            print(text)
            return text
3579877 | <reponame>turlodales/vimr
import argparse
import pathlib
import shutil
from builder import Builder
from config import Config
from deps import ag, pcre, xz
from deps.ag import AgBuilder
DEPS_FILE_NAME = ".deps"
PACKAGE_NAME = "vimr-deps"
def parse_args() -> argparse.Namespace:
    """Parse the command-line flags for the vimr-deps build script.

    All version flags and the arm64 deployment target are required; the
    x86_64 deployment target is optional.
    """
    parser = argparse.ArgumentParser()

    def add_string_flag(flag, dest, required):
        # Every option is a plain stored string; only `required` varies.
        parser.add_argument(
            flag,
            action="store",
            dest=dest,
            type=str,
            required=required,
        )

    add_string_flag("--xz-version", "xz_version", True)
    add_string_flag("--pcre-version", "pcre_version", True)
    add_string_flag("--ag-version", "ag_version", True)
    add_string_flag("--arm64-deployment-target", "arm64_deployment_target", True)
    add_string_flag("--x86_64-deployment-target", "x86_64_deployment_target", False)
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
arm64_deployment_target = args.arm64_deployment_target
x86_64_deployment_target = args.x86_64_deployment_target
cwd = pathlib.Path(__file__).parent.resolve().joinpath("build")
shutil.rmtree(cwd, ignore_errors=True)
cwd.mkdir(parents=True, exist_ok=True)
install_path = cwd.parent.joinpath(PACKAGE_NAME)
shutil.rmtree(install_path, ignore_errors=True)
install_path_lib = install_path.joinpath("lib")
install_path_include = install_path.joinpath("include")
xz_config = Config(
version=args.xz_version,
arm64_deployment_target=arm64_deployment_target,
x86_64_deployment_target=x86_64_deployment_target,
default_cflags="-g -O2",
target_install_path_parent=cwd.joinpath("libxz"),
install_path_lib=install_path_lib,
install_path_include=install_path_include,
working_directory=cwd.joinpath(DEPS_FILE_NAME),
)
pcre_config = Config(
version=args.pcre_version,
arm64_deployment_target=arm64_deployment_target,
x86_64_deployment_target=x86_64_deployment_target,
default_cflags="-D_THREAD_SAFE -pthread -g -O2",
target_install_path_parent=cwd.joinpath("libpcre"),
install_path_lib=install_path_lib,
install_path_include=install_path_include,
working_directory=cwd.joinpath(DEPS_FILE_NAME),
)
ag_config = Config(
version=args.ag_version,
arm64_deployment_target=arm64_deployment_target,
x86_64_deployment_target=x86_64_deployment_target,
default_cflags="-g -O2 -D_THREAD_SAFE -pthread",
target_install_path_parent=cwd.joinpath("libag"),
install_path_lib=install_path_lib,
install_path_include=install_path_include,
working_directory=cwd.joinpath(DEPS_FILE_NAME),
)
builders = {
"xz": Builder(
xz_config,
download_command=xz.download_command,
extract_command=xz.extract_command,
make_command=xz.make_command,
build_universal_and_install_command=xz.build_universal_and_install_command,
),
"pcre": Builder(
pcre_config,
download_command=pcre.download_command,
make_command=pcre.make_command,
extract_command=pcre.extract_command,
build_universal_and_install_command=pcre.build_universal_and_install_command,
),
"ag": AgBuilder(
ag_config,
download_command=ag.download_command,
make_command=ag.make_command,
deps=[xz_config, pcre_config],
extract_command=ag.extract_command,
build_universal_and_install_command=ag.build_universal_and_install_command,
),
}
builders["xz"].build()
builders["pcre"].build()
builders["ag"].build()
| StarcoderdataPython |
341240 | <gh_stars>1-10
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""glore_resnet200"""
from collections import OrderedDict
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.ops import operations as P
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
def _weight_variable(shape, factor=0.01):
init_value = np.random.randn(*shape).astype(np.float32) * factor
return Tensor(init_value)
def _conv3x3(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 3, 3)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=weight, has_bias=False)
def _conv1x1(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 1, 1)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=weight, has_bias=False)
def _conv7x7(in_channel, out_channel, stride=1):
weight_shape = (out_channel, in_channel, 7, 7)
weight = _weight_variable(weight_shape)
return nn.Conv2d(in_channel, out_channel,
kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight, has_bias=False)
def _bn(channel):
return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.92,
gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
def _bn_last(channel):
return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.92,
gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)
def _fc(in_channel, out_channel):
weight_shape = (out_channel, in_channel)
weight = _weight_variable(weight_shape)
return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
class BN_AC_Conv(nn.Cell):
    """
    Basic convolution block.

    Pre-activation ordering: BatchNorm -> ReLU -> Conv2d, as used by the
    residual units below.
    """
    def __init__(self,
                 in_channel,
                 out_channel,
                 kernel=1,
                 pad=0,
                 pad_mode='same',
                 stride=1,
                 groups=1,
                 has_bias=False):
        super(BN_AC_Conv, self).__init__()
        self.bn = _bn(in_channel)
        self.relu = nn.ReLU()
        # Grouped convolution when groups > 1; bias usually omitted since
        # the following block starts with BatchNorm.
        self.conv = nn.Conv2d(in_channel, out_channel,
                              pad_mode=pad_mode,
                              padding=pad,
                              kernel_size=kernel,
                              stride=stride,
                              has_bias=has_bias,
                              group=groups)
    def construct(self, x):
        # BN -> ReLU -> Conv (pre-activation).
        out = self.bn(x)
        out = self.relu(out)
        out = self.conv(out)
        return out
class GCN(nn.Cell):
    """
    Graph convolution unit (single layer)

    Operates on (n, num_state, num_node) tensors: conv1 mixes information
    across the node axis (with a residual connection), conv2 then mixes
    across the state axis.
    """
    def __init__(self, num_state, num_mode, bias=False):
        super(GCN, self).__init__()
        # self.relu1 = nn.ReLU()
        self.conv1 = nn.Conv1d(num_mode, num_mode, kernel_size=1)
        self.relu2 = nn.ReLU()
        self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, has_bias=bias)
        self.transpose = ops.Transpose()
        self.add = P.TensorAdd()
    def construct(self, x):
        """construct"""
        identity = x
        # (n, num_state, num_node) -> (n, num_node, num_state)
        # -> (n, num_state, num_node)
        out = self.transpose(x, (0, 2, 1))
        # out = self.relu1(out)
        out = self.conv1(out)
        out = self.transpose(out, (0, 2, 1))
        # Residual connection over the node-mixing step.
        out = self.add(out, identity)
        out = self.relu2(out)
        out = self.conv2(out)
        return out
class GloreUnit(nn.Cell):
"""
Graph-based Global Reasoning Unit
Parameter:
'normalize' is not necessary if the input size is fixed
Args:
num_in: Input channel
num_mid:
"""
def __init__(self, num_in, num_mid,
normalize=False):
super(GloreUnit, self).__init__()
self.normalize = normalize
self.num_s = int(2 * num_mid) # 512 num_in = 1024
self.num_n = int(1 * num_mid) # 256
# reduce dim
self.conv_state = nn.SequentialCell([_bn(num_in),
nn.ReLU(),
_conv1x1(num_in, self.num_s, stride=1)])
# projection map
self.conv_proj = nn.SequentialCell([_bn(num_in),
nn.ReLU(),
_conv1x1(num_in, self.num_n, stride=1)])
self.gcn = GCN(num_state=self.num_s, num_mode=self.num_n)
self.conv_extend = nn.SequentialCell([_bn_last(self.num_s),
nn.ReLU(),
_conv1x1(self.num_s, num_in, stride=1)])
self.reshape = ops.Reshape()
self.matmul = ops.BatchMatMul()
self.transpose = ops.Transpose()
self.add = P.TensorAdd()
self.cast = P.Cast()
def construct(self, x):
"""construct"""
n = x.shape[0]
identity = x
# (n, num_in, h, w) --> (n, num_state, h, w)
# --> (n, num_state, h*w)
x_conv_state = self.conv_state(x)
x_state_reshaped = self.reshape(x_conv_state, (n, self.num_s, -1))
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_conv_proj = self.conv_proj(x)
x_proj_reshaped = self.reshape(x_conv_proj, (n, self.num_n, -1))
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_rproj_reshaped = x_proj_reshaped
# projection: coordinate space -> interaction space
# (n, num_state, h*w) x (n, num_node, h*w)T --> (n, num_state, num_node)
x_proj_reshaped = self.transpose(x_proj_reshaped, (0, 2, 1))
# 提高速度
x_state_reshaped_fp16 = self.cast(x_state_reshaped, mstype.float16)
x_proj_reshaped_fp16 = self.cast(x_proj_reshaped, mstype.float16)
x_n_state_fp16 = self.matmul(x_state_reshaped_fp16, x_proj_reshaped_fp16)
x_n_state = self.cast(x_n_state_fp16, mstype.float32)
if self.normalize:
x_n_state = x_n_state * (1. / x_state_reshaped.shape[2])
# reasoning: (n, num_state, num_node) -> (n, num_state, num_node)
x_n_rel = self.gcn(x_n_state)
# reverse projection: interaction space -> coordinate space
# (n, num_state, num_node) x (n, num_node, h*w) --> (n, num_state, h*w)
x_n_rel_fp16 = self.cast(x_n_rel, mstype.float16)
x_rproj_reshaped_fp16 = self.cast(x_rproj_reshaped, mstype.float16)
x_state_reshaped_fp16 = self.matmul(x_n_rel_fp16, x_rproj_reshaped_fp16)
x_state_reshaped = self.cast(x_state_reshaped_fp16, mstype.float32)
# (n, num_state, h*w) --> (n, num_state, h, w)
x_state = self.reshape(x_state_reshaped, (n, self.num_s, identity.shape[2], identity.shape[3]))
# (n, num_state, h, w) -> (n, num_in, h, w)
x_conv_extend = self.conv_extend(x_state)
out = self.add(x_conv_extend, identity)
return out
class Residual_Unit(nn.Cell):
"""
Residual unit used in Resnet
"""
def __init__(self,
in_channel,
mid_channel,
out_channel,
groups=1,
stride=1,
first_block=False):
super(Residual_Unit, self).__init__()
self.first_block = first_block
self.BN_AC_Conv1 = BN_AC_Conv(in_channel, mid_channel, kernel=1, pad=0)
self.BN_AC_Conv2 = BN_AC_Conv(mid_channel, mid_channel, kernel=3, pad_mode='pad', pad=1, stride=stride,
groups=groups)
self.BN_AC_Conv3 = BN_AC_Conv(mid_channel, out_channel, kernel=1, pad=0)
if self.first_block:
self.BN_AC_Conv_w = BN_AC_Conv(in_channel, out_channel, kernel=1, pad=0, stride=stride)
self.add = P.TensorAdd()
def construct(self, x):
identity = x
out = self.BN_AC_Conv1(x)
out = self.BN_AC_Conv2(out)
out = self.BN_AC_Conv3(out)
if self.first_block:
identity = self.BN_AC_Conv_w(identity)
out = self.add(out, identity)
return out
class ResNet(nn.Cell):
"""
Resnet architecture
"""
def __init__(self,
layer_nums,
num_classes,
use_glore=False):
super(ResNet, self).__init__()
self.layer1 = nn.SequentialCell(OrderedDict([
('conv', _conv7x7(3, 64, stride=2)),
('bn', _bn(64),),
('relu', nn.ReLU(),),
('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same"))
]))
num_in = [64, 256, 512, 1024]
num_mid = [64, 128, 256, 512]
num_out = [256, 512, 1024, 2048]
self.layer2 = nn.SequentialCell(OrderedDict([
("Residual_Unit{}".format(i), Residual_Unit(in_channel=(num_in[0] if i == 1 else num_out[0]),
mid_channel=num_mid[0],
out_channel=num_out[0],
stride=1,
first_block=(i == 1))) for i in range(1, layer_nums[0] + 1)
]))
blocks_layer3 = []
for i in range(1, layer_nums[1] + 1):
blocks_layer3.append(
("Residual_Unit{}".format(i), Residual_Unit(in_channel=(num_in[1] if i == 1 else num_out[1]),
mid_channel=num_mid[1],
out_channel=num_out[1],
stride=(2 if i == 1 else 1),
first_block=(i == 1))))
if use_glore and i in [12, 18]:
blocks_layer3.append(("Residual_Unit{}_GloreUnit".format(i), GloreUnit(num_out[1], num_mid[1])))
self.layer3 = nn.SequentialCell(OrderedDict(blocks_layer3))
blocks_layer4 = []
for i in range(1, layer_nums[2] + 1):
blocks_layer4.append(
("Residual_Unit{}".format(i), Residual_Unit(in_channel=(num_in[2] if i == 1 else num_out[2]),
mid_channel=num_mid[2],
out_channel=num_out[2],
stride=(2 if i == 1 else 1),
first_block=(i == 1))))
if use_glore and i in [16, 24, 32]:
blocks_layer4.append(("Residual_Unit{}_GloreUnit".format(i), GloreUnit(num_out[2], num_mid[2])))
self.layer4 = nn.SequentialCell(OrderedDict(blocks_layer4))
self.layer5 = nn.SequentialCell(OrderedDict([
("Residual_Unit{}".format(i), Residual_Unit(in_channel=(num_in[3] if i == 1 else num_out[3]),
mid_channel=num_mid[3],
out_channel=num_out[3],
stride=(2 if i == 1 else 1),
first_block=(i == 1))) for i in range(1, layer_nums[3] + 1)
]))
self.tail = nn.SequentialCell(OrderedDict([
('bn', _bn(num_out[3])),
('relu', nn.ReLU())
]))
# self.globalpool = nn.AvgPool2d(kernel_size=7, stride=1, pad_mode='same')
self.mean = ops.ReduceMean(keep_dims=True)
self.flatten = nn.Flatten()
self.classifier = _fc(num_out[3], num_classes)
self.print = ops.Print()
def construct(self, x):
"""construct"""
c1 = self.layer1(x)
c2 = self.layer2(c1)
c3 = self.layer3(c2)
c4 = self.layer4(c3)
c5 = self.layer5(c4)
out = self.tail(c5)
# out = self.globalpool(out)
out = self.mean(out, (2, 3))
out = self.flatten(out)
out = self.classifier(out)
return out
def glore_resnet200(class_num=1000, use_glore=True):
    """Build a ResNet-200 (3/24/36/3 residual units per stage).

    When use_glore is True, Global Reasoning (GloRe) units are inserted at
    fixed positions inside stages 3 and 4 (see ResNet above).
    """
    stage_units = [3, 24, 36, 3]
    return ResNet(layer_nums=stage_units,
                  num_classes=class_num,
                  use_glore=use_glore)
| StarcoderdataPython |
1734060 | #%%
from functools import partial
import jax
import jax.numpy as np
from jax import random, vmap, jit, grad
from jax.experimental import stax, optimizers
from jax.experimental.stax import Dense, Relu
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
#%%
# Use stax to set up network initialization and evaluation functions
net_init, net_apply = stax.serial(
Dense(40), Relu,
Dense(40), Relu,
Dense(1)
)
in_shape = (-1, 1,)
rng = random.PRNGKey(0)
out_shape, params = net_init(rng, in_shape)
#%%
import numpy as onp
def get_wave(wave_gen, n_samples=100, wave_params=False):
    """Sample one random sine task: (x, y) with random amplitude and phase.

    When wave_params is True, also return (phase, amp) so the ground-truth
    wave can be reconstructed later.
    """
    xs = wave_gen(n_samples)
    amplitude = onp.random.uniform(low=0.1, high=5.0)
    phase = onp.random.uniform(low=0., high=onp.pi)
    ys = onp.sin(xs + phase) * amplitude
    if wave_params:
        return (xs, ys), (phase, amplitude)
    return xs, ys
def vis_wave_gen(N):  # better for visualization
    """Evenly spaced sample positions in [-5, 5], shaped (N, 1)."""
    return onp.linspace(-5, 5, N).reshape((N, 1))
def train_wave_gen(N):  # for model training
    """N sample positions drawn uniformly from [-5, 5), shaped (N, 1)."""
    return onp.random.uniform(low=-5., high=5., size=(N, 1))
def mse(params, batch):
    """Mean squared prediction error of the network over *batch* = (x, y)."""
    inputs, targets = batch
    predictions = net_apply(params, inputs)
    return np.mean((targets - predictions) ** 2)
#%%
batch = get_wave(vis_wave_gen, 100)
predictions = net_apply(params, batch[0])
losses = mse(params, batch)
plt.plot(batch[0], predictions, label='prediction')
plt.plot(*batch, label='target')
plt.legend()
#%%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-2)
@jit
def step(i, opt_state, batch):
params = get_params(opt_state)
g = grad(mse)(params, batch)
return opt_update(i, g, opt_state)
#%%
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
batch = get_wave(vis_wave_gen, 100)
for i in range(200):
opt_state = step(i, opt_state, batch)
params = get_params(opt_state)
xb, yb = batch
plt.plot(xb, net_apply(params, xb), label='prediction')
plt.plot(xb, yb, label='target')
plt.legend()
# %%
### MAML
alpha = 0.1
# inner loop -- take one gradient step on the data
def inner_update(params, batch):
grads = grad(mse)(params, batch)
sgd_update = lambda param, grad: param - alpha * grad
inner_params = jax.tree_multimap(sgd_update, params, grads)
return inner_params
# outer loop
def maml_loss(params, train_batch, test_batch):
task_params = inner_update(params, train_batch)
loss = mse(task_params, test_batch)
return loss
@jit
def maml_step(i, opt_state, train_batch, test_batch):
params = get_params(opt_state)
g = grad(maml_loss)(params, train_batch, test_batch)
return opt_update(i, g, opt_state)
## task extractor
def get_task(n_train, n_test, wave_params=False):
if not wave_params:
batch = get_wave(train_wave_gen, n_train + n_test)
else:
batch, wparams = get_wave(train_wave_gen, n_train + n_test, wave_params=True)
# extract train/test elements from batch=(xb, yb) with treemap :)
train_batch = jax.tree_map(lambda l: l[:n_train], batch, is_leaf=lambda node: hasattr(node, 'shape'))
test_batch = jax.tree_map(lambda l: l[n_train:], batch, is_leaf=lambda node: hasattr(node, 'shape'))
task = train_batch, test_batch
if wave_params: task = (*task, wparams)
return task
# %%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
for i in tqdm(range(20000)):
train_batch, test_batch = get_task(20, 1)
opt_state = maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
ypred = net_apply(step_params, x)
plt.plot(x, ypred, label=f'step{i}')
step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
task_batch_size = 5
tasks = [get_task(20, 1) for _ in range(task_batch_size)]
train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
xb, yb = train_batch
for i in range(len(xb)):
plt.scatter(xb[i], yb[i])
# %%
def batch_maml_loss(params, train_batch, test_batch):
losses = vmap(partial(maml_loss, params))(train_batch, test_batch)
loss = losses.mean()
return loss
@jit
def batch_maml_step(i, opt_state, train_batch, test_batch):
params = get_params(opt_state)
g = grad(batch_maml_loss)(params, train_batch, test_batch)
return opt_update(i, g, opt_state)
# %%
task_batch_size = 4
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
for i in tqdm(range(20000)):
# get batch of tasks
tasks = [get_task(20, 1) for _ in range(task_batch_size)]
train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
# take gradient step over the mean
opt_state = batch_maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
plt.scatter(*train_batch, label='train')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
ypred = net_apply(step_params, x)
plt.plot(x, ypred, label=f'step{i}')
step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
| StarcoderdataPython |
3348491 | import os
import ctypes
import multiprocessing
import logging
log = logging.getLogger(__name__)
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_PORT = 20130
DST_PORT = 20130
PACKET_RX_RING = 5
PACKET_TX_RING = 13
class Ring(ctypes.Structure):
    """Opaque ctypes stand-in for the C packet-ring struct.

    Only ever used through POINTER(Ring) in the libbw_control signatures,
    so no fields need declaring on the Python side.
    """
    pass
class BandwidthController(multiprocessing.Process):
    """Background process that broadcasts per-host bandwidth allocations.

    Each loop iteration sends the current `active_rate` for every switch
    interface through a raw TX packet ring (via libbw_control.so), waits
    for a reply on the matching RX ring, then refreshes `active_rate`
    from `tx_rate`.
    """
    # NOTE(review): __slots__ has no effect here because the
    # multiprocessing.Process base class still provides a __dict__.
    __slots__ = ["tx_rate", "active_rate", "max_rate", "name", "host_ctrl_map",
                 "ring_list", "bw_lib", "kill"]
    def __init__(self, host_ctrl_map, tx_rate, active_rate, max_rate):
        self.name = "PolicyEnforcer"
        multiprocessing.Process.__init__(self)
        self.host_ctrl_map = host_ctrl_map  # {switch iface: control iface}
        self.tx_rate = tx_rate        # desired rates; presumably a shared array -- TODO confirm
        self.active_rate = active_rate  # rates currently being broadcast (indexed like host_ctrl_map)
        self.max_rate = max_rate      # scale: fractional rate -> absolute rate
        # self.sock_map = self.bind_sockets(host_ctrl_map)
        self.bw_lib = self.init_backend()
        self.ring_list = self.init_transmissions_rings(host_ctrl_map)
        self.kill = multiprocessing.Event()  # set to request shutdown
    def run(self):
        """Main loop: broadcast rates until the kill event is set."""
        while not self.kill.is_set():
            try:
                self.broadcast_bw()
            except KeyboardInterrupt:
                log.error("%s: Caught Interrupt! Exiting..." % self.name)
                self.kill.set()
        self._clean()
    def stop(self):
        """Signal the process loop to exit."""
        log.info("%s: Received termination signal! Exiting.." % self.name)
        self.kill.set()
    def close(self):
        # Alias so the controller can be used where close() is expected.
        self.stop()
    def _clean(self):
        # Placeholder for shutdown cleanup (see destroy_transmissions_rings).
        pass
    def init_backend(self):
        """Load libbw_control.so and declare the ctypes signatures used below."""
        bw_lib = ctypes.CDLL(FILE_DIR + "/libbw_control.so")
        bw_lib.init_ring.argtypes = [
            ctypes.c_char_p, ctypes.c_ushort, ctypes.c_uint]
        bw_lib.init_ring.restype = ctypes.POINTER(Ring)
        bw_lib.send_bw.argtypes = [
            ctypes.c_uint32, ctypes.POINTER(Ring), ctypes.c_ushort]
        bw_lib.send_bw.restype = ctypes.c_int
        bw_lib.wait_for_reply.argtypes = [ctypes.POINTER(Ring)]
        return bw_lib
    def init_transmissions_rings(self, host_ctrl_map):
        """Create one RX and one TX packet ring per control interface."""
        ring_list = {}
        for sw_iface, ctrl_iface in host_ctrl_map.items():
            ring_list[sw_iface] = {}
            rx_ring = self.bw_lib.init_ring(
                ctrl_iface.encode("ascii"), SRC_PORT, PACKET_RX_RING)
            tx_ring = self.bw_lib.init_ring(
                ctrl_iface.encode("ascii"), SRC_PORT, PACKET_TX_RING)
            ring_list[sw_iface]["rx"] = rx_ring
            ring_list[sw_iface]["tx"] = tx_ring
        return ring_list
    def destroy_transmissions_rings(self):
        """Tear down every RX/TX ring created in init_transmissions_rings."""
        # NOTE(review): teardown_ring has no argtypes declared in
        # init_backend; presumably exported by the library -- confirm.
        for ring_pair in self.ring_list.values():
            self.bw_lib.teardown_ring(ring_pair["rx"])
            self.bw_lib.teardown_ring(ring_pair["tx"])
    def send_cntrl_pckt(self, iface, tx_rate):
        """Send one control packet carrying tx_rate (a fraction of max_rate).

        Returns the C library's status code (0 on success).
        """
        # Get the tx ring to transmit a packet
        tx_ring = self.ring_list[iface]["tx"]
        full_rate = tx_rate * self.max_rate
        ret = self.bw_lib.send_bw(int(full_rate), tx_ring, DST_PORT)
        return ret
    def await_response(self, iface):
        """Block until a reply arrives on iface's RX ring."""
        rx_ring = self.ring_list[iface]["rx"]
        # we do not care about payload
        # we only care about packets that pass the bpf filter
        self.bw_lib.wait_for_reply(rx_ring)
    def broadcast_bw(self):
        """Send the active rate to every host, then wait for all replies."""
        for index, ctrl_iface in enumerate(self.host_ctrl_map):
            if self.send_cntrl_pckt(ctrl_iface, self.active_rate[index]) != 0:
                # A failed send aborts the whole controller.
                log.error("Could not send packet!")
                self.kill.set()
                return
        for ctrl_iface in self.host_ctrl_map.keys():
            self.await_response(ctrl_iface)
        # update the active rate
        self.active_rate[:] = self.tx_rate
3594738 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for update_perf_expectations."""
import copy
from StringIO import StringIO
import unittest
import make_expectations as perf_ex_lib
import update_perf_expectations as upe_mod
# A separate .json file contains the list of test cases we'll use.
# The tests used to be defined inline here, but are >80 characters in length.
# Now they are expected to be defined in file ./sample_test_cases.json.
# Create a dictionary of tests using .json file.
# Loaded once at import time; individual tests below take deep copies when
# they need an isolated, mutable view of this data.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
    perf_ex_lib.ReadFile('sample_test_cases.json'))
# Get all keys.
all_tests_keys = all_tests.keys()
def VerifyPreparedTests(self, tests_to_update, reva, revb):
  """Checks that PrepareTestsForUpdate touched only the selected tests.

  Selected tests must lose their 'sha1' and carry the normalized
  [reva, revb] range; all other tests must be left untouched.
  """
  working_tests = copy.deepcopy(all_tests)
  upe_mod.PrepareTestsForUpdate(tests_to_update, working_tests, reva, revb)
  # PrepareTestsForUpdate normalizes the revision range; mirror that here.
  if reva > revb:
    reva, revb = revb, reva
  for key in all_tests_keys:
    updated = working_tests[key]
    original = all_tests[key]
    if key in tests_to_update:
      # Selected tests lose their checksum and gain the new range.
      self.assertFalse('sha1' in updated)
      self.assertEqual(reva, updated['reva'])
      self.assertEqual(revb, updated['revb'])
    else:
      # Untouched tests keep checksum and revision range intact.
      self.assertTrue('sha1' in updated)
      self.assertEqual(original['sha1'], updated['sha1'])
      self.assertEqual(original['reva'], updated['reva'])
      self.assertEqual(original['revb'], updated['revb'])
class UpdatePerfExpectationsTest(unittest.TestCase):
  """Exercises test-name matching and update preparation in upe_mod."""
  def testFilterMatch(self):
    """Verifies different regular expressions test filter."""
    self.maxDiff = None
    # Tests to update specified by a single literal string.
    tests_to_update = 'win-release/media_tests_av_perf/fps/tulip2.webm'
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
    self.assertEqual(expected_tests_list,
                     upe_mod.GetMatchingTests(tests_to_update,
                                              all_tests_keys))
    # Tests to update specified by a single reg-ex
    tests_to_update = 'win-release/media_tests_av_perf/fps.*'
    expected_tests_list = ['win-release/media_tests_av_perf/fps/crowd1080.webm',
                           'win-release/media_tests_av_perf/fps/crowd2160.webm',
                           'win-release/media_tests_av_perf/fps/crowd360.webm',
                           'win-release/media_tests_av_perf/fps/crowd480.webm',
                           'win-release/media_tests_av_perf/fps/crowd720.webm',
                           'win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetMatchingTests(tests_to_update,
                                           all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    # Tests to update are specified by a single reg-ex, spanning multiple OSes.
    tests_to_update = '.*-release/media_tests_av_perf/fps.*'
    expected_tests_list = ['linux-release/media_tests_av_perf/fps/tulip2.m4a',
                           'linux-release/media_tests_av_perf/fps/tulip2.mp3',
                           'linux-release/media_tests_av_perf/fps/tulip2.mp4',
                           'linux-release/media_tests_av_perf/fps/tulip2.ogg',
                           'linux-release/media_tests_av_perf/fps/tulip2.ogv',
                           'linux-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/crowd1080.webm',
                           'win-release/media_tests_av_perf/fps/crowd2160.webm',
                           'win-release/media_tests_av_perf/fps/crowd360.webm',
                           'win-release/media_tests_av_perf/fps/crowd480.webm',
                           'win-release/media_tests_av_perf/fps/crowd720.webm',
                           'win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetMatchingTests(tests_to_update,
                                           all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
  def testLinesFromInputFile(self):
    """Verifies different string formats specified in input file."""
    # Tests to update have been specified by a single literal string in
    # an input file.
    # Use the StringIO class to mock a file object.
    lines_from_file = StringIO(
        'win-release/media_tests_av_perf/fps/tulip2.webm')
    contents = lines_from_file.read()
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()
    # Tests to update specified by a single reg-ex in an input file.
    lines_from_file = StringIO('win-release/media_tests_av_perf/fps/tulip2.*\n')
    contents = lines_from_file.read()
    expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.m4a',
                           'win-release/media_tests_av_perf/fps/tulip2.mp3',
                           'win-release/media_tests_av_perf/fps/tulip2.mp4',
                           'win-release/media_tests_av_perf/fps/tulip2.ogg',
                           'win-release/media_tests_av_perf/fps/tulip2.ogv',
                           'win-release/media_tests_av_perf/fps/tulip2.wav',
                           'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()
    # Tests to update specified by multiple lines in an input file.
    lines_from_file = StringIO(
        '.*-release/media_tests_av_perf/fps/tulip2.*\n'
        'win-release/media_tests_av_perf/dropped_fps/tulip2.*\n'
        'linux-release/media_tests_av_perf/audio_latency/latency')
    contents = lines_from_file.read()
    expected_tests_list = [
        'linux-release/media_tests_av_perf/audio_latency/latency',
        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
        'linux-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
        'win-release/media_tests_av_perf/fps/tulip2.m4a',
        'win-release/media_tests_av_perf/fps/tulip2.mp3',
        'win-release/media_tests_av_perf/fps/tulip2.mp4',
        'win-release/media_tests_av_perf/fps/tulip2.ogg',
        'win-release/media_tests_av_perf/fps/tulip2.ogv',
        'win-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/fps/tulip2.webm']
    actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
    actual_list.sort()
    self.assertEqual(expected_tests_list, actual_list)
    lines_from_file.close()
  def testPreparingForUpdate(self):
    """Verifies that tests to be modified are changed as expected."""
    tests_to_update = [
        'linux-release/media_tests_av_perf/audio_latency/latency',
        'linux-release/media_tests_av_perf/fps/tulip2.m4a',
        'linux-release/media_tests_av_perf/fps/tulip2.mp3',
        'linux-release/media_tests_av_perf/fps/tulip2.mp4',
        'linux-release/media_tests_av_perf/fps/tulip2.ogg',
        'linux-release/media_tests_av_perf/fps/tulip2.ogv',
        'linux-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
        'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
        'win-release/media_tests_av_perf/fps/tulip2.mp3',
        'win-release/media_tests_av_perf/fps/tulip2.mp4',
        'win-release/media_tests_av_perf/fps/tulip2.ogg',
        'win-release/media_tests_av_perf/fps/tulip2.ogv',
        'win-release/media_tests_av_perf/fps/tulip2.wav',
        'win-release/media_tests_av_perf/fps/tulip2.webm']
    # Test regular positive integers.
    reva = 12345
    revb = 54321
    VerifyPreparedTests(self, tests_to_update, reva, revb)
    # Test negative values.
    reva = -54321
    revb = 12345
    with self.assertRaises(ValueError):
      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
    # Test reva greater than revb.
    # NOTE(review): these direct calls pass the module-level all_tests
    # (not a copy), so a successful call may mutate shared state that
    # other tests read -- confirm PrepareTestsForUpdate's semantics.
    reva = 54321
    revb = 12345
    upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
    # Test non-integer values
    reva = 'sds'
    revb = 12345
    with self.assertRaises(ValueError):
      upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
8158985 |
def add_destination():
    """Prompt the user for one schedule entry and insert it into the
    module-level ``schedules`` list, keeping the list sorted by name.

    NOTE(review): relies on ``schedules`` and ``datetime`` being defined
    elsewhere in the file -- neither is visible in this fragment.
    """
    distenationName = input("Название пункта назначения? ")
    number = input("Номер поезда? ")
    my_string = str(input('Время отправления(yyyy-mm-dd hh:mm): '))
    departureTime = datetime.strptime(my_string, "%Y-%m-%d %H:%M")
    schedule = {
        'distenationName': distenationName,
        'number': number,
        'departureTime': departureTime,
    }
    # Append the new record to the list.
    schedules.append(schedule)
    # Re-sort the list if it now has more than one entry.
    if len(schedules) > 1:
        schedules.sort(key=lambda item: item.get('distenationName', ''))
def destination_list():
    """Print every schedule entry departing at or after a user-given time.

    Reads the threshold time from stdin and writes a formatted table of
    matching entries (or a "no trains" message) to stdout.
    """
    my_string2 = str(input('Введите время отправки для поиска существующих рейсов: (yyyy-mm-dd hh:mm): '))
    departureTime2 = datetime.strptime(my_string2, "%Y-%m-%d %H:%M")
    # FIX: replaced `next((...), None) != None` with the idiomatic any();
    # same truth value, no None-sentinel comparison via `!=`.
    if any(x["departureTime"] >= departureTime2 for x in schedules):
        # Table header.
        print(
            '| {:^4} | {:^30} | {:^20} | {:^8} |'.format(
                "№",
                "Пункт назначения",
                "Номер поезда",
                "Время отправления"
            )
        )
        count = 0
        for schedule in schedules:
            if schedule["departureTime"] >= departureTime2:
                count += 1
                print(' ', count, '\t\t ', schedule.get('distenationName', ''), '\t\t\t ', schedule.get('number', ''), '\t ', schedule.get('departureTime', ''))
    else:
        print("Рейсов нет после указанного времени")
# In[ ]:
| StarcoderdataPython |
12817611 | <reponame>kiyoon/camera-tools
#!/usr/bin/env python3
import argparse
class Formatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    # Combines "show default values" with "keep the description verbatim"
    # so the multi-line description below is not re-wrapped.
    pass
# Command-line interface: one or more paths (wildcards allowed on Windows).
parser = argparse.ArgumentParser(
    description='''Read EXIF data
Author: <NAME> (<EMAIL>)''',
    formatter_class=Formatter)
parser.add_argument('input_files', type=str, nargs='+',
                    help='files to read metadata')
args = parser.parse_args()
import glob
import os
import exiftool
import pprint
if __name__ == "__main__":
    for origpath in args.input_files:
        for path in glob.glob(origpath): # glob: Windows wildcard support
            root, fname_ext = os.path.split(path)
            fname, fext = os.path.splitext(fname_ext)
            # One exiftool session per file; the context manager handles
            # starting and stopping the external process.
            with exiftool.ExifTool() as et:
                metadata = et.get_metadata(path)
            print(path)
            pprint.pprint(metadata)
| StarcoderdataPython |
5154951 | <reponame>fehija/MLb-LDLr<gh_stars>0
# Standard DNA codon table: maps each of the 64 codons to the
# three-letter amino-acid code (or "STOP" for the three stop codons).
GCODE = {
    "GCT":"Ala",
    "GCC":"Ala",
    "GCA":"Ala",
    "GCG":"Ala",
    "CGT":"Arg",
    "CGC":"Arg",
    "CGA":"Arg",
    "CGG":"Arg",
    "AGA":"Arg",
    "AGG":"Arg",
    "AAT":"Asn",
    "AAC":"Asn",
    "GAT":"Asp",
    "GAC":"Asp",
    "TGT":"Cys",
    "TGC":"Cys",
    "CAA":"Gln",
    "CAG":"Gln",
    "GAA":"Glu",
    "GAG":"Glu",
    "GGT":"Gly",
    "GGC":"Gly",
    "GGA":"Gly",
    "GGG":"Gly",
    "CAT":"His",
    "CAC":"His",
    "ATT":"Ile",
    "ATC":"Ile",
    "ATA":"Ile",
    "ATG":"Met",
    "TTA":"Leu",
    "TTG":"Leu",
    "CTT":"Leu",
    "CTC":"Leu",
    "CTA":"Leu",
    "CTG":"Leu",
    "AAA":"Lys",
    "AAG":"Lys",
    "TTT":"Phe",
    "TTC":"Phe",
    "CCT":"Pro",
    "CCC":"Pro",
    "CCA":"Pro",
    "CCG":"Pro",
    "TCT":"Ser",
    "TCC":"Ser",
    "TCA":"Ser",
    "TCG":"Ser",
    "AGT":"Ser",
    "AGC":"Ser",
    "ACT":"Thr",
    "ACC":"Thr",
    "ACA":"Thr",
    "ACG":"Thr",
    "TGG":"Trp",
    "TAT":"Tyr",
    "TAC":"Tyr",
    "GTT":"Val",
    "GTC":"Val",
    "GTA":"Val",
    "GTG":"Val",
    "TAG":"STOP",
    "TGA":"STOP",
    "TAA":"STOP"
}
12811092 | <filename>imperative/python/megengine/utils/naming.py
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..core._imperative_rt.core2 import pop_scope, push_scope
class AutoNaming:
    r"""
    Name all executed operators automatically during tracing and record all
    tensors renamed by the user.
    """
    # Stack of user-pushed scope names (None entries are placeholders).
    scopes = []
    c_ops = []
    # Maps an op name to the list of ops sharing that name (for dedup).
    name2ops = {}
    # Maps a tensor handle to the name the user assigned it.
    handle2names = {}
    __cls_attributes__ = {"scopes", "c_ops", "name2ops", "handle2names"}
    @classmethod
    def clear(cls):
        # Reset all class-level bookkeeping containers in place.
        for attr in cls.__cls_attributes__:
            getattr(cls, attr).clear()
    @classmethod
    def push_scope(cls, scope):
        # None is still appended so pop_scope stays balanced with push_scope.
        if scope is not None:
            push_scope(scope)
        cls.scopes.append(scope)
    @classmethod
    def pop_scope(cls):
        scope = cls.scopes.pop()
        if scope is not None:
            pop_scope(scope)
    @classmethod
    def get_scope(cls):
        """Return the current dotted scope path, skipping None entries."""
        return ".".join(s for s in cls.scopes if s is not None)
    @classmethod
    def gen_name(cls, x) -> str:
        """Return *x*'s name prefixed with the current scope (if any)."""
        scope = cls.get_scope()
        # Prefer the user-assigned c_name over the internal _name.
        name = x.c_name if x.c_name else x._name
        return scope + "." + name if len(scope) else name
    @classmethod
    def record_var_name(cls, handle, name):
        cls.handle2names[handle] = name
    @classmethod
    def get_var_name(cls, handle):
        # pop: each recorded name is consumed exactly once.
        return cls.handle2names.pop(handle, None)
    @classmethod
    def record_opnode(cls, op):
        ops = cls.name2ops.get(op.name, [])
        if op not in ops:
            ops.append(op)
        cls.name2ops[op.name] = ops
    @classmethod
    def remove_duplicate_names(cls):
        """Disambiguate ops sharing a name by appending "[index]" suffixes,
        and rename their output vars to match."""
        for key, ops in cls.name2ops.items():
            if len(ops) == 1:
                continue
            for i, op in enumerate(ops):
                op.name = key + "[%s]" % str(i)
                # Single-output ops keep their var name untouched.
                if len(op.outputs) == 1:
                    continue
                for var in op.outputs:
                    var.name = var.name.replace(key, op.name)
        cls.name2ops.clear()
| StarcoderdataPython |
5164994 | from opendr.planning.end_to_end_planning.e2e_planning_learner import EndToEndPlanningRLLearner
from opendr.planning.end_to_end_planning.envs.agi_env import AgiEnv
__all__ = ['EndToEndPlanningRLLearner', 'AgiEnv']
| StarcoderdataPython |
5024004 | <filename>pneumo/utils/augmentations.py
import torch
from torchvision import transforms
import torchvision.transforms.functional as TF
from PIL import ImageOps, ImageEnhance, Image
import random
import math
"""
Transforamtions that we can apply on an image and the range of magnitude:
Rotate
Flip
Mirror
Equalize
Solarize, [0, 255]
Contrast, [0.1, 1.9]
Color, [0.1, 1.9]
Brightness, [0.1, 1.9]
Sharpness, [0.1, 1.9]
"""
def rotate(img, alpha):
    '''Rotate *img* by *alpha* degrees.

    Log entry produced by the caller: {'method': 'rotate', 'angle': alpha}
    '''
    return TF.rotate(img, alpha)
def flip(img, v):
    '''Flip *img* vertically; *v* is ignored (kept for a uniform signature).

    Log entry produced by the caller: {'method': 'flip', 'value': v}
    '''
    return ImageOps.flip(img)
def mirror(img, v):
    '''Flip *img* horizontally; *v* is ignored (kept for a uniform signature).

    Log entry produced by the caller: {'method': 'mirror', 'value': v}
    '''
    return ImageOps.mirror(img)
def equalize(img, v):
    '''Equalize the image histogram; *v* is ignored (uniform signature).

    Log entry produced by the caller: {'method': 'equalize'}
    '''
    return ImageOps.equalize(img)
def solarize(img, v):
    '''Invert all pixel values above threshold *v* (range [0, 255]).

    Log entry produced by the caller: {'method': 'solarize', 'value': v}
    '''
    return ImageOps.solarize(img, v)
def contrast(img, v):
    '''Adjust contrast by factor *v* (1.0 = unchanged; range [0.1, 1.9]).

    Log entry produced by the caller: {'method': 'contrast', 'value': v}
    '''
    return ImageEnhance.Contrast(img).enhance(v)
def color(img, v):
    '''Adjust color saturation by factor *v* (1.0 = unchanged).

    Log entry produced by the caller: {'method': 'color', 'value': v}
    '''
    return ImageEnhance.Color(img).enhance(v)
def brightness(img, v):
    '''Adjust brightness by factor *v* (1.0 = unchanged).

    Log entry produced by the caller: {'method': 'brightness', 'value': v}
    '''
    return ImageEnhance.Brightness(img).enhance(v)
def sharpness(img, v):
    '''Adjust sharpness by factor *v* (1.0 = unchanged).

    Log entry produced by the caller: {'method': 'sharpness', 'value': v}
    '''
    return ImageEnhance.Sharpness(img).enhance(v)
def name_to_fct(method):
    '''Look up an augmentation function by its name string.

    Raises KeyError if *method* is not one of the known augmentations.
    '''
    registry = {
        fn.__name__: fn
        for fn in (rotate, flip, mirror, equalize, solarize,
                   contrast, color, brightness, sharpness)
    }
    return registry[method]
class RandAugmentation:
    """Applies one randomly chosen augmentation with a random magnitude."""
    def __init__(self):
        # (function, min magnitude, max magnitude) candidates.
        self.list = [(rotate, -30, 30), (mirror, 0, 1), (equalize, 0, 1), (solarize, 0, 255),
                     (contrast, 0.5, 1.9), (color, 0.1, 1.9), (brightness, 0.5, 1.9), (sharpness, 0.1, 1.9)]
        # NOTE(review): magnitude is never read anywhere in this class.
        self.magnitude = 5
    def __call__(self, img):
        """Return (augmented image, {'method': name, 'value': magnitude})."""
        ops = random.choice(self.list)
        op, minv, maxv = ops
        # Parameter-free ops are logged with value 0; the rest draw a
        # uniform magnitude from their [minv, maxv] range.
        if op.__name__ in ['flip', 'mirror', 'equalize']:
            img_ = op(img, 0)
            augment = {"method": op.__name__, "value": 0}
        else:
            val = random.uniform(minv, maxv)
            augment = {"method": op.__name__, "value": val}
            img_ = op(img, val)
        return img_, augment
class Augmenter(object):
    """Convert ndarrays in sample to Tensors."""
    def __init__(self, ra=False, prob=0.5):
        # ra: enable random augmentation; prob: chance of augmenting a sample.
        self.augment = RandAugmentation()
        self.ra = ra
        self.prob = prob
    def __call__(self, sample):
        # NOTE(review): when augmentation fires, RandAugmentation.__call__
        # returns an (image, augment_dict) tuple, but the fall-through path
        # returns the bare sample -- callers see two different shapes.
        # Confirm downstream code expects this before changing it.
        image = sample
        if self.ra:
            if random.random() < self.prob:
                augment_img = self.augment(image)
                return augment_img
        return sample
class Normalizer(object):
    """Convert a PIL image to a float tensor and normalize per channel.

    The default mean/std are the standard ImageNet statistics, which are
    defined for pixel values scaled to [0, 1].
    """
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
    def __call__(self, image):
        # BUG FIX: TF.pil_to_tensor returns a uint8 tensor in [0, 255];
        # TF.normalize requires a float tensor, and the ImageNet mean/std
        # above assume the [0, 1] range -- so convert and rescale first.
        image = TF.pil_to_tensor(image).float().div(255.0)
        image = TF.normalize(image, self.mean, self.std)
        return image
class UnNormalizer(object):
    """Undo channel-wise normalization in place: channel = channel * std + mean.

    Defaults invert the standard ImageNet normalization statistics.
    """
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        # Walk the channels together with their statistics and rescale
        # each one in place, then hand the same tensor back.
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.mul_(channel_std).add_(channel_mean)
        return tensor
3388405 | <filename>src/plot_hourly_bar_graph.py
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from dateutil.parser import parse
import datetime
import csv
import numpy
import os
import copy
class CountReading:
    """A single sensor reading: the cumulative count seen at a timestamp."""
    def __init__(self, timestamp, count):
        # timestamp: datetime of the reading; count: integer counter value.
        self.timestamp = timestamp
        self.count = count
    def __repr__(self):
        # Added for debuggability; does not change any existing behavior.
        return "CountReading(timestamp={!r}, count={!r})".format(
            self.timestamp, self.count)
# plotting params
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (12, 9),
          'axes.labelsize': 'x-large',
          'axes.titlesize': 'x-large',
          'xtick.labelsize': 'x-large',
          'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
# Output directory for the per-day bar charts; created if missing.
directory = os.path.join('..', 'graphs')
directory = os.path.join(directory, 'hourly_bar_graph')
if not os.path.exists(directory):
    os.makedirs(directory)
# Load (timestamp, cumulative count) rows from the CSV.
data = []
with open(os.path.join(os.path.join('..', 'data'), 'data.csv'), 'r') as csvfile:
    plots = csv.reader(csvfile, delimiter=',')
    for row in plots:
        try:
            data.append([datetime.datetime.strptime(
                row[0], '%Y-%m-%d %H:%M:%S.%f'), int(row[1])])
        except:
            # NOTE(review): bare except silently drops any malformed row
            # (and would also swallow unrelated errors) -- consider
            # narrowing to (ValueError, IndexError).
            pass
# Group readings by calendar day, re-basing each day's counts so the
# first reading of the day becomes 0.
date_to_count_reading = {}
date_start_count = 0
current_date = datetime.datetime.min
for timestamp, count in data:
    if timestamp.date() not in date_to_count_reading:
        date_to_count_reading[timestamp.date()] = []
        # set the new date and count as the base
        current_date = timestamp.date()
        date_start_count = count
    date_to_count_reading[current_date].append(
        CountReading(timestamp, count-date_start_count))
# calculate the non-cumulative data by taking the difference
date_to_count_reading_noncumu = copy.deepcopy(date_to_count_reading)
for date, count_reading_list in date_to_count_reading.items():
    for i in range(1, len(count_reading_list)):
        date_to_count_reading_noncumu[date][i].count = date_to_count_reading[date][i].count - \
            date_to_count_reading[date][i-1].count
for date, count_reading_list in date_to_count_reading_noncumu.items():
    # add up the counts into hourly bins
    bins_hours = set()
    hour_to_total_count = dict()
    for i in range(0, len(count_reading_list)):
        h = count_reading_list[i].timestamp.hour
        bins_hours.add(h)
        if h not in hour_to_total_count.keys():
            hour_to_total_count[h] = count_reading_list[i].count
        else:
            hour_to_total_count[h] += count_reading_list[i].count
    bins_hours = sorted(bins_hours)
    # Counts ordered by hour to line up with the sorted bins.
    total_count_list = [count for hour,
                        count in sorted(hour_to_total_count.items())]
    # plot the daily graph
    plt.bar(bins_hours, total_count_list)
    plt.xlabel('Hour') # x-axis label
    plt.ylabel('Count') # y-axis label
    plt.xticks(bins_hours, fontsize=10)
    plt.title('Date: ' + str(date)) # plot title
    figure = plt.gcf() # get current figure
    figure.set_size_inches(8, 6)
    plt.savefig(os.path.join(directory, "%s.png" % str(date)), dpi=200)
    plt.close()
3520575 | import os
def deletenull(label_path):
    """Delete every zero-byte regular file directly inside *label_path*.

    Subdirectories and non-empty files are left untouched.
    """
    for name in os.listdir(label_path):
        # FIX: build paths portably instead of string concatenation with
        # "/", and skip directories so os.remove cannot raise on them.
        path = os.path.join(label_path, name)
        if os.path.isfile(path) and os.path.getsize(path) == 0:
            os.remove(path)
if __name__ == '__main__':
    # Clean zero-byte label files from the tracking dataset directory.
    deletenull("Westlife/labels_with_ids")
| StarcoderdataPython |
1847666 | import os
import requests
from tqdm import tqdm
# Compressed dump as downloaded, and the name expected after extraction.
FILE_NAME = 'data.xml.bz2'
FINAL_FILE_NAME = 'data.xml'
def needs_to_download():
    """Return True unless a non-empty dump (compressed or extracted) exists."""
    for candidate in (FILE_NAME, FINAL_FILE_NAME):
        if os.path.exists(candidate) and os.path.getsize(candidate) > 0:
            return False
    return True
def download():
    """Download the latest jawiki pages-articles dump to FILE_NAME.

    Skips the download when needs_to_download() reports existing data.
    Shows a byte-accurate tqdm progress bar while streaming.
    """
    if not needs_to_download():
        return
    print('downloading')
    url = 'https://dumps.wikimedia.org/jawiki/latest/jawiki-latest-pages-articles.xml.bz2'
    session = requests.Session()
    response = session.head(url)
    content_size = response.headers.get('content-length', 0)
    download_handle = session.get(url, stream=True)
    download_handle.raise_for_status()
    content_size = int(content_size) if isinstance(content_size, str) else content_size
    # BUG FIX: the old code wrapped iter_content in tqdm with a *byte*
    # total, so the bar advanced by 1 per 1-MiB chunk and showed roughly
    # 1/1048576 of the real progress.  Advance it by bytes written instead.
    progress_bar = tqdm(
        total=content_size,
        mininterval=0.1,
        unit='B',
        unit_scale=True,
        unit_divisor=1024
    )
    with open(FILE_NAME, 'wb') as file_handle:
        for data in download_handle.iter_content(chunk_size=1024 * 1024):
            file_handle.write(data)
            progress_bar.update(len(data))
    progress_bar.close()
    print('download finished')
| StarcoderdataPython |
6491532 | from csv import reader, writer
# Write a tiny two-row CSV from scratch.
# NOTE(review): csv docs recommend opening with newline='' to avoid
# doubled blank lines on Windows -- confirm target platform.
with open("fighters_new.csv", "w") as file:
    csv_writer = writer(file)
    csv_writer.writerow(["Character", "Move"])
    csv_writer.writerow(["Ryu", "Hadouken"])
# Stream fighters.csv through an upper-casing transform into a new file.
with open('fighters.csv') as file:
    csv_reader = reader(file)
    #fighters = [[s.upper() for s in row] for row in csv_reader]
    with open('screaming_fighters.csv', 'w') as file:
        csv_writer = writer(file)
        for fighter in csv_reader:
            csv_writer.writerow([s.upper() for s in fighter])
222795 | from server import db
from server.model import BaseModel, PermissionBaseModel
class Framework(db.Model, PermissionBaseModel, BaseModel):
    """ORM model for a test framework registered in the system."""
    __tablename__ = "framework"
    id = db.Column(db.Integer(), primary_key=True)
    # Display name and source URL; both must be unique.
    name = db.Column(db.String(64), unique=True, nullable=False)
    url = db.Column(db.String(256), unique=True, nullable=False)
    # Where the framework writes its logs (optional).
    logs_path = db.Column(db.String(256))
    # Whether the framework supports adaptive execution -- presumably a
    # feature flag consumed elsewhere; confirm with callers.
    adaptive = db.Column(db.Boolean(), nullable=False, default=False)
    # Ownership / permission scoping foreign keys.
    creator_id = db.Column(db.Integer(), db.ForeignKey("user.gitee_id"))
    group_id = db.Column(db.Integer(), db.ForeignKey("group.id"))
    org_id = db.Column(db.Integer(), db.ForeignKey("organization.id"))
    # One framework owns many git repositories.
    gitee_repos = db.relationship('GitRepo', backref='framework')
    def to_json(self):
        """Serialize the row to a plain dict for API responses."""
        return {
            "id": self.id,
            "name": self.name,
            "url": self.url,
            "logs_path": self.logs_path,
            "adaptive": self.adaptive,
            "creator_id": self.creator_id,
            "permission_type": self.permission_type,
            "group_id": self.group_id,
            "org_id": self.org_id,
            "create_time": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "update_time": self.update_time.strftime("%Y-%m-%d %H:%M:%S"),
        }
class GitRepo(db.Model, PermissionBaseModel, BaseModel):
    """ORM model for a git repository attached to a framework."""
    __tablename__ = "git_repo"
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(64), nullable=False)
    # Clone URL; unique across all repositories.
    git_url = db.Column(db.String(256), unique=True, nullable=False)
    # Whether suites/templates are kept in sync from the repo automatically.
    sync_rule = db.Column(db.Boolean(), nullable=False, default=True)
    # Ownership / permission scoping foreign keys.
    creator_id = db.Column(db.Integer(), db.ForeignKey("user.gitee_id"))
    group_id = db.Column(db.Integer(), db.ForeignKey("group.id"))
    org_id = db.Column(db.Integer(), db.ForeignKey("organization.id"))
    framework_id = db.Column(db.Integer(), db.ForeignKey("framework.id"))
    suites = db.relationship('Suite', backref='git_repo')
    templates = db.relationship('Template', backref='git_repo')
    def to_json(self):
        """Serialize the row (with its framework) for API responses."""
        return {
            "id": self.id,
            "name": self.name,
            "git_url": self.git_url,
            "sync_rule": self.sync_rule,
            # Embeds the owning framework's serialized form.
            "framework": self.framework.to_json(),
            "creator_id": self.creator_id,
            "permission_type": self.permission_type,
            "group_id": self.group_id,
            "org_id": self.org_id,
        }
| StarcoderdataPython |
4803552 | <gh_stars>0
""" version which can be consumed from within the module """
VERSION_STR = "0.0.4"
DESCRIPTION = "pymultienv is a command to help you deal with multiple python environments"
APP_NAME = "pymultienv"
LOGGER_NAME = "pymultienv"
| StarcoderdataPython |
6560191 | <reponame>ferag/keystone<filename>keystone/trust/core.py
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Trust service."""
from six.moves import zip
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
# Module-level handles to global configuration and the provider registry.
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class Manager(manager.Manager):
    """Default pivot point for the Trust backend.
    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.
    """
    driver_namespace = 'keystone.trust'
    _provides_api = 'trust_api'
    # CADF resource type used when emitting audit notifications.
    _TRUST = "OS-TRUST:trust"
    def __init__(self):
        super(Manager, self).__init__(CONF.trust.driver)
        # Clean up trusts whenever a keystone-managed user is deleted.
        notifications.register_event_callback(
            notifications.ACTIONS.deleted, 'user',
            self._on_user_delete)
    def _on_user_delete(self, service, resource_type, operation,
                        payload):
        # NOTE(davechen): Only delete the user that is maintained by
        # keystone will delete the related trust, since we don't know
        # when a LDAP user or federation user is deleted.
        user_id = payload['resource_info']
        # Remove trusts where the user is either trustee or trustor.
        trusts = self.driver.list_trusts_for_trustee(user_id)
        trusts = trusts + self.driver.list_trusts_for_trustor(user_id)
        for trust in trusts:
            self.driver.delete_trust(trust['id'])
    @staticmethod
    def _validate_redelegation(redelegated_trust, trust):
        """Check that *trust* is a legal redelegation of *redelegated_trust*.

        Raises Forbidden when depth, remaining_uses, expiry, roles or the
        impersonation flag violate the redelegation rules.
        """
        # Validate against:
        # 0 < redelegation_count <= max_redelegation_count
        max_redelegation_count = CONF.trust.max_redelegation_count
        redelegation_depth = redelegated_trust.get('redelegation_count', 0)
        if not (0 < redelegation_depth <= max_redelegation_count):
            raise exception.Forbidden(
                _('Remaining redelegation depth of %(redelegation_depth)d'
                  ' out of allowed range of [0..%(max_count)d]') %
                {'redelegation_depth': redelegation_depth,
                 'max_count': max_redelegation_count})
        # remaining_uses is None
        remaining_uses = trust.get('remaining_uses')
        if remaining_uses is not None:
            raise exception.Forbidden(
                _('Field "remaining_uses" is set to %(value)s'
                  ' while it must not be set in order to redelegate a trust'),
                value=remaining_uses)
        # expiry times
        trust_expiry = trust.get('expires_at')
        redelegated_expiry = redelegated_trust['expires_at']
        if trust_expiry:
            # redelegated trust is from backend and has no tzinfo
            if redelegated_expiry < trust_expiry.replace(tzinfo=None):
                raise exception.Forbidden(
                    _('Requested expiration time is more '
                      'than redelegated trust can provide'))
        else:
            # Inherit the parent's expiry when none was requested.
            trust['expires_at'] = redelegated_expiry
        # trust roles is a subset of roles of the redelegated trust
        parent_roles = set(role['id']
                           for role in redelegated_trust['roles'])
        if not all(role['id'] in parent_roles for role in trust['roles']):
            raise exception.Forbidden(
                _('Some of requested roles are not in redelegated trust'))
        # forbid to create a trust (with impersonation set to true) from a
        # redelegated trust (with impersonation set to false)
        if not redelegated_trust['impersonation'] and trust['impersonation']:
            raise exception.Forbidden(
                _('Impersonation is not allowed because redelegated trust '
                  'does not specify impersonation. Redelegated trust id: %s') %
                redelegated_trust['id'])
    def get_trust_pedigree(self, trust_id):
        """Return the chain [trust, parent, grandparent, ...] up to the root."""
        trust = self.driver.get_trust(trust_id)
        trust_chain = [trust]
        while trust and trust.get('redelegated_trust_id'):
            trust = self.driver.get_trust(trust['redelegated_trust_id'])
            trust_chain.append(trust)
        return trust_chain
    def get_trust(self, trust_id, deleted=False):
        """Fetch a trust, re-validating its whole redelegation chain."""
        trust = self.driver.get_trust(trust_id, deleted)
        if trust and trust.get('redelegated_trust_id') and not deleted:
            trust_chain = self.get_trust_pedigree(trust_id)
            # Validate each parent/child link and ensure every trustee in
            # the chain is still an enabled user.
            for parent, child in zip(trust_chain[1:], trust_chain):
                self._validate_redelegation(parent, child)
                try:
                    PROVIDERS.identity_api.assert_user_enabled(
                        parent['trustee_user_id'])
                except (AssertionError, exception.NotFound):
                    raise exception.Forbidden(
                        _('One of the trust agents is disabled or deleted'))
        return trust
    def create_trust(self, trust_id, trust, roles, redelegated_trust=None,
                     initiator=None):
        """Create a new trust.
        :returns: a new trust
        """
        # Default for initial trust in chain is max_redelegation_count
        max_redelegation_count = CONF.trust.max_redelegation_count
        requested_count = trust.get('redelegation_count')
        redelegatable = (trust.pop('allow_redelegation', False)
                         and requested_count != 0)
        if not redelegatable:
            trust['redelegation_count'] = requested_count = 0
            remaining_uses = trust.get('remaining_uses')
            if remaining_uses is not None and remaining_uses <= 0:
                msg = _('remaining_uses must be a positive integer or null.')
                raise exception.ValidationError(msg)
        else:
            # Validate requested redelegation depth
            if requested_count and requested_count > max_redelegation_count:
                raise exception.Forbidden(
                    _('Requested redelegation depth of %(requested_count)d '
                      'is greater than allowed %(max_count)d') %
                    {'requested_count': requested_count,
                     'max_count': max_redelegation_count})
            # Decline remaining_uses
            if trust.get('remaining_uses') is not None:
                raise exception.ValidationError(
                    _('remaining_uses must not be set if redelegation is '
                      'allowed'))
        if redelegated_trust:
            trust['redelegated_trust_id'] = redelegated_trust['id']
            remaining_count = redelegated_trust['redelegation_count'] - 1
            # Validate depth consistency
            if (redelegatable and requested_count and
                    requested_count != remaining_count):
                msg = _('Modifying "redelegation_count" upon redelegation is '
                        'forbidden. Omitting this parameter is advised.')
                raise exception.Forbidden(msg)
            trust.setdefault('redelegation_count', remaining_count)
            # Check entire trust pedigree validity
            pedigree = self.get_trust_pedigree(redelegated_trust['id'])
            for t in pedigree:
                self._validate_redelegation(t, trust)
        # No-op when a count was already set above.
        trust.setdefault('redelegation_count', max_redelegation_count)
        ref = self.driver.create_trust(trust_id, trust, roles)
        notifications.Audit.created(self._TRUST, trust_id, initiator=initiator)
        return ref
    def delete_trust(self, trust_id, initiator=None):
        """Remove a trust.
        :raises keystone.exception.TrustNotFound: If the trust doesn't exist.
        Recursively remove given and redelegated trusts
        """
        trust = self.driver.get_trust(trust_id)
        trusts = self.driver.list_trusts_for_trustor(
            trust['trustor_user_id'])
        for t in trusts:
            if t.get('redelegated_trust_id') == trust_id:
                # recursive call to make sure all notifications are sent
                try:
                    self.delete_trust(t['id'])
                except exception.TrustNotFound:  # nosec
                    # if trust was deleted by concurrent process
                    # consistency must not suffer
                    pass
        # end recursion
        self.driver.delete_trust(trust_id)
        notifications.Audit.deleted(self._TRUST, trust_id, initiator)
| StarcoderdataPython |
11333506 | <reponame>mmathys/bagua
# TODO: @shjwudp merge with service module
import copy
import os
import re
import time
from pssh.clients import ParallelSSHClient
from pssh.exceptions import Timeout
from .bayesian_optimizer import (
IntParam,
BayesianOptimizer,
)
def sysperf(host_list, nproc_per_node, ssh_port, env: dict = None):
    """Launch ``bagua_sys_perf`` on every host over SSH and parse throughput.

    Starts a distributed bagua launch on all hosts (rank 0 is the master)
    and scans the first host's stdout for the "Total img/sec" line.

    Returns a ``(ngpus, device, speed, speed_std)`` tuple, or
    ``(None, None, 0.0, None)`` when no speed line appeared before the
    read timeout.
    """
    assert len(host_list) != 0, "Invalid host_list={}".format(host_list)

    # Work on a private copy. The previous signature used a mutable default
    # (``env: dict = {}``) and then mutated it in place, which leaked PATH
    # into subsequent calls and into the caller's dict.
    env = dict(env) if env else {}
    if "PATH" not in env:
        env["PATH"] = os.environ["PATH"]

    # Prefix every remote command with huponexit (kill children on hangup)
    # and the exported environment variables.
    pretreat_cmd = [
        "shopt -s huponexit &&",
    ]
    for k, v in env.items():
        pretreat_cmd.append(
            "{key}={value} &&".format(
                key=k,
                value=v,
            )
        )

    master_addr = host_list[0]
    host_args = []
    for i, _ in enumerate(host_list):
        host_args.append(
            {
                "cmd": " ".join(
                    pretreat_cmd
                    + [
                        "python -m bagua.distributed.launch",
                        "--nproc_per_node={}".format(nproc_per_node),
                        "--nnodes={} --node_rank={}".format(len(host_list), i),
                        '--master_addr="{}"'.format(master_addr),
                        "--master_port={}".format(8124),
                        "$(which bagua_sys_perf) --model vgg16",
                    ]
                ),
            }
        )

    client = ParallelSSHClient(host_list, port=ssh_port)
    output = client.run_command(
        "%(cmd)s",
        host_args=host_args,
        shell="bash -xc",
        use_pty=True,  # The key configuration of process safe exit
        read_timeout=60,
    )

    speed_pattern = re.compile(
        r"Total img/sec on (\d+) (\S+)\(s\): (\d*\.\d+|\d+) \+-(\d*\.\d+|\d+)"
    )
    # Only the master host's output is parsed for the throughput line.
    host_out = output[0]
    m = None
    st = time.time()
    try:
        for line in host_out.stdout:
            print(line, flush=True)
            m = speed_pattern.search(line)
            if m:
                break
    except Timeout:
        print("Timeout 1, spend={}".format(time.time() - st))
    if m is None:
        # No speed line seen (timeout or crash); report zero throughput.
        # (The old unreachable ``assert m`` after this early return was
        # removed.)
        return (None, None, 0.0, None)

    ngpus = int(m.groups()[0])
    device = m.groups()[1]
    speed = float(m.groups()[2])
    speed_std = float(m.groups()[3])
    return (ngpus, device, speed, speed_std)
def autotune_system_hyperparameters(host_list, nproc_per_node, ssh_port):
    """Bayesian-search NCCL environment variables for the best throughput.

    Runs up to 100 benchmark iterations via :func:`sysperf`, feeding each
    measured speed back to the optimizer, then returns the best setting
    (the env-var dict with the highest mean speed across repeats).
    """
    def _sysperf(env={}):
        # NOTE(review): env default is mutable and sysperf mutates its env
        # argument (adds PATH) -- confirm this cross-call leak is harmless.
        result = sysperf(host_list, nproc_per_node, ssh_port, env=env)
        print(result)
        return result

    # Search space: 0 means "leave the variable unset".
    optim = BayesianOptimizer(
        {
            "NCCL_MIN_NCHANNELS": IntParam(
                val=0,  # 0 means no set
                space_dimension=(
                    0,
                    12,
                ),
            ),
            "NCCL_SOCKET_NTHREADS": IntParam(
                val=0,  # 0 means no set
                space_dimension=(
                    0,
                    8,
                ),
            ),
            "NCCL_NSOCKS_PERTHREAD": IntParam(
                val=0,  # 0 means no set
                space_dimension=(
                    0,
                    8,
                ),
            ),
            "nccl_buffsize_2p": IntParam(
                val=0,  # power of 2, 0 means no set
                space_dimension=(
                    0,
                    26,
                ),
            ),
        }
    )
    param_dict = {
        "NCCL_MIN_NCHANNELS": 0,
        "NCCL_SOCKET_NTHREADS": 0,
        "NCCL_NSOCKS_PERTHREAD": 0,
        "nccl_buffsize_2p": 0,
    }

    result_list = []
    for i in range(100):
        # Translate the optimizer's parameters into actual env vars:
        # drop zeros (unset) and expand the buffsize exponent.
        env_vars = copy.deepcopy(param_dict)
        for k, v in list(env_vars.items()):
            if v == 0:
                del env_vars[k]
        if "nccl_buffsize_2p" in env_vars:
            env_vars["NCCL_BUFFSIZE"] = 2 ** env_vars["nccl_buffsize_2p"]

        (_, _, speed, speed_std) = _sysperf(env=env_vars)
        result_list.append([copy.deepcopy(env_vars), speed, speed_std])

        # Report the observation, then ask for the next candidate.
        optim.tell(param_dict, speed)
        param_dict = optim.ask()

    result_list = sorted(result_list, key=lambda x: -x[1])
    print(result_list)

    # Average repeated measurements of identical settings before ranking.
    result_reduct = {}
    for (setting, speed, _) in result_list:
        key = tuple(sorted(setting.items()))
        if key not in result_reduct:
            result_reduct[key] = []
        result_reduct[key].append(speed)
    result_reduct = [
        [setting, sum(speed_list) / len(speed_list)]
        for (setting, speed_list) in result_reduct.items()
    ]
    result_reduct = sorted(result_reduct, key=lambda item: -item[1])
    print(result_reduct)
    # Best setting is returned as the (sorted key-value tuple) form.
    return result_reduct[0][0]
| StarcoderdataPython |
8135035 | #!/usr/bin/env python
import os
from urlparse import urlunparse
# MySQL server configuration
mysql_host = "localhost"
mysql_user = "root"
mysql_password = "<PASSWORD>"  # placeholder -- set the real password at deploy time
mysql_db = "yagra"
# Length in bytes of the password for cookie
random_password_length = 32
# The time in seconds password cookies expire
random_password_expires = 60
# Length in bytes of salt
salt_length = 32
# Length in bytes of activation token
activation_token_length = 32
# Length in bytes of password reset token
password_reset_token_length = 32
# The time in seconds password reset tokens expire
password_reset_token_expires = 900
# Server address
my_scheme = "http"
my_domain = "test.jamis.xyz"
my_path = "cgi-bin/"
my_entire_url = urlunparse((my_scheme, my_domain, my_path, None, None, None))
# Sender of activation emails and password reset emails
email_from = "yagra_service@%s" % my_domain
# SMTP server
smtp_host = "localhost"
# Max size in bytes of uploaded image
image_max_size = 3 * 1024 * 1024
# HTML templates directory
html_templates_dir = "html_templates/"
# Default image path
default_image = os.path.join(html_templates_dir, "default.png")
blank_image = os.path.join(html_templates_dir, "blank.png")
| StarcoderdataPython |
3207118 | from tamtam import Bot, Dispatcher, run_poller
from tamtam.types import Message, BotStarted
from tamtam.dispatcher.filters import MessageFilters
bot = Bot("put token")  # replace "put token" with a real bot token
dp = Dispatcher(bot)


@dp.bot_started()
async def new_user(upd: BotStarted):
    # Greet a user who just started a chat with the bot.
    await upd.respond(f"Hello! {upd.user.name}.\nNice to see you!")


@dp.message_handler(MessageFilters(commands=["start"]))
async def cmd_start(message: Message):
    # Explicit /start command handler.
    await message.reply(f"Hey there, {message.sender.name}! This is echo-bot.")


@dp.message_handler()
async def echo(message: Message):
    # Fallback handler: echo any other message text back to the sender.
    await message.reply(message.body.text)


# Start long-polling; blocks until interrupted.
run_poller()
| StarcoderdataPython |
# Contour plot of f(x, y) = 2x^3 - 3x^2 - 6xy(x - y - 1) with its
# critical points marked.
#
# Fixes: the script used `np`, `plt` and a bare `plot(...)` but only
# imported `numpy` and `matplotlib` without those aliases, so every line
# after the imports raised NameError.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(6, 5))
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
ax = fig.add_axes([left, bottom, width, height])

start, stop, n_values = -2, 2, 800
x_vals = np.linspace(start, stop, n_values)
y_vals = np.linspace(start, stop, n_values)
X, Y = np.meshgrid(x_vals, y_vals)

# define the function to be plotted
Z = 2*X**3 - 3*X**2 - 6*X*Y*(X - Y - 1)

cp = plt.contourf(X, Y, Z)
plt.colorbar(cp)

# plot critical points as white stars (previously bare `plot(...)`,
# an undefined name)
ax.plot(0, 0, '*w')
ax.plot(0, -1, '*w')
ax.plot(1, 0, '*w')
ax.plot(-1, -1, '*w')

ax.set_title('Contour Plot')
ax.set_xlabel('x (cm)')
ax.set_ylabel('y (cm)')
plt.show()
| StarcoderdataPython |
6646681 | # Generated by Django 3.0.3 on 2020-04-21 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Normalize number/currency fields on the QuickBooks export models.

    Makes each *_number field a unique CharField(max_length=255) and each
    currency field a CharField(max_length=255) with help_text.
    Auto-generated by Django; do not edit field definitions by hand.
    """

    dependencies = [
        ('quickbooks_online', '0002_cheque_chequelineitem_creditcardpurchase_creditcardpurchaselineitem_journalentry_journalentrylineite'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bill',
            name='bill_number',
            field=models.CharField(max_length=255, unique=True),
        ),
        migrations.AlterField(
            model_name='bill',
            name='currency',
            field=models.CharField(help_text='Bill Currency', max_length=255),
        ),
        migrations.AlterField(
            model_name='cheque',
            name='cheque_number',
            field=models.CharField(max_length=255, unique=True),
        ),
        migrations.AlterField(
            model_name='cheque',
            name='currency',
            field=models.CharField(help_text='Cheque Currency', max_length=255),
        ),
        migrations.AlterField(
            model_name='creditcardpurchase',
            name='credit_card_purchase_number',
            field=models.CharField(max_length=255, unique=True),
        ),
        migrations.AlterField(
            model_name='creditcardpurchase',
            name='currency',
            field=models.CharField(help_text='CreditCardPurchase Currency', max_length=255),
        ),
        migrations.AlterField(
            model_name='journalentry',
            name='currency',
            field=models.CharField(help_text='JournalEntry Currency', max_length=255),
        ),
        migrations.AlterField(
            model_name='journalentry',
            name='journal_entry_number',
            field=models.CharField(max_length=255, unique=True),
        ),
    ]
| StarcoderdataPython |
69238 | <filename>api/users/migrations/0007_govuser_default_queue.py
# Generated by Django 2.2.11 on 2020-04-29 15:24
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Add ``GovUser.default_queue``, a UUIDField defaulting to the
    all-zeros-but-one sentinel UUID. Auto-generated by Django."""

    dependencies = [
        ("users", "0006_auto_20200424_1136"),
    ]

    operations = [
        migrations.AddField(
            model_name="govuser",
            name="default_queue",
            field=models.UUIDField(default=uuid.UUID("00000000-0000-0000-0000-000000000001")),
        ),
    ]
| StarcoderdataPython |
5117329 | from math import inf
from hypothesis import assume, given
from hypothesis.strategies import composite, integers, lists, permutations
from algorithms.structures.tree.avl_tree import AVLTree
from algorithms.structures.tree.binary_search_tree import BinarySearchTree
from algorithms.structures.tree.red_black_tree import BLACK, RED, RedBlackTree
def bst_invariant(t, min_key=-inf, max_key=inf, parent=None):
    """Return True iff *t* is a structurally valid BST subtree.

    Checks parent back-links and that every key lies within the
    (inclusive) bounds inherited from its ancestors. A node with a None
    key is only valid as a completely empty, detached node.
    """
    if t is None:
        return True
    if t.parent is not parent:
        return False
    if t.key is None:
        # Empty sentinel: must have no children and no parent.
        return t.left is None and t.right is None and t.parent is None
    # for some types of BST left child may be equal to the right child
    # e.g. RedBlackTree.from_list([1,0,0]) --> (0, (0, 1)),
    # hence the inclusive bounds.
    if not (min_key <= t.key <= max_key):
        return False
    return (bst_invariant(t.left, min_key, t.key, t)
            and bst_invariant(t.right, t.key, max_key, t))
def check_rb_tree_properties(tree, root=True):
    """Assert the red-black invariants on *tree*; return its black-height.

    Invariants checked: the root is black, no red node has a red child,
    and every root-to-leaf path contains the same number of black nodes.
    None leaves count as one black node.
    """
    if not tree:
        # leaf is black
        return 1
    if root:
        assert tree.color is BLACK
    if tree.color is RED:
        # a red node may only have black (or leaf) children
        if tree.left:
            assert tree.left.color is BLACK
        if tree.right:
            assert tree.right.color is BLACK
    num_blacks = check_rb_tree_properties(tree.right, root=False)
    # both subtrees must have the same black-height
    assert check_rb_tree_properties(tree.left, root=False) == num_blacks
    if tree.color is BLACK:
        return num_blacks + 1
    return num_blacks
def check_avl_properties(tree):
    """Assert AVL balance and height bookkeeping; return *tree*'s height.

    Every node's subtree heights may differ by at most 1 and the cached
    ``height`` attribute must equal the recomputed height. None has
    height 0.
    """
    if tree is None:
        return 0
    heights = [check_avl_properties(child) for child in (tree.left, tree.right)]
    # balance factor must be -1, 0 or +1
    assert abs(heights[0] - heights[1]) < 2
    # cached height must match the recomputed one
    assert tree.height == max(heights) + 1
    return tree.height
def is_left_child(parent, child):
    """True iff *child* is linked as the left child of *parent*
    (parent link and back-link must both agree)."""
    return child.parent is parent and parent.left is child
def is_right_child(parent, child):
    """True iff *child* is linked as the right child of *parent*
    (parent link and back-link must both agree)."""
    return child.parent is parent and parent.right is child
def test_bst():
    """Smoke-test BST construction, membership, traversal and equality."""
    # t2 differs from t only in the last inserted key (3 vs 2), so the
    # resulting trees must compare unequal.
    t = [10, 20, 5, 1, 0, 2]
    t2 = [10, 20, 5, 1, 0, 3]
    bst_t = BinarySearchTree.from_list(t)
    assert bst_invariant(bst_t)
    assert [n.key for n in bst_t.breadth_first()] == [10, 5, 20, 1, 0, 2]
    assert 10 in bst_t
    assert 13 not in bst_t
    assert bst_t == BinarySearchTree.from_list(t)
    assert bst_t != BinarySearchTree.from_list(t2)


def test_bst_sort():
    """Following successor() from the minimum must yield keys in sorted
    order."""
    t = [20, 15, 25, 5, 17, 23, 30, 3, 10, 16, 19, 27, 31, 1, 4, 7, 12]
    sorted1 = sorted(t)
    bst_t = BinarySearchTree.from_list(t)
    u = bst_t.search(sorted1[0])
    for i in sorted1:
        assert u.key == i
        u = u.successor()
    assert bst_invariant(bst_t)


def test_bst_delete():
    """Deleting assorted nodes (including the root) must keep the BST
    valid; deleting a missing key must be a no-op."""
    t = [20, 15, 25, 5, 17, 23, 30, 3, 10, 16, 19, 27, 31, 1, 4, 7, 12]
    bst_t = BinarySearchTree.from_list(t)
    for i in [5, 20, 31, 1, 17, 30]:
        assert i in bst_t
        assert bst_invariant(bst_t)
        bst_t.delete(i)
        assert i not in bst_t
    # 44 was never inserted; delete must not raise
    bst_t.delete(44)
    assert bst_invariant(bst_t)
@given(lists(integers()))
def test_bst_invariant(t):
    # Property: any insertion order yields a valid BST.
    bst = BinarySearchTree.from_list(t)
    assert bst_invariant(bst)


@given(lists(integers(), min_size=2, max_size=20, unique=True), integers(0, 1))
def test_bst_invariant_rotations(t, left):
    """Property: rotating any node preserves the BST invariant, and a
    rotation followed by its inverse restores the original tree."""
    bst = BinarySearchTree.from_list(t)
    # try rotate every node
    for node in bst.depth_first():
        # check that rotation is possible for the current node
        if left and node.right is None or not left and node.left is None:
            continue
        before_rotation = [n.key for n in node.breadth_first()]
        parent = node.parent
        node.rotate(bool(left))
        assert bst_invariant(
            node, parent=parent
        ), "Tree rotation must not break BST invariant"
        node.rotate(not bool(left))
        assert before_rotation == [
            n.key for n in node.breadth_first()
        ], "Symmetric rotations should result in the same tree"


def test_left_rotate():
    """Left-rotating x (key 5) promotes its right child y (key 7).

    NOTE(review): the assertions below reuse the original variable names
    for the post-rotation positions -- presumably rotate() swaps node
    contents in place rather than relinking objects; verify against the
    BinarySearchTree.rotate implementation.
    """
    z = BinarySearchTree(0)
    x = z.add(5)
    y = x.add(7)
    a = x.add(3)
    b = y.add(6)
    c = y.add(8)
    assert x.rotate(left=True) is y
    assert z.key == 0
    assert is_right_child(z, x) and x.key == 7
    assert is_left_child(x, y) and y.key == 5
    assert is_left_child(y, a) and a.key == 3
    assert is_right_child(y, b) and b.key == 6
    assert is_right_child(x, c) and c.key == 8


def test_right_rotate():
    # Mirror image of test_left_rotate: promote the left child.
    z = BinarySearchTree(0)
    x = z.add(5)
    y = x.add(3)
    a = x.add(8)
    b = y.add(2)
    c = y.add(4)
    assert x.rotate(left=False) is y
    assert z.key == 0
    assert is_right_child(z, x) and x.key == 3
    assert is_right_child(x, y) and y.key == 5
    assert is_left_child(y, c) and c.key == 4
    assert is_right_child(y, a) and a.key == 8
    assert is_left_child(x, b) and b.key == 2
def test_rb_tree():
    # Duplicate-heavy input: tree must stay a valid red-black BST.
    tree = RedBlackTree.from_list([1, 1, 1, 0, 0, 0, 0, 0])
    assert bst_invariant(tree)
    check_rb_tree_properties(tree)


@given(lists(integers()))
def test_rb_tree_invariant(t):
    # Property: any insertion sequence keeps red-black invariants.
    tree = RedBlackTree.from_list(t)
    assert bst_invariant(tree)
    check_rb_tree_properties(tree)


def test_rb_deletion():
    """Fixed deletion sequence exercising the red-black rebalance cases."""
    insertions = [7, 3, 18, 10, 22, 8, 11, 26, 2, 6, 13]
    deletions = [18, 11, 3, 10, 22, 6, 2, 7, 26]
    tree = RedBlackTree.from_list(insertions)
    for k in deletions:
        tree.delete(k)
        assert bst_invariant(tree)
        check_rb_tree_properties(tree)
        assert k not in tree


@composite
def insert_delete_queries(draw):
    """Hypothesis strategy: all insertions (shuffled) followed by a
    shuffled deletion of at least roughly half of the inserted keys."""
    insertions = draw(lists(integers()))
    insertions = list(set(insertions))
    assume(len(insertions) > 0)
    n_deletions = draw(
        integers(min(len(insertions), len(insertions) // 2 + 1), len(insertions))
    )
    deletions = draw(permutations(insertions))[:n_deletions]
    queries = [("i", x) for x in draw(permutations(insertions))] + [
        ("d", x) for x in deletions
    ]
    return queries


@given(insert_delete_queries())
def test_rb_queries(queries):
    # Property: invariants hold after any insert/delete query sequence.
    tree = RedBlackTree(queries[0][1])
    for t, q in queries[1:]:
        if t == "i":
            tree.add(q)
        else:
            tree.delete(q)
            assert q not in tree
    assert bst_invariant(tree)
    check_rb_tree_properties(tree)
@given(lists(integers()))
def test_avl_tree_invariant(t):
    # Property: any insertion sequence keeps AVL balance + heights.
    tree = AVLTree.from_list(t)
    assert bst_invariant(tree)
    check_avl_properties(tree)


@given(insert_delete_queries())
def test_avl_queries(queries):
    # Property: invariants hold after any insert/delete query sequence.
    tree = AVLTree(queries[0][1])
    for t, q in queries[1:]:
        if t == "i":
            tree.add(q)
        else:
            tree.delete(q)
            assert q not in tree
    assert bst_invariant(tree)
    check_avl_properties(tree)
| StarcoderdataPython |
12843438 | <reponame>Tallisado/DbBot<gh_stars>1-10
from os.path import exists
from sys import argv
from dbbot import CommandLineOptions
class WriterOptions(CommandLineOptions):
    """Command-line options for the report writer.

    Extends the base options with a mandatory ``-o/--output`` HTML path
    and validates that the input database file exists.
    """

    @property
    def output_file_path(self):
        # Destination path of the generated HTML report.
        return self._options.output_file_path

    def _add_parser_options(self):
        super(WriterOptions, self)._add_parser_options()
        self._parser.add_option('-o', '--output',
                                dest='output_file_path',
                                help='path to the resulting html file',
                                )

    def _get_validated_options(self):
        # No arguments at all: print usage and exit.
        if len(argv) < 2:
            self._exit_with_help()
        options = super(WriterOptions, self)._get_validated_options()
        if not options.output_file_path:
            self._parser.error('output html filename is required')
        if not exists(options.db_file_path):
            self._parser.error('database %s not exists' % options.db_file_path)
        return options
| StarcoderdataPython |
3267093 | import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def customlegend(ax, labels: "List[String]", colors : "List of color names", ncol = 1,
                 fontsize = 6, linewidth = 4, framealpha = 0, loc = "best", fontweight = "bold",
                 columnspacing = 0, **kwargs):
    """Create a legend whose label texts are colored to match their lines.

    Thin alias for :func:`Custom_Legend`; see that function for the full
    parameter documentation. Extra keyword arguments are forwarded to
    ``pyplot.legend()``.

    Returns the Matplotlib Legend object produced by Custom_Legend.
    """
    # Forward everything unchanged to the real implementation.
    return Custom_Legend(ax, labels, colors, ncol=ncol, fontsize=fontsize,
                         linewidth=linewidth, framealpha=framealpha, loc=loc,
                         fontweight=fontweight, columnspacing=columnspacing,
                         **kwargs)
def Custom_Legend(ax, labels: "List[String]", colors : "List of color names", ncol = 1,
                  fontsize = 6, linewidth = 4, framealpha = 0, loc = "best", fontweight = "bold",
                  columnspacing = 0, **kwargs):
    """
    Create a legend with proxy line handles, coloring each label's text
    to match its line.

    Parameters
    ----------
    ax : MPL Ax Object
        Axes to attach the legend to.
    labels : "List[Strings]"
        Legend entry texts; must be the same length as *colors*.
    colors : "List of color names"
        One Matplotlib color per label, used for both line and text.
    ncol : TYPE, optional
        Number of legend columns. The default is 1.
    fontsize : TYPE, optional
        Label font size. The default is 6.
    linewidth : TYPE, optional
        Width of the proxy legend lines. The default is 4.
    framealpha : TYPE, optional
        Legend frame opacity (0 = invisible frame). The default is 0.
    loc : TYPE, optional
        Legend placement. The default is "best".
    fontweight : TYPE, optional
        Weight applied to every label text. The default is "bold".
    columnspacing : TYPE, optional
        Spacing between legend columns. The default is 0.
    **kwargs: Additional keyword arguments to be passed to pyplot.legend()

    Raises
    ------
    RuntimeError
        If *labels* and *colors* have different lengths.

    Returns
    -------
    leg : Matplotlib Legend Object
        The created legend, with colored label texts.
    """
    if len(labels) != len(colors):
        raise RuntimeError("Number of Labels should match number of Colors.")
    # Build invisible proxy lines so the legend has handles even when the
    # plotted artists are not available here.
    custom_lines = []
    for color in colors:
        custom_lines.append(Line2D([0], [0], color=color, lw=linewidth))
    leg = ax.legend(custom_lines, labels, fontsize = fontsize,
                    framealpha = framealpha, loc = loc, ncol = ncol, columnspacing = columnspacing, **kwargs)
    # Color each label's text to match its line.
    leg_text = leg.get_texts()
    for i, text in enumerate(leg_text):
        text.set_color(colors[i])
        text.set_weight(fontweight)
    return leg
| StarcoderdataPython |
1684428 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Yo helpers package."""
import datetime
import json
import sys
import grequests
import requests
from flask import current_app, g, request
from mongoengine import DoesNotExist
from requests.exceptions import RequestException, Timeout
from .queries import (clear_get_favorite_yos_cache,
clear_get_unread_yos_cache,
clear_get_yo_count_cache, clear_get_yo_cache,
clear_get_yo_token_cache,
get_last_broadcast, get_yo_by_id, get_public_dict_for_yo_id)
from ..ab_test import log_ab_test_data
from ..async import async_job
from ..core import s3, mixpanel_yoapp, log_to_slack, sendgrid
from ..errors import (APIError, YoTokenExpiredError, YoTokenInvalidError,
YoTokenUsedError)
from ..headers import get_header_by_id, get_header
from ..helpers import get_usec_timestamp
from ..models import Yo
from ..notification_endpoints import endpoint_support_from_useragent
from ..notifications import notify_yo_status_update, send_silent_yo_opened
from ..permissions import assert_account_permission
from ..services import low_rq, redis_pubsub
from ..urltools import UrlHelper
from ..models.payload import YoPayload
from yoapi.accounts import _get_user
from yoapi.constants.yos import UNREAD_YOS_FETCH_LIMIT
from yoapi.contacts import get_contact_pair
from yoapi.groups import get_group_members
from yoapi.localization import get_region_by_name
YO_BUFFER_MAX = 10000
@async_job(rq=low_rq)
def acknowledge_yo_received(yo_id, status=None, from_push=False):
    """Acknowledges a Yo with the specified status.

    Runs on the low-priority queue. Only upgrades status (received ->
    read/dismissed); lower-priority statuses are ignored. Also updates
    contact last-yo state, clears caches, logs analytics and notifies
    the sender of the status change.

    status: One of - received (default), read, dismissed.
    from_push: Was this triggered by interacting with a notification?
    """
    try:
        yo = get_yo_by_id(yo_id)
    except DoesNotExist:
        raise APIError('Yo not found')

    if yo.app_id == 'co.justyo.yopolls':
        clear_get_unread_yos_cache(yo.recipient.user_id, UNREAD_YOS_FETCH_LIMIT, app_id='co.justyo.yopolls')

    status = status or 'received'
    # TODO: Find a nicer way to do this. Perhaps with a permission assert.
    is_current_users_yo = yo.recipient and yo.recipient == g.identity.user
    # Status only ever moves forward (received < read/dismissed).
    current_priority = Yo.priority_for_status(yo.status)
    new_priority = Yo.priority_for_status(status)
    needs_update = new_priority > current_priority
    if is_current_users_yo and needs_update:
        if yo.sender and yo.recipient:
            if status == 'received':
                # NOTE(review): `and` binds tighter than `or`, so this reads
                # (yo.parent and yo.parent.sender.is_service) or
                # yo.sender.is_service -- confirm that grouping is intended.
                if yo.parent and yo.parent.sender.is_service or yo.sender.is_service:
                    mixpanel_yoapp.track(yo.recipient.user_id, 'News Yo Received')
                else:
                    mixpanel_yoapp.track(yo.recipient.user_id, 'Friend Yo Received')
                # Update both directions of the contact pair.
                contact_object = get_contact_pair(yo.sender, yo.recipient)
                if contact_object:
                    contact_object.last_yo_state = 'Friend received'
                    contact_object.last_yo = get_usec_timestamp()
                    contact_object.save()
                contact_object = get_contact_pair(yo.recipient, yo.sender)
                if contact_object:
                    contact_object.last_yo_state = 'You received'
                    contact_object.last_yo = get_usec_timestamp()
                    contact_object.save()
            elif status == 'read':
                # Analytics tracking is best-effort only.
                try:
                    if yo.parent and yo.parent.sender.is_service or yo.sender.is_service:
                        mixpanel_yoapp.track(yo.recipient.user_id, 'News Yo Opened')
                    else:
                        mixpanel_yoapp.track(yo.recipient.user_id, 'Friend Yo Opened')
                except:
                    pass
                contact_object = get_contact_pair(yo.sender, yo.recipient)
                if contact_object:
                    contact_object.last_yo_state = 'Friend opened'
                    contact_object.last_yo = get_usec_timestamp()
                    contact_object.save()
                contact_object = get_contact_pair(yo.recipient, yo.sender)
                if contact_object:
                    contact_object.last_yo_state = 'You opened'
                    contact_object.last_yo = get_usec_timestamp()
                    contact_object.save()

        yo.status = status
        yo.save()
        clear_get_yo_cache(yo_id)
        clear_get_unread_yos_cache(str(yo.recipient), UNREAD_YOS_FETCH_LIMIT)
        flattened_yo = yo.get_flattened_yo()
        support_dict = endpoint_support_from_useragent(request)
        payload = YoPayload(yo, support_dict)
        event_data = {'event': 'yo_acknowledged',
                      'yo_id': flattened_yo.yo_id,
                      'recipient': flattened_yo.recipient.username,
                      'sender': flattened_yo.sender.username,
                      'sender_in_store': bool(flattened_yo.sender.in_store),
                      'status': status,
                      'from_push': from_push,
                      'yo_type': payload.payload_type,
                      'group_yo': bool(flattened_yo.is_group_yo),
                      'sender_type': flattened_yo.sender.user_type,
                      'recipient_type': flattened_yo.recipient.user_type,
                      'yo_header': payload.get_push_text(),
                      'broadcast': bool(flattened_yo.broadcast)}
        current_app.log_analytics(event_data)
        header = get_header(yo.recipient, payload.payload_type,
                            bool(flattened_yo.group))
        if header:
            log_ab_test_data(yo.recipient, 'notification', header=header)

        # Group yos and broadcast yos should not notify of status changes.
        should_notify = not bool(yo.parent and yo.parent.has_children())
        should_notify = should_notify and not bool(yo.has_children())
        if should_notify:
            notify_yo_status_update(yo)
        if status == 'read':
            send_silent_yo_opened(yo)
def assert_valid_yo_token(yo_token):
    """Verifies a token against what is stored.

    Raises YoTokenInvalidError / YoTokenUsedError / YoTokenExpiredError
    on failure. On success, marks both the auth token and the yo token
    as used (single-use semantics) and clears the token cache.
    """
    # Check that the auth_token exists.
    if not yo_token.auth_token:
        raise YoTokenInvalidError
    # Check if the token has already been used.
    if yo_token.auth_token.used:
        raise YoTokenUsedError
    # Check if the token has expired.
    # The default expiration time should be one day.
    if yo_token.auth_token.expires < get_usec_timestamp():
        raise YoTokenExpiredError

    # Consume the token: record the use time and persist.
    yo_token.auth_token.used = get_usec_timestamp()
    yo_token.used = True
    yo_token.save()
    clear_get_yo_token_cache(yo_token.auth_token.token)
def favorite_yo(user_id, yo_id, favorite=True):
    """Allows users to favorite and unfavorite a yo they received
    IF they are indeed the recipient.

    Silently does nothing when *user_id* is not the Yo's recipient.
    Raises APIError when the Yo does not exist.
    """
    try:
        yo = get_yo_by_id(yo_id)
    except DoesNotExist:
        raise APIError('Yo not found')

    if yo.recipient and yo.recipient.user_id == user_id:
        yo.is_favorite = favorite
        yo.save()
        # Both the single-yo cache and the user's favorites list are stale.
        clear_get_yo_cache(yo_id)
        clear_get_favorite_yos_cache(user_id)
def increment_count_in(user):
    """Increment the received-Yo counter for a user.

    The reason we need an explicit function for this is because we
    increment it in the database AND clear the cached count.
    """
    user.update(inc__count_in=1)
    clear_get_yo_count_cache(user)
def increment_count_out(user):
    """Increment the sent-Yo counter for a user.

    Unlike increment_count_in, only the database counter is updated here;
    no cache is cleared.
    """
    user.update(inc__count_out=1)
def construct_yo(sender=None, recipients=None, sound=None, link=None,
                 location=None, broadcast=False, ignore_permission=False,
                 header=None, link_content_type=None, origin_yo=None,
                 is_group_yo=None, context=None, cover_datauri=None,
                 photo_datauri=None, yo_token=None, context_id=None,
                 response_pair=None, oauth_client=None, reply_to=None,
                 text=None, left_link=None, right_link=None, is_poll=False,
                 app_id=None, is_push_only=False,
                 region_name=None):
    """Normalize inputs, build a Yo document, validate and persist it.

    Handles permission checks, attachment uploads, link shortening and
    the three construction variants: broadcast/group, multi-recipient
    and single-recipient. Returns the saved Yo. Raises APIError on a
    blocked/invalid link or malformed location.
    """
    if not ignore_permission:
        if sender:
            assert_account_permission(sender, 'No permission to send Yo')
        if header and header.user:
            assert_account_permission(header.user,
                                      'No permission to use header')
    sender = sender if sender else None
    sound = sound if sound else 'yo.mp3'
    if sound == 'silent':
        sound = ''
    if region_name:
        region = get_region_by_name(region_name)
    else:
        region = None

    # We separate the link here to prevent validation errors
    # if the link is blank.
    link = link if link else None
    # We separate the location here to prevent validation errors
    # if the location is blank.
    location = location if location else None

    # This is migrating from old parameter name 'context' to
    # new parameter name 'text'
    # In the old scheme, 'context' was what was shown in the push header
    # In the new scheme, 'text' is shown, and 'context' is never shown
    # but is used by the developers alone
    if context and not text:
        text = context
    context = context if context else None
    text = text if text else None

    left_link = left_link if left_link else None
    right_link = right_link if right_link else None

    # We separate the context id here to prevent validation errors
    # if the context id is blank.
    context_id = context_id if context_id else None
    response_pair = response_pair if response_pair else None

    # Split location into a tuple so we can store it as a GeoPointField.
    # Accepts "lat;lon" (with ',' as decimal separator) or "lat,lon".
    if location:
        if ';' in location and ',' in location:
            location_parts = location.split(';')
            location_parts[0] = location_parts[0].replace(',', '.')
            location_parts[1] = location_parts[1].replace(',', '.')
        elif ';' in location:
            location_parts = location.split(';')
        elif ',' in location:
            location_parts = location.split(',')
        else:
            # This request must have somehow skipped form validation
            raise APIError('Improper location format. Use: 0.0, 0.0')
        location = (float(location_parts[0]), float(location_parts[1]))

    cover = None
    photo = None
    if cover_datauri:
        cover = s3.upload_photo(cover_datauri, sender)

    # NOTE(review): this default makes the `photo_datauri.mimetype`
    # fallback below unreachable (link_content_type is always truthy by
    # then) -- confirm the intended ordering.
    link_content_type = link_content_type or 'text/html'
    if photo_datauri:
        link_content_type = link_content_type or photo_datauri.mimetype
        photo = s3.upload_photo(photo_datauri, sender)

    # if there is a origin yo, replace the new yo's parameters
    if origin_yo:
        sound = origin_yo.sound
        link = origin_yo.link
        context = origin_yo.context
        text = origin_yo.text
        cover = origin_yo.cover
        photo = origin_yo.photo
        link_content_type = origin_yo.link_content_type
        short_link = origin_yo.short_link
        location = origin_yo.location
        left_link = origin_yo.left_link
        right_link = origin_yo.right_link

    # Raise an APIError if the hostname has been blocked
    # Fixes issue #17
    # NOTE(review): this discards the origin_yo.short_link assigned just
    # above; a short link is only recreated below for broadcasts --
    # confirm that is intended.
    short_link = None
    if link:
        try:
            UrlHelper(link).raise_for_hostname()
        except ValueError:
            raise APIError('Invalid URL')
        # Prepare link
        urlhelper = UrlHelper(link, bitly=sender.bitly)
        link = urlhelper.get_url()
        if broadcast:
            short_link = urlhelper.get_short_url()

    # If the Yo is a broadcast then we need special different parameters from
    # a normal one.
    if broadcast or is_group_yo:
        recipient = None
        if is_group_yo and recipients and recipients[0].is_group:
            recipient = recipients[0]
            # send a silent push for group yos if the group has sent a yo
            # in the past hour. Pseudo users will still receive a normal SMS.
            one_hour_ago = get_usec_timestamp(datetime.timedelta(hours=-1))
            if recipient.last_yo_time > one_hour_ago:
                sound = ''
        yo = Yo(sender=sender,
                broadcast=broadcast or None,
                context=context,
                context_id=context_id,
                cover=cover,
                photo=photo,
                is_group_yo=bool(is_group_yo) or None,
                link=link,
                link_content_type=link_content_type,
                location=location,
                origin_yo=origin_yo,
                sent_location=bool(location) or None,
                recipient=recipient,
                short_link=short_link,
                header=header,
                sound=sound,
                response_pair=response_pair,
                oauth_client=oauth_client,
                reply_to=reply_to,
                text=text,
                left_link=left_link,
                right_link=right_link,
                is_poll=is_poll,
                region_name=region,
                app_id=app_id,
                is_push_only=is_push_only)
        if recipient and recipient.is_group:
            # Collect phone numbers of pseudo (SMS-only) group members.
            members = get_group_members(recipient)
            yo.not_on_yo = []
            for member in members:
                if member.is_pseudo:
                    yo.not_on_yo.append(member.phone)
    elif len(recipients) > 1:
        # Multi-recipient (fan-out) yo: stored pending with the full list.
        yo = Yo(sender=sender,
                context=context,
                context_id=context_id,
                cover=cover,
                photo=photo,
                link=link,
                link_content_type=link_content_type,
                location=location,
                origin_yo=origin_yo,
                sent_location=bool(location) or None,
                recipients=recipients,
                recipient_count=1,
                sound=sound,
                header=header,
                status='pending',
                yo_token=yo_token,
                response_pair=response_pair,
                oauth_client=oauth_client,
                reply_to=reply_to,
                text=text,
                left_link=left_link,
                right_link=right_link,
                is_poll=is_poll,
                region_name=region,
                app_id=app_id,
                is_push_only=is_push_only)
    else:
        # Single-recipient yo.
        # NOTE(review): this branch passes ``region=region`` while the two
        # branches above pass ``region_name=region`` -- verify which field
        # name the Yo model actually declares.
        yo = Yo(sender=sender,
                context=context,
                context_id=context_id,
                cover=cover,
                photo=photo,
                link=link,
                link_content_type=link_content_type,
                location=location,
                origin_yo=origin_yo,
                sent_location=bool(location) or None,
                recipient=recipients[0],
                recipient_count=1,
                sound=sound,
                header=header,
                status='pending',
                yo_token=yo_token,
                response_pair=response_pair,
                oauth_client=oauth_client,
                reply_to=reply_to,
                text=text,
                left_link=left_link,
                right_link=right_link,
                is_poll=is_poll,
                region=region,
                app_id=app_id,
                is_push_only=is_push_only)

    # We use to check if the payload was too large but instead just
    # check the link and shorten it if necessary.
    if yo.link and not yo.short_link and len(link) > 512:
        yo.short_link = UrlHelper(link).get_short_url()

    # Don't save the yo until we know it passes validation.
    yo.save()
    return yo
def construct_auto_follow_yo(user, auto_follow_user):
    """Build the welcome Yo sent to *user* on behalf of an auto-followed
    account.

    Uses the account's configured welcome link, falling back to its most
    recent broadcast's link. Returns None when no link is available.
    """
    if auto_follow_user.welcome_link:
        link = auto_follow_user.welcome_link
    else:
        last_yo = get_last_broadcast(auto_follow_user,
                                     ignore_permission=True)
        # Prefer the bitly link to increase brands ctr
        link = last_yo.short_link or last_yo.link if last_yo else None

    if not link:
        return None

    yo = construct_yo(sender=auto_follow_user, recipients=[user], link=link,
                      ignore_permission=True)
    return yo
def construct_first_yo(user, first_yo_from):
    """Build the two onboarding Yos (a link yo and a location yo) sent to
    a brand-new user; returns the ``(yo_link, yo_location)`` pair."""
    first_yo_link = current_app.config.get('FIRST_YO_LINK')
    first_yo_location = current_app.config.get('FIRST_YO_LOCATION')
    first_yo_delay = current_app.config.get('FIRST_YO_DELAY')
    # FIRST_YO_DELAY is a "link_secs, location_secs" string.
    first_yo_delay = first_yo_delay.replace(' ', '').split(',')
    # NOTE(review): these two delay values are parsed but never used in
    # this function -- presumably the caller schedules the sends; confirm.
    first_yo_link_delay = int(first_yo_delay[0])
    first_yo_location_delay = int(first_yo_delay[1])

    # Hard-coded header documents for the onboarding pushes.
    location_header = get_header_by_id('54de9ecba17351c1d85a55aa')
    link_header = get_header_by_id('54dd6939a17351c1d859692e')
    yo_link = construct_yo(sender=first_yo_from, recipients=[user],
                           link=first_yo_link, ignore_permission=True,
                           header=link_header, link_content_type='text/html')
    yo_location = construct_yo(sender=first_yo_from, recipients=[user],
                               location=first_yo_location,
                               ignore_permission=True, header=location_header)
    return yo_link, yo_location
def get_params_for_callback(sender_id, yo_id):
    """Assemble the parameter dict delivered to a subscriber callback /
    pub-sub channel for the given Yo."""
    sender = _get_user(sender_id)
    yo = get_yo_by_id(yo_id)

    # Parameters we want to attach.
    params = {'username': sender.username,
              'display_name': sender.display_name,
              'user_ip': request.remote_addr}
    if yo.link:
        params.update({'link': yo.link})
    if yo.location:
        location_str = '%s;%s' % (yo.location[0], yo.location[1])
        params.update({'location': location_str})
    if yo.context:
        params.update({'context': yo.context})

    if yo.reply_to and yo.reply_to.text:
        # Direct reply to a text yo: include the original and, for
        # response-pair yos, which side was chosen.
        params.update({'reply_to': get_public_dict_for_yo_id(yo.reply_to.yo_id)})
        if yo.text and yo.reply_to.response_pair:
            # Map the raw response value back to the human-readable reply.
            reply_text = yo.reply_to.left_reply if yo.text == yo.reply_to.response_pair.split('.')[
                0] else yo.reply_to.right_reply
            params.update({
                'reply': {
                    'sender_city': sender.city or sender.region_name,
                    'text': reply_text
                }})
        if yo.reply_to.sender.parent:
            params.update({'publisher_username': yo.reply_to.sender.parent.username})
    elif yo.reply_to and yo.reply_to.parent and yo.reply_to.parent.text:
        # Reply to a child of a broadcast: report the broadcast parent.
        params.update({'reply_to': get_public_dict_for_yo_id(yo.reply_to.parent.yo_id)})
        if yo.reply_to.parent.sender.parent:
            params.update({'publisher_username': yo.reply_to.parent.sender.parent.username})
    return params
@async_job(rq=low_rq)
def trigger_callback(sender_id, callback, yo_id):
    """Augment the callback URL and trigger it.

    POSTs JSON when the payload contains reply data, otherwise GETs with
    query parameters. On failure, emails the recipient (when a URL was
    built) and logs to Slack.
    """
    params = get_params_for_callback(sender_id, yo_id)
    try:
        # Set stream to true so that we don't actually download the response
        # Similarly, we set connection close so that the connection doesn't
        # stay open.
        # Since get requests have issues with emojis, all new callbacks that need
        # reply or reply_to will be in POST
        url = None
        if params.get('reply') or params.get('reply_to'):
            requests.post(callback, data=json.dumps(params),
                          timeout=3,
                          stream=True,
                          headers={'Connection': 'close',
                                   'Content-type': 'application/json'},
                          verify=False)
        else:
            # Legacy GET callbacks never received these fields.
            params.pop('display_name')
            if params.get('context'):
                params.pop('context')
            helper = UrlHelper(callback, params=params)
            url = helper.get_url()
            requests.get(url,
                         timeout=3,
                         stream=True,
                         headers={'Connection': 'close'},
                         verify=False)
    except Exception as e:
        # NOTE(review): ``e.message`` is Python-2-only; on Python 3 this
        # handler itself would raise AttributeError -- confirm runtime.
        if url:
            yo = get_yo_by_id(yo_id)
            user = yo.recipient
            body = 'Hi there! We\'ve experienced an issue with the callback URL specified on Yo username:' \
                   '{}.\n' \
                   'The user received a Yo and subsequently we\'ve sent a request to: {}\n' \
                   'which resulted in the error: {}'.format(user.username, url, str(e.message))
            sendgrid.send_mail(recipient=user.email,
                               subject='Yo Callback Failure',
                               body=body,
                               sender='<EMAIL>')
            log_to_slack('sent to {}: {}'.format(user.email, body))
        else:
            log_to_slack(str(e.message))
def publish_to_pubsub(yo):
    """Publish a Yo to the recipient's redis pub/sub channel.

    Best-effort: publish failures are logged as warnings, never raised.
    """
    try:
        params = get_params_for_callback(yo.sender.user_id, yo.yo_id)
        redis_pubsub.publish({'cmd': 'message',
                              'type': 'yo',
                              'data': params},
                             channel=str(yo.recipient.id))
    except (ValueError, RequestException, Timeout) as err:
        # Bug fix: the original concatenated the exception object itself onto
        # the message string ('...' + err), which raises TypeError inside the
        # handler; coerce to str first.
        current_app.log_warning(sys.exc_info(),
                                message='redis_pubsub error: ' + str(err))
def ping_live_counter():
    """Pings the live Yo counter.

    Best-effort: any failure is reduced to a warning log line.
    """
    headers = {'Auth-Token': current_app.config['LIVE_COUNTER_AUTH_TOKEN'],
               'Connection': 'close'}
    try:
        # stream=True avoids downloading the response body; we only care that
        # the counter endpoint was hit.
        requests.get(current_app.config['LIVE_COUNTER_URL'], stream=True,
                     headers=headers, timeout=20)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; this ping remains best-effort.
        current_app.logger.warning('Live counter not available')
def _create_child_yos(yo, recipients):
    """Creates the child yos for use with broadcasting.
    Each child yo represents an individual recipient.

    Returns the number of recipients processed (the created documents are
    bulk-inserted, not returned).
    """
    children = []
    recipient_count = 0
    for i, recipient in enumerate(recipients):
        recipient_count += 1
        status = 'pending'
        # When yos are muted, they come in with a status already provided.
        if isinstance(recipient, tuple):
            recipient, status = recipient
        child_yo = Yo(parent=yo,
                      recipient=recipient,
                      status=status,
                      created=get_usec_timestamp(),
                      is_poll=yo.is_poll,
                      left_link=yo.left_link,
                      right_link=yo.right_link,
                      app_id=yo.app_id)
        children.append(child_yo)
        # Flush the buffer roughly every YO_BUFFER_MAX recipients to bound
        # memory use on very large broadcasts.
        if i > 0 and i % YO_BUFFER_MAX == 0:
            Yo.objects.insert(children, load_bulk=False)
            children = []
    # Set load_bulk to False so that only ObjectId's are returned
    # Yo cannot insert an empty list
    if children:
        Yo.objects.insert(children, load_bulk=False)
    return recipient_count
@async_job(rq=low_rq)
def send_slack_msg():
    """Post a fixed 'Yo' message to Slack (runs on the low-priority queue)."""
    log_to_slack('Yo')
3446495 | <reponame>z727354123/pyCharmTest
class Person:
    """A list wrapper that logs every indexed read and write."""

    def __init__(self):
        # Start with the integers 1..8 as backing storage.
        self.items = list(range(1, 9))

    def __setitem__(self, index, item):
        # Log the assignment exactly as "set <key> <value>".
        print("set", index, item)
        self.items[index] = item

    def __getitem__(self, index):
        # Log the key (may be an int or a slice) before delegating.
        print(index)
        return self.items[index]
p = Person()
# Appending via the attribute goes straight to the underlying list,
# bypassing __setitem__, so nothing is logged for these three calls.
p.items.append(9)
p.items.append(10)
p.items.append(11)
# Extended-slice assignment routes through __setitem__ with a slice key,
# replacing indices 0, 2 and 4.
p[0:5:2] = ["One", "Two", 'Three']
print(p.items)
| StarcoderdataPython |
5104137 | <reponame>konstdimasik/python_code
# Напишите программу, которая считывает строку с числом n, которое
# задаёт количество чисел, которые нужно считать. Далее считывает n строк
# с числами x_i, по одному числу в каждой строке. Итого будет n+1 строк.
#
# При считывании числа x_i программа должна на отдельной строке вывести
# значение f(x_i). Функция f(x) уже реализована и доступна для вызова.
#
# Функция вычисляется достаточно долго и зависит только от переданного
# аргумента x. Для того, чтобы уложиться в ограничение по времени, нужно
# избежать повторного вычисления значений.
def f(x):
    """Cube *x* (stand-in for an expensive computation)."""
    return x ** 3
n = int(input())
# Memoize f so each distinct x is computed only once (f is expensive).
# Renamed from `dict`, which shadowed the builtin of the same name.
cache = {}
for _ in range(n):
    x = int(input())
    if x not in cache:
        cache[x] = f(x)
    print(cache[x])
| StarcoderdataPython |
8164970 | <gh_stars>0
import pytest
from hello.world_world import hello_world
def test_hello_world():
    """Smoke test: hello_world() runs without raising."""
    hello_world()
| StarcoderdataPython |
3285710 | <reponame>Esequiel378/proxy-randomizer<filename>proxy_randomizer/utils.py
"""utils to consume from modules"""
# built in modules
import unicodedata
# type hint
from typing import Dict, List, Optional
# third party modules
import requests
from bs4 import BeautifulSoup
# local modules
from proxy_randomizer.proxy import Anonymity
class NotFoundError(Exception):
    """Raised when an expected HTML element is missing from the page."""
    pass
# Anonymity constants as list to run within a filter
# (consumed by get_anonymity_level() below).
ANONYMITY_LEVELS = [
    Anonymity.UNKNOWN,
    Anonymity.ELITE,
    Anonymity.ANONYMOUS,
    Anonymity.TRANSPARENT,
]
def remove_accents(string: str) -> str:
    """Strip combining (accent) marks from *string*."""
    # NFKD splits accented characters into base char + combining mark,
    # so dropping combining code points leaves the plain ASCII-ish base.
    decomposed = unicodedata.normalize("NFKD", string)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))
def contains(container: str, string: str) -> bool:
    """Case- and accent-insensitive test of whether *container* holds *string*."""
    normalized_haystack = remove_accents(container).lower()
    normalized_needle = remove_accents(string).lower()
    return normalized_needle in normalized_haystack
def get_anonymity_level(anonymity: Optional[str] = None) -> Anonymity:
    """Return the anonymity level from a given string.

    :param anonymity : anonymity name, default None
    :type anonymity  : Optional[str]
    :return          : Anonymity instance (UNKNOWN when nothing matches)
    :rtype           : Anonymity
    """
    # if no anonymity is given, return unknown
    if not anonymity:
        return Anonymity.UNKNOWN
    # PEP 8 (E731): use a generator expression instead of assigning a lambda
    # to a name; semantics are identical to the original filter/next pair.
    matches = (level for level in ANONYMITY_LEVELS
               if contains(anonymity, level.label))
    # find and return anonymity instance or UNKNOWN if none matches
    return next(matches, None) or Anonymity.UNKNOWN
def get_table_content(html: str) -> List[Dict[str, str]]:
    """Get all elements from a table and return a key-pair list.

    :param html : html content where table must be scraped
    :type html  : str
    :raises NotFoundError : raised if the proxy table cannot be found
    :return : one dict per table row, mapping lower-cased column header
              to the cell text
    :rtype  : List[Dict[str, str]]
    """
    # parse response
    soup = BeautifulSoup(html, "lxml")
    # find the container that holds the proxy list table
    # (error messages fixed: typo "ir" -> "it"; the f-strings had no
    # placeholders, so plain literals are used)
    table_container = soup.find("div", {"class": "fpl-list"})
    if table_container is None:
        raise NotFoundError("Can not find table or it does not exist")
    table = table_container.find("table")
    # if table can not be found, raise NotFoundError
    if table is None:
        raise NotFoundError("Can not find table or it does not exist")
    # get table headers
    headers = [th.text.lower() for th in soup.find("thead").find("tr").find_all("th")]
    # parse and return table content, row by row
    table_content = [
        {header: td.text for header, td in zip(headers, tr.find_all("td"))}
        for tr in table.find("tbody").find_all("tr")
    ]
    return table_content
| StarcoderdataPython |
6685307 | <reponame>arthurMll/TAPI
import connexion
import six
from tapi_server.models.inline_object24 import InlineObject24 # noqa: E501
from tapi_server.models.inline_object32 import InlineObject32 # noqa: E501
from tapi_server.models.tapi_common_bandwidth_profile import TapiCommonBandwidthProfile # noqa: E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: E501
from tapi_server.models.tapi_common_capacity_value import TapiCommonCapacityValue # noqa: E501
from tapi_server.models.tapi_common_context import TapiCommonContext # noqa: E501
from tapi_server.models.tapi_common_context_service_interface_point import TapiCommonContextServiceInterfacePoint # noqa: E501
from tapi_server.models.tapi_common_get_service_interface_point_details import TapiCommonGetServiceInterfacePointDetails # noqa: E501
from tapi_server.models.tapi_common_get_service_interface_point_list import TapiCommonGetServiceInterfacePointList # noqa: E501
from tapi_server.models.tapi_common_getserviceinterfacepointdetails_output import TapiCommonGetserviceinterfacepointdetailsOutput # noqa: F401,E501
from tapi_server.models.tapi_common_getserviceinterfacepointlist_output import TapiCommonGetserviceinterfacepointlistOutput # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: E501
from tapi_server import util
from tapi_server import database
# Auto-generated connexion handler stub -- not implemented yet.
def data_context_delete():  # noqa: E501
    """data_context_delete
    removes tapi.common.Context  # noqa: E501
    :rtype: None
    """
    return 'do some magic!'
def data_context_get():  # noqa: E501
    """data_context_get
    returns tapi.common.Context  # noqa: E501
    :rtype: TapiCommonContext
    """
    # Serve the singleton TAPI context held by the in-memory database
    # module (stray C-style trailing semicolon removed).
    return database.context
# Auto-generated connexion handler stubs for the TAPI context resource
# tree -- none of these are implemented yet; they deserialize the JSON
# body (where applicable) and return the generator's placeholder string.
def data_context_name_post(tapi_common_name_and_value=None):  # noqa: E501
    """data_context_name_post
    creates tapi.common.NameAndValue  # noqa: E501
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_namevalue_name_delete(value_name):  # noqa: E501
    """data_context_namevalue_name_delete
    removes tapi.common.NameAndValue  # noqa: E501
    :param value_name: Id of name
    :type value_name: str
    :rtype: None
    """
    return 'do some magic!'
def data_context_namevalue_name_get(value_name):  # noqa: E501
    """data_context_namevalue_name_get
    returns tapi.common.NameAndValue  # noqa: E501
    :param value_name: Id of name
    :type value_name: str
    :rtype: TapiCommonNameAndValue
    """
    return 'do some magic!'
def data_context_namevalue_name_post(value_name, tapi_common_name_and_value=None):  # noqa: E501
    """data_context_namevalue_name_post
    creates tapi.common.NameAndValue  # noqa: E501
    :param value_name: Id of name
    :type value_name: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_namevalue_name_put(value_name, tapi_common_name_and_value=None):  # noqa: E501
    """data_context_namevalue_name_put
    creates or updates tapi.common.NameAndValue  # noqa: E501
    :param value_name: Id of name
    :type value_name: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_post(tapi_common_context=None):  # noqa: E501
    """data_context_post
    creates tapi.common.Context  # noqa: E501
    :param tapi_common_context: tapi.common.Context to be added to list
    :type tapi_common_context: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_context = TapiCommonContext.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_put(tapi_common_context=None):  # noqa: E501
    """data_context_put
    creates or updates tapi.common.Context  # noqa: E501
    :param tapi_common_context: tapi.common.Context to be added or updated
    :type tapi_common_context: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_context = TapiCommonContext.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_point_post(tapi_common_context_service_interface_point=None):  # noqa: E501
    """data_context_service_interface_point_post
    creates tapi.common.context.ServiceInterfacePoint  # noqa: E501
    :param tapi_common_context_service_interface_point: tapi.common.context.ServiceInterfacePoint to be added to list
    :type tapi_common_context_service_interface_point: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_context_service_interface_point = TapiCommonContextServiceInterfacePoint.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_committed_burst_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_committed_burst_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_committed_information_rate_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_committed_information_rate_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_get
    returns tapi.common.BandwidthProfile  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonBandwidthProfile
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_peak_burst_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_peak_burst_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_peak_information_rate_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_bandwidth_profile_peak_information_rate_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_get
    returns tapi.common.Capacity  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacity
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_available_capacity_total_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_available_capacity_total_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_delete(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_delete
    removes tapi.common.context.ServiceInterfacePoint  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: None
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_get
    returns tapi.common.context.ServiceInterfacePoint  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonContextServiceInterfacePoint
    """
    # Look up the SIP by uuid in the in-memory database
    # (stray C-style trailing semicolon removed).
    return database.service_interface_point(uuid)
# Auto-generated connexion handler stubs for the per-SIP sub-resources
# (name list and total-potential-capacity tree) -- not implemented yet.
def data_context_service_interface_pointuuid_name_post(uuid, tapi_common_name_and_value=None):  # noqa: E501
    """data_context_service_interface_pointuuid_name_post
    creates tapi.common.NameAndValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_namevalue_name_delete(uuid, value_name):  # noqa: E501
    """data_context_service_interface_pointuuid_namevalue_name_delete
    removes tapi.common.NameAndValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param value_name: Id of name
    :type value_name: str
    :rtype: None
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_namevalue_name_get(uuid, value_name):  # noqa: E501
    """data_context_service_interface_pointuuid_namevalue_name_get
    returns tapi.common.NameAndValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param value_name: Id of name
    :type value_name: str
    :rtype: TapiCommonNameAndValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_namevalue_name_post(uuid, value_name, tapi_common_name_and_value=None):  # noqa: E501
    """data_context_service_interface_pointuuid_namevalue_name_post
    creates tapi.common.NameAndValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param value_name: Id of name
    :type value_name: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added to list
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_namevalue_name_put(uuid, value_name, tapi_common_name_and_value=None):  # noqa: E501
    """data_context_service_interface_pointuuid_namevalue_name_put
    creates or updates tapi.common.NameAndValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param value_name: Id of name
    :type value_name: str
    :param tapi_common_name_and_value: tapi.common.NameAndValue to be added or updated
    :type tapi_common_name_and_value: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_name_and_value = TapiCommonNameAndValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_post(uuid, tapi_common_context_service_interface_point=None):  # noqa: E501
    """data_context_service_interface_pointuuid_post
    creates tapi.common.context.ServiceInterfacePoint  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param tapi_common_context_service_interface_point: tapi.common.context.ServiceInterfacePoint to be added to list
    :type tapi_common_context_service_interface_point: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_context_service_interface_point = TapiCommonContextServiceInterfacePoint.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_put(uuid, tapi_common_context_service_interface_point=None):  # noqa: E501
    """data_context_service_interface_pointuuid_put
    creates or updates tapi.common.context.ServiceInterfacePoint  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :param tapi_common_context_service_interface_point: tapi.common.context.ServiceInterfacePoint to be added or updated
    :type tapi_common_context_service_interface_point: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        tapi_common_context_service_interface_point = TapiCommonContextServiceInterfacePoint.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_committed_burst_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_committed_burst_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_committed_information_rate_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_committed_information_rate_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_get
    returns tapi.common.BandwidthProfile  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonBandwidthProfile
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_peak_burst_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_peak_burst_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_peak_information_rate_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_bandwidth_profile_peak_information_rate_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_get
    returns tapi.common.Capacity  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacity
    """
    return 'do some magic!'
def data_context_service_interface_pointuuid_total_potential_capacity_total_size_get(uuid):  # noqa: E501
    """data_context_service_interface_pointuuid_total_potential_capacity_total_size_get
    returns tapi.common.CapacityValue  # noqa: E501
    :param uuid: Id of service-interface-point
    :type uuid: str
    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
def operations_get_service_interface_point_details_post(inline_object24=None):  # noqa: E501
    """operations_get_service_interface_point_details_post
     # noqa: E501
    :param inline_object24:
    :type inline_object24: dict | bytes
    :rtype: TapiCommonGetServiceInterfacePointDetails
    """
    if connexion.request.is_json:
        inline_object24 = InlineObject24.from_dict(connexion.request.get_json())  # noqa: E501
    # NOTE(review): a non-JSON request leaves inline_object24 as None and the
    # attribute access below fails -- confirm connexion guarantees JSON here.
    sip = database.service_interface_point(inline_object24.input.sip_id_or_name)
    output = TapiCommonGetserviceinterfacepointdetailsOutput(sip)
    return TapiCommonGetServiceInterfacePointDetails(output)
def operations_get_service_interface_point_list_post():  # noqa: E501
    """operations_get_service_interface_point_list_post
     # noqa: E501
    :rtype: TapiCommonGetServiceInterfacePointList
    """
    # Wrap the full SIP list in the generated RPC output envelope
    # (stray trailing semicolon removed; nested call unpacked for clarity).
    output = TapiCommonGetserviceinterfacepointlistOutput(
        database.service_interface_point_list())
    return TapiCommonGetServiceInterfacePointList(output)
# Auto-generated connexion handler stub -- not implemented yet.
def operations_update_service_interface_point_post(inline_object32=None):  # noqa: E501
    """operations_update_service_interface_point_post
     # noqa: E501
    :param inline_object32:
    :type inline_object32: dict | bytes
    :rtype: None
    """
    if connexion.request.is_json:
        inline_object32 = InlineObject32.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
| StarcoderdataPython |
class Solution:
    def VerifySquenceOfBST(self, sequence):
        """Return True if `sequence` could be the postorder traversal of a
        binary search tree (an empty sequence is rejected, per the original
        contract)."""
        if sequence == []:
            return False
        length = len(sequence)
        root = sequence[-1]
        # `i` is the split point: sequence[:i] is the left subtree
        # (all values < root); sequence[i:length-1] must all be > root.
        for i in range(length):
            if sequence[i] > root:
                break
        for j in range(i, length):
            if sequence[j] < root:
                return False
        left = True
        if i > 0:
            left = self.VerifySquenceOfBST(sequence[:i])
        right = True
        # BUG FIX: the original tested `j < length - 1`, which is always
        # False after the loop above (j ends at length-1), so the right
        # subtree was never recursively verified -- e.g. [15, 12, 13, 10]
        # was wrongly accepted. Recurse whenever a right subtree exists.
        if i < length - 1:
            right = self.VerifySquenceOfBST(sequence[i:length - 1])
        return left and right
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.