id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,800 | test word count | #!/usr/bin/env python
# --!-- coding: utf8 --!--
"""Tests for functions"""
import re
from manuskript import functions as F
def METHOD_NAME():
    """wordCount() counts whole words; an empty string contains none."""
    for text, expected in (("In the beginning was the word.", 6), ("", 0)):
        assert F.wordCount(text) == expected
def test_convert():
    """Coercion helpers collapse invalid input to a zero-ish default."""
    # toInt: non-numeric strings fall back to 0.
    for raw, expected in (("9", 9), ("a", 0), ("", 0)):
        assert F.toInt(raw) == expected
    # toFloat: parses decimals, maps "" to 0.0.
    assert F.toFloat("9.4") == 9.4
    assert F.toFloat("") == 0.
    # toString: both None and the literal string "None" become "".
    for raw, expected in ((None, ""), ("None", ""), ("Joy", "Joy")):
        assert F.toString(raw) == expected
def test_several():
    """Smoke-test the drawing and colour helpers (requires a Qt environment)."""
    # QIcon was imported but never used; dropped.
    from PyQt5.QtGui import QPainter, QPixmap, QColor
    from PyQt5.QtCore import QRect
    # drawProgress: painting 50% progress must not raise.
    px = QPixmap(10, 10)
    F.drawProgress(QPainter(px), QRect(0, 0, 100, 100), 0.5)
    # colorFromProgress: distinct progress values yield distinct colours.
    a = F.colorFromProgress(0.1)
    b = F.colorFromProgress(0.5)
    c = F.colorFromProgress(1.0)
    d = F.colorFromProgress(1.5)
    assert a != b != c != d
    # iconColor & iconFromColorString: round-trip a colour through an icon.
    icon = F.iconFromColorString("#ff0000")
    assert F.iconColor(icon).name().lower() == "#ff0000"
    # themeIcon: returns an icon object even for unknown names.
    # (E711 fix: identity comparison instead of `!= None`.)
    assert F.themeIcon("text") is not None
    assert F.themeIcon("nonexistingname") is not None
    # randomColor: passing the previous colour must yield a different one.
    c1 = F.randomColor()
    c2 = F.randomColor(c1)
    assert c1.name() != c2.name()
    # mixColors: an equal mix of white and black is mid grey.
    c1 = QColor("#FFF")
    c2 = QColor("#000")
    assert F.mixColors(c1, c2).name() == "#7f7f7f"
    # colorifyPixmap: assumed to return the tinted pixmap -- verify upstream.
    assert F.colorifyPixmap(px, c1) is not None
def test_outlineItemColors():
    """A fresh outline item exposes every expected colour role."""
    from manuskript.models import outlineItem
    from PyQt5.QtGui import QColor

    colors = F.outlineItemColors(outlineItem(title="Test"))
    for role in ("POV", "Label", "Progress", "Compile"):
        assert role in colors
    # A new item renders its Compile state as a fully transparent colour.
    assert colors["Compile"].name(QColor.HexArgb) == "#00000000"
def test_paths():
    """The path helpers return usable, non-empty locations."""
    assert F.appPath() is not None
    # BUG FIX: the function object itself was compared to None
    # (`F.writablePath != None`), which is always true; the helper must
    # actually be called for the assertion to test anything.
    assert F.writablePath() is not None
    assert len(F.allPaths("suffix")) == 2
    assert F.tempFile("yop") is not None
    background = F.findBackground("spacedreams.jpg")
    assert "resources/backgrounds/spacedreams.jpg" in background
    assert len(F.customIcons()) > 1
def test_mainWindow():
    """Global main-window helpers work and widget search behaves sanely."""
    from PyQt5.QtWidgets import QWidget, QLCDNumber

    assert F.mainWindow() != None
    assert F.MW != None
    # Fire-and-forget helpers: just check they do not raise.
    F.statusMessage("Test")
    F.printObjects()
    # At least one QWidget exists, but no QLCDNumber is used in the UI.
    assert len(F.findWidgetsOfClass(QWidget)) > 0
    assert len(F.findWidgetsOfClass(QLCDNumber)) == 0
def test_search_noMatch():
    """A pattern absent from the text yields an empty result list."""
    assert F.search(re.compile("text"), "foo") == []


def test_search_singleLine_fullMatch():
    """A match covering the whole line is wrapped in bold markers."""
    assert F.search(re.compile("text"), "text") == [(0, 4, "<b>text</b>")]


def test_search_singleLine_start():
    """A match at the start of a single line keeps its trailing context."""
    assert F.search(re.compile("text"), "text is this") == [(0, 4, "<b>text</b> is this")]


def test_search_singleLine_end():
    """A match at the end of a single line keeps its leading context."""
    assert F.search(re.compile("text"), "This is text") == [(8, 12, "This is <b>text</b>")]


def test_search_multipleLines_fullMatch():
    """In multi-line text, surrounding lines are elided as "[...]"."""
    assert F.search(re.compile("text"), "This is\ntext\nOK") == [(8, 12, "[...] <b>text</b> [...]")]


def test_search_multipleLines_start():
    """The matched line's own trailing text is preserved."""
    assert F.search(re.compile("text"), "This is\ntext oh yeah\nOK") == [(8, 12, "[...] <b>text</b> oh yeah [...]")]


def test_search_multipleLines_end():
    """The matched line's own leading text is preserved."""
    assert F.search(re.compile("text"), "This is\nsome text\nOK") == [(13, 17, "[...] some <b>text</b> [...]")]


def test_search_multipleLines_full():
    """NOTE(review): identical to test_search_multipleLines_fullMatch above."""
    assert F.search(re.compile("text"), "This is\ntext\nOK") == [(8, 12, "[...] <b>text</b> [...]")]
def test_search_multiple_strMatches():
    """Every occurrence is reported, each highlighted within full context."""
    haystack = "text, text and more text"
    expected = [
        (0, 4, "<b>text</b>, text and more text"),
        (6, 10, "text, <b>text</b> and more text"),
        (20, 24, "text, text and more <b>text</b>"),
    ]
    assert F.search(re.compile("text"), haystack) == expected
def test_search_multiple_strMatches_caseSensitive():
assert F.search(re.compile("text"), "TeXt, TEXT and more text") == [(20, 24, "TeXt, TEXT and more <b>text</b>")]
assert F.search(re.compile("text", re.IGNORECASE), "TeXt, TEXT and more text") == [
(0, 4, "<b>TeXt</b>, TEXT and more text"),
(6, 10, "TeXt, <b>TEXT</b> and more text"),
(20, 24, "TeXt, TEXT and more <b>text</b>")
|
6,801 | move forward | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 6 16:52:49 2021
@author: Hera
"""
from nplab.instrument.serial_instrument import SerialInstrument
from nplab.instrument import Instrument
from nplab.utils.notified_property import NotifiedProperty
from nplab.ui.ui_tools import QuickControlBox
def bytes_to_binary(bytearr, debug=0):
    """Concatenate the 4-bit binary expansion of each hex digit in *bytearr*.

    Each element of *bytearr* is parsed as a base-16 value (so a plain hex
    string works: every character becomes one nibble) and rendered as a
    zero-padded 4-bit binary string; the pieces are joined in order.
    """
    if debug > 0:
        print(bytearr)
    pieces = []
    for digit in bytearr:
        # "#06b" -> "0b" prefix plus 4 zero-padded bits; strip the prefix.
        pieces.append(format(int(digit, base=16), "#06b").replace("0b", ""))
    if debug > 0:
        print(pieces)
    return "".join(pieces)
def twos_complement_to_int(binary, debug=0):
    """Decode *binary* (a string of '0'/'1') as a two's-complement value.

    The leading bit carries weight -2**(N-1); the remaining bits are read
    as an ordinary unsigned number. Returns a float.
    """
    if debug > 0:
        print(binary)
    width = len(binary)
    sign_bit = int(binary[0])
    magnitude = int(binary[1:], base=2)
    return float(magnitude - sign_bit * 2 ** (width - 1))
def int_to_hex(integer, padded_length=8, debug=0):
    """Render *integer* as upper-case hex, zero-padded to *padded_length* digits."""
    # "#0Nx" zero-pads to N characters including the "0x" prefix, hence +2.
    hex_digits = format(integer, "#0{}x".format(padded_length + 2))
    return hex_digits.replace("0x", "").upper()
def int_to_twos_complement(integer, padded_length=16, debug=0):
    """Return *integer* unchanged if non-negative, else its two's complement.

    Negative values are negated, written as a zero-padded binary string of
    width ``padded_length + 2``, bit-flipped and incremented (the classic
    two's-complement construction) and returned as an int.
    """
    if integer >= 0:
        # Non-negative numbers need no encoding.
        return integer
    if debug > 0:
        print("Below zero - returning twos complement")
    magnitude = -integer
    binary = format(magnitude, "0{}b".format(padded_length + 2)).replace("0b", "")
    # Flip every bit, then round-trip through a *decimal* int exactly as the
    # previous implementation did (this drops leading zeros of the flipped
    # form, which only matters for magnitudes exceeding the padded width).
    flipped = str(int("".join("1" if bit == "0" else "0" for bit in binary)))
    twos_complement = int(flipped, base=2) + 1
    encoded = format(twos_complement, "034b").replace("0b", "")
    if debug > 0:
        print("input:", magnitude)
        print("binary:", binary)
        print("ones comp:", flipped)
        print("twos comp (int):", int(encoded, base=2))
    return int("0b" + encoded, base=2)
class BusDistributor(SerialInstrument):
    """Serial-port handler for a Thorlabs ELLB distributor bus.

    Several devices can hang off one bus; the Thorlabs Ello software
    assigns each a device index (they all ship as 0, which prevents
    addressing them individually).
    """

    def __init__(self, port):
        # Fixed protocol parameters for the ELLB bus.
        self.termination_character = '\n'
        self.port_settings = {
            'baudrate': 9600,
            'bytesize': 8,
            'stopbits': 1,
            'parity': 'N',
            'timeout': 2,
            'writeTimeout': 2,
            'xonxoff': False,
        }
        super().__init__(port)
class ThorlabsELL6(Instrument):
    """Driver for a Thorlabs ELL6 two-position slider addressed through an
    ELLB distributor bus (see BusDistributor)."""

    # default id is 0, but if multiple devices of same type connected may have others
    # NOTE(review): range(11) also admits "10", which is not a single address
    # character -- confirm this is intended.
    VALID_DEVICE_IDs = [str(v) for v in list(
        range(11)) + ["A", "B", "C", "D", "E", "F"]]
    # How much a stage sleeps (in seconds) between successive calls to .get_position.
    # Used to make blocking calls to move_absolute and move_relative.
    BLOCK_SLEEPING_TIME = 0.02
    # Threshold for position accuracy when the stage is meant to be stationary.
    # If successive calls to get_position return values whose difference is
    # less than this jitter, the stage is considered to have stopped.
    POSITION_JITTER_THRESHOLD = 0.02
    # human readable status codes
    DEVICE_STATUS_CODES = {
        0: "OK, no error",
        1: "Communication Timeout",
        2: "Mechanical time out",
        3: "Command error or not supported",
        4: "Value out of range",
        5: "Module isolated",
        6: "Module out of isolation",
        7: "Initialization error",
        8: "Thermal error",
        9: "Busy",
        10: "Sensor Error",
        11: "Motor Error",
        12: "Out of Range",
        13: "Over current error",
        14: "OK, no error",
        "OutOfBounds": "Reserved"
    }
    # Number of selectable slots; the ELL9 subclass overrides this with 4.
    positions = 2

    def __init__(self, serial_device, device_index=0, debug=0):
        '''can be passed either a BusDistributor instance, or "COM5" '''
        super().__init__()
        if type(serial_device) is str:
            # A bare port name was given: wrap it in a BusDistributor.
            self.serial_device = BusDistributor(serial_device)
        else:
            self.serial_device = serial_device
        self.debug = debug
        if str(device_index) not in self.VALID_DEVICE_IDs:
            raise ValueError(
                "Device ID: {} is not valid!".format(device_index))
        self.device_index = device_index
        # Home the stage so the cached position starts at a known value.
        self.home()

    def home(self):
        # Send the 'ho' command and reset the locally cached position.
        self.query_device('ho')
        self._position = 0

    def set_position(self, pos):
        # Step one slot at a time until the cached position matches `pos`.
        assert 0 <= pos < self.positions
        while pos > self._position:
            self.METHOD_NAME()
        while pos < self._position:
            self.move_backward()

    def get_position(self):
        # Returns the locally cached position (not re-queried from hardware).
        return self._position

    # Expose position as a notified property so UIs can bind to it.
    position = NotifiedProperty(get_position, set_position)

    def query_device(self, query):
        '''
        Wrap a generic query with the ID of the device (integer in range: 0-F)
        so that we dont need to be explicit about this id
        '''
        raw_query = "{0}{1}".format(self.device_index, query)
        if self.debug > 0:
            print("raw_query", raw_query)
        raw_response = self.serial_device.query(raw_query)
        if self.debug > 0:
            print("raw_response", raw_response)
        return raw_response

    def get_qt_ui(self):
        '''
        Get UI for stage
        '''
        return ELL6UI(self)

    def METHOD_NAME(self):
        # Send 'fw' (one slot forward) and keep the cached position in sync.
        self.query_device('fw')
        self._position += 1

    def move_backward(self):
        # Send 'bw' (one slot backward) and keep the cached position in sync.
        self.query_device('bw')
        self._position -= 1
class ThorlabsELL9(ThorlabsELL6):
    """Four-position variant of the ELL6 slider; the protocol is identical."""

    positions = 4

    def get_qt_ui(self):
        """Return the Qt control box for this four-position stage."""
        return ELL9UI(self)
class ELL6UI(QuickControlBox):
    """Minimal control box exposing the two-position stage's position."""

    def __init__(self, instr):
        super().__init__('ELL6')
        # Valid positions for the two-slot slider are 0 and 1.
        self.add_spinbox('position', vmin=0, vmax=1)
        # Bind the spinbox to the stage's `position` notified property.
        self.auto_connect_by_name(controlled_object=instr)
class ELL9UI(QuickControlBox):
    """Minimal control box exposing the four-position stage's position."""

    def __init__(self, instr):
        super().__init__('ELL9')
        # Valid positions for the four-slot slider are 0 through 3.
        self.add_spinbox('position', vmin=0, vmax=3)
        # Bind the spinbox to the stage's `position` notified property.
        self.auto_connect_by_name(controlled_object=instr)
if __name__ == '__main__':
# f = ThorlabsELL6('COM9')
f = ThorlabsELL9('COM6')
f.show_gui(False) |
6,802 | test get sendable message excess | """Tests sopel.tools"""
from __future__ import annotations
import re
from sopel import tools
# Minimal bot configuration consumed by the configfactory-based tests below.
TMP_CONFIG = """
[core]
owner = testnick
nick = TestBot
enable = coretasks
"""
def test_get_sendable_message_default():
    """A short message passes through untouched."""
    text, excess = tools.get_sendable_message('aaaa')
    assert (text, excess) == ('aaaa', '')


def test_get_sendable_message_limit():
    """Exactly 400 bytes still fits in a single message."""
    text, excess = tools.get_sendable_message('a' * 400)
    assert (text, excess) == ('a' * 400, '')


def METHOD_NAME():
    """One byte over the limit is carried over into the excess."""
    text, excess = tools.get_sendable_message('a' * 401)
    assert (text, excess) == ('a' * 400, 'a')
def test_get_sendable_message_excess_space():
    """The split prefers a space boundary when one is available."""
    # aaa...aaa bbb...bbb
    text, excess = tools.get_sendable_message('a' * 200 + ' ' + 'b' * 200)
    assert (text, excess) == ('a' * 200, 'b' * 200)


def test_get_sendable_message_excess_space_limit():
    """A space sitting right at the limit splits the message cleanly."""
    # aaa...aaa bbb...bbb
    text, excess = tools.get_sendable_message('a' * 400 + ' ' + 'b' * 200)
    assert (text, excess) == ('a' * 400, 'b' * 200)


def test_get_sendable_message_excess_bigger():
    """With no usable space inside the limit, the first word is cut hard."""
    # aaa...aaa bbb...bbb
    text, excess = tools.get_sendable_message('a' * 401 + ' ' + 'b' * 1000)
    assert (text, excess) == ('a' * 400, 'a ' + 'b' * 1000)
def test_get_sendable_message_optional():
    """An explicit max_length argument overrides the 400-byte default."""
    cases = (
        ('aaaa', 'aaa', 'a'),
        ('aaa bbb', 'aaa', 'bbb'),
        ('aa bb cc', 'aa', 'bb cc'),
    )
    for initial, expected_text, expected_excess in cases:
        text, excess = tools.get_sendable_message(initial, 3)
        assert text == expected_text
        assert excess == expected_excess
def test_get_sendable_message_two_bytes():
    """The limit counts UTF-8 *bytes*: 2-byte chars must never be split."""
    # 4 bytes fit exactly two alphas.
    text, excess = tools.get_sendable_message('αααα', 4)
    assert text == 'αα'
    assert excess == 'αα'
    # 5 bytes cannot hold half of the third alpha, so the split is the same.
    text, excess = tools.get_sendable_message('αααα', 5)
    assert text == 'αα'
    assert excess == 'αα'
    # A space before the limit is preferred as the split point.
    text, excess = tools.get_sendable_message('α ααα', 4)
    assert text == 'α'
    assert excess == 'ααα'
    # The space exactly at the limit splits cleanly (and is dropped).
    text, excess = tools.get_sendable_message('αα αα', 4)
    assert text == 'αα'
    assert excess == 'αα'
    # A space beyond the limit forces a hard cut inside the first word.
    text, excess = tools.get_sendable_message('ααα α', 4)
    assert text == 'αα'
    assert excess == 'α α'
def test_get_sendable_message_three_bytes():
    """Same byte-counting rules with 3-byte UTF-8 characters."""
    # 6, 7 and 8 bytes all hold exactly two characters (a third needs 9).
    text, excess = tools.get_sendable_message('अअअअ', 6)
    assert text == 'अअ'
    assert excess == 'अअ'
    text, excess = tools.get_sendable_message('अअअअ', 7)
    assert text == 'अअ'
    assert excess == 'अअ'
    text, excess = tools.get_sendable_message('अअअअ', 8)
    assert text == 'अअ'
    assert excess == 'अअ'
    # A space before the limit is preferred as the split point.
    text, excess = tools.get_sendable_message('अ अअअ', 6)
    assert text == 'अ'
    assert excess == 'अअअ'
    # The space exactly at the limit splits cleanly (and is dropped).
    text, excess = tools.get_sendable_message('अअ अअ', 6)
    assert text == 'अअ'
    assert excess == 'अअ'
    # A space beyond the limit forces a hard cut inside the first word.
    text, excess = tools.get_sendable_message('अअअ अ', 6)
    assert text == 'अअ'
    assert excess == 'अ अ'
def test_get_sendable_message_four_bytes():
    """Same byte-counting rules with 4-byte UTF-8 characters."""
    # 8 through 11 bytes all hold exactly two characters (a third needs 12).
    text, excess = tools.get_sendable_message('𡃤𡃤𡃤𡃤', 8)
    assert text == '𡃤𡃤'
    assert excess == '𡃤𡃤'
    text, excess = tools.get_sendable_message('𡃤𡃤𡃤𡃤', 9)
    assert text == '𡃤𡃤'
    assert excess == '𡃤𡃤'
    text, excess = tools.get_sendable_message('𡃤𡃤𡃤𡃤', 10)
    assert text == '𡃤𡃤'
    assert excess == '𡃤𡃤'
    text, excess = tools.get_sendable_message('𡃤𡃤𡃤𡃤', 11)
    assert text == '𡃤𡃤'
    assert excess == '𡃤𡃤'
    # A space before the limit is preferred as the split point.
    text, excess = tools.get_sendable_message('𡃤 𡃤𡃤𡃤', 8)
    assert text == '𡃤'
    assert excess == '𡃤𡃤𡃤'
    # The space exactly at the limit splits cleanly (and is dropped).
    text, excess = tools.get_sendable_message('𡃤𡃤 𡃤𡃤', 8)
    assert text == '𡃤𡃤'
    assert excess == '𡃤𡃤'
    # A space beyond the limit forces a hard cut inside the first word.
    text, excess = tools.get_sendable_message('𡃤𡃤𡃤 𡃤', 8)
    assert text == '𡃤𡃤'
    assert excess == '𡃤 𡃤'
def test_get_sendable_message_bigger_multibyte_whitespace():
    """Tests that the logic doesn't break for multi-word strings with emoji.

    Testing multibyte characters without whitespace is fine, but there's an
    alternate code path to exercise.
    """
    text = (
        'Egg 🍳 and bacon; 🐷 egg, 🍳 sausage 🌭 and bacon; 🥓 egg 🐣 and spam; '
        'egg, 🍳 bacon 🥓 and spam, egg, 🍳 bacon, 🥓 sausage 🌭 and spam; spam, '
        'bacon, 🐖 sausage 🌭 and spam; spam, egg, 🍳 spam, spam, bacon 🐖 and '
        'spam; spam, spam, spam, egg 🥚🍳 and spam; spam, spam, spam, spam, spam, '
        'spam, baked beans, 🍛 spam, spam, spam and spam; lobster 🦞 thermidor aux '
        'crevettes with a mornay sauce garnished with truffle paté, 👨😏 brandy'
        'and a fried 🍤 egg 🥚🍳 on 🔛 top 🎩 and spam')
    first, second = tools.get_sendable_message(text)
    # The default 400-byte limit should split at the last space boundary
    # that fits, never inside a multi-byte character.
    expected_first = (
        'Egg 🍳 and bacon; 🐷 egg, 🍳 sausage 🌭 and bacon; 🥓 egg 🐣 and spam; '
        'egg, 🍳 bacon 🥓 and spam, egg, 🍳 bacon, 🥓 sausage 🌭 and spam; spam, '
        'bacon, 🐖 sausage 🌭 and spam; spam, egg, 🍳 spam, spam, bacon 🐖 and '
        'spam; spam, spam, spam, egg 🥚🍳 and spam; spam, spam, spam, spam, spam, '
        'spam, baked beans, 🍛 spam, spam, spam and spam; lobster 🦞 thermidor aux')
    expected_second = (
        'crevettes with a mornay sauce garnished with truffle paté, 👨😏 brandy'
        'and a fried 🍤 egg 🥚🍳 on 🔛 top 🎩 and spam')
    assert first == expected_first
    assert second == expected_second
def test_chain_loaders(configfactory):
    """chain_loaders() returns a callable concatenating each loader's result."""
    re_numeric = re.compile(r'\d+')
    re_text = re.compile(r'\w+')
    settings = configfactory('test.cfg', TMP_CONFIG)

    def loader_numeric(settings):
        return [re_numeric]

    def loader_text(settings):
        return [re_text]

    loader = tools.chain_loaders(loader_numeric, loader_text)
    assert callable(loader)
    # Results arrive in the order the loaders were chained.
    assert loader(settings) == [re_numeric, re_text]
6,803 | is get deprecated | # -*- coding: utf-8 -*-
#
# LinOTP - the open source solution for two factor authentication
# Copyright (C) 2010-2019 KeyIdentity GmbH
# Copyright (C) 2019- netgo software GmbH
#
# This file is part of LinOTP server.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License, version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-mail: info@linotp.de
# Contact: www.linotp.org
# Support: www.linotp.de
#
"""
This model contains the linotp processing logic
"""
import warnings
from functools import wraps
def render_calling_path(func):
    """Return a reST snippet "**METHODS** */module/function*" for *func*.

    Utility for sphinx rendering of api docs: *func* is expected to be a
    view function whose ``methods`` attribute lists the HTTP methods it
    accepts; when the attribute is missing (or not iterable) the
    conventional default "GET, POST" is used.
    """
    module = func.__module__
    module_name = module.rpartition(".")[-1]
    func_name = func.__name__
    try:
        methods = ", ".join(func.methods)
    except (AttributeError, TypeError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only the expected failures
        # (no `methods` attribute, or a non-iterable one) are caught now.
        methods = "GET, POST"
    return f"**{methods}** */{module_name}/{func_name}*\n "
def deprecated(func):
    """Decorator marking *func* as deprecated.

    The wrapped function emits a DeprecationWarning on every call, then
    delegates to the original unchanged.
    """

    # functools.wraps (already imported in this module) replaces the manual
    # copying of __name__/__doc__/__dict__, and additionally preserves
    # __module__, __qualname__ and sets __wrapped__ for introspection.
    @wraps(func)
    def newFunc(*args, **kwargs):
        warnings.warn(
            "Call to deprecated function %s." % func.__name__,
            category=DeprecationWarning,
        )
        return func(*args, **kwargs)

    return newFunc
def deprecated_methods(deprecated_methods_list):
    """
    deprecated_methods - decorator factory

    Mark linotp endpoints as deprecated when accessed with an HTTP method
    in the provided list, e.g.

        @deprecated_methods(['GET'])
        def check():
            ...

    1- A deprecation warning is prepended to the endpoint's docstring.
    2- A warning should be written in case the endpoint is accessed using a
       deprecated HTTP method. The warning log itself should be implemented
       in the controllers before calling the method (in progress TODO).

    Developer Note: the implementation is not complete: the major
    shortcoming is that it is not possible to access the request method the
    function is called with.

    :param deprecated_methods_list: a list of methods that are deprecated
        for the endpoint, e.g. ["GET"] or ["POST"] or ["GET", "POST"]
    """

    def METHOD_NAME():
        """Return True when HTTP GET access to the endpoint is deprecated."""
        return "GET" in deprecated_methods_list

    def is_post_deprecated():
        """Return True when HTTP POST access to the endpoint is deprecated."""
        return "POST" in deprecated_methods_list

    def doc_pretext():
        """Return the text prepended to the top of the endpoint docstring.

        BUG FIX: the previous version raised UnboundLocalError when neither
        method was deprecated and silently discarded the GET text when both
        were; the text is now accumulated.
        """
        pretext = ""
        if METHOD_NAME():
            pretext += """
    .. deprecated:: 3.2
        Requests using HTTP **GET** method (because it is modifying data).
        This endpoint will only be available via HTTP **POST** method in
        the future.
    """
        if is_post_deprecated():
            pretext += """
    .. deprecated:: 3.2
        Requests using HTTP **POST** method (because it is only reading data).
        This endpoint will only be available via HTTP **GET** method in
        the future.
    """
        return pretext

    def doc_posttext():
        """Return the text appended to the end of the endpoint docstring.

        BUG FIX: the previous version contained a doubled assignment
        (``doc_posttext = doc_posttext = ...``) and raised
        UnboundLocalError when neither method was deprecated. Both branches
        produced the same placeholder, so it is returned unconditionally.
        """
        return " "

    def get_conditional_deprecation_warnings(func_name):
        """Build the warning messages to log when the endpoint is called
        with a deprecated HTTP method."""
        conditional_deprecation_warnings = []
        if METHOD_NAME():
            conditional_deprecation_warnings.append({
                "condition_method": "GET",
                "warning_message": (
                    f"method: [{func_name}] should be called only by POST method"
                ),
            })
        if is_post_deprecated():
            conditional_deprecation_warnings.append({
                "condition_method": "POST",
                "warning_message": (
                    f"method: [{func_name}] should be called only by GET method"
                ),
            })
        return conditional_deprecation_warnings

    # the actual decorator
    def inner_func(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Update the docstring of the endpoint; guard against endpoints
        # without a docstring (BUG FIX: None previously broke the string
        # concatenation with a TypeError).
        wrapper.__doc__ = (
            render_calling_path(func)
            + doc_pretext()
            + (wrapper.__doc__ or "")
            + doc_posttext()
        )
        # Further implementation: set a flag to log a warning in case of
        # being called by the wrong method.
        # wrapper.conditional_deprecation_warnings = (
        #     get_conditional_deprecation_warnings(func_name=wrapper.__name__)
        # )
        return wrapper

    return inner_func
6,804 | start feeds | # This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot)
# Copyright (c) 2023 Drakkar-Software, All rights reserved.
#
# OctoBot is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# OctoBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>.
import octobot_backtesting.api as backtesting_api
import octobot_commons.enums as common_enums
import octobot_services.api as service_api
import octobot_services.octobot_channel_consumer as service_channel_consumer
import octobot_tentacles_manager.api as tentacles_manager_api
import octobot.channels as octobot_channels
import octobot.constants as constants
class ServiceFeedProducer(octobot_channels.OctoBotChannelProducer):
    """EvaluatorFactory class:
    - Create service feeds
    """

    def __init__(self, channel, octobot):
        super().__init__(channel)
        self.octobot = octobot
        # Flipped to True once METHOD_NAME has broadcast the start messages.
        self.started = False
        # Feed instances recorded through register_service_feed().
        self.service_feeds = []

    async def start(self):
        """Create every available service feed whose tentacle is activated."""
        in_backtesting = backtesting_api.is_backtesting_enabled(self.octobot.config)
        service_feed_factory = service_api.create_service_feed_factory(self.octobot.config,
                                                                       self.octobot.async_loop,
                                                                       self.octobot.bot_id)
        for feed in service_feed_factory.get_available_service_feeds(in_backtesting):
            # Only instantiate feeds enabled in the tentacles setup config.
            if tentacles_manager_api.is_tentacle_activated_in_tentacles_setup_config(
                    self.octobot.tentacles_setup_config, feed.get_name()):
                await self.create_feed(service_feed_factory, feed, in_backtesting)

    async def METHOD_NAME(self):
        """Broadcast an UPDATE/START_SERVICE_FEED message for each feed."""
        self.started = True
        for feed in self.service_feeds:
            await self.send(bot_id=self.octobot.bot_id,
                            subject=common_enums.OctoBotChannelSubjects.UPDATE.value,
                            action=service_channel_consumer.OctoBotChannelServiceActions.START_SERVICE_FEED.value,
                            data={
                                service_channel_consumer.OctoBotChannelServiceDataKeys.INSTANCE.value: feed,
                                service_channel_consumer.OctoBotChannelServiceDataKeys.EDITED_CONFIG.value:
                                    self.octobot.get_edited_config(constants.CONFIG_KEY, dict_only=False)
                            })

    async def create_feed(self, service_feed_factory, feed, in_backtesting):
        """Broadcast a CREATION/SERVICE_FEED message for the given feed class."""
        await self.send(bot_id=self.octobot.bot_id,
                        subject=common_enums.OctoBotChannelSubjects.CREATION.value,
                        action=service_channel_consumer.OctoBotChannelServiceActions.SERVICE_FEED.value,
                        data={
                            service_channel_consumer.OctoBotChannelServiceDataKeys.EDITED_CONFIG.value:
                                self.octobot.get_edited_config(constants.CONFIG_KEY, dict_only=False),
                            service_channel_consumer.OctoBotChannelServiceDataKeys.BACKTESTING_ENABLED.value:
                                in_backtesting,
                            service_channel_consumer.OctoBotChannelServiceDataKeys.CLASS.value: feed,
                            service_channel_consumer.OctoBotChannelServiceDataKeys.FACTORY.value: service_feed_factory
                        })

    async def register_service_feed(self, instance):
        """Record a created feed instance so it can later be started/stopped."""
        self.service_feeds.append(instance)

    async def stop(self):
        """Stop every registered service feed."""
        self.logger.debug("Stopping ...")
        for service_feed in self.service_feeds:
            await service_api.stop_service_feed(service_feed)
        self.logger.debug("Stopped")
6,805 | test no short cutting | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferUI
import GafferUITest
class EventSignalCombinerTest( GafferUITest.TestCase ) :
	"""Tests for the combiner used by GafferUI event signals: slots are
	invoked in connection order until one returns True, and an exception
	in one slot must not prevent later slots from running."""

	def trueSlot( self, gadget, event ) :
		# Slot that accepts the event (True stops further dispatch).
		self.trueSlotCalled = True
		return True

	def falseSlot( self, gadget, event ) :
		# Slot that declines the event, letting dispatch continue.
		self.falseSlotCalled = True
		return False

	def exceptionSlot( self, gadget, event ) :
		# Misbehaving slot; the combiner must survive this.
		self.exceptionSlotCalled = True
		raise Exception( "oops" )

	def setUp( self ) :
		GafferUITest.TestCase.setUp( self )
		self.falseSlotCalled = False
		self.trueSlotCalled = False
		self.exceptionSlotCalled = False

	def testShortCutting( self ) :
		# A slot returning True short-circuits: later slots never run.
		s = GafferUI.Gadget.ButtonSignal()
		s.connect( self.trueSlot, scoped = False )
		s.connect( self.falseSlot, scoped = False )
		self.assertEqual( self.trueSlotCalled, False )
		self.assertEqual( self.falseSlotCalled, False )
		self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
		self.assertEqual( self.trueSlotCalled, True )
		self.assertEqual( self.falseSlotCalled, False )

	def METHOD_NAME( self ) :
		# A slot returning False does not stop dispatch: both slots run.
		s = GafferUI.Gadget.ButtonSignal()
		s.connect( self.falseSlot, scoped = False )
		s.connect( self.trueSlot, scoped = False )
		self.assertEqual( self.trueSlotCalled, False )
		self.assertEqual( self.falseSlotCalled, False )
		self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
		self.assertEqual( self.trueSlotCalled, True )
		self.assertEqual( self.falseSlotCalled, True )

	def testExceptionHandling( self ) :
		# We don't want exceptions in one slot to prevent the
		# invocation of other slots. But we do want the errors from
		# those slots to be printed as warnings.
		s = GafferUI.Gadget.ButtonSignal()
		s.connect( self.exceptionSlot, scoped = False )
		s.connect( self.trueSlot, scoped = False )
		self.assertEqual( self.exceptionSlotCalled, False )
		self.assertEqual( self.trueSlotCalled, False )
		with IECore.CapturingMessageHandler() as mh :
			self.assertEqual( s( None, GafferUI.ButtonEvent() ), True )
		self.assertEqual( self.exceptionSlotCalled, True )
		self.assertEqual( self.trueSlotCalled, True )
		self.assertEqual( len( mh.messages ), 1 )
		self.assertEqual( mh.messages[0].level, IECore.Msg.Level.Error )
		# BUG FIX: a stray third argument (IECore.Msg.Level.Error) was
		# being passed as assertEqual's failure *message*; removed.
		self.assertEqual( mh.messages[0].context, "EventSignalCombiner" )
		self.assertIn( "Exception", mh.messages[0].message )
		self.assertIn( "oops", mh.messages[0].message )
if __name__ == "__main__":
unittest.main() |
6,806 | enqueue tick | import asyncio
import sys
import threading
from asyncio import events
import System.Windows.Forms as WinForms
from System import Action
from System.Threading.Tasks import Task
class WinformsProactorEventLoop(asyncio.ProactorEventLoop):
    """Proactor event loop driven by the Winforms message pump instead of a
    blocking ``while True`` loop."""

    def run_forever(self, app_context):
        """Set up the asyncio event loop, integrate it with the Winforms event loop, and
        start the application.

        This largely duplicates the setup behavior of the default Proactor
        run_forever implementation.

        :param app_context: The WinForms.ApplicationContext instance
            controlling the lifecycle of the app.
        """
        # Python 3.8 added an implementation of run_forever() in
        # ProactorEventLoop. The only part that actually matters is the
        # refactoring that moved the initial call to stage _loop_self_reading;
        # it now needs to be created as part of run_forever; otherwise the
        # event loop locks up, because there won't be anything for the
        # select call to process.
        self.call_soon(self._loop_self_reading)
        # Remember the application context.
        self.app_context = app_context
        # Set up the Proactor.
        # The code between the following markers should be exactly the same as
        # the official CPython implementation, up to the start of the
        # `while True:` part of run_forever() (see BaseEventLoop.run_forever()
        # in Lib/asyncio/base_events.py)
        # === START BaseEventLoop.run_forever() setup ===
        self._check_closed()
        if self.is_running():
            raise RuntimeError("This event loop is already running")
        if events._get_running_loop() is not None:
            raise RuntimeError(
                "Cannot run the event loop while another loop is running"
            )
        self._set_coroutine_origin_tracking(self._debug)
        self._thread_id = threading.get_ident()
        self._old_agen_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(
            firstiter=self._asyncgen_firstiter_hook,
            finalizer=self._asyncgen_finalizer_hook,
        )
        events._set_running_loop(self)
        # === END BaseEventLoop.run_forever() setup ===
        # Rather than going into a `while True:` loop, we're going to use the
        # Winforms event loop to queue a tick() message that will cause a
        # single iteration of the asyncio event loop to be executed. Each time
        # we do this, we queue *another* tick() message in 5ms time. In this
        # way, we'll get a continuous stream of tick() calls, without blocking
        # the Winforms event loop.
        # Queue the first asyncio tick.
        self.METHOD_NAME()
        # Start the Winforms event loop.
        WinForms.Application.Run(self.app_context)

    def METHOD_NAME(self):
        """Schedule tick() to run on the CLR thread pool in 5ms."""
        # NOTE(review): the delegate is stored on self, presumably to keep it
        # from being garbage collected before it fires -- confirm.
        self.task = Action[Task](self.tick)
        Task.Delay(5).ContinueWith(self.task)

    def tick(self, *args, **kwargs):
        """Cause a single iteration of the event loop to run on the main GUI thread."""
        # FIXME: this only works if there is a "main window" registered with the
        # app (#750).
        #
        # If the app context has a main form, invoke run_once_recurring()
        # on the thread associated with that form.
        if self.app_context.MainForm:
            action = Action(self.run_once_recurring)
            self.app_context.MainForm.Invoke(action)

    def run_once_recurring(self):
        """Run one iteration of the event loop, and enqueue the next iteration (if we're
        not stopping).

        This largely duplicates the "finally" behavior of the default Proactor
        run_forever implementation.
        """
        # Perform one tick of the event loop.
        self._run_once()
        if self._stopping:
            # If we're stopping, we can do the "finally" handling from
            # the BaseEventLoop run_forever().
            # === START BaseEventLoop.run_forever() finally handling ===
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_origin_tracking(False)
            sys.set_asyncgen_hooks(*self._old_agen_hooks)
            # === END BaseEventLoop.run_forever() finally handling ===
        else:
            # Otherwise, live to tick another day. Enqueue the next tick,
            # and make sure there will be *something* to be processed.
            # If you don't ensure there is at least one message on the
            # queue, the select() call will block, locking the app.
            self.METHOD_NAME()
self.call_soon(self._loop_self_reading) |
6,807 | test same pauli traces clifford | # pylint: disable=wrong-or-nonexistent-copyright-notice
import numpy as np
import pytest
import cirq
import examples.direct_fidelity_estimation as dfe
def test_direct_fidelity_estimation_no_noise_clifford():
    """Without noise, DFE on a Clifford circuit reports fidelity ~1."""
    qubits = cirq.LineQubit.range(3)
    circuit = cirq.Circuit(cirq.Z(qubits[0]), cirq.X(qubits[1]), cirq.X(qubits[2]))
    simulator = cirq.DensityMatrixSimulator()
    fidelity, _ = dfe.direct_fidelity_estimation(
        circuit, qubits, simulator, n_measured_operators=3, samples_per_term=0
    )
    assert np.isclose(fidelity, 1.0, atol=0.01)


def test_direct_fidelity_estimation_no_noise_non_clifford():
    """Without noise, DFE on a non-Clifford circuit also reports fidelity ~1."""
    qubits = cirq.LineQubit.range(3)
    circuit = cirq.Circuit(cirq.Z(qubits[0]) ** 0.123, cirq.X(qubits[1]), cirq.X(qubits[2]))
    simulator = cirq.DensityMatrixSimulator()
    fidelity, _ = dfe.direct_fidelity_estimation(
        circuit, qubits, simulator, n_measured_operators=64, samples_per_term=0
    )
    assert np.isclose(fidelity, 1.0, atol=0.01)
def test_direct_fidelity_estimation_with_noise_clifford():
    """With depolarizing noise the Clifford estimate must still be a valid fidelity."""
    line_qubits = cirq.LineQubit.range(3)
    clifford_circuit = cirq.Circuit(
        cirq.Z(line_qubits[0]), cirq.X(line_qubits[1]), cirq.X(line_qubits[2])
    )
    depolarizing_noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
    noisy_simulator = cirq.DensityMatrixSimulator(noise=depolarizing_noise)
    fidelity, _ = dfe.direct_fidelity_estimation(
        clifford_circuit, line_qubits, noisy_simulator, n_measured_operators=None, samples_per_term=100
    )
    # The estimator is stochastic under noise; only range-check the result.
    assert -1.0 <= fidelity <= 1.0
def test_direct_fidelity_estimation_with_noise_non_clifford():
    """With depolarizing noise the general (non-Clifford) estimate stays in range."""
    line_qubits = cirq.LineQubit.range(3)
    non_clifford_circuit = cirq.Circuit(
        cirq.Z(line_qubits[0]) ** 0.25,  # T-Gate, non Clifford.
        cirq.X(line_qubits[1]) ** 0.123,
        cirq.X(line_qubits[2]) ** 0.456,
    )
    depolarizing_noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
    noisy_simulator = cirq.DensityMatrixSimulator(noise=depolarizing_noise)
    fidelity, _ = dfe.direct_fidelity_estimation(
        non_clifford_circuit, line_qubits, noisy_simulator, n_measured_operators=None, samples_per_term=100
    )
    # The estimator is stochastic under noise; only range-check the result.
    assert -1.0 <= fidelity <= 1.0
def test_incorrect_sampler_raises_exception():
    """Passing a sampler class (rather than an instance) must raise TypeError."""
    line_qubits = cirq.LineQubit.range(1)
    circuit = cirq.Circuit(cirq.X(line_qubits[0]))
    bad_sampler = cirq.ZerosSampler  # deliberately not instantiated
    with pytest.raises(TypeError):
        dfe.direct_fidelity_estimation(
            circuit, line_qubits, bad_sampler, n_measured_operators=3, samples_per_term=0
        )
def test_direct_fidelity_estimation_clifford_all_trials():
    """Every choice of n_measured_operators yields fidelity ~1 without noise."""
    line_qubits = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.Z(line_qubits[0]), cirq.X(line_qubits[1]))
    simulator = cirq.DensityMatrixSimulator()
    for operator_count in [1, 2, 3, 4, None]:
        fidelity, _ = dfe.direct_fidelity_estimation(
            circuit,
            line_qubits,
            simulator,
            n_measured_operators=operator_count,
            samples_per_term=0,
        )
        assert np.isclose(fidelity, 1.0, atol=0.01)
def METHOD_NAME():
    """The Clifford and general DFE paths must agree on the Pauli trace signs."""
    n_qubits = 4
    qubits = cirq.LineQubit.range(n_qubits)
    circuit_clifford = cirq.Circuit(cirq.X(qubits[3]))
    circuit_general = cirq.Circuit(cirq.CCX(qubits[0], qubits[1], qubits[2]), circuit_clifford)
    def _run_dfe(circuit):
        # Run DFE under a noise model that damps only the last qubit, and
        # report whether the Clifford-tableau (stabilizer) path was taken.
        class NoiseOnLastQubitOnly(cirq.NoiseModel):
            def __init__(self):
                self.qubit_noise_gate = cirq.amplitude_damp(1.0)
            def noisy_moment(self, moment, system_qubits):
                return [
                    moment,
                    cirq.Moment(
                        [
                            self.qubit_noise_gate(q).with_tags(cirq.ops.VirtualTag())
                            for q in system_qubits[-1:]
                        ]
                    ),
                ]
        noise = NoiseOnLastQubitOnly()
        noisy_simulator = cirq.DensityMatrixSimulator(noise=noise)
        _, intermediate_results = dfe.direct_fidelity_estimation(
            circuit, qubits, noisy_simulator, n_measured_operators=None, samples_per_term=1
        )
        return intermediate_results.pauli_traces, intermediate_results.clifford_tableau is not None
    # Run both algos
    pauli_traces_clifford, clifford_is_clifford = _run_dfe(circuit_clifford)
    pauli_traces_general, general_is_clifford = _run_dfe(circuit_general)
    assert clifford_is_clifford
    assert not general_is_clifford
    # The Clifford path is expected to enumerate 2**n_qubits Pauli traces.
    assert len(pauli_traces_clifford) == 2**n_qubits
    for pauli_trace_clifford in pauli_traces_clifford:
        # Match each Clifford trace with its general-path counterpart by P_i.
        pauli_trace_general = [x for x in pauli_traces_general if x.P_i == pauli_trace_clifford.P_i]
        assert len(pauli_trace_general) == 1
        pauli_trace_general = pauli_trace_general[0]
        # The code itself checks that the rho_i is either +1 or -1, so here we
        # simply test that the sign is the same.
        assert np.isclose(pauli_trace_general.rho_i, pauli_trace_clifford.rho_i, atol=0.01)
def test_direct_fidelity_estimation_intermediate_results():
    """Spot-check the deterministic fields of the intermediate results object."""
    qubits = cirq.LineQubit.range(1)
    circuit = cirq.Circuit(cirq.I(qubits[0]))
    no_noise_simulator = cirq.DensityMatrixSimulator()
    _, intermediate_result = dfe.direct_fidelity_estimation(
        circuit, qubits, no_noise_simulator, n_measured_operators=1, samples_per_term=0
    )
    # We only test a few fields to be sure that they are set properly. In
    # particular, some of them are random, and so we don't test them.
    assert str(intermediate_result.clifford_tableau) == "+ Z "
    np.testing.assert_equal(len(intermediate_result.pauli_traces), 1)
    assert np.isclose(intermediate_result.pauli_traces[0].rho_i, 1.0)
    assert np.isclose(intermediate_result.pauli_traces[0].Pr_i, 0.5)
    np.testing.assert_equal(len(intermediate_result.trial_results), 1)
    assert np.isclose(intermediate_result.trial_results[0].sigma_i, 1.0)
    assert np.isclose(intermediate_result.std_dev_estimate, 0.0)
    assert np.isclose(intermediate_result.std_dev_bound, 0.5)
def test_parsing_args():
    """The CLI argument parser accepts a --samples_per_term option."""
    dfe.parse_arguments(['--samples_per_term=10'])
def test_calling_main():
    """Smoke-test main() both without and with per-term sampling."""
    dfe.main(n_measured_operators=3, samples_per_term=0)
    dfe.main(n_measured_operators=3, samples_per_term=10) |
6,808 | test write bytes read bytes | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import importlib.util
import pytest
import pyvisa
from pymeasure.adapters import VISAAdapter
from pymeasure.test import expected_protocol
# This uses a pyvisa-sim default instrument, we could also define our own.
SIM_RESOURCE = 'ASRL2::INSTR'
# Skip the entire module unless the pyvisa-sim backend is importable.
is_pyvisa_sim_installed = bool(importlib.util.find_spec('pyvisa_sim'))
if not is_pyvisa_sim_installed:
    pytest.skip('PyVISA tests require the pyvisa-sim library', allow_module_level=True)
@pytest.fixture
def adapter():
    """Open a pyvisa-sim adapter; teardown drains the read buffer and closes it."""
    adapter = VISAAdapter(SIM_RESOURCE, visa_library='@sim',
                          read_termination="\n",
                          # Large timeout allows very slow GitHub action runners to complete.
                          timeout=60,
                          )
    yield adapter
    # Empty the read buffer, as something might remain there after a test.
    # `clear` is not implemented in pyvisa-sim and `flush_read_buffer` seems to do nothing.
    adapter.timeout = 0
    try:
        adapter.read_bytes(-1)
    except pyvisa.errors.VisaIOError as exc:
        # Only a timeout (i.e. empty buffer) is expected here; anything else is real.
        if not exc.args[0].startswith("VI_ERROR_TMO"):
            raise
    # Close the connection
    adapter.close()
def test_nested_adapter():
    """An adapter built from another adapter shares its connection and manager."""
    outer = VISAAdapter(SIM_RESOURCE, visa_library='@sim', read_termination="\n")
    nested = VISAAdapter(outer)
    assert nested.resource_name == SIM_RESOURCE
    assert nested.connection == outer.connection
    assert nested.manager == outer.manager
def test_nested_adapter_query_delay():
    """A nested adapter inherits the outer adapter's (deprecated) query_delay."""
    delay = 10
    with pytest.warns(FutureWarning, match="query_delay"):
        outer = VISAAdapter(SIM_RESOURCE, visa_library='@sim', read_termination="\n",
                            query_delay=delay)
    nested = VISAAdapter(outer)
    assert nested.resource_name == SIM_RESOURCE
    assert nested.connection == outer.connection
    assert nested.query_delay == delay
def test_ProtocolAdapter():
    """VISAAdapter works against the protocol-replay test harness."""
    with expected_protocol(
        VISAAdapter,
        [(b"some bytes written", b"Response")],
    ) as protocol_adapter:
        protocol_adapter.write_bytes(b"some bytes written")
        assert protocol_adapter.read_bytes(-1) == b"Response"
def test_correct_visa_asrl_kwarg():
    """Confirm that the asrl kwargs get passed through to the VISA connection."""
    asrl_adapter = VISAAdapter(SIM_RESOURCE, visa_library='@sim',
                               asrl={'read_termination': "\rx\n"})
    assert asrl_adapter.connection.read_termination == "\rx\n"
def test_open_gpib():
    """An integer resource name is expanded to a full GPIB resource string."""
    gpib_adapter = VISAAdapter(5, visa_library='@sim')
    assert gpib_adapter.resource_name == "GPIB0::5::INSTR"
class TestClose:
    """Closing the adapter must invalidate both the connection and manager sessions."""
    @pytest.fixture
    def adapterC(self):
        # A fresh adapter per test, since close() is destructive.
        return VISAAdapter(SIM_RESOURCE, visa_library='@sim')
    def test_connection_session_closed(self, adapterC):
        # Verify first, that it works before closing
        assert adapterC.connection.session is not None
        adapterC.close()
        with pytest.raises(pyvisa.errors.InvalidSession, match="Invalid session"):
            adapterC.connection.session
    def test_manager_session_closed(self, adapterC):
        # Verify first, that it works before closing
        assert adapterC.manager.session is not None
        adapterC.close()
        with pytest.raises(pyvisa.errors.InvalidSession, match="Invalid session"):
            adapterC.manager.session
def test_write_read(adapter):
    """A write followed by a read returns the simulated instrument's reply."""
    adapter.write(":VOLT:IMM:AMPL?")
    reply = adapter.read()
    assert float(reply) == 1
def METHOD_NAME(adapter):
    """Raw byte write plus a fixed-size byte read round-trips the IDN query."""
    adapter.write_bytes(b"*IDN?\r\n")
    response = adapter.read_bytes(22)
    assert response == b"SCPI,MOCK,VERSION_1.0\n"
def test_write_bytes_read(adapter):
    """Raw byte write followed by a text read strips the termination character."""
    adapter.write_bytes(b"*IDN?\r\n")
    response = adapter.read()
    assert response == "SCPI,MOCK,VERSION_1.0"
class TestReadBytes:
    """Tests for read_bytes, including termination-character handling."""
    @pytest.fixture()
    def adapterR(self, adapter):
        # Queue one *IDN? reply so each test finds bytes in the read buffer.
        adapter.write("*IDN?")
        yield adapter
    def test_read_bytes(self, adapterR):
        assert adapterR.read_bytes(22) == b"SCPI,MOCK,VERSION_1.0\n"
    def test_read_all_bytes(self, adapterR):
        # count=-1 means "read everything available".
        assert adapterR.read_bytes(-1) == b"SCPI,MOCK,VERSION_1.0\n"
    @pytest.mark.parametrize("count", (-1, 7))
    def test_read_break_on_termchar(self, adapterR, count):
        """Test read_bytes breaks on termchar."""
        adapterR.connection.read_termination = ","
        assert adapterR.read_bytes(count, break_on_termchar=True) == b"SCPI,"
    def test_read_no_break_on_termchar(self, adapterR):
        adapterR.connection.read_termination = ","
        # `break_on_termchar=False` is default value
        assert adapterR.read_bytes(-1) == b"SCPI,MOCK,VERSION_1.0\n"
    def test_read_no_break_on_newline(self, adapter):
        # write twice to have two newline characters in the read buffer
        adapter.write("*IDN?")
        adapter.write("*IDN?")
        # `break_on_termchar=False` is default value
        assert adapter.read_bytes(-1) == b"SCPI,MOCK,VERSION_1.0\nSCPI,MOCK,VERSION_1.0\n"
def test_visa_adapter(adapter):
    """Check repr, the deprecated ask(), and a plain write/read round-trip."""
    assert repr(adapter) == f"<VISAAdapter(resource='{SIM_RESOURCE}')>"
    with pytest.warns(FutureWarning):
        assert adapter.ask("*IDN?") == "SCPI,MOCK,VERSION_1.0"
    adapter.write("*IDN?")
    assert adapter.read() == "SCPI,MOCK,VERSION_1.0"
def test_visa_adapter_ask_values(adapter):
    """The deprecated ask_values() still parses the comma-separated reply."""
    with pytest.warns(FutureWarning):
        assert adapter.ask_values(":VOLT:IMM:AMPL?", separator=",") == [1.0] |
6,809 | test assemble xml file write | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.

    All three tests write an un-formatted blank cell (which must be ignored)
    and a formatted blank cell at C2, so they share one expected XML document
    and one assemble-and-compare helper.
    """

    # Expected worksheet XML: only the formatted blank cell at C2 survives.
    EXPECTED_BLANK_CELL_XML = """
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
                  <dimension ref="C2"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15"/>
                  <sheetData>
                    <row r="2" spans="3:3">
                      <c r="C2" s="1"/>
                    </row>
                  </sheetData>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                </worksheet>
                """

    def _assert_assembled_xml(self, write_cells):
        """Create a worksheet, let write_cells(worksheet, cell_format) populate
        it, assemble the XML and compare it against EXPECTED_BLANK_CELL_XML."""
        self.maxDiff = None
        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        cell_format = Format({"xf_index": 1})
        write_cells(worksheet, cell_format)
        worksheet.select()
        worksheet._assemble_xml_file()
        exp = _xml_to_list(self.EXPECTED_BLANK_CELL_XML)
        got = _xml_to_list(fh.getvalue())
        self.assertEqual(got, exp)

    def test_assemble_xml_file(self):
        """Test writing a worksheet with a blank cell."""

        def write_cells(worksheet, cell_format):
            # No format. Should be ignored.
            worksheet.write_blank(0, 0, None)
            worksheet.write_blank(1, 2, None, cell_format)

        self._assert_assembled_xml(write_cells)

    def METHOD_NAME(self):
        """Test writing a worksheet with a blank cell with write() method."""

        def write_cells(worksheet, cell_format):
            # No format. Should be ignored.
            worksheet.write(0, 0, None)
            worksheet.write(1, 2, None, cell_format)

        self._assert_assembled_xml(write_cells)

    def test_assemble_xml_file_A1(self):
        """Test writing a worksheet with a blank cell with A1 notation."""

        def write_cells(worksheet, cell_format):
            # No format. Should be ignored.
            worksheet.write_blank("A1", None)
            worksheet.write_blank("C2", None, cell_format)

        self._assert_assembled_xml(write_cells)
6,810 | clean inputs | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""This module contains various provisional APIs and strategies.
It is intended for internal use, to ease code reuse, and is not stable.
Point releases may move or break the contents at any time!
Internet strategies should conform to :rfc:`3986` or the authoritative
definitions it links to. If not, report the bug!
"""
# https://tools.ietf.org/html/rfc3696
import string
from importlib import resources
from hypothesis import strategies as st
from hypothesis.errors import InvalidArgument
from hypothesis.internal.conjecture import utils as cu
from hypothesis.strategies._internal.utils import defines_strategy
# Characters allowed verbatim in generated URLs / fragments (see RFC 3986).
URL_SAFE_CHARACTERS = frozenset(string.ascii_letters + string.digits + "$-_.+!*'(),~")
FRAGMENT_SAFE_CHARACTERS = URL_SAFE_CHARACTERS | {"?", "/"}
# This file is sourced from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
# The file contains additional information about the date that it was last updated.
try: # pragma: no cover
    traversable = resources.files("hypothesis.vendor") / "tlds-alpha-by-domain.txt"
    _comment, *_tlds = traversable.read_text(encoding="utf-8").splitlines()
except (AttributeError, ValueError): # pragma: no cover # .files() was added in 3.9
    _comment, *_tlds = resources.read_text(
        "hypothesis.vendor", "tlds-alpha-by-domain.txt", encoding="utf-8"
    ).splitlines()
# The vendored file's first line must be its "# ..." header comment.
assert _comment.startswith("#")
# Remove special-use domain names from the list. For more discussion
# see https://github.com/HypothesisWorks/hypothesis/pull/3572
TOP_LEVEL_DOMAINS = ["COM", *sorted((d for d in _tlds if d != "ARPA"), key=len)]
class DomainNameStrategy(st.SearchStrategy):
    """Strategy for :rfc:`1035` fully qualified domain names."""

    @staticmethod
    def METHOD_NAME(minimum, maximum, value, variable_name):
        """Validate an integer option; None defaults to *maximum*.

        Raises InvalidArgument for non-integers or for values outside the
        inclusive range [minimum, maximum].
        """
        if value is None:
            value = maximum
        elif not isinstance(value, int):
            raise InvalidArgument(
                f"Expected integer but {variable_name} is a {type(value).__name__}"
            )
        elif not minimum <= value <= maximum:
            # Bug fix: the bounds check above is inclusive, so the message now
            # uses <= instead of the previous (contradictory) strict <.
            raise InvalidArgument(
                f"Invalid value {minimum!r} <= {variable_name}={value!r} <= {maximum!r}"
            )
        return value

    def __init__(self, max_length=None, max_element_length=None):
        """
        A strategy for :rfc:`1035` fully qualified domain names.
        The upper limit for max_length is 255 in accordance with :rfc:`1035#section-2.3.4`
        The lower limit for max_length is 4, corresponding to a two letter domain
        with a single letter subdomain.
        The upper limit for max_element_length is 63 in accordance with :rfc:`1035#section-2.3.4`
        The lower limit for max_element_length is 1 in accordance with :rfc:`1035#section-2.3.4`
        """
        # https://tools.ietf.org/html/rfc1035#section-2.3.4
        max_length = self.METHOD_NAME(4, 255, max_length, "max_length")
        max_element_length = self.METHOD_NAME(
            1, 63, max_element_length, "max_element_length"
        )
        super().__init__()
        self.max_length = max_length
        self.max_element_length = max_element_length
        # These regular expressions are constructed to match the documented
        # information in https://tools.ietf.org/html/rfc1035#section-2.3.1
        # which defines the allowed syntax of a subdomain string.
        if self.max_element_length == 1:
            self.label_regex = r"[a-zA-Z]"
        elif self.max_element_length == 2:
            self.label_regex = r"[a-zA-Z][a-zA-Z0-9]?"
        else:
            maximum_center_character_pattern_repetitions = self.max_element_length - 2
            self.label_regex = r"[a-zA-Z]([a-zA-Z0-9\-]{0,%d}[a-zA-Z0-9])?" % (
                maximum_center_character_pattern_repetitions,
            )

    def do_draw(self, data):
        # 1 - Select a valid top-level domain (TLD) name
        # 2 - Check that the number of characters in our selected TLD won't
        # prevent us from generating at least a 1 character subdomain.
        # 3 - Randomize the TLD between upper and lower case characters.
        domain = data.draw(
            st.sampled_from(TOP_LEVEL_DOMAINS)
            .filter(lambda tld: len(tld) + 2 <= self.max_length)
            .flatmap(
                lambda tld: st.tuples(
                    *(st.sampled_from([c.lower(), c.upper()]) for c in tld)
                ).map("".join)
            )
        )
        # RFC-5890 s2.3.1 says such labels are reserved, and since we don't
        # want to bother with xn-- punycode labels we'll exclude them all.
        elem_st = st.from_regex(self.label_regex, fullmatch=True).filter(
            lambda label: len(label) < 4 or label[2:4] != "--"
        )
        # The maximum possible number of subdomains is 126,
        # 1 character subdomain + 1 '.' character, * 126 = 252,
        # with a max of 255, that leaves 3 characters for a TLD.
        # Allowing any more subdomains would not leave enough
        # characters for even the shortest possible TLDs.
        elements = cu.many(data, min_size=1, average_size=3, max_size=126)
        while elements.more():
            # Generate a new valid subdomain using the regex strategy.
            sub_domain = data.draw(elem_st)
            if len(domain) + len(sub_domain) >= self.max_length:
                data.stop_example(discard=True)
                break
            domain = sub_domain + "." + domain
        return domain
@defines_strategy(force_reusable_values=True)
def domains(
    *, max_length: int = 255, max_element_length: int = 63
) -> st.SearchStrategy[str]:
    """Generate :rfc:`1035` compliant fully qualified domain names.

    max_length: total name length, 4..255 (RFC 1035 s2.3.4).
    max_element_length: per-label length, 1..63 (RFC 1035 s2.3.4).
    """
    return DomainNameStrategy(
        max_length=max_length, max_element_length=max_element_length
    )
# The `urls()` strategy uses this to generate URL fragments (e.g. "#foo").
# It has been extracted to top-level so that we can test it independently
# of `urls()`, which helps with getting non-flaky coverage of the lambda.
_url_fragments_strategy = (
    st.lists(
        st.builds(
            # Each source character is percent-encoded (always when it is not
            # fragment-safe; randomly otherwise) or emitted literally.
            lambda char, encode: f"%{ord(char):02X}"
            if (encode or char not in FRAGMENT_SAFE_CHARACTERS)
            else char,
            st.characters(min_codepoint=0, max_codepoint=255),
            st.booleans(),
        ),
        min_size=1,
    )
    .map("".join)
    .map("#{}".format)
)
@defines_strategy(force_reusable_values=True)
def urls() -> st.SearchStrategy[str]:
    """A strategy for :rfc:`3986`, generating http/https URLs."""
    def url_encode(s):
        # Percent-encode every character outside the URL-safe set.
        return "".join(c if c in URL_SAFE_CHARACTERS else "%%%02X" % ord(c) for c in s)
    schemes = st.sampled_from(["http", "https"])
    ports = st.integers(min_value=0, max_value=2**16 - 1).map(":{}".format)
    paths = st.lists(st.text(string.printable).map(url_encode)).map("/".join)
    return st.builds(
        "{}://{}{}/{}{}".format,
        schemes,
        domains(),
        # Port and fragment are each optionally present (empty string otherwise).
        st.just("") | ports,
        paths,
        st.just("") | _url_fragments_strategy,
    )
6,811 | addr imm | from __future__ import print_function
import sys
import logging
def eprint(*args, **kwargs):
    """print() wrapper that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
console = logging.StreamHandler()
logger = logging.getLogger()
logger.addHandler(console)
logger.setLevel(logging.ERROR)
labels = {}      # symbol table: label name -> 16-bit value
program = []     # assembled output words, as 4-hex-digit strings (pass 2 only)
imm_expr = []    # immediate expressions ('#...') collected during pass 1
imm_dict = {}    # immediate value -> address of its pooled storage word
imm_values = []  # immediate values in emission order
nr = 0           # current source line number, for error messages
def _get_value_imm(str):
    """Like _get_value, but '#expr' immediate operands are allowed.

    In pass 1 the expression is recorded (and 0 returned); in pass 2 the
    address of the pooled immediate value is returned.
    """
    if str[0] == '#':
        if phase == 1:
            if not str[1:] in imm_expr: # phase 1
                imm_expr.append(str[1:])
            return 0
        else: # phase == 2
            return imm_dict[_get_value(str[1:])] # phase 2
    # not an immediate value
    return _get_value(str)
def _get_value(str):
    """Resolve *str* to a 16-bit value.

    Accepts '$hex', '0xhex', decimal literals, or a label name. Immediate
    markers ('#...') are rejected here; use _get_value_imm for those.
    During pass 1 unknown labels resolve to 0; in pass 2 they are an error.
    """
    if str[0] == '#':
        raise ValueError('Not allowed to use immediate value. Line %d' % nr)
    if str[0] == '$':
        val = int(str[1:], 16)
    elif str[0:2] == '0x':
        val = int(str[2:], 16)
    else:
        try:
            val = int(str)
        except ValueError:
            try:
                val = labels[str]
            except KeyError:
                if phase == 2:
                    # Bug fix: corrected the misspelled error message
                    # ("indentifier" -> "identifier").
                    raise NameError('Unknown identifier ' + str)
                val = 0
    # Values and addresses are 16-bit on this machine.
    return val & 0xFFFF
def _output_direct(data):
    """Advance the location counter; in pass 2 also append *data* to the program."""
    global pc
    pc += 1
    if phase == 2:
        program.append("%04X" % data)
def add_label(line):
    """Define a label: 'name' takes the current PC, 'name = expr' takes expr's value."""
    logger.debug("add label '%s'. Pass %d. PC = %d" % (line, phase, pc))
    split = line.split('=')
    for i in range(len(split)):
        split[i] = split[i].strip()
    # Duplicate definitions are only detectable in pass 1 (pass 2 re-runs the source).
    if (phase == 1) and (split[0] in labels):
        raise NameError("Label '%s' already exists." % split[0])
    if len(split) > 1:
        labels[split[0]] = _get_value(split[1])
    else:
        labels[split[0]] = pc
##########################################################################
##
## PARSE rules for each opcode
##
##########################################################################
def _addr(params, mnem, code):
    """Encode an opcode with a 10-bit absolute address operand; exits on overflow."""
    addr = _get_value(params)
    if addr > 0x3FF:
        eprint ("Error, address too large: %03x: %s $%03x" % (pc, mnem, addr))
        sys.exit(1)
    code |= addr
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
    _output_direct(code)
    return code
def METHOD_NAME(params, mnem, code):
    """Encode an opcode whose 10-bit operand may be an immediate ('#expr') or address."""
    operand = _get_value_imm(params)
    if operand > 0x3FF:
        eprint("Error, address too large: %03x: %s $%03x" % (pc, mnem, operand))
        sys.exit(1)
    encoded = code | operand
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, encoded, mnem, operand))
    _output_direct(encoded)
    return encoded
def _addr_rel(params, mnem, code):
    """Encode an opcode with relative addressing: 'base,offset'.

    The base pointer (low 3 bits of the word) must live at $3F8-$3FF and the
    offset (bits 3 and up) must fit in 8 bits; violations exit the assembler.
    """
    parsed = params.split(',')
    if len(parsed) < 2:
        eprint ("Line %d: Syntax error in relative addressing mode: %03x: %s %s" % (nr, pc, mnem, params))
        sys.exit(1)
    addr = _get_value(parsed[0])
    if phase == 2:
        # Base pointers can only be range-checked once labels are resolved (pass 2).
        if (addr < 0x3F8) or (addr > 0x3FF):
            eprint ("Line %d: Relative addressing base pointers shall be at $3F8-$3FF: %03x: %s $%03x" % (nr, pc, mnem, addr))
            sys.exit(1)
    offset = _get_value(parsed[1])
    if offset > 0xFF:
        eprint ("Line %d: Error, offset too large: %03x: %s $%03x,$%02x" % (nr, pc, mnem, addr, offset))
        sys.exit(1)
    code |= (addr & 0x07)
    code |= (offset << 3)
    logger.info("PC: %03x: %04x | %s $%03x,$%02x" % (pc, code, mnem, addr, offset))
    _output_direct(code)
    return code
def _data(params, mnem, code):
    """Emit one literal data word (.dw directive); the base code is unused."""
    word = _get_value(params)
    logger.info("PC: %03x: %s $%04x" % (pc, mnem, word))
    _output_direct(word)
    return word
def _block(params, mnem, code):
    """Reserve a block of zero-filled words (.blk directive); returns 0."""
    length = _get_value(params)
    logger.info("PC: %03x: %s $%04x" % (pc, mnem, length))
    for _ in range(length):
        _output_direct(0)
    return 0
def _addr_io(params, mnem, code):
    """Encode an opcode with an 8-bit I/O port address operand; exits on overflow."""
    addr = _get_value(params)
    if addr > 0xFF:
        eprint ("Error, address too large: %03x: %s $%03x" % (pc, mnem, addr))
        sys.exit(1)
    code |= addr
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
    _output_direct(code)
    return code
def _no_addr(params, mnem, code):
    """Encode an opcode with no operand (params is accepted but ignored)."""
    logger.info("PC: %03x: %04x | %s" % (pc, code, mnem))
    _output_direct(code)
    return code
def unknown_mnem(params):
    """Report an unrecognized mnemonic on stderr (does not exit)."""
    eprint ("Unknown mnemonic: '%s'" % params)
def dump_bram_init():
    """Print the program as BRAM INIT_xx lines (little-endian bytes, 32 per line)."""
    bram = [0]*2048
    # Split each 16-bit word into little-endian byte pairs.
    for i in range(len(program)):
        inst = int(program[i], 16)
        bram[2*i+0] = inst & 0xFF
        bram[2*i+1] = (inst >> 8) & 0xFF
    for i in range(64):
        if (i*16) >= len(program):
            break
        hx = ''
        # Bytes are emitted most-significant-first within each 32-byte INIT line.
        for j in range(31,-1,-1):
            hx = hx + "%02X" % bram[i*32+j]
        print (" INIT_%02X => X\"%s\"," % (i, hx))
def dump_nan_file(filename):
    """Write the assembled program to *filename* as little-endian 16-bit words.

    Uses a context manager so the file is closed even if a write fails
    (the original opened and closed the handle manually).
    """
    with open(filename, "wb") as f:
        b = bytearray(2)
        for i in range(len(program)):
            inst = int(program[i], 16)
            b[0] = inst & 0xFF
            b[1] = (inst >> 8) & 0xFF
            f.write(b)
# Dispatch table: mnemonic -> (operand encoder function, base opcode bits).
# '.dw' and '.blk' are data directives rather than instructions.
mnemonics = {
    'LOAD' : ( METHOD_NAME, 0x0800 ),
    'STORE' : ( _addr, 0x8000 ),
    'LOADI' : ( _addr_rel, 0x8800 ),
    'STORI' : ( _addr_rel, 0x9000 ),
    'OR' : ( METHOD_NAME, 0x1800 ),
    'AND' : ( METHOD_NAME, 0x2800 ),
    'XOR' : ( METHOD_NAME, 0x3800 ),
    'ADD' : ( METHOD_NAME, 0x4800 ),
    'SUB' : ( METHOD_NAME, 0x5800 ),
    'CMP' : ( METHOD_NAME, 0x5000 ),
    'ADDC' : ( METHOD_NAME, 0x6800 ),
    'INP' : ( _addr_io, 0x7800 ),
    'OUTP' : ( _addr_io, 0xA000 ),
    'RET' : ( _no_addr, 0xB800 ),
    'BEQ' : ( _addr, 0xC000 ),
    'BNE' : ( _addr, 0xC800 ),
    'BMI' : ( _addr, 0xD000 ),
    'BPL' : ( _addr, 0xD800 ),
    'BRA' : ( _addr, 0XE000 ),
    'CALL' : ( _addr, 0xE800 ),
    'BCS' : ( _addr, 0xF000 ),
    'BCC' : ( _addr, 0xF800 ),
    '.dw' : ( _data, 0x0000 ),
    '.blk' : ( _block, 0x0000 )
}
def parse_lines(lines):
    """Assemble *lines*: labels start in column 0, mnemonics are indented,
    and ';' starts a comment. Runs once per pass (see global `phase`)."""
    global nr
    nr = 0
    for line in lines:
        nr = nr + 1
        line = line.rstrip()
        # Strip trailing comments.
        comm = line.split(';', 1)
        line = comm[0]
        if(line.strip() == ''):
            continue
        line_strip = line.strip()
        # A non-blank first column means a label (or label = expr) definition.
        if (line[0] != ' ') and (line[0] != '\t'):
            add_label(line.rstrip())
            if (phase == 2):
                print (" ", line)
            continue
        #print "Line: '%s'" % line_strip
        # Split into mnemonic and (optional) operand field.
        line_split = line_strip.split(" ", 1)
        if len(line_split) == 1:
            line_split.append("")
        mnem = line_split[0];
        try:
            (f, code) = mnemonics[mnem]
        except KeyError as e:
            raise NameError("Unknown Mnemonic %s in line %d" % (mnem, nr))
        try:
            code = f(line_split[1].strip(), mnem, code)
        except IndexError as e:
            raise ValueError("Value error in line %d" % (nr,))
        if (phase == 2):
            print ("%03X: %04X | " % (pc-1, code),line)
def resolve_immediates():
    """Assign storage addresses (after the program) to each unique immediate value."""
    global pc
    # Evaluate each collected expression; duplicate values collapse via the dict.
    for imm in imm_expr:
        imm_dict[_get_value(imm)] = 0;
    for imm in imm_dict:
        imm_dict[imm] = pc
        imm_values.append(imm)
        pc += 1
    #print imm_expr
    #print imm_dict
    #print imm_values
if __name__ == "__main__":
    # Default file names; overridable via argv[1] (input) and argv[2] (output).
    inputfile = 'nano_code.nan'
    outputfile = 'nano_code.b'
    if len(sys.argv)>1:
        inputfile = sys.argv[1]
    if len(sys.argv)>2:
        outputfile = sys.argv[2]
    f = open(inputfile, 'r')
    lines = f.readlines()
    # Pass 1: collect labels and immediate expressions.
    pc = 0
    phase = 1
    logger.info("Pass 1...")
    parse_lines(lines)
    # print labels
    resolve_immediates()
    # Pass 2: emit code with all symbols resolved.
    pc = 0
    phase = 2
    logger.info("Pass 2...")
    logger.setLevel(logging.WARN)
    parse_lines(lines)
    # Append the pooled immediate values after the program.
    for imm in imm_values:
        logger.info("PC: %03x: .dw $%04x" % (pc, imm))
        print ("%03X: %04X | IMM #%d" % (pc, imm, imm))
        _output_direct(imm)
    dump_bram_init()
    dump_nan_file(outputfile) |
6,812 | hello logging job type fixture | import json
import logging
import os
import pytest
from dagster import (
String,
_seven as seven,
execute_job,
job,
logger,
reconstructable,
)
from dagster._core.test_utils import instance_for_test
from dagster._utils import safe_tempfile_path
from dagstermill.examples.repository import hello_logging
from dagstermill.io_managers import (
ConfigurableLocalOutputNotebookIOManager,
local_output_notebook_io_manager,
)
class LogTestFileHandler(logging.Handler):
    """logging.Handler that appends each record's __dict__ as one JSON line to a file."""
    def __init__(self, file_path):
        self.file_path = file_path
        if not os.path.isfile(self.file_path):
            with open(self.file_path, "a", encoding="utf8"): # Create file if does not exist
                pass
        super(LogTestFileHandler, self).__init__()
    def emit(self, record):
        # Append-mode open per record keeps the handler stateless between emits.
        with open(self.file_path, "a", encoding="utf8") as fd:
            fd.write(seven.json.dumps(record.__dict__) + "\n")
@logger(config_schema={"name": String, "log_level": String, "file_path": String})
def test_file_logger(init_context):
    """Dagster logger definition writing JSON records to a file via LogTestFileHandler."""
    klass = logging.getLoggerClass()
    logger_ = klass(
        init_context.logger_config["name"],
        level=init_context.logger_config["log_level"],
    )
    handler = LogTestFileHandler(init_context.logger_config["file_path"])
    logger_.addHandler(handler)
    handler.setLevel(init_context.logger_config["log_level"])
    return logger_
@job(
    logger_defs={
        "test": test_file_logger,
        "critical": test_file_logger,
    },
    resource_defs={
        "output_notebook_io_manager": local_output_notebook_io_manager,
    },
)
def hello_logging_job():
    """Job wiring hello_logging with two file loggers (function-style IO manager)."""
    hello_logging()
@job(
    logger_defs={
        "test": test_file_logger,
        "critical": test_file_logger,
    },
    resource_defs={
        "output_notebook_io_manager": (
            ConfigurableLocalOutputNotebookIOManager.configure_at_launch()
        ),
    },
)
def hello_logging_job_pythonic():
    """Same job as hello_logging_job, but with the Pythonic configure-at-launch IO manager."""
    hello_logging()
@pytest.fixture(name="hello_logging_job_type", params=[True, False])
def METHOD_NAME(request):
    """Parametrized fixture yielding both the legacy and the Pythonic job definitions."""
    return hello_logging_job if request.param else hello_logging_job_pythonic
def test_logging(hello_logging_job_type) -> None:
    """Run the job with a DEBUG and a CRITICAL logger; only the former captures INFO output."""
    with safe_tempfile_path() as test_file_path:
        with safe_tempfile_path() as critical_file_path:
            with instance_for_test() as instance:
                execute_job(
                    reconstructable(hello_logging_job_type),
                    run_config={
                        "loggers": {
                            "test": {
                                "config": {
                                    "name": "test",
                                    "file_path": test_file_path,
                                    "log_level": "DEBUG",
                                }
                            },
                            "critical": {
                                "config": {
                                    "name": "critical",
                                    "file_path": critical_file_path,
                                    "log_level": "CRITICAL",
                                }
                            },
                        }
                    },
                    instance=instance,
                )
                # Each logger wrote one JSON record per line (see LogTestFileHandler).
                with open(test_file_path, "r", encoding="utf8") as test_file:
                    records = [
                        json.loads(line)
                        for line in test_file.read().strip("\n").split("\n")
                        if line
                    ]
                with open(critical_file_path, "r", encoding="utf8") as critical_file:
                    critical_records = [
                        json.loads(line)
                        for line in critical_file.read().strip("\n").split("\n")
                        if line
                    ]
                messages = [x["dagster_meta"]["orig_message"] for x in records]
                assert "Hello, there!" in messages
                critical_messages = [x["dagster_meta"]["orig_message"] for x in critical_records]
                assert "Hello, there!" not in critical_messages |
6,813 | configure loader modules | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.mysql_user as mysql_user
import salt.utils.data
from tests.support.mock import MagicMock, patch
@pytest.fixture
def METHOD_NAME():
    """Provide empty module globals for the mysql_user state module under test."""
    return {mysql_user: {}}
def test_present():
    """
    Test to ensure that the named user is present with
    the specified properties.
    """
    name = "frank"
    password = "bob@cat"
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Each side_effect value is consumed by one mocked call, in the order the
    # assertions below invoke mysql_user.present().
    mock = MagicMock(
        side_effect=[
            True,
            False,
            True,
            False,
            False,
            True,
            False,
            False,
            False,
            False,
            False,
            True,
        ]
    )
    mock_t = MagicMock(return_value=True)
    mock_f = MagicMock(return_value=False)
    mock_str = MagicMock(return_value="salt")
    mock_none = MagicMock(return_value=None)
    mock_sn = MagicMock(side_effect=[None, "salt", None, None, None])
    with patch.object(salt.utils.data, "is_true", mock_f):
        comt = (
            "Either password or password_hash must be specified,"
            " unless allow_passwordless is True"
        )
        ret.update({"comment": comt})
        assert mysql_user.present(name) == ret
    with patch.dict(
        mysql_user.__salt__,
        {"mysql.user_exists": mock, "mysql.user_chpass": mock_t},
    ):
        with patch.object(salt.utils.data, "is_true", mock_t):
            comt = "User frank@localhost is already present with passwordless login"
            ret.update({"comment": comt, "result": True})
            assert mysql_user.present(name, allow_passwordless=True) == ret
            with patch.object(mysql_user, "_get_mysql_error", mock_str):
                ret.update({"comment": "salt", "result": False})
                assert mysql_user.present(name) == ret
            with patch.object(mysql_user, "_get_mysql_error", mock_str):
                comt = "User frank@localhost is already present with the desired password"
                ret.update({"comment": comt, "result": True})
                assert mysql_user.present(name, password=password) == ret
            with patch.object(mysql_user, "_get_mysql_error", mock_str):
                ret.update({"comment": "salt", "result": False})
                assert mysql_user.present(name, password=password) == ret
            with patch.object(mysql_user, "_get_mysql_error", mock_none):
                with patch.dict(mysql_user.__opts__, {"test": True}):
                    comt = "Password for user frank@localhost is set to be changed"
                    ret.update({"comment": comt, "result": None})
                    assert mysql_user.present(name, password=password) == ret
            with patch.object(mysql_user, "_get_mysql_error", mock_sn):
                with patch.dict(mysql_user.__opts__, {"test": False}):
                    ret.update({"comment": "salt", "result": False})
                    assert mysql_user.present(name, password=password) == ret
                with patch.dict(mysql_user.__opts__, {"test": True}):
                    comt = "User frank@localhost is set to be added"
                    ret.update({"comment": comt, "result": None})
                    assert mysql_user.present(name, password=password) == ret
                with patch.dict(mysql_user.__opts__, {"test": False}):
                    comt = "Password for user frank@localhost has been changed"
                    ret.update(
                        {"comment": comt, "result": True, "changes": {name: "Updated"}}
                    )
                    assert mysql_user.present(name, password=password) == ret
def test_absent():
    """
    Test to ensure that the named user is absent.

    Walks three scenarios against ``mysql_user.absent``: test-mode preview,
    successful removal, failed removal (MySQL error), and finally the
    user-not-present case.
    """
    name = "frank_exampledb"
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # user_exists: three truthy calls (user present), then three falsy (absent)
    mock = MagicMock(side_effect=[True, True, True, False, False, False])
    # user_remove: first removal succeeds, second fails
    mock_t = MagicMock(side_effect=[True, False])
    mock_str = MagicMock(return_value="salt")
    mock_none = MagicMock(return_value=None)
    with patch.dict(
        mysql_user.__salt__,
        {"mysql.user_exists": mock, "mysql.user_remove": mock_t},
    ):
        # test=True: only a preview comment, result None
        with patch.dict(mysql_user.__opts__, {"test": True}):
            comt = "User frank_exampledb@localhost is set to be removed"
            ret.update({"comment": comt, "result": None})
            assert mysql_user.absent(name) == ret
        with patch.dict(mysql_user.__opts__, {"test": False}):
            # removal succeeds (first mock_t value)
            comt = "User frank_exampledb@localhost has been removed"
            ret.update(
                {
                    "comment": comt,
                    "result": True,
                    "changes": {"frank_exampledb": "Absent"},
                }
            )
            assert mysql_user.absent(name) == ret
            # removal fails (second mock_t value); the state reports the MySQL error
            with patch.object(mysql_user, "_get_mysql_error", mock_str):
                comt = "User frank_exampledb@localhost has been removed"
                ret.update({"comment": "salt", "result": False, "changes": {}})
                assert mysql_user.absent(name) == ret
                # NOTE(review): comt is assigned here but the expected comment is
                # the mocked error string "salt" — comt appears to be dead code.
                comt = "User frank_exampledb@localhost has been removed"
                ret.update({"comment": "salt"})
                assert mysql_user.absent(name) == ret
            # user no longer exists (falsy mock values): nothing to remove
            with patch.object(mysql_user, "_get_mysql_error", mock_none):
                comt = (
                    "User frank_exampledb@localhost is not present,"
                    " so it cannot be removed"
                )
                ret.update({"comment": comt, "result": True, "changes": {}})
                assert mysql_user.absent(name) == ret
6,814 | tokenize | #!/usr/bin/env python3
import re
import sys
import os
import inspect
import nltk.corpus
import nltk.METHOD_NAME
from collections import defaultdict
def is_hyphen(s):
    """Return True when *s* is a hyphen or en-dash token."""
    return s == "-" or s == "–"
def no_hyphens(ws):
    """Return *ws* as a tuple with all hyphen/dash tokens removed."""
    return tuple(w for w in ws if not is_hyphen(w))
def get_text(node):
    """Concatenate all text content of an XML element tree, in document order.

    Collects ``text`` and ``tail`` of *node* and every descendant (including
    the tail of *node* itself, matching a plain pre-order traversal).
    """
    parts = []

    def collect(elem):
        # `is not None` matters: Elements with no children are falsy.
        if elem is None:
            return
        if elem.text is not None:
            parts.append(elem.text)
        for sub in elem:
            collect(sub)
        if elem.tail is not None:
            parts.append(elem.tail)

    collect(node)
    return "".join(parts)
def METHOD_NAME(s):
    """Splits tokens (hyphens/slashes count as separate tokens)."""
    tokens = []
    # NLTK tokenizer uses PTB standard, which doesn't split on hyphens or slashes
    for tok in nltk.METHOD_NAME.word_tokenize(s):
        # tokenizer normalizes quotes etc., so we need to detokenize later
        # re-split each PTB token on hyphen/en-dash/slash; the capturing group
        # keeps the separators as tokens, and empty fragments are dropped
        tokens.extend([t for t in re.split(r"([-–/])", tok) if t != ""])
    return tokens
def fixedcase_word(w, truelist=None):
    """Returns True if w should be fixed-case, None if unsure.

    :param w: a single token (hyphens/slashes already split off by the tokenizer)
    :param truelist: optional set of words known to be fixed-case
    :return: True when the token's casing must be preserved, None when undecided
    """
    if truelist is not None and w in truelist:
        return True
    if any(c.isupper() for c in w[1:]):
        # tokenized word with noninitial uppercase
        return True
    if len(w) == 1 and w.isupper() and w not in {'A', 'K', 'N'}:
        # single uppercase letter
        # ('A', 'K', 'N' excluded — presumably too ambiguous as ordinary words/labels; verify)
        return True
    if len(w) == 2 and w[1] == '.' and w[0].isupper():
        # initial with period
        return True
    # explicit "unsure" result (was an implicit fall-through before)
    return None
def fixedcase_prefix(ws, truelist=None, phrase_truelist=None):
    """Returns a list of 1 or more bools: True if some prefix of the tuple 'ws' should be fixed-case,
    False if not, None if unsure."""
    # phrase_truelist is sorted in descending order by phrase length
    if phrase_truelist is not None:
        for n, truelist_bin in phrase_truelist:
            # exact match of the first n tokens against a known phrase
            if ws[:n] in truelist_bin:
                return [True] * n
            if len(no_hyphens(ws)) >= n and no_hyphens(ws)[:n] in truelist_bin:
                # no hyphens in truelist entries
                # phrase matches once hyphen tokens are skipped: mark matched
                # words True and interleaved hyphens False, until n words covered
                bs = []
                i = 0
                for tok in ws:
                    if is_hyphen(tok):
                        bs.append(False)
                    else:
                        bs.append(True)
                        i += 1
                        if i == n:
                            break
                return bs
    if ws[0] in {'L', 'D'} and len(ws) >= 2 and ws[1] == '’':
        # French contractions: don't apply fixed-case
        return [False, False]
    # fall back to single-word heuristics (True or None)
    return [fixedcase_word(ws[0], truelist=truelist)]
def fixedcase_title(
    ws, truelist=None, phrase_truelist=None, amodifiers=None, ndescriptors=None
):
    """Returns a list of bools: True if w should be fixed-case, False if
    not, None if unsure."""
    bs = []
    ws = tuple(ws)
    i = 0
    while i < len(ws):
        # classify the next 1+ tokens starting at position i
        b = fixedcase_prefix(ws[i:], truelist=truelist, phrase_truelist=phrase_truelist)
        if i == 0:
            pass
        elif b[0] and amodifiers and ws[i - 1] in amodifiers:  # e.g. North America
            bs[-1] = True
        elif b[0] and is_hyphen(ws[i - 1]) and amodifiers and ws[i - 2] in amodifiers:
            # hyphenated modifier, e.g. South-East Asia
            # NOTE(review): when i == 1, ws[i - 2] reads ws[-1] (wraps to the
            # last token) — presumably never hit in practice; verify
            bs[-2] = True
        elif not b[0] and bs[-1] and ndescriptors and ws[i] in ndescriptors:
            # "<name> <ndescriptor>", e.g. Columbia University
            b[0] = True
        elif ndescriptors and i >= 2 and ws[i - 1] == "of" and ws[i - 2] in ndescriptors:
            # "<ndescriptor> of <name>", e.g. University of Edinburgh
            if b[0]:
                bs[-2] = True
            else:
                print(ws[i - 2 :], file=sys.stderr)
                # mainly: University of X where X is not in the truelist
        bs.extend(b)
        i += len(b)
    return bs
def replace_node(old, new):
    """Overwrite *old* in place with the tag, attributes, text, children and tail of *new*."""
    old.clear()
    old.tag, old.text, old.tail = new.tag, new.text, new.tail
    old.attrib.update(new.attrib)
    old.extend(new)
def append_text(node, text):
    """Append *text* to the textual content at the end of *node*.

    Goes into ``node.text`` when the element has no children, otherwise into
    the ``tail`` of the last child.
    """
    if len(node) == 0:
        node.text = (node.text or "") + text
    else:
        last = node[-1]
        last.tail = (last.tail or "") + text
def load_lists():
    """Load capitalization resources from data files next to this module.

    Returns a 5-tuple:
    - truelist: set of single words that are always fixed-case
    - phrase_truelist: list of (n_tokens, set of token-tuples), longest phrases first
    - special_titles: lowercased title -> canonical special-cased title
    - amodifiers: words whose FOLLOWING word decides fixed-case (North America)
    - ndescriptors: words whose PRECEDING word decides fixed-case (Columbia University)
    """
    truelist = set()
    phrase_truelist = defaultdict(set)
    module_file = inspect.getfile(inspect.currentframe())
    module_dir = os.path.dirname(os.path.abspath(module_file))
    truelist_file = os.path.join(module_dir, "truelist")
    # use a context manager so the file handle is closed deterministically
    # (the original iterated over a bare open(), leaking the handle)
    with open(truelist_file) as truelist_in:
        for line in truelist_in:
            line = line.split("#")[0].strip()  # drop inline comments
            if line == "":
                continue
            assert not any(
                is_hyphen(c) for c in line
            ), f'Truelist entries should not contain hyphens: {line}'
            if ' ' not in line:
                truelist.add(line)
            else:
                toks = tuple(METHOD_NAME(line))
                phrase_truelist[len(toks)].add(toks)  # group phrases by number of tokens
    phrase_truelist = sorted(
        phrase_truelist.items(), reverse=True
    )  # bins sorted by phrase length
    special_file = os.path.join(module_dir, "special-case-titles")
    with open(special_file) as inF:
        special_titles = {
            line.strip().lower(): line.strip() for line in inF if line.strip()
        }
    amodifiers = (
        'North',
        'South',
        'East',
        'West',
        'Northeast',
        'Northwest',
        'Southeast',
        'Southwest',
        'Central',
        'Northern',
        'Southern',
        'Eastern',
        'Western',
        'Northeastern',
        'Northwestern',
        'Southeastern',
        'Southwestern',
        'Modern',
        'Ancient',
    )  # use subsequent word to determine fixed-case. will miss hyphenated modifiers (e.g. South-East)
    ndescriptors = (
        'Bay',
        'Coast',
        'Gulf',
        'Island',
        'Isle',
        'Lake',
        'Republic',
        'University',
    )  # use preceding word to determine fixed-case
    return truelist, phrase_truelist, special_titles, amodifiers, ndescriptors
6,815 | test artifactpriority detail api method delete | import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.models import Artifactpriority
class ArtifactpriorityAPIViewTestCase(TestCase):
    """artifactpriority API view tests"""

    # NOTE(review): the assertions below show this endpoint is exposed
    # read-only: anonymous -> 401, authenticated GET -> 200,
    # POST/PUT/DELETE -> 405, and URLs without trailing slash 301-redirect.
    @classmethod
    def setUpTestData(cls):
        # create object
        Artifactpriority.objects.create(artifactpriority_name='artifactpriority_api_1')
        # create user
        User.objects.create_user(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
    def test_artifactpriority_list_api_unauthorized(self):
        """unauthorized access is forbidden"""
        # get response
        response = self.client.get('/api/artifactpriority/')
        # compare
        self.assertEqual(response.status_code, 401)
    def test_artifactpriority_list_api_method_get(self):
        """GET is allowed"""
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # get response
        response = self.client.get('/api/artifactpriority/')
        # compare
        self.assertEqual(response.status_code, 200)
    def test_artifactpriority_list_api_method_post(self):
        """POST is forbidden"""
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # create POST string
        poststring = {"artifactpriority_name": "artifactpriority_api_2"}
        # get response
        response = self.client.post('/api/artifactpriority/', data=poststring)
        # compare
        self.assertEqual(response.status_code, 405)
    def test_artifactpriority_list_api_redirect(self):
        """test redirect with appending slash"""
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # create url
        destination = urllib.parse.quote('/api/artifactpriority/', safe='/')
        # get response
        response = self.client.get('/api/artifactpriority', follow=True)
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
    def test_artifactpriority_detail_api_unauthorized(self):
        """unauthorized access is forbidden"""
        # get object
        artifactpriority_api_1 = Artifactpriority.objects.get(
            artifactpriority_name='artifactpriority_api_1'
        )
        # get response
        response = self.client.get(
            '/api/artifactpriority/'
            + str(artifactpriority_api_1.artifactpriority_id)
            + '/'
        )
        # compare
        self.assertEqual(response.status_code, 401)
    def test_artifactpriority_detail_api_method_get(self):
        """GET is allowed"""
        # get object
        artifactpriority_api_1 = Artifactpriority.objects.get(
            artifactpriority_name='artifactpriority_api_1'
        )
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # get response
        response = self.client.get(
            '/api/artifactpriority/'
            + str(artifactpriority_api_1.artifactpriority_id)
            + '/'
        )
        # compare
        self.assertEqual(response.status_code, 200)
    def METHOD_NAME(self):
        """DELETE is forbidden"""
        # get object
        artifactpriority_api_1 = Artifactpriority.objects.get(
            artifactpriority_name='artifactpriority_api_1'
        )
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # get response
        response = self.client.delete(
            '/api/artifactpriority/'
            + str(artifactpriority_api_1.artifactpriority_id)
            + '/'
        )
        # compare
        self.assertEqual(response.status_code, 405)
    def test_artifactpriority_detail_api_method_put(self):
        """PUT is forbidden"""
        # get object
        artifactpriority_api_1 = Artifactpriority.objects.get(
            artifactpriority_name='artifactpriority_api_1'
        )
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # create url
        destination = urllib.parse.quote(
            '/api/artifactpriority/'
            + str(artifactpriority_api_1.artifactpriority_id)
            + '/',
            safe='/',
        )
        # create PUT string
        putstring = {"artifactpriority_name": "new_artifactpriority_api_1"}
        # get response
        response = self.client.put(
            destination, data=putstring, content_type='application/json'
        )
        # compare
        self.assertEqual(response.status_code, 405)
    def test_artifactpriority_detail_api_redirect(self):
        """test redirect with appending slash"""
        # get object
        artifactpriority_api_1 = Artifactpriority.objects.get(
            artifactpriority_name='artifactpriority_api_1'
        )
        # login testuser
        self.client.login(
            username='testuser_artifactpriority_api', password='IktrZIZLncwTbOBD9Bhw'
        )
        # create url
        destination = urllib.parse.quote(
            '/api/artifactpriority/'
            + str(artifactpriority_api_1.artifactpriority_id)
            + '/',
            safe='/',
        )
        # get response
        response = self.client.get(
            '/api/artifactpriority/' + str(artifactpriority_api_1.artifactpriority_id),
            follow=True,
        )
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
6,816 | catch std outputs | import os
import sys
import threading
import collections
import websocket
import json
from datetime import datetime
from openpype.lib import Logger
from openpype_modules.webserver.host_console_listener import MsgAction
log = Logger.get_logger(__name__)
class StdOutBroker:
    """
    Application showing console in Services tray for non python hosts
    instead of cmd window.

    Hooks ``sys.stdout``/``sys.stderr`` at construction time, buffers all
    output in a queue, and a repeating timer flushes the queue to the tray's
    webserver over a websocket.
    """
    # NOTE(review): MAX_LINES is declared but not enforced anywhere in this
    # class — the deque is unbounded.
    MAX_LINES = 10000
    # flush interval for the queue, in seconds
    TIMER_TIMEOUT = 0.200
    def __init__(self, host_name):
        self.host_name = host_name
        self.webserver_client = None
        self.original_stdout_write = None
        self.original_stderr_write = None
        self.log_queue = collections.deque()
        date_str = datetime.now().strftime("%d%m%Y%H%M%S")
        # timestamp suffix makes the id unique per host session
        self.host_id = "{}_{}".format(self.host_name, date_str)
        self._std_available = False
        self._is_running = False
        # hook stdout/stderr immediately so early output is captured
        self.METHOD_NAME()
        self._timer = None
    @property
    def send_to_tray(self):
        """Checks if connected to tray and have access to logs."""
        return self.webserver_client and self._std_available
    def start(self):
        """Start app, create and start timer"""
        if not self._std_available or self._is_running:
            return
        self._is_running = True
        self._create_timer()
        self._connect_to_tray()
    def stop(self):
        """Disconnect from Tray, process last logs"""
        if not self._is_running:
            return
        self._is_running = False
        # flush whatever is still queued before closing the connection
        self._process_queue()
        self._disconnect_from_tray()
    def host_connected(self):
        """Send to Tray console that host is ready - icon change. """
        log.info("Host {} connected".format(self.host_id))
        payload = {
            "host": self.host_id,
            "action": MsgAction.INITIALIZED,
            "text": "Integration with {}".format(
                str.capitalize(self.host_name))
        }
        self._send(payload)
    def _create_timer(self):
        # one-shot Timer; _timer_callback re-arms it, giving a periodic flush
        timer = threading.Timer(self.TIMER_TIMEOUT, self._timer_callback)
        timer.start()
        self._timer = timer
    def _timer_callback(self):
        if not self._is_running:
            return
        self._process_queue()
        self._create_timer()
    def _connect_to_tray(self):
        """Connect to Tray webserver to pass console output. """
        if not self._std_available:  # not content to log
            return
        ws = websocket.WebSocket()
        webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
        if not webserver_url:
            print("Unknown webserver url, cannot connect to pass log")
            return
        # switch scheme: http -> ws (and https -> wss)
        webserver_url = webserver_url.replace("http", "ws")
        ws.connect("{}/ws/host_listener".format(webserver_url))
        self.webserver_client = ws
        payload = {
            "host": self.host_id,
            "action": MsgAction.CONNECTING,
            "text": "Integration with {}".format(
                str.capitalize(self.host_name))
        }
        self._send(payload)
    def _disconnect_from_tray(self):
        """Send to Tray that host is closing - remove from Services. """
        print("Host {} closing".format(self.host_name))
        if not self.webserver_client:
            return
        payload = {
            "host": self.host_id,
            "action": MsgAction.CLOSE,
            "text": "Integration with {}".format(
                str.capitalize(self.host_name))
        }
        self._send(payload)
        self.webserver_client.close()
    def METHOD_NAME(self):
        """Redirects standard out and error to own functions"""
        # sys.stdout/stderr can be None in GUI hosts without a console
        if sys.stdout:
            self.original_stdout_write = sys.stdout.write
            sys.stdout.write = self._my_stdout_write
            self._std_available = True
        if sys.stderr:
            self.original_stderr_write = sys.stderr.write
            sys.stderr.write = self._my_stderr_write
            self._std_available = True
    def _my_stdout_write(self, text):
        """Appends outputted text to queue, keep writing to original stdout"""
        if self.original_stdout_write is not None:
            self.original_stdout_write(text)
        if self.send_to_tray:
            self.log_queue.append(text)
    def _my_stderr_write(self, text):
        """Appends outputted text to queue, keep writing to original stderr"""
        if self.original_stderr_write is not None:
            self.original_stderr_write(text)
        if self.send_to_tray:
            self.log_queue.append(text)
    def _process_queue(self):
        """Sends lines and purges queue"""
        if not self.send_to_tray:
            return
        lines = tuple(self.log_queue)
        self.log_queue.clear()
        if lines:
            payload = {
                "host": self.host_id,
                "action": MsgAction.ADD,
                "text": "\n".join(lines)
            }
            self._send(payload)
    def _send(self, payload):
        """Worker method to send to existing websocket connection."""
        if not self.send_to_tray:
            return
        try:
            self.webserver_client.send(json.dumps(payload))
        except ConnectionResetError:  # Tray closed
            # NOTE(review): the payload that triggered the reset is not
            # re-sent after reconnecting — it is silently dropped; verify
            # whether that is acceptable.
            self._connect_to_tray()
6,817 | test yields | from recipe_scrapers._grouping_utils import IngredientGroup
from recipe_scrapers.halfbakedharvest import HalfBakedHarvest
from tests import ScraperTest
class TestHalfBakedHarvestScraper(ScraperTest):
    """Regression tests for the HalfBakedHarvest scraper.

    Runs against the cached fixture page ``halfbakedharvest_groups`` and pins
    the exact metadata, flat ingredient list, grouped ingredients, and
    instruction text the scraper is expected to extract.
    """
    scraper_class = HalfBakedHarvest
    test_file_name = "halfbakedharvest_groups"
    def test_host(self):
        self.assertEqual("halfbakedharvest.com", self.harvester_class.host())
    def test_canonical_url(self):
        self.assertEqual(
            "https://www.halfbakedharvest.com/street-corn-pasta-salad/",
            self.harvester_class.canonical_url(),
        )
    def test_title(self):
        self.assertEqual(
            self.harvester_class.title(),
            "Street Corn Pasta Salad",
        )
    def test_author(self):
        self.assertEqual(self.harvester_class.author(), "halfbakedharvest")
    def METHOD_NAME(self):
        self.assertEqual("8 servings", self.harvester_class.yields())
    def test_image(self):
        self.assertEqual(
            "https://www.halfbakedharvest.com/wp-content/uploads/2023/08/Street-Corn-Pasta-Salad-1.jpg",
            self.harvester_class.image(),
        )
    def test_ingredients(self):
        # flat list: union of both ingredient groups, in page order
        self.assertEqual(
            [
                "4 ounces cream cheese, at room temperature",
                "1/3 cup sour cream",
                "2 tablespoons extra virgin olive oil",
                "1-2 cloves garlic, grated",
                "1 tablespoon chopped fresh chives",
                "kosher salt and black pepper",
                "3/4 cup crumbled cotija or feta cheese",
                "1 pound short cut pasta",
                "1 head romaine lettuce, shredded",
                "2 cups grilled or roasted corn ((3-4 raw))",
                "1/2 cup fresh basil leaves, torn",
                "1/2 cup fresh cilantro, chopped",
                "1/2 cup spicy cheddar cheese, cubed",
                "1 avocado, chopped",
                "4 tablespoons salted butter",
                "2 teaspoons smoked paprika",
                "2 tablespoons chili powder",
                "1/2-2 teaspoons cayenne pepper, to your taste",
                "1/4 cup mayo or yogurt",
                "2 tablespoons lime juice",
            ],
            self.harvester_class.ingredients(),
        )
    def test_ingredient_groups(self):
        # NOTE(review): the leading `return` is redundant — assert* methods
        # return None; kept as-is to stay byte-identical.
        return self.assertEqual(
            [
                IngredientGroup(
                    ingredients=[
                        "4 ounces cream cheese, at room temperature",
                        "1/3 cup sour cream",
                        "2 tablespoons extra virgin olive oil",
                        "1-2 cloves garlic, grated",
                        "1 tablespoon chopped fresh chives",
                        "kosher salt and black pepper",
                        "3/4 cup crumbled cotija or feta cheese",
                    ],
                    purpose="Dressing",
                ),
                IngredientGroup(
                    ingredients=[
                        "1 pound short cut pasta",
                        "1 head romaine lettuce, shredded",
                        "2 cups grilled or roasted corn ((3-4 raw))",
                        "1/2 cup fresh basil leaves, torn",
                        "1/2 cup fresh cilantro, chopped",
                        "1/2 cup spicy cheddar cheese, cubed",
                        "1 avocado, chopped",
                        "4 tablespoons salted butter",
                        "2 teaspoons smoked paprika",
                        "2 tablespoons chili powder",
                        "1/2-2 teaspoons cayenne pepper, to your taste",
                        "1/4 cup mayo or yogurt",
                        "2 tablespoons lime juice",
                    ],
                    purpose="Salad",
                ),
            ],
            self.harvester_class.ingredient_groups(),
        )
    def test_instructions(self):
        return self.assertEqual(
            """1. To make the dressing. Combine all ingredients in a large salad bowl. Season with salt and pepper.2. Bring a pot of salted water to a boil. Boil the pasta to al dente, according to package directions. Drain. Immediately toss with the dressing. Add the lettuce, corn, cheddar, basil, cilantro, and avocado. Toss to combine. 3. In a skillet, melt the butter until golden. Mix in the chili powder, paprika, cayenne, and a pinch of salt. Cook another minute, then remove from the heat.4. Mix the mayo or yogurt with lime juice with a pinch of salt.5. Serve the pasta warm or cold, topped with lime mayo and chili butter. The salad will develop more flavor as it sits.""",
            self.harvester_class.instructions(),
        )
6,818 | write length | #
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Basic Encoding Rules utility functions.
ASN.1 standard.
"""
from enum import IntEnum
from typing import BinaryIO
from pyrdp.core.packing import Uint8, Uint16BE, Uint32BE
class PC(IntEnum):
    # Primitive/Constructed flag of the BER identifier octet (bit 6).
    BER_PC_MASK = 0x20
    BER_PRIMITIVE = 0x00
    BER_CONSTRUCT = 0x20
class Class(IntEnum):
    # Tag class of the BER identifier octet (bits 7-8):
    # universal / application / context-specific / private.
    BER_CLASS_MASK = 0xC0
    BER_CLASS_UNIV = 0x00
    BER_CLASS_APPL = 0x40
    BER_CLASS_CTXT = 0x80
    BER_CLASS_PRIV = 0xC0
class Tag(IntEnum):
    # Universal tag numbers, carried in the low 5 bits of the identifier octet.
    BER_TAG_MASK = 0x1F
    BER_TAG_BOOLEAN = 0x01
    BER_TAG_INTEGER = 0x02
    BER_TAG_BIT_STRING = 0x03
    BER_TAG_OCTET_STRING = 0x04
    BER_TAG_OBJECT_IDENFIER = 0x06
    BER_TAG_ENUMERATED = 0x0A
    BER_TAG_SEQUENCE = 0x10
    BER_TAG_SEQUENCE_OF = 0x10
def berPC(isConstruct: bool) -> PC:
    """
    Map a construct flag to its BER PC value.
    :param isConstruct: True if BER_CONSTRUCT expected
    :return: BERPC value (BER_CONSTRUCT if True, BER_PRIMITIVE otherwise)
    """
    return PC.BER_CONSTRUCT if isConstruct else PC.BER_PRIMITIVE
def readLength(s: BinaryIO) -> int:
    """
    Read the length field of a BER structure.
    Short form fits in one byte; long form announces 1 or 2 extra length bytes.
    :param s: stream
    """
    first = Uint8.unpack(s.read(1))
    if not first & 0x80:
        # short form: length is the byte itself
        return first
    extra = first & 0x7F
    if extra == 1:
        return Uint8.unpack(s.read(1))
    if extra == 2:
        return Uint16BE.unpack(s.read(2))
    raise ValueError("BER length must be 1 or 2")
def METHOD_NAME(length: int) -> bytes:
    """
    Pack structure length as expected in BER specification.
    Uses the one-byte short form when it fits, otherwise 0x82 + 16-bit length.
    :param length: structure length.
    """
    if length <= 0x7F:
        return Uint8.pack(length)
    return Uint8.pack(0x82) + Uint16BE.pack(length)
def readUniversalTag(s: BinaryIO, tag: Tag, isConstruct: bool) -> bool:
    """
    Unpack universal tag and return True if the proper tag was read.
    :param s: stream
    :param tag: BER tag
    :param isConstruct: True if a construct is expected
    """
    expected = (Class.BER_CLASS_UNIV | berPC(isConstruct)) | (Tag.BER_TAG_MASK & tag)
    return Uint8.unpack(s.read(1)) == expected
def writeUniversalTag(tag: Tag, isConstruct: bool) -> bytes:
    """
    Pack universal tag.
    :param tag: BER tag
    :param isConstruct: True if the structure is a construct
    """
    identifier = (Class.BER_CLASS_UNIV | berPC(isConstruct)) | (Tag.BER_TAG_MASK & tag)
    return Uint8.pack(identifier)
def readApplicationTag(s: BinaryIO, tag: Tag) -> int:
    """
    Unpack an application tag and return the length of the application packet.
    :param s: stream
    :param tag: application tag.
    :raises ValueError: when the identifier byte or tag number does not match.
    """
    byte = Uint8.unpack(s.read(1))
    if tag > 30:
        # long form: identifier byte carries all tag-number bits set,
        # the actual tag number follows in the next byte
        if byte != ((Class.BER_CLASS_APPL | PC.BER_CONSTRUCT) | Tag.BER_TAG_MASK):
            raise ValueError("Invalid BER tag")
        byte = Uint8.unpack(s.read(1))
        if byte != tag:
            raise ValueError("Unexpected application tag")
    else:
        # short form: tag number fits in the low 5 bits of the identifier byte
        if byte != ((Class.BER_CLASS_APPL | PC.BER_CONSTRUCT) | (tag & Tag.BER_TAG_MASK)):
            raise ValueError("Unexpected application tag")
    return readLength(s)
def writeApplicationTag(tag: Tag, size: int) -> bytes:
    """
    Pack an application tag.
    :param tag: application tag.
    :param size: the size of the application packet.
    """
    header = Class.BER_CLASS_APPL | PC.BER_CONSTRUCT
    if tag > 30:
        # long form: tag number goes in a second byte
        return Uint8.pack(header | Tag.BER_TAG_MASK) + Uint8.pack(tag) + METHOD_NAME(size)
    return Uint8.pack(header | (Tag.BER_TAG_MASK & tag)) + METHOD_NAME(size)
def readContextualTag(s: BinaryIO, tag: Tag, isConstruct: bool) -> int:
    """
    Unpack contextual tag and return the tag length.
    :param s: stream
    :param tag: BER tag
    :param isConstruct: True if a construct is expected
    :raises ValueError: when the identifier byte does not match.
    """
    expected = (Class.BER_CLASS_CTXT | berPC(isConstruct)) | (Tag.BER_TAG_MASK & tag)
    if Uint8.unpack(s.read(1)) != expected:
        raise ValueError("Unexpected contextual tag")
    return readLength(s)
def writeContextualTag(tag: Tag, size: int) -> bytes:
    """
    Pack contextual tag.
    :param tag: BER tag
    :param size: the size of the contextual packet.
    """
    identifier = (Class.BER_CLASS_CTXT | PC.BER_CONSTRUCT) | (Tag.BER_TAG_MASK & tag)
    return Uint8.pack(identifier) + METHOD_NAME(size)
def readBoolean(s: BinaryIO) -> bool:
    """
    Unpack a BER boolean (tag + 1-byte length + 1-byte value).
    :param s: stream
    :raises ValueError: on a wrong tag or a size other than 1.
    """
    if not readUniversalTag(s, Tag.BER_TAG_BOOLEAN, False):
        raise ValueError("Bad boolean tag")
    if readLength(s) != 1:
        raise ValueError("Bad boolean size")
    return Uint8.unpack(s.read(1)) != 0
def writeBoolean(value: bool) -> bytes:
    """
    Pack a BER boolean: True encodes as 0xff, False as 0x00.
    """
    payload = Uint8.pack(0xff) if value else Uint8.pack(0)
    return writeUniversalTag(Tag.BER_TAG_BOOLEAN, False) + METHOD_NAME(1) + payload
def readInteger(s: BinaryIO) -> int:
    """
    Unpack a BER integer (1, 2, 3 or 4 bytes, big-endian, unsigned).
    :param s: stream
    :raises ValueError: on a wrong tag or unsupported size.
    """
    if not readUniversalTag(s, Tag.BER_TAG_INTEGER, False):
        raise ValueError("Bad integer tag")
    size = readLength(s)
    if size == 1:
        return Uint8.unpack(s.read(1))
    elif size == 2:
        return Uint16BE.unpack(s.read(2))
    elif size == 3:
        # 24-bit value: one high byte followed by a 16-bit big-endian word
        integer1 = Uint8.unpack(s.read(1))
        integer2 = Uint16BE.unpack(s.read(2))
        return (integer1 << 16) + integer2
    elif size == 4:
        return Uint32BE.unpack(s.read(4))
    else:
        raise ValueError("Wrong integer size")
def writeInteger(value: int) -> bytes:
    """
    Pack a BER integer in the smallest of 1, 2 or 4 bytes that fits.
    """
    header = writeUniversalTag(Tag.BER_TAG_INTEGER, False)
    if value <= 0xff:
        return header + METHOD_NAME(1) + Uint8.pack(value)
    if value <= 0xffff:
        return header + METHOD_NAME(2) + Uint16BE.pack(value)
    return header + METHOD_NAME(4) + Uint32BE.pack(value)
def readOctetString(s: BinaryIO) -> bytes:
    """
    Unpack a BER octet string and return its raw payload.
    :param s: stream
    :raises ValueError: on a wrong tag.
    """
    if not readUniversalTag(s, Tag.BER_TAG_OCTET_STRING, False):
        raise ValueError("Bad octet string tag")
    length = readLength(s)
    return s.read(length)
def writeOctetString(value: bytes) -> bytes:
    """
    Pack a BER octet string: tag + length + raw bytes.
    """
    header = writeUniversalTag(Tag.BER_TAG_OCTET_STRING, False) + METHOD_NAME(len(value))
    return header + value
def readEnumeration(s: BinaryIO) -> int:
    """
    Unpack a BER enumeration value (always a single byte).
    :param s: stream
    :raises ValueError: on a wrong tag or a size other than 1.
    """
    tagOk = readUniversalTag(s, Tag.BER_TAG_ENUMERATED, False)
    if not tagOk:
        raise ValueError("Bad enumeration tag")
    size = readLength(s)
    if size != 1:
        raise ValueError("Enumeration size must be 1")
    return Uint8.unpack(s.read(1))
def writeEnumeration(value: int) -> bytes:
    """
    Pack a BER enumeration value: tag + length 1 + value byte.
    """
    parts = [
        writeUniversalTag(Tag.BER_TAG_ENUMERATED, False),
        METHOD_NAME(1),
        Uint8.pack(value),
    ]
    return b"".join(parts)
6,819 | serialize | # Copyright 2018-2023 contributors to the OpenLineage project
# SPDX-License-Identifier: Apache-2.0
import json
import os
import textwrap
from enum import Enum
from unittest import mock
import attr
import pytest
from openlineage.client import set_producer
from openlineage.common.provider.dbt.local import DbtLocalArtifactProcessor
from openlineage.common.provider.dbt.processor import ParentRunMetadata
from openlineage.common.test import match
@pytest.fixture(scope='session', autouse=True)
def setup_producer():
    # Pin the OpenLineage producer URI once per session so emitted events are deterministic.
    set_producer('https://github.com/OpenLineage/OpenLineage/tree/0.0.1/integration/dbt')
@pytest.fixture
def parent_run_metadata():
    # Fixed parent-run identifiers shared by the processor tests below.
    return ParentRunMetadata(
        run_id="f99310b4-3c3c-1a1a-2b2b-c1b95c24ff11",
        job_name="dbt-job-name",
        job_namespace="dbt"
    )
def METHOD_NAME(inst, field, value):
    """attrs value serializer: collapse Enum members to their raw value, pass everything else through."""
    return value.value if isinstance(value, Enum) else value
@pytest.mark.parametrize(
    "path",
    [
        "tests/dbt/small",
        "tests/dbt/large",
        "tests/dbt/profiles",
        "tests/dbt/catalog",
        "tests/dbt/fail",
        "tests/dbt/build",
        "tests/dbt/compiled_code",
        "tests/dbt/spark/thrift",
        "tests/dbt/spark/odbc",
        "tests/dbt/postgres",
        "tests/dbt/snapshot",
    ]
)
def test_dbt_parse_and_compare_event(path, parent_run_metadata):
    # Golden-file test: parse each fixture project and compare the serialized
    # start/complete/fail events against the checked-in result.json.
    processor = DbtLocalArtifactProcessor(
        producer='https://github.com/OpenLineage/OpenLineage/tree/0.0.1/integration/dbt',
        job_namespace='job-namespace',
        project_dir=path
    )
    processor.dbt_run_metadata = parent_run_metadata
    dbt_events = processor.parse()
    events = [
        attr.asdict(event, value_serializer=METHOD_NAME)
        for event
        in dbt_events.starts + dbt_events.completes + dbt_events.fails
    ]
    with open(f'{path}/result.json', 'r') as f:
        assert match(json.load(f), events)
@mock.patch('uuid.uuid4')
@mock.patch('datetime.datetime')
def test_dbt_parse_dbt_test_event(mock_datetime, mock_uuid, parent_run_metadata):
    # Freeze time and run ids so the emitted events match result.json exactly.
    mock_datetime.now.return_value.isoformat.return_value = '2021-08-25T11:00:25.277467+00:00'
    mock_uuid.side_effect = [
        '6edf42ed-d8d0-454a-b819-d09b9067ff99',
        '1a69c0a7-04bb-408b-980e-cbbfb1831ef7',
        'f99310b4-339a-4381-ad3e-c1b95c24ff11',
        'c11f2efd-4415-45fc-8081-10d2aaa594d2',
    ]
    processor = DbtLocalArtifactProcessor(
        producer='https://github.com/OpenLineage/OpenLineage/tree/0.0.1/integration/dbt',
        job_namespace='dbt-test-namespace',
        project_dir='tests/dbt/test',
    )
    processor.dbt_run_metadata = parent_run_metadata
    dbt_events = processor.parse()
    events = [
        attr.asdict(event, value_serializer=METHOD_NAME)
        for event
        in dbt_events.starts + dbt_events.completes + dbt_events.fails
    ]
    with open('tests/dbt/test/result.json', 'r') as f:
        assert match(json.load(f), events)
@mock.patch('uuid.uuid4')
@mock.patch.dict(
    os.environ,
    {
        "HOST": "foo_host",
        "PORT": "1111",
        "DB_NAME": "foo_db_name",
        "USER_NAME": "foo_user",
        "PASSWORD": "foo_password",
        "SCHEMA": "foo_schema",
    }
)
def test_dbt_parse_profile_with_env_vars(mock_uuid, parent_run_metadata):
    # Verifies env_var() interpolation inside profiles.yml via the env_vars fixture project.
    mock_uuid.side_effect = [
        '6edf42ed-d8d0-454a-b819-d09b9067ff99',
    ]
    processor = DbtLocalArtifactProcessor(
        producer='https://github.com/OpenLineage/OpenLineage/tree/0.0.1/integration/dbt',
        project_dir='tests/dbt/env_vars',
        target='prod',
        job_namespace="ol-namespace"
    )
    processor.dbt_run_metadata = parent_run_metadata
    dbt_events = processor.parse()
    events = [
        attr.asdict(event, value_serializer=METHOD_NAME)
        for event
        in dbt_events.starts + dbt_events.completes + dbt_events.fails
    ]
    with open('tests/dbt/env_vars/result.json', 'r') as f:
        assert match(json.load(f), events)
@pytest.fixture()
def jinja_env():
    # Jinja environment with one known variable and one known method injected,
    # used by the rendering tests below.
    env = DbtLocalArtifactProcessor.setup_jinja()
    env.globals.update({
        "test": "test_variable",
        "method": lambda: "test_method"
    })
    return env
def test_jinja_undefined_variable(jinja_env):
    """An unknown variable renders back to its original template text."""
    template = "{{ variable }}"
    assert DbtLocalArtifactProcessor.render_values_jinja(jinja_env, template) == template
def test_jinja_undefined_method(jinja_env):
    """An unknown method call renders back to its original template text."""
    template = "{{ undefined_method() }}"
    assert DbtLocalArtifactProcessor.render_values_jinja(jinja_env, template) == template
def test_jinja_defined_method(jinja_env):
    """A known method (env_var) plus the as_number filter is rendered."""
    os.environ['PORT_REDSHIFT'] = "13"
    template = "{{ env_var('PORT_REDSHIFT') | as_number }}"
    assert DbtLocalArtifactProcessor.render_values_jinja(jinja_env, template) == '13'
    del os.environ['PORT_REDSHIFT']
def test_jinja_defined_variable(jinja_env):
    """A variable present in the environment globals is substituted."""
    rendered = DbtLocalArtifactProcessor.render_values_jinja(jinja_env, "{{ test }}")
    assert rendered == 'test_variable'
def test_jinja_undefined_method_with_args(jinja_env):
    """An unknown method with dotted arguments is left untouched."""
    template = "# {{ does_not_exist(some_arg.subarg.subarg2) }}"
    assert DbtLocalArtifactProcessor.render_values_jinja(jinja_env, template) == template
@mock.patch.dict(os.environ, {"PORT": "1111"})
def test_jinja_include_section(jinja_env):
    # Only keys listed in include_section are rendered (and kept); an empty
    # include_section forces rendering of everything, so the missing FAKE_VAR raises.
    object = {
        "proper_one": "{{ env_var('PORT') }}",
        "to_ignore": "{{ env_var('FAKE_VAR') }}",
    }
    parsed = DbtLocalArtifactProcessor.render_values_jinja(
        jinja_env, object, include_section=["proper_one"]
    )
    assert parsed == {"proper_one": "1111"}
    with pytest.raises(Exception):
        DbtLocalArtifactProcessor.render_values_jinja(
            jinja_env, object, include_section=[]
        )
def test_jinja_multiline(jinja_env):
    # Each line is rendered independently: resolvable expressions are replaced,
    # unresolvable ones (unknown method/variable) pass through unchanged.
    os.environ['PORT_REDSHIFT'] = "13"
    text = textwrap.dedent("""
    # {{ does_not_exist(some_arg.subarg.subarg2) }}
    {{ env_var('PORT_REDSHIFT') | as_number }}
    {{ undefined }}
    more_text
    even_more_text""")
    parsed = DbtLocalArtifactProcessor.render_values_jinja(jinja_env, text)
    assert parsed == textwrap.dedent("""
    # {{ does_not_exist(some_arg.subarg.subarg2) }}
    13
    {{ undefined }}
    more_text
    even_more_text""")
    del os.environ['PORT_REDSHIFT']
def test_jinja_dict(jinja_env):
    """Rendering recurses into dict values."""
    rendered = DbtLocalArtifactProcessor.render_values_jinja(jinja_env, {"key": "{{ test }}"})
    assert rendered == {"key": "test_variable"}
def test_jinja_list(jinja_env):
    """Rendering recurses into list elements."""
    rendered = DbtLocalArtifactProcessor.render_values_jinja(jinja_env, ["key", "{{ test }}"])
    assert rendered == ["key", "test_variable"]
def test_logging_handler_warns():
    # Manifest schema version (v2) above the declared supported version (1)
    # must produce exactly one warning with this message.
    path = 'tests/dbt/test/target/manifest.json'
    logger = mock.Mock()
    DbtLocalArtifactProcessor.load_metadata(path, [1], logger)
    logger.warning.assert_called_once_with(
        "Artifact schema version: https://schemas.getdbt.com/dbt/manifest/v2.json is above "
        "dbt-ol supported version 1. This might cause errors."
    )
def test_logging_handler_does_not_warn():
    """A manifest whose schema version is within the supported range emits no warning."""
    logger = mock.Mock()
    DbtLocalArtifactProcessor.load_metadata(
        'tests/dbt/test/target/manifest.json', [2], logger
    )
    logger.warning.assert_not_called()
6,820 | test unary abs | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Tests for :class:`aiida.orm.nodes.data.base.BaseType` classes."""
import operator
import pytest
from aiida.orm import Bool, Float, Int, NumericType, Str, load_node
@pytest.mark.parametrize(
    'node_type, default, value', [
        (Bool, False, True),
        (Int, 0, 5),
        (Float, 0.0, 5.5),
        (Str, '', 'a'),
    ]
)
def test_create(node_type, default, value):
    """Test the creation of the ``BaseType`` nodes."""
    # A bare constructor yields the type's default value.
    assert node_type().value == default
    # An explicit value is stored as-is.
    assert node_type(value).value == value
@pytest.mark.parametrize('node_type', [Bool, Float, Int, Str])
def test_store_load(node_type):
    """Test ``BaseType`` node storing and loading."""
    original = node_type()
    original.store()
    # Reloading by pk must round-trip the stored value.
    loaded = load_node(original.pk)
    assert original.value == loaded.value
def test_modulo():
    """Test ``Int`` modulus operation."""
    term_a = Int(12)
    term_b = Int(10)
    # Node % node, node % native and native % node must all evaluate to the
    # same remainder and return a ``NumericType`` instance.
    for result in (term_a % term_b, term_a % 10, 12 % term_b):
        assert result == 2
        assert isinstance(result, NumericType)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_add(node_type, a, b):
    """Test addition for ``Int`` and ``Float`` nodes."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a + b

    def check(result):
        # Every flavour of addition returns the node type holding the sum.
        assert isinstance(result, node_type)
        assert result.value == expected

    check(node_a + node_b)  # node + node
    check(node_a + b)  # node + native
    check(a + node_b)  # native + node
    # In-place addition
    inplace = node_type(a)
    inplace += node_b
    check(inplace)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_multiplication(node_type, a, b):
    """Test floats multiplication."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a * b

    def check(result):
        # Every flavour of multiplication returns the node type holding the product.
        assert isinstance(result, node_type)
        assert result.value == expected

    check(node_a * node_b)  # node * node
    check(node_a * b)  # node * native
    check(a * node_b)  # native * node
    # In-place multiplication
    inplace = node_type(a)
    inplace *= node_b
    check(inplace)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division(node_type, a, b):
    """Test the ``BaseType`` normal division operator."""
    quotient = node_type(a) / node_type(b)
    assert quotient == a / b
    # True division promotes to ``Float`` for both node types.
    assert isinstance(quotient, Float)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division_integer(node_type, a, b):
    """Test the ``Int`` integer division operator."""
    quotient = node_type(a) // node_type(b)
    assert quotient == a // b
    # Floor division preserves the operand node type.
    assert isinstance(quotient, node_type)
@pytest.mark.parametrize('node_type, base, power', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_power(node_type, base, power):
    """Test power operator."""
    result = node_type(base) ** node_type(power)
    assert result == base ** power
    # Exponentiation preserves the operand node type.
    assert isinstance(result, node_type)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_modulus(node_type, a, b):
    """Test modulus operator."""
    node_a = node_type(a)
    node_b = node_type(b)
    expected = a % b
    # Node % node, node % native and native % node all preserve the node type.
    for result in (node_a % node_b, node_a % b, a % node_b):
        assert result == expected
        assert isinstance(result, node_type)
@pytest.mark.parametrize(
    'opera', [
        operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd,
        operator.imul
    ]
)
def test_operator(opera):
    """Test operations between Int and Float objects."""
    node_a = Float(2.2)
    node_b = Int(3)
    # Apply the operator in both orders; the node result must mirror the
    # outcome on the plain Python values, in both type and value.
    for node_x, node_y in [(node_a, node_b), (node_b, node_a)]:
        expected = opera(node_x.value, node_y.value)
        res = opera(node_x, node_y)
        assert res._type == type(expected)  # pylint: disable=protected-access
        assert res == expected
@pytest.mark.parametrize('node_type, a, b', [
    (Bool, False, True),
    (Int, 2, 5),
    (Float, 2.5, 5.5),
    (Str, 'a', 'b'),
])
def test_equality(node_type, a, b):
    """Test equality comparison for the base types."""
    node_a = node_type(a)
    node_b = node_type(b)
    # Comparison against plain Python values.
    assert node_a == a
    assert node_a != b
    # Comparison against other ``BaseType`` nodes.
    assert node_a == node_type(a)
    assert node_a != node_b
@pytest.mark.parametrize('numeric_type', (Float, Int))
def test_unary_pos(numeric_type):
    """Test the ``__pos__`` unary operator for all ``NumericType`` subclasses."""
    # Unary plus is the identity for both signs.
    for value in (1, -1):
        node = numeric_type(value)
        assert +node == node
@pytest.mark.parametrize('numeric_type', (Float, Int))
def test_unary_neg(numeric_type):
    """Test the ``__neg__`` unary operator for all ``NumericType`` subclasses."""
    node_positive = numeric_type(1)
    node_negative = numeric_type(-1)
    # Negation flips the sign, so a node never equals its own negation ...
    assert -node_positive != node_positive
    assert -node_negative != node_negative
    # ... and negating one sign yields the other.
    # (The original repeated the last assertion twice; the duplicate is removed.)
    assert -node_positive == node_negative
    assert -node_negative == node_positive
@pytest.mark.parametrize('numeric_type', (Float, Int))
def METHOD_NAME(numeric_type):
    """Test the ``__abs__`` unary operator for all ``NumericType`` subclasses"""
    node_positive = numeric_type(1)
    node_negative = numeric_type(-1)
    # abs() is the identity for a positive node ...
    assert abs(node_positive) == node_positive
    # ... and flips the sign of a negative one, so both agree.
    assert abs(node_negative) != node_negative
    assert abs(node_positive) == abs(node_negative)
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import asana
import logging
from typing import Any, Dict, List
from amundsen_application.base.base_issue_tracker_client import BaseIssueTrackerClient
from amundsen_application.models.data_issue import DataIssue, Priority
from amundsen_application.models.issue_results import IssueResults
class AsanaClient(BaseIssueTrackerClient):
    """Issue tracker client that stores Amundsen data issues as Asana tasks.

    Each table is represented by a parent task in the configured Asana
    project; individual data issues are subtasks of that parent. Two custom
    fields are maintained on the project: one holding the Amundsen table URI
    (used to look the parent task up) and one holding the issue priority.
    """

    def __init__(self, issue_labels: List[str],
                 issue_tracker_url: str,
                 issue_tracker_user: str,
                 issue_tracker_password: str,
                 issue_tracker_project_id: int,
                 issue_tracker_max_results: int) -> None:
        self.issue_labels = issue_labels
        self.asana_url = issue_tracker_url
        self.asana_user = issue_tracker_user
        self.asana_password = issue_tracker_password
        self.asana_max_results = issue_tracker_max_results
        self.asana_project_gid = issue_tracker_project_id
        # The "password" setting is used as an Asana personal access token.
        self.asana_client = asana.Client.access_token(issue_tracker_password)
        asana_project = self.asana_client.projects.get_project(self.asana_project_gid)
        self.asana_workspace_gid = asana_project['workspace']['gid']
        # Ensure the Amundsen custom fields exist before any lookup happens.
        self._setup_custom_fields()

    def get_issues(self, table_uri: str) -> IssueResults:
        """
        :param table_uri: Table Uri ie databasetype://database/table
        :return: Metadata of matching issues
        """
        table_parent_task_gid = self._get_parent_task_gid_for_table_uri(table_uri)
        tasks = list(self.asana_client.tasks.get_subtasks_for_task(
            table_parent_task_gid,
            {
                'opt_fields': [
                    'name', 'completed', 'notes', 'custom_fields',
                ]
            }
        ))
        return IssueResults(
            issues=[
                self.METHOD_NAME(task) for task in tasks
            ],
            total=len(tasks),
            all_issues_url=self._task_url(table_parent_task_gid)
        )

    def create_issue(self,
                     table_uri: str,
                     title: str,
                     description: str,
                     priority_level: str,
                     table_url: str,
                     **kwargs: Any) -> DataIssue:
        """
        Creates an issue in Asana
        :param description: Description of the Asana issue
        :param priority_level: Priority level for the ticket
        :param table_uri: Table Uri ie databasetype://database/table
        :param title: Title of the Asana ticket
        :param table_url: Link to access the table
        :return: Metadata about the newly created issue
        """
        table_parent_task_gid = self._get_parent_task_gid_for_table_uri(table_uri)
        # NOTE(review): raises StopIteration if priority_level does not match
        # any enum option created in _setup_custom_fields — assumed to always
        # be one of the Priority levels; confirm with callers.
        enum_value = next(opt for opt in self.priority_field_enum_options if opt['name'] == priority_level)
        return self.METHOD_NAME(
            self.asana_client.tasks.create_subtask_for_task(
                table_parent_task_gid,
                {
                    'name': title,
                    'notes': description + f'\n Table URL: {table_url}',
                    'custom_fields': {self.priority_field_gid: enum_value['gid']}
                }
            )
        )

    def _setup_custom_fields(self) -> None:
        """Create (or reuse) the Table URI and Priority custom fields on the project."""
        TABLE_URI_FIELD_NAME = 'Table URI (Amundsen)'
        PRIORITY_FIELD_NAME = 'Priority (Amundsen)'
        custom_fields = \
            self.asana_client.custom_field_settings.get_custom_field_settings_for_project(
                self.asana_project_gid
            )
        custom_fields = {f['custom_field']['name']: f['custom_field'] for f in custom_fields}
        if TABLE_URI_FIELD_NAME in custom_fields:
            table_uri_field = custom_fields[TABLE_URI_FIELD_NAME]
        else:
            # Free-text field storing the Amundsen table URI on each parent task.
            table_uri_field = self.asana_client.custom_fields.create_custom_field({
                'workspace': self.asana_workspace_gid,
                'name': TABLE_URI_FIELD_NAME,
                'format': 'custom',
                'resource_subtype': 'text',
            })
            self.asana_client.projects.add_custom_field_setting_for_project(
                self.asana_project_gid,
                {
                    'custom_field': table_uri_field['gid'],
                    'is_important': True,
                }
            )
        if PRIORITY_FIELD_NAME in custom_fields:
            priority_field = custom_fields[PRIORITY_FIELD_NAME]
        else:
            # Enum field with one option per Amundsen Priority level.
            priority_field = self.asana_client.custom_fields.create_custom_field({
                'workspace': self.asana_workspace_gid,
                'name': PRIORITY_FIELD_NAME,
                'format': 'custom',
                'resource_subtype': 'enum',
                'enum_options': [
                    {
                        'name': p.level
                    } for p in Priority
                ]
            })
            self.asana_client.projects.add_custom_field_setting_for_project(
                self.asana_project_gid,
                {
                    'custom_field': priority_field['gid'],
                    'is_important': True,
                }
            )
        self.table_uri_field_gid = table_uri_field['gid']
        self.priority_field_gid = priority_field['gid']
        self.priority_field_enum_options = priority_field['enum_options']

    def _get_parent_task_gid_for_table_uri(self, table_uri: str) -> str:
        """Return the gid of the parent task for ``table_uri``, creating it if missing."""
        table_parent_tasks = list(self.asana_client.tasks.search_tasks_for_workspace(
            self.asana_workspace_gid,
            {
                'projects.any': [self.asana_project_gid],
                'custom_fields.{}.value'.format(self.table_uri_field_gid): table_uri,
            }
        ))
        # Create the parent task if it doesn't exist.
        if len(table_parent_tasks) == 0:
            table_parent_task = self.asana_client.tasks.create_task({
                'name': table_uri,
                'custom_fields': {
                    self.table_uri_field_gid: table_uri,
                },
                'projects': [self.asana_project_gid],
            })
            return table_parent_task['gid']
        else:
            if len(table_parent_tasks) > 1:
                # Duplicate parent tasks indicate inconsistent bookkeeping; fall
                # back to the first match but leave a trace in the logs.
                # logging.warning replaces the deprecated logging.warn alias, and
                # the message no longer hard-codes "two" (there may be more).
                logging.warning('There are currently {} tasks with the name "{}"'.format(
                    len(table_parent_tasks), table_uri
                ))
            return table_parent_tasks[0]['gid']

    def _task_url(self, task_gid: str) -> str:
        """Return the Asana web URL for a task in the configured project."""
        return 'https://app.asana.com/0/{project_gid}/{task_gid}'.format(
            project_gid=self.asana_project_gid, task_gid=task_gid
        )

    def METHOD_NAME(self, task: Dict) -> DataIssue:
        """Convert an Asana task payload into an Amundsen ``DataIssue``."""
        custom_fields = {f['gid']: f for f in task['custom_fields']}
        priority_field = custom_fields[self.priority_field_gid]
        priority = None
        if priority_field.get('enum_value'):
            priority = Priority.from_level(priority_field['enum_value']['name'])
        else:
            # Tasks created outside Amundsen may lack a priority; default to P3.
            priority = Priority.P3
        return DataIssue(
            issue_key=task['gid'],
            title=task['name'],
            url=self._task_url(task['gid']),
            status='closed' if task['completed'] else 'open',
            priority=priority,
        )
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from spektral.layers import ops
from spektral.layers.pooling.src import SRCPool
class DMoNPool(SRCPool):
    r"""
    The DMoN pooling layer from the paper
    > [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904)<br>
    > Anton Tsitsulin et al.
    **Mode**: single, batch.
    This layer learns a soft clustering of the input graph as follows:
    $$
    \begin{align}
    \C &= \textrm{MLP}(\X); \\
    \X' &= \C^\top \X \\
    \A' &= \C^\top \A \C; \\
    \end{align}
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron with softmax output.
    Two auxiliary loss terms are also added to the model: the modularity loss
    $$
    L_m = - \frac{1}{2m} \mathrm{Tr}(\C^\top \A \C - \C^\top \d^\top \d \C)
    $$
    and the collapse regularization loss
    $$
    L_c = \frac{\sqrt{k}}{n} \left\|
        \sum_i \C_i^\top
    \right\|_F -1.
    $$
    This layer is based on the original implementation found
    [here](https://github.com/google-research/google-research/blob/master/graph_embedding/dmon/dmon.py).
    **Input**
    - Node features of shape `(batch, n_nodes_in, n_node_features)`;
    - Symmetrically normalized adjacency matrix of shape
    `(batch, n_nodes_in, n_nodes_in)`;
    **Output**
    - Reduced node features of shape `(batch, n_nodes_out, n_node_features)`;
    - Reduced adjacency matrix of shape `(batch, n_nodes_out, n_nodes_out)`;
    - If `return_selection=True`, the selection matrix of shape
    `(batch, n_nodes_in, n_nodes_out)`.
    **Arguments**
    - `k`: number of output nodes;
    - `mlp_hidden`: list of integers, number of hidden units for each hidden layer in
    the MLP used to compute cluster assignments (if `None`, the MLP has only one output
    layer);
    - `mlp_activation`: activation for the MLP layers;
    - `collapse_regularization`: strength of the collapse regularization;
    - `return_selection`: boolean, whether to return the selection matrix;
    - `use_bias`: use bias in the MLP;
    - `kernel_initializer`: initializer for the weights of the MLP;
    - `bias_initializer`: initializer for the bias of the MLP;
    - `kernel_regularizer`: regularization applied to the weights of the MLP;
    - `bias_regularizer`: regularization applied to the bias of the MLP;
    - `kernel_constraint`: constraint applied to the weights of the MLP;
    - `bias_constraint`: constraint applied to the bias of the MLP;
    """
    def __init__(
        self,
        k,
        mlp_hidden=None,
        mlp_activation="relu",
        return_selection=False,
        collapse_regularization=0.1,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Forward the shared configuration to SRCPool, then keep local copies
        # of the attributes this class uses directly.
        super().__init__(
            k=k,
            mlp_hidden=mlp_hidden,
            mlp_activation=mlp_activation,
            return_selection=return_selection,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
        self.k = k
        self.mlp_hidden = mlp_hidden if mlp_hidden is not None else []
        self.mlp_activation = mlp_activation
        self.collapse_regularization = collapse_regularization
    def build(self, input_shape):
        # Build the assignment MLP: hidden Dense layers followed by a final
        # Dense layer with softmax over the k clusters.
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint,
        )
        self.mlp = Sequential(
            [
                Dense(channels, self.mlp_activation, **layer_kwargs)
                for channels in self.mlp_hidden
            ]
            + [Dense(self.k, "softmax", **layer_kwargs)]
        )
        super().build(input_shape)
    def call(self, inputs, mask=None):
        x, a, i = self.get_inputs(inputs)
        return self.pool(x, a, i, mask=mask)
    def select(self, x, a, i, mask=None):
        # Soft cluster assignment S = MLP(X); zero out masked nodes.
        s = self.mlp(x)
        if mask is not None:
            s *= mask[0]
        # Collapse loss (averaged over the batch in batch mode) is attached to
        # the layer as an auxiliary loss, scaled by collapse_regularization.
        col_loss = self.collapse_loss(a, s)
        if K.ndim(a) == 3:
            col_loss = K.mean(col_loss)
        self.add_loss(self.collapse_regularization * col_loss)
        return s
    def reduce(self, x, s, **kwargs):
        # Pooled features: X' = S^T X.
        return ops.modal_dot(s, x, transpose_a=True)
    def METHOD_NAME(self, a, s, **kwargs):
        # Pooled adjacency: A' = S^T A S.
        a_pool = ops.matmul_at_b_a(s, a)
        # Modularity loss (averaged over the batch in batch mode) is attached
        # to the layer as an auxiliary loss.
        mod_loss = self.modularity_loss(a, s, a_pool)
        if K.ndim(a) == 3:
            mod_loss = K.mean(mod_loss)
        self.add_loss(mod_loss)
        return a_pool
    def reduce_index(self, i, s, **kwargs):
        # Each input graph contributes exactly k pooled nodes, so its segment
        # index is repeated k times.
        i_mean = tf.math.segment_mean(i, i)
        i_pool = ops.repeat(i_mean, tf.ones_like(i_mean) * self.k)
        return i_pool
    def modularity_loss(self, a, s, a_pool):
        # Degree vector d and edge count m; sparse and dense adjacencies are
        # handled separately.
        if K.is_sparse(a):
            n_edges = tf.cast(len(a.values), dtype=s.dtype)
            degrees = tf.sparse.reduce_sum(a, axis=-1)
            degrees = tf.reshape(degrees, (-1, 1))
        else:
            n_edges = tf.cast(tf.math.count_nonzero(a, axis=(-2, -1)), dtype=s.dtype)
            degrees = tf.reduce_sum(a, axis=-1, keepdims=True)
        # Null-model term (S^T d)(d^T S) / 2m from the docstring's L_m formula.
        normalizer_left = tf.matmul(s, degrees, transpose_a=True)
        normalizer_right = tf.matmul(degrees, s, transpose_a=True)
        if K.ndim(s) == 3:
            normalizer = (
                ops.modal_dot(normalizer_left, normalizer_right)
                / 2
                / tf.reshape(n_edges, [tf.shape(n_edges)[0]] + [1] * 2)
            )
        else:
            normalizer = ops.modal_dot(normalizer_left, normalizer_right) / 2 / n_edges
        loss = -tf.linalg.trace(a_pool - normalizer) / 2 / n_edges
        return loss
    def collapse_loss(self, a, s):
        # L_c = sqrt(k)/n * ||sum_i S_i||_F - 1 (see class docstring).
        cluster_sizes = tf.math.reduce_sum(s, axis=-2)
        n_nodes = tf.cast(tf.shape(a)[-1], s.dtype)
        loss = (
            tf.norm(cluster_sizes, axis=-1)
            / n_nodes
            * tf.sqrt(tf.cast(self.k, s.dtype))
            - 1
        )
        return loss
    def get_config(self):
        # Serialize the local configuration on top of SRCPool's config.
        config = {
            "collapse_regularization": self.collapse_regularization,
            "k": self.k,
            "mlp_hidden": self.mlp_hidden,
            "mlp_activation": self.mlp_activation,
        }
        base_config = super().get_config()
        return {**base_config, **config}
# Stubs for unittest.case (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
import logging
from collections import namedtuple
# Message template used when an assertion diff is too long to show in full.
DIFF_OMITTED = ... # type: Any
# Raised to mark a test as skipped.
class SkipTest(Exception): ...
# Internal: signals that execution of the current test should stop.
class _ShouldStop(Exception): ...
# Internal: raised when a test marked as an expected failure passes.
class _UnexpectedSuccess(Exception): ...
class _Outcome:
    """Internal bookkeeping object collecting the outcome of a test run."""
    expecting_failure = ... # type: Any
    result = ... # type: Any
    result_supports_subtests = ... # type: Any
    success = ... # type: Any
    skipped = ... # type: Any
    expectedFailure = ... # type: Any
    errors = ... # type: Any
    def __init__(self, result=None): ...
    # Context manager that runs one part of a test and records exceptions.
    def testPartExecutor(self, test_case, isTest=False): ...
# Decorators for skipping tests unconditionally or based on a condition,
# and for marking a test as an expected failure.
def skip(reason): ...
def skipIf(condition, reason): ...
def skipUnless(condition, reason): ...
def expectedFailure(test_item): ...
class _BaseTestCaseContext:
    """Base for context managers that need a reference back to the test case."""
    test_case = ... # type: Any
    def __init__(self, test_case): ...
class _AssertRaisesBaseContext(_BaseTestCaseContext):
    """Shared state for the assertRaises/assertWarns family of context managers."""
    expected = ... # type: Any
    test_case = ... # type: Any
    expected_regex = ... # type: Any
    obj_name = ... # type: Any
    msg = ... # type: Any
    def __init__(self, expected, test_case, expected_regex=None): ...
    # Dispatches between direct-call and context-manager usage.
    def handle(self, name, args, kwargs): ...
class _AssertRaisesContext(_AssertRaisesBaseContext):
    """Context manager backing ``assertRaises``; stores the caught exception."""
    def __enter__(self): ...
    exception = ... # type: Any
    def __exit__(self, exc_type, exc_value, tb): ...
class _AssertWarnsContext(_AssertRaisesBaseContext):
    """Context manager backing ``assertWarns``; captures the warning and its origin."""
    warnings_manager = ... # type: Any
    warnings = ... # type: Any
    def __enter__(self): ...
    warning = ... # type: Any
    filename = ... # type: Any
    lineno = ... # type: Any
    def __exit__(self, exc_type, exc_value, tb): ...
# (records, output) pair filled in while ``assertLogs`` is active.
_LoggingWatcher = namedtuple('_LoggingWatcher', ['records', 'output'])
class _CapturingHandler(logging.Handler):
    """Logging handler that records emitted records into a ``_LoggingWatcher``."""
    watcher = ... # type: Any
    def __init__(self): ...
    def flush(self): ...
    def emit(self, record): ...
class _AssertLogsContext(_BaseTestCaseContext):
    """Context manager backing ``assertLogs``; swaps in a capturing handler."""
    LOGGING_FORMAT = ... # type: Any
    logger_name = ... # type: Any
    level = ... # type: Any
    msg = ... # type: Any
    def __init__(self, test_case, logger_name, level): ...
    # Logger state saved in __enter__ and restored in __exit__.
    watcher = ... # type: Any
    old_handlers = ... # type: Any
    old_level = ... # type: Any
    old_propagate = ... # type: Any
    def __enter__(self): ...
    def __exit__(self, exc_type, exc_value, tb): ...
class TestCase:
    """Stub of ``unittest.TestCase``: lifecycle hooks and assertion methods."""
    failureException = ... # type: Any
    longMessage = ... # type: Any
    maxDiff = ... # type: Any
    def __init__(self, methodName=''): ...
    def addTypeEqualityFunc(self, typeobj, function): ...
    def addCleanup(self, function, *args, **kwargs): ...
    # Per-test and per-class fixture hooks.
    def setUp(self): ...
    def tearDown(self): ...
    @classmethod
    def setUpClass(cls): ...
    @classmethod
    def tearDownClass(cls): ...
    def countTestCases(self): ...
    def defaultTestResult(self): ...
    def shortDescription(self): ...
    def id(self): ...
    def __eq__(self, other): ...
    def __hash__(self): ...
    def subTest(self, msg=None, **params): ...
    def run(self, result=None): ...
    def doCleanups(self): ...
    def __call__(self, *args, **kwds): ...
    def debug(self): ...
    def skipTest(self, reason): ...
    def fail(self, msg=None): ...
    # Assertion methods.
    def assertFalse(self, expr, msg=None): ...
    def assertTrue(self, expr, msg=None): ...
    def assertRaises(self, expected_exception, *args, **kwargs): ...
    def assertWarns(self, expected_warning, *args, **kwargs): ...
    def assertLogs(self, logger=None, level=None): ...
    def assertEqual(self, first, second, msg=None): ...
    def assertNotEqual(self, first, second, msg=None): ...
    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None): ...
    def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None): ...
    def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None): ...
    def assertListEqual(self, list1, list2, msg=None): ...
    def assertTupleEqual(self, tuple1, tuple2, msg=None): ...
    def assertSetEqual(self, set1, set2, msg=None): ...
    def assertIn(self, member, container, msg=None): ...
    def assertNotIn(self, member, container, msg=None): ...
    def assertIs(self, expr1, expr2, msg=None): ...
    def assertIsNot(self, expr1, expr2, msg=None): ...
    def METHOD_NAME(self, d1, d2, msg=None): ...
    def assertDictContainsSubset(self, subset, dictionary, msg=None): ...
    def assertCountEqual(self, first, second, msg=None): ...
    def assertMultiLineEqual(self, first, second, msg=None): ...
    def assertLess(self, a, b, msg=None): ...
    def assertLessEqual(self, a, b, msg=None): ...
    def assertGreater(self, a, b, msg=None): ...
    def assertGreaterEqual(self, a, b, msg=None): ...
    def assertIsNone(self, obj, msg=None): ...
    def assertIsNotNone(self, obj, msg=None): ...
    def assertIsInstance(self, obj, cls, msg=None): ...
    def assertNotIsInstance(self, obj, cls, msg=None): ...
    def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): ...
    def assertWarnsRegex(self, expected_warning, expected_regex, *args, **kwargs): ...
    def assertRegex(self, text, expected_regex, msg=None): ...
    def assertNotRegex(self, text, unexpected_regex, msg=None): ...
    # Deprecated camelCase/fail* aliases kept for backwards compatibility.
    failUnlessEqual = ... # type: Any
    assertEquals = ... # type: Any
    failIfEqual = ... # type: Any
    assertNotEquals = ... # type: Any
    failUnlessAlmostEqual = ... # type: Any
    assertAlmostEquals = ... # type: Any
    failIfAlmostEqual = ... # type: Any
    assertNotAlmostEquals = ... # type: Any
    failUnless = ... # type: Any
    assert_ = ... # type: Any
    failUnlessRaises = ... # type: Any
    failIf = ... # type: Any
    assertRaisesRegexp = ... # type: Any
    assertRegexpMatches = ... # type: Any
    assertNotRegexpMatches = ... # type: Any
class FunctionTestCase(TestCase):
    """Stub of a ``TestCase`` that wraps a standalone test function."""
    def __init__(self, testFunc, setUp=None, tearDown=None, description=None): ...
    def setUp(self): ...
    def tearDown(self): ...
    def runTest(self): ...
    def id(self): ...
    def __eq__(self, other): ...
    def __hash__(self): ...
    def shortDescription(self): ...
class _SubTest(TestCase):
    """Internal case representing one ``subTest`` invocation of a test."""
    test_case = ... # type: Any
    params = ... # type: Any
    failureException = ... # type: Any
    def __init__(self, test_case, message, params): ...
    def runTest(self): ...
    def id(self): ...
    def shortDescription(self): ...
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestUDACustomSort(TestCase):
    """Sorting by a UDA with an explicit value list must honour that order."""

    @classmethod
    def setUpClass(cls):
        cls.t = Task()
        cls.t.config('uda.foo.type', 'string')
        cls.t.config('uda.foo.label', 'Foo')
        cls.t.config('uda.foo.values', 'H,M,L,')
        cls.t.config('report.list.columns', 'id,description,foo')
        cls.t.config('report.list.labels', 'ID,Desc,Foo')
        cls.t('add four foo:H')
        cls.t('add three foo:M')
        cls.t('add two foo:L')
        cls.t('add one')

    def _positions(self, output):
        """Return the offsets of the four task descriptions in *output*."""
        return tuple(output.find(word) for word in ('one', 'two', 'three', 'four'))

    def test_ascending(self):
        """Ascending custom sort order"""
        self.t.config('uda.foo.values', 'H,M,L,')
        code, out, err = self.t('rc.report.list.sort:foo+ list')
        one, two, three, four = self._positions(out)
        self.assertTrue(one < two)
        self.assertTrue(two < three)
        self.assertTrue(three < four)

    def test_descending(self):
        """Descending custom sort order"""
        self.t.config('uda.foo.values', 'H,M,L,')
        code, out, err = self.t('rc.report.list.sort:foo- list')
        one, two, three, four = self._positions(out)
        self.assertTrue(four < three)
        self.assertTrue(three < two)
        self.assertTrue(two < one)

    def test_ridiculous(self):
        """Ridiculous custom sort order"""
        self.t.config('uda.foo.values', 'H,M,,L')
        code, out, err = self.t('rc.report.list.sort:foo- list')
        one, two, three, four = self._positions(out)
        self.assertTrue(four < three)
        self.assertTrue(three < one)
        self.assertTrue(one < two)
class TestUDADefaultSort(TestCase):
    """Sorting by a UDA without a value list falls back to the default order."""

    @classmethod
    def setUpClass(cls):
        cls.t = Task()
        cls.t.config('uda.foo.type', 'string')
        cls.t.config('uda.foo.label', 'Foo')
        cls.t.config('report.list.columns', 'id,description,foo')
        cls.t.config('report.list.labels', 'ID,Desc,Foo')
        cls.t('add one foo:A')
        cls.t('add three')
        cls.t('add two foo:B')

    def _positions(self, output):
        """Return the offsets of the three task descriptions in *output*."""
        return tuple(output.find(word) for word in ('one', 'two', 'three'))

    def test_ascending(self):
        """Ascending default sort order"""
        code, out, err = self.t('rc.report.list.sort:foo+ list')
        one, two, three = self._positions(out)
        self.assertTrue(one < two)
        self.assertTrue(two < three)

    def test_descending(self):
        """Descending default sort order"""
        code, out, err = self.t('rc.report.list.sort:foo- list')
        one, two, three = self._positions(out)
        self.assertTrue(one < three)
        self.assertTrue(two < one)
class TestBug1319(TestCase):
    """Regression test for issue 1319: UDA sorting must honour the declared value order."""

    def METHOD_NAME(self):
        """Executed before each test in the class"""
        self.t = Task()

    def test_uda_sorting(self):
        """1319: Verify that UDAs are sorted according to defined order"""
        self.t.config("uda.when.type", "string")
        self.t.config("uda.when.values", "night,evening,noon,morning")
        self.t.config("report.foo.columns", "id,when,description")
        self.t.config("report.foo.labels", "ID,WHEN,DESCRIPTION")
        self.t.config("report.foo.sort", "when+")
        self.t("add one when:night")
        self.t("add two when:evening")
        self.t("add three when:noon")
        self.t("add four when:morning")
        code, out, err = self.t("rc.verbose:nothing foo")
        # Raw string: "\s" in a plain literal is an invalid escape sequence
        # (SyntaxWarning from Python 3.12), even though it previously happened
        # to reach the regex engine unchanged.
        self.assertRegex(out, r"4\s+morning\s+four\s+3\s+noon\s+three\s+2\s+evening\s+two\s+1\s+night\s+one")
if __name__ == "__main__":
    from simpletap import TAPTestRunner
    # Emit TAP output so the surrounding test harness can aggregate results.
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module for common pulse programming utilities."""
from typing import List, Dict, Union
import warnings
import numpy as np
from qiskit.circuit.parameterexpression import ParameterExpression
from qiskit.pulse.exceptions import UnassignedDurationError, QiskitError
from qiskit.utils.deprecation import deprecate_func, deprecate_function
def format_meas_map(meas_map: List[List[int]]) -> Dict[int, List[int]]:
    """
    Return a mapping from qubit label to measurement group given the nested list meas_map returned
    by a backend configuration. (Qubits can not always be measured independently.) Sorts the
    measurement group for consistency.
    Args:
        meas_map: Groups of qubits that get measured together, for example: [[0, 1], [2, 3, 4]]
    Returns:
        Measure map in map format
    """
    qubit_mapping = {}
    for group in meas_map:
        # Sort in place so every qubit of the group shares the same sorted list.
        group.sort()
        qubit_mapping.update({qubit: group for qubit in group})
    return qubit_mapping
def format_parameter_value(
    operand: ParameterExpression,
    decimal: int = 10,
) -> Union[ParameterExpression, complex]:
    """Convert ParameterExpression into the most suitable data type.

    Fully bound expressions are evaluated and narrowed to ``int``, ``float``
    or ``complex``; expressions with unbound parameters are returned as-is.
    Args:
        operand: Operand value in arbitrary data type including ParameterExpression.
        decimal: Number of digit to round returned value.
    Returns:
        Value casted to non-parameter data type, when possible.
    """
    try:
        # value is assigned.
        # note that ParameterExpression directly supports __complex__ via sympy or symengine
        evaluated = complex(operand)
        # remove truncation error
        evaluated = np.round(evaluated, decimals=decimal)
        # typecast into most likely data type: complex -> float -> int
        if np.isreal(evaluated):
            evaluated = float(evaluated.real)
            if evaluated.is_integer():
                evaluated = int(evaluated)
        else:
            # Truly complex values are still returned, but their use as
            # assigned parameter values is pending deprecation.
            warnings.warn(
                "Assignment of complex values to ParameterExpression in Qiskit Pulse objects is "
                "now pending deprecation. This will align the Pulse module with other modules "
                "where such assignment wasn't possible to begin with. The typical use case for complex "
                "parameters in the module was the SymbolicPulse library. As of Qiskit-Terra "
                "0.23.0 all library pulses were converted from complex amplitude representation"
                " to real representation using two floats (amp,angle), as used in the "
                "ScalableSymbolicPulse class. This eliminated the need for complex parameters. "
                "Any use of complex parameters (and particularly custom-built pulses) should be "
                "converted in a similar fashion to avoid the use of complex parameters.",
                PendingDeprecationWarning,
            )
        return evaluated
    except TypeError:
        # value is not assigned: complex() raises TypeError while parameters
        # remain unbound, so the expression falls through unchanged.
        pass
    return operand
def METHOD_NAME(duration: int):
    """Validate instruction duration.
    Args:
        duration: Instruction duration value to validate.
    Raises:
        UnassignedDurationError: When duration is unassigned.
        QiskitError: When invalid duration is assigned.
    """
    # A still-parametrized duration cannot be scheduled.
    if isinstance(duration, ParameterExpression):
        raise UnassignedDurationError(
            "Instruction duration {} is not assigned. "
            "Please bind all durations to an integer value before playing in the Schedule, "
            "or use ScheduleBlock to align instructions with unassigned duration."
            "".format(repr(duration))
        )
    # Durations must be concrete non-negative integers (numpy integers included).
    is_integral = isinstance(duration, (int, np.integer))
    if not is_integral or duration < 0:
        raise QiskitError(
            f"Instruction duration must be a non-negative integer, got {duration} instead."
        )
@deprecate_func(
    additional_msg="Instead, use 'qiskit.utils.deprecate_func'.",
    since="0.22.0",
)
def deprecated_functionality(func):
    """A decorator that raises deprecation warning without showing alternative method."""
    # Wrap ``func`` with ``deprecate_function`` so that calling it emits a
    # DeprecationWarning naming the function; the outer ``deprecate_func``
    # additionally deprecates this decorator itself.
    return deprecate_function(
        f"Calling {func.__name__} is being deprecated and will be removed soon. "
        "No alternative method will be provided with this change. "
        "If there is any practical usage of this functionality, please write "
        "an issue in Qiskit/qiskit-terra repository.",
        category=DeprecationWarning,
        stacklevel=2,
        since="0.22.0",
    )(func)
from __future__ import annotations
import warnings
import hypothesis as h
import hypothesis.extra.pandas as past
import hypothesis.extra.pytz as tzst
import hypothesis.strategies as st
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
from ibis.common.temporal import IntervalUnit
# Strategies for generating ibis datatypes
_nullable = st.booleans()
null_dtype = st.just(dt.null)
def boolean_dtype(nullable=_nullable):
return st.builds(dt.Boolean, nullable=nullable)
def signed_integer_dtypes(nullable=_nullable):
    """Strategy producing any signed integer dtype (8 through 64 bits)."""
    widths = (dt.Int8, dt.Int16, dt.Int32, dt.Int64)
    return st.one_of(*(st.builds(int_type, nullable=nullable) for int_type in widths))
def unsigned_integer_dtypes(nullable=_nullable):
    """Strategy producing any unsigned integer dtype (8 through 64 bits)."""
    widths = (dt.UInt8, dt.UInt16, dt.UInt32, dt.UInt64)
    return st.one_of(*(st.builds(uint_type, nullable=nullable) for uint_type in widths))
def integer_dtypes(nullable=_nullable):
return st.one_of(
signed_integer_dtypes(nullable=nullable),
unsigned_integer_dtypes(nullable=nullable),
)
def floating_dtypes(nullable=_nullable):
return st.one_of(
st.builds(dt.Float16, nullable=nullable),
st.builds(dt.Float32, nullable=nullable),
st.builds(dt.Float64, nullable=nullable),
)
@st.composite
def decimal_dtypes(draw, nullable=_nullable):
    """Strategy producing ``dt.Decimal`` types with a valid precision/scale pair.

    Draws ``precision`` first and then constrains ``scale`` to at most
    ``precision`` directly, instead of drawing both independently and
    rejecting invalid pairs via ``h.assume`` — constructive generation
    never burns Hypothesis' rejection budget and explores the same
    support (precision in [1, 38], scale in [1, precision]).
    """
    precision = draw(st.integers(min_value=1, max_value=38))
    scale = draw(st.integers(min_value=1, max_value=precision))
    return dt.Decimal(precision, scale, nullable=draw(nullable))
def numeric_dtypes(nullable=_nullable):
return st.one_of(
integer_dtypes(nullable=nullable),
floating_dtypes(nullable=nullable),
decimal_dtypes(nullable=nullable),
)
def string_dtype(nullable=_nullable):
return st.builds(dt.String, nullable=nullable)
def binary_dtype(nullable=_nullable):
return st.builds(dt.Binary, nullable=nullable)
def json_dtype(nullable=_nullable):
return st.builds(dt.JSON, nullable=nullable)
def inet_dtype(nullable=_nullable):
return st.builds(dt.INET, nullable=nullable)
def METHOD_NAME(nullable=_nullable):
return st.builds(dt.MACADDR, nullable=nullable)
def uuid_dtype(nullable=_nullable):
return st.builds(dt.UUID, nullable=nullable)
def string_like_dtypes(nullable=_nullable):
return st.one_of(
string_dtype(nullable=nullable),
binary_dtype(nullable=nullable),
json_dtype(nullable=nullable),
inet_dtype(nullable=nullable),
METHOD_NAME(nullable=nullable),
uuid_dtype(nullable=nullable),
)
def date_dtype(nullable=_nullable):
return st.builds(dt.Date, nullable=nullable)
def time_dtype(nullable=_nullable):
return st.builds(dt.Time, nullable=nullable)
_timezone = st.none() | tzst.timezones().map(str)
_interval = st.sampled_from(list(IntervalUnit))
_timestamp_scale = st.none() | st.integers(min_value=0, max_value=9)
def timestamp_dtype(scale=_timestamp_scale, timezone=_timezone, nullable=_nullable):
return st.builds(dt.Timestamp, scale=scale, timezone=timezone, nullable=nullable)
def interval_dtype(interval=_interval, nullable=_nullable):
return st.builds(dt.Interval, unit=interval, nullable=nullable)
def temporal_dtypes(timezone=_timezone, interval=_interval, nullable=_nullable):
    # NOTE(review): the ``interval`` parameter is accepted but never used —
    # interval_dtype() is not part of this union (all_dtypes() adds it
    # separately). Confirm whether the omission is intentional.
    return st.one_of(
        date_dtype(nullable=nullable),
        time_dtype(nullable=nullable),
        # scale is left at its default; only timezone is threaded through.
        timestamp_dtype(timezone=timezone, nullable=nullable),
    )
def primitive_dtypes(nullable=_nullable):
return st.one_of(
null_dtype,
boolean_dtype(nullable=nullable),
integer_dtypes(nullable=nullable),
floating_dtypes(nullable=nullable),
date_dtype(nullable=nullable),
time_dtype(nullable=nullable),
)
_item_strategy = primitive_dtypes()
def array_dtypes(value_type=_item_strategy, nullable=_nullable):
return st.builds(dt.Array, value_type=value_type, nullable=nullable)
def map_dtypes(key_type=_item_strategy, value_type=_item_strategy, nullable=_nullable):
return st.builds(
dt.Map, key_type=key_type, value_type=value_type, nullable=nullable
)
_any_text = st.text()
@st.composite
def struct_dtypes(
    draw,
    types=_item_strategy,
    names=_any_text,
    num_fields=st.integers(min_value=0, max_value=20),  # noqa: B008
    nullable=_nullable,
):
    """Strategy producing ``dt.Struct`` types with ``num_fields`` fields.

    Field names are drawn with ``unique=True``: duplicate names would be
    collapsed by the dict construction below, silently yielding fewer
    fields than requested and misaligning names with their drawn types.
    This also matches how ``schema()`` draws its field names.
    """
    num_fields = draw(num_fields)
    names = draw(
        st.lists(names, min_size=num_fields, max_size=num_fields, unique=True)
    )
    types = draw(st.lists(types, min_size=num_fields, max_size=num_fields))
    fields = dict(zip(names, types))
    return dt.Struct(fields, nullable=draw(nullable))
def geometry_dtypes(nullable=_nullable):
return st.builds(dt.GeoSpatial, geotype=st.just("geometry"), nullable=nullable)
def geography_dtypes(nullable=_nullable):
return st.builds(dt.GeoSpatial, geotype=st.just("geography"), nullable=nullable)
def geospatial_dtypes(nullable=_nullable):
return st.one_of(
st.builds(dt.Point, nullable=nullable),
st.builds(dt.LineString, nullable=nullable),
st.builds(dt.Polygon, nullable=nullable),
st.builds(dt.MultiPoint, nullable=nullable),
st.builds(dt.MultiLineString, nullable=nullable),
st.builds(dt.MultiPolygon, nullable=nullable),
geometry_dtypes(nullable=nullable),
geography_dtypes(nullable=nullable),
)
def variadic_dtypes(nullable=_nullable):
return st.one_of(
string_dtype(nullable=nullable),
binary_dtype(nullable=nullable),
json_dtype(nullable=nullable),
array_dtypes(nullable=nullable),
map_dtypes(nullable=nullable),
)
def all_dtypes(nullable=_nullable):
recursive = st.deferred(
lambda: (
primitive_dtypes(nullable=nullable)
| string_like_dtypes(nullable=nullable)
| temporal_dtypes(nullable=nullable)
| interval_dtype(nullable=nullable)
| geospatial_dtypes(nullable=nullable)
| variadic_dtypes(nullable=nullable)
| struct_dtypes(nullable=nullable)
| array_dtypes(recursive, nullable=nullable)
| map_dtypes(recursive, recursive, nullable=nullable)
| struct_dtypes(recursive, nullable=nullable)
)
)
return recursive
# Strategies for generating schema
@st.composite
def schema(draw, item_strategy=_item_strategy, max_size=20):
num_fields = draw(st.integers(min_value=0, max_value=max_size))
names = draw(
st.lists(st.text(), min_size=num_fields, max_size=num_fields, unique=True)
)
types = draw(st.lists(item_strategy, min_size=num_fields, max_size=num_fields))
fields = dict(zip(names, types))
return sch.Schema(fields)
all_schema = schema(all_dtypes)
# Strategies for generating in memory tables holding data
@st.composite
def memtable(draw, schema=schema(primitive_dtypes)): # noqa: B008
schema = draw(schema)
columns = [past.column(name, dtype=dtype) for name, dtype in schema.to_pandas()]
dataframe = past.data_frames(columns=columns)
with warnings.catch_warnings():
# TODO(cpcloud): pandas 2.1.0 junk
warnings.filterwarnings("ignore", category=FutureWarning)
df = draw(dataframe)
return ibis.memtable(df) |
6,827 | kill running benchmark instances | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import shlex
import subprocess
import time
import psutil
def run_subprocess(command_line):
    """Run *command_line* through the shell and return the raw exit status.

    NOTE(review): despite the module's theme this uses ``os.system`` (shell
    interpolation applies), so *command_line* must be trusted/pre-escaped.
    The return value is the raw ``os.system`` status word, not a decoded
    exit code.
    """
    return os.system(command_line)
def run_subprocess_with_output(command_line):
    """Run *command_line* and return combined stdout/stderr as a list of stripped lines."""
    logger = logging.getLogger(__name__)
    logger.debug("Running subprocess [%s] with output.", command_line)
    args = shlex.split(command_line)
    with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
        # readline() returns b"" exactly at EOF, which is the iterator sentinel.
        return [raw.decode("UTF-8").strip() for raw in iter(proc.stdout.readline, b"")]
def run_subprocess_with_out_and_err(command_line):
    """Run *command_line*, capturing stdout and stderr separately.

    :param command_line: The command line of the subprocess to launch.
    :return: A ``(stdout, stderr, return code)`` tuple with both streams decoded as UTF-8.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Running subprocess [%s] with stdout and stderr.", command_line)
    command_line_args = shlex.split(command_line)
    with subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as sp:
        # communicate() drains both pipes concurrently. The previous code
        # called sp.wait() first, which can deadlock once the child fills a
        # pipe buffer (documented pitfall in the subprocess module docs).
        out, err = sp.communicate()
    return out.decode('UTF-8'), err.decode('UTF-8'), sp.returncode
def run_subprocess_with_stderr(command_line):
    """Run *command_line*, discarding stdout and capturing stderr.

    :param command_line: The command line of the subprocess to launch.
    :return: A ``(stderr, return code)`` tuple with stderr decoded as UTF-8.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Running subprocess [%s] with stderr but no stdout.", command_line)
    command_line_args = shlex.split(command_line)
    with subprocess.Popen(command_line_args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE) as sp:
        # As in run_subprocess_with_out_and_err: use communicate(), not
        # wait()-then-communicate(), to avoid deadlocking on a full stderr
        # pipe buffer.
        _, err = sp.communicate()
    return err.decode('UTF-8'), sp.returncode
def exit_status_as_bool(runnable, quiet=False):
    """Invoke *runnable* and report its success as a boolean.

    :param runnable: A runnable returning an int exit status where ``0`` (or ``None``) means success.
    :param quiet: Suppress any output (default: False).
    :return: True iff the runnable has terminated successfully.
    """
    try:
        status = runnable()
    except OSError:
        if not quiet:
            logging.getLogger(__name__).exception("Could not execute command.")
        return False
    return status == 0 or status is None
def run_subprocess_with_logging(command_line, header=None, level=logging.INFO, stdin=None, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, env=None, detach=False):
    """
    Runs the provided command line in a subprocess. All output will be captured by a logger.
    :param command_line: The command line of the subprocess to launch.
    :param header: An optional header line that should be logged (this will be logged on info level, regardless of the defined log level).
    :param level: The log level to use for output (default: logging.INFO).
    :param stdin: The stdout object returned by subprocess.Popen(stdout=PIPE) allowing chaining of shell operations with pipes
    (default: None).
    :param stdout: The form that the stdout of Popen will take. If this argument is of type PIPE, the output of the command
    will be returned as a stream.
    :param stderr: The form that the stderr of Popen will take. If this argument is of type PIPE, the output of the command
    will be returned as a stream.
    :param env: Use specific environment variables (default: None).
    :param detach: Whether to detach this process from its parent process (default: False).
    :return: The process exit code as an int.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Running subprocess [%s] with logging.", command_line)
    command_line_args = shlex.split(command_line)
    # Detached children get their own process group so signals delivered to
    # the parent (e.g. Ctrl-C) do not propagate to them.
    pre_exec = os.setpgrp if detach else None
    if header is not None:
        logger.info(header)
    # pylint: disable=subprocess-popen-preexec-fn
    with subprocess.Popen(command_line_args,
                          stdout=stdout,
                          stderr=stderr,
                          universal_newlines=True,
                          env=env,
                          stdin=stdin if stdin else None,
                          preexec_fn=pre_exec) as command_line_process:
        # NOTE(review): this rebinds the *stdout* parameter to the captured
        # output text, shadowing the Popen argument from here on.
        stdout, _ = command_line_process.communicate()
        if stdout:
            logger.log(level=level, msg=stdout)
    logger.debug("Subprocess [%s] finished with return code [%s].", command_line, str(command_line_process.returncode))
    return command_line_process.returncode
def is_benchmark_process(p):
    """Return True if psutil process *p* looks like an opensearch-benchmark process.

    Matches either a native "opensearch-benchmark" executable, or a Python
    interpreter whose first argument is the opensearch-benchmark entry point.
    """
    cmdline = p.cmdline()
    return p.name() == "opensearch-benchmark" or \
        (p.name().lower().startswith("python") and
        (len(cmdline) > 1 and
        (cmdline[1] == "opensearch-benchmark" or
        cmdline[1].endswith(os.path.sep + "opensearch-benchmark"))))
def find_all_other_benchmark_processes():
    """Return all benchmark processes on this machine, excluding the current process."""
    others = []
    for_all_other_processes(is_benchmark_process, others.append)
    return others
def kill_all(predicate):
    """Kill every other process matching *predicate*, waiting briefly for each to terminate."""
    def kill(p):
        logging.getLogger(__name__).info("Killing lingering process with PID [%s] and command line [%s].", p.pid, p.cmdline())
        p.kill()
        # wait until process has terminated, at most 3 seconds. Otherwise we might run into race conditions with actor system
        # sockets that are still open.
        for _ in range(3):
            try:
                # status() raises NoSuchProcess once the process is gone.
                p.status()
                time.sleep(1)
            except psutil.NoSuchProcess:
                break
    for_all_other_processes(predicate, kill)
def for_all_other_processes(predicate, action):
    """Apply *action* to every process on this machine (except this one) for which *predicate* holds."""
    # no harakiri please
    my_pid = os.getpid()
    for p in psutil.process_iter():
        try:
            if p.pid != my_pid and predicate(p):
                action(p)
        except (psutil.ZombieProcess, psutil.AccessDenied, psutil.NoSuchProcess):
            # Processes can vanish or become inaccessible while we iterate; skip them.
            pass
def METHOD_NAME():
    """Kill all other running opensearch-benchmark processes on this machine."""
    kill_all(is_benchmark_process)
6,828 | test run spots common mistake | # This file is part of Checkbox.
#
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.testing_utils.test_testscases
======================================
Test definitions for plainbox.testing_utils.testcases module
"""
from unittest import TestCase, TestResult
from plainbox.testing_utils.testcases import TestCaseParameters
from plainbox.testing_utils.testcases import TestCaseWithParameters
class TestParameterTests(TestCase):
    """Tests for the TestCaseParameters value object."""

    def test_smoke(self):
        """Attribute access, unknown-name errors, str() and repr()."""
        names = ('foo', 'bar')
        values = (1, 2)
        params = TestCaseParameters(names, values)
        self.assertEqual(params.foo, 1)
        self.assertEqual(params.bar, 2)
        with self.assertRaises(AttributeError):
            params.not_there
        self.assertEqual(str(params), "foo: 1, bar: 2")
        self.assertEqual(repr(params), "<TestCaseParameters foo: 1, bar: 2>")

    def test_eq(self):
        """Equality depends on both names and values."""
        # Equal names and values
        self.assertEqual(TestCaseParameters(('a', 'b'), (1, 2)),
                         TestCaseParameters(('a', 'b'), (1, 2)))
        # Different values
        self.assertNotEqual(TestCaseParameters(('a', 'b'), (1, 2)),
                            TestCaseParameters(('a', 'c'), (1, 2)))
        # Different names
        self.assertNotEqual(TestCaseParameters(('a', 'b'), (1, 2)),
                            TestCaseParameters(('a', 'b'), (1, 3)))
class TestCaseWithParametersTests(TestCase):
class UpperTests(TestCaseWithParameters):
parameter_names = ('original', 'upper')
parameter_values = (
('lower', 'LOWER'),
('typo', 'TYPo'), # broken on purpose
('mIxEd CaSe', 'MIXED CASE'),
)
def test_str_upper(self):
self.assertEqual(
self.parameters.original.upper(),
self.parameters.upper)
def setUp(self):
self.test_case = self.UpperTests('test_str_upper')
self.parametrized_test_case = self.test_case._parametrize(
TestCaseParameters(
self.UpperTests.parameter_names,
('foo', 'FOO')))
def test_smoke(self):
result = TestResult()
self.test_case.run(result)
# There were no errors (syntax errors and other such stuff)
self.assertEqual(len(result.errors), 0)
# There was one test failure
self.assertEqual(len(result.failures), 1)
failing_test_case, traceback = result.failures[0]
# The test case that failed is an instance of what was being tested
self.assertIsInstance(failing_test_case, self.UpperTests)
# But they are not the same instance anymore
self.assertIsNot(failing_test_case, self.test_case)
# Because the parameters were preserved
self.assertEqual(failing_test_case.parameters.original, "typo")
def test_countTestCases(self):
# There are three parameter values
self.assertEqual(self.test_case.countTestCases(), 3)
# This test is parametrized so it counts as only one
self.assertEqual(self.parametrized_test_case.countTestCases(), 1)
    def test_countTestCases_regression_1265748(self):
        """
        verify regression testing for bug:
        https://bugs.launchpad.net/checkbox/+bug/1265748
        TestCaseWithParameters.countTestCases() should work when
        get_parameter_values() returns a generator.
        """
        class RegressionTest(TestCaseWithParameters):
            parameter_names = ('name1',)
            def get_parameter_values(self):
                yield ('value1', )
                yield ('value2', )
        self.assertEqual(RegressionTest().countTestCases(), 2)
def test_id(self):
self.assertIn(
"test_str_upper [<unparameterized>]",
self.test_case.id())
self.assertIn(
"test_str_upper [original: foo, upper: FOO]",
self.parametrized_test_case.id())
def test_str(self):
self.assertIn(
"[<unparameterized>]", str(self.test_case))
self.assertIn(
"[original: foo, upper: FOO]", str(self.parametrized_test_case))
def test_repr(self):
self.assertIn(
"parameters=None>", repr(self.test_case))
self.assertIn(
"parameters=<TestCaseParameters original: foo, upper: FOO>>",
repr(self.parametrized_test_case))
def test_eq(self):
self.assertEqual(self.test_case, self.test_case)
self.assertNotEqual(self.test_case, self.parametrized_test_case)
self.assertNotEqual(self.test_case, 'foo')
def test_hash(self):
case1 = TestCaseWithParameters()
case2 = TestCaseWithParameters()
self.assertEqual(hash(case1), hash(case2))
case1_param = case1._parametrize(
TestCaseParameters(('name', ), ('value', )))
self.assertNotEqual(case1, case1_param)
def METHOD_NAME(self):
with self.assertRaises(RuntimeError) as boom:
class UpperTests(TestCaseWithParameters):
parameter_names = ('param1',)
parameter_values = (('value1', 'value2'),)
UpperTests().run()
self.assertEqual(
str(boom.exception),
("incorrect get_parameter_values() or parameter_values for"
" iteration 0. Expected to see 1 item but saw 2 instead")) |
6,829 | mock ldclient | """
This file sets up the "globals" we need for our tests
namely, it creates the Avrae instance and overrides its http and gateway handlers
and defines a bunch of helper methods
"""
import asyncio
import json
import logging
import os
import pathlib
import random
import pytest
# setup bot
from dbot import bot
# here to prevent pycharm from moving around my imports >:C
pass
from cogs5e.models.character import Character # noqa: E402
from cogs5e.initiative import Combat # noqa: E402
from tests.discord_mock_data import * # noqa: E4
from tests.mocks import MockAsyncLaunchDarklyClient, MockDiscordHTTP # noqa: E402
from tests.monkey import add_reaction, message, on_command_error # noqa: E402
SENTINEL = object()
log = logging.getLogger(__name__)
dir_path = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope="session")
def event_loop():
    """Provide the session-scoped event loop shared by all async fixtures/tests.

    ``asyncio.get_event_loop()`` is deprecated when called with no running
    loop (Python 3.10+) and may raise in future versions. Create a dedicated
    loop for the test session and install it as the current loop so code that
    still looks up the running/current loop sees the same one.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    return loop
# the http fixture
@pytest.fixture(scope="session")
def dhttp(event_loop):
"""
The HTTP proxy
We use this to check what the bot has sent and make sure it's right
"""
return MockDiscordHTTP(loop=event_loop)
# the ldclient fixture
@pytest.fixture(scope="session")
def METHOD_NAME():
    """
    We use this to supply feature flags
    """
    # Session-scoped: every test shares a single mock LaunchDarkly client.
    return MockAsyncLaunchDarklyClient()
@pytest.fixture(scope="session")
async def avrae(dhttp, METHOD_NAME):
# set up a way for us to send events to Avrae
# monkey-patch in .message and .add_reaction
bot.message = message.__get__(bot, type(bot))
bot.add_reaction = add_reaction.__get__(bot, type(bot))
# add error event listener
bot.add_listener(on_command_error, "on_command_error")
# set up http
bot.http = dhttp
# noinspection PyProtectedMember
bot._connection.http = dhttp
# feature flags monkey-patch
bot.ldclient = METHOD_NAME
bot.state = "run"
await bot.login(config.TOKEN) # handled by our http proxy
# we never do initialize the websocket - we just replay discord's login sequence
# to initialize a "channel" to send testing messages to
# in this case, we initialize a testing guild and dummy DMChannel
# noinspection PyProtectedMember
bot._connection.parse_ready(DUMMY_READY)
# noinspection PyProtectedMember
bot._connection.parse_guild_create(DUMMY_GUILD_CREATE)
# noinspection PyProtectedMember
bot._connection.add_dm_channel(DUMMY_DMCHANNEL_CREATE)
# noinspection PyProtectedMember
# used to allow the delay_ready task to progress
bot._connection.shards_launched.set()
log.info("Ready for testing")
yield bot
await bot.close()
# ===== Character Fixture =====
@pytest.fixture(scope="class", params=["ara", "drakro"])
def character(request, avrae):
"""Sets up an active character in the user's context, to be used in tests. Cleans up after itself."""
filename = os.path.join(dir_path, "static", f"char-{request.param}.json")
with open(filename) as f:
char = Character.from_dict(json.load(f))
char.owner = DEFAULT_USER_ID
char._active = True
avrae.mdb.characters.delegate.update_one(
{"owner": char.owner, "upstream": char.upstream}, {"$set": char.to_dict()}, upsert=True
)
if request.cls is not None:
request.cls.character = char
yield char
avrae.mdb.characters.delegate.delete_one({"owner": char.owner, "upstream": char.upstream})
# noinspection PyProtectedMember
Character._cache.clear()
# ===== Init Fixture/Utils =====
@pytest.fixture(scope="class")
async def init_fixture(avrae):
"""Ensures we clean up before and after ourselves. Init tests should be grouped in a class."""
await avrae.mdb.combats.delete_one({"channel": str(TEST_CHANNEL_ID)})
yield
await avrae.mdb.combats.delete_one({"channel": str(TEST_CHANNEL_ID)})
# noinspection PyProtectedMember
Combat._cache.clear()
# ===== Global Fixture =====
@pytest.fixture(autouse=True, scope="function")
async def global_fixture(avrae, dhttp, request):
"""Things to do before and after every test."""
log.info(f"Starting test: {request.function.__name__}")
dhttp.clear()
random.seed(123) # we want to make our tests as deterministic as possible, so each one uses the same RNG seed
yield
await dhttp.drain()
log.info(f"Finished test: {request.function.__name__}")
# ==== marks ====
def pytest_collection_modifyitems(config, items):
"""
mark every test in e2e/ with the *e2e* mark, unit/ with *unit*, and gamedata/ with *gamedata*
"""
rootdir = pathlib.Path(config.rootdir)
for item in items:
rel_path = pathlib.Path(item.fspath).relative_to(rootdir)
if "e2e" in rel_path.parts:
item.add_marker(pytest.mark.e2e)
elif "unit" in rel_path.parts:
item.add_marker(pytest.mark.unit)
elif "gamedata" in rel_path.parts:
item.add_marker(pytest.mark.gamedata)
@pytest.fixture(scope="function")
def record_command_errors(avrae):
"""
A fixture to temporarily remove the custom command error handler, to allow commands to raise handled errors.
Yields a reference to a list of recorded errors.
"""
recorded_errors = []
async def on_command_error_rec(_, e):
recorded_errors.append(e)
avrae.add_listener(on_command_error_rec, "on_command_error")
avrae.remove_listener(on_command_error)
yield recorded_errors
avrae.remove_listener(on_command_error_rec, "on_command_error")
avrae.add_listener(on_command_error) |
6,830 | get corr rval | """
This file contains the utilities for generating random numbers in gulpy.
"""
import logging
from math import sqrt
import numpy as np
from numba import njit
from scipy.stats import norm
logger = logging.getLogger(__name__)
GROUP_ID_HASH_CODE = np.int64(1543270363)
EVENT_ID_HASH_CODE = np.int64(1943272559)
HASH_MOD_CODE = np.int64(2147483648)
HAZARD_GROUP_ID_HASH_CODE = np.int64(1143271949)
HAZARD_EVENT_ID_HASH_CODE = np.int64(1243274353)
HAZARD_HASH_MOD_CODE = np.int64(1957483729)
@njit(cache=True, fastmath=True)
def generate_hash(group_id, event_id, base_seed=0):
"""Generate hash for a given `group_id`, `event_id` pair for the vulnerability pdf.
Args:
group_id (int): group id.
event_id (int]): event id.
base_seed (int, optional): base random seed. Defaults to 0.
Returns:
int64: hash
"""
hash = (base_seed + (group_id * GROUP_ID_HASH_CODE) % HASH_MOD_CODE +
(event_id * EVENT_ID_HASH_CODE) % HASH_MOD_CODE) % HASH_MOD_CODE
return hash
@njit(cache=True, fastmath=True)
def generate_hash_hazard(hazard_group_id, event_id, base_seed=0):
"""Generate hash for a given `hazard_group_id`, `event_id` pair for the hazard pdf.
Args:
hazard_group_id (int): group id.
event_id (int]): event id.
base_seed (int, optional): base random seed. Defaults to 0.
Returns:
int64: hash
"""
hash = (base_seed + (hazard_group_id * HAZARD_GROUP_ID_HASH_CODE) % HAZARD_HASH_MOD_CODE +
(event_id * HAZARD_EVENT_ID_HASH_CODE) % HAZARD_HASH_MOD_CODE) % HAZARD_HASH_MOD_CODE
return hash
def get_random_generator(random_generator):
    """Get the random generator function.
    Args:
        random_generator (int): random generator function id.
    Returns:
        The random generator function.
    """
    # Guard-clause dispatch: each supported id logs its choice and returns
    # the corresponding generator; anything else is rejected.
    if random_generator == 0:
        logger.info("Random generator: MersenneTwister")
        return random_MersenneTwister
    if random_generator == 1:
        logger.info("Random generator: Latin Hypercube")
        return random_LatinHypercube
    raise ValueError(f"No random generator exists for random_generator={random_generator}.")
EVENT_ID_HASH_CODE = np.int64(1943_272_559)
PERIL_CORRELATION_GROUP_HASH = np.int64(1836311903)
HASH_MOD_CODE = np.int64(2147483648)
@njit(cache=True, fastmath=True)
def generate_correlated_hash_vector(unique_peril_correlation_groups, event_id, base_seed=0):
"""Generate hashes for all peril correlation groups for a given `event_id`.
Args:
unique_peril_correlation_groups (List[int]): list of the unique peril correlation groups.
event_id (int): event id.
base_seed (int, optional): base random seed. Defaults to 0.
Returns:
List[int64]: hashes
"""
Nperil_correlation_groups = np.max(unique_peril_correlation_groups)
correlated_hashes = np.zeros(Nperil_correlation_groups + 1, dtype='int64')
correlated_hashes[0] = 0
unique_peril_index = 0
unique_peril_len = unique_peril_correlation_groups.shape[0]
for i in range(1, Nperil_correlation_groups + 1):
if unique_peril_correlation_groups[unique_peril_index] == i:
correlated_hashes[i] = (
base_seed +
(unique_peril_correlation_groups[unique_peril_index] * PERIL_CORRELATION_GROUP_HASH) % HASH_MOD_CODE +
(event_id * EVENT_ID_HASH_CODE) % HASH_MOD_CODE
) % HASH_MOD_CODE
unique_peril_index += 1
if unique_peril_index == unique_peril_len:
break
return correlated_hashes
def compute_norm_inv_cdf_lookup(cdf_min, cdf_max, N):
    """Tabulate the standard-normal inverse CDF on N evenly spaced points in [cdf_min, cdf_max]."""
    grid = np.linspace(cdf_min, cdf_max, N)
    return norm.ppf(grid)
def compute_norm_cdf_lookup(x_min, x_max, N):
    """Tabulate the standard-normal CDF on N evenly spaced points in [x_min, x_max]."""
    grid = np.linspace(x_min, x_max, N)
    return norm.cdf(grid)
@njit(cache=True, fastmath=True)
def get_norm_cdf_cell_nb(x, x_min, x_max, N):
    # Map x in [x_min, x_max] to the index of its cell in an N-point lookup
    # table; floor division keeps the index integral (N-1 intervals).
    return int((x - x_min) * (N - 1) // (x_max - x_min))
@njit(cache=True, fastmath=True)
def METHOD_NAME(x_unif, y_unif, rho, x_min, x_max, N, norm_inv_cdf, cdf_min,
cdf_max, norm_cdf, Nsamples, z_unif):
sqrt_rho = sqrt(rho)
sqrt_1_minus_rho = sqrt(1. - rho)
for i in range(Nsamples):
x_norm = norm_inv_cdf[get_norm_cdf_cell_nb(x_unif[i], x_min, x_max, N)]
y_norm = norm_inv_cdf[get_norm_cdf_cell_nb(y_unif[i], x_min, x_max, N)]
z_norm = sqrt_rho * x_norm + sqrt_1_minus_rho * y_norm
z_unif[i] = norm_cdf[get_norm_cdf_cell_nb(z_norm, cdf_min, cdf_max, N)]
@njit(cache=True, fastmath=True)
def random_MersenneTwister(seeds, n, skip_seeds=0):
"""Generate random numbers using the default Mersenne Twister algorithm.
Args:
seeds (List[int64]): List of seeds.
n (int): number of random samples to generate for each seed.
skip_seeds (int): number of seeds to skip starting from the beginning
of the `seeds` array. For skipped seeds no random numbers are generated
and the output rndms will contain zeros at their corresponding row.
Default is 0, i.e. no seeds are skipped.
Returns:
rndms (array[float]): 2-d array of shape (number of seeds, n)
containing the random values generated for each seed.
rndms_idx (Dict[int64, int]): mapping between `seed` and the
row in rndms that stores the corresponding random values.
"""
Nseeds = len(seeds)
rndms = np.zeros((Nseeds, n), dtype='float64')
for seed_i in range(skip_seeds, Nseeds, 1):
# set the seed
np.random.seed(seeds[seed_i])
# draw the random numbers
for j in range(n):
# by default in numba this should be Mersenne-Twister
rndms[seed_i, j] = np.random.uniform(0., 1.)
return rndms
@njit(cache=True, fastmath=True)
def random_LatinHypercube(seeds, n, skip_seeds=0):
"""Generate random numbers using the Latin Hypercube algorithm.
Args:
seeds (List[int64]): List of seeds.
n (int): number of random samples to generate for each seed.
Returns:
rndms (array[float]): 2-d array of shape (number of seeds, n)
containing the random values generated for each seed.
rndms_idx (Dict[int64, int]): mapping between `seed` and the
row in rndms that stores the corresponding random values.
skip_seeds (int): number of seeds to skip starting from the beginning
of the `seeds` array. For skipped seeds no random numbers are generated
and the output rndms will contain zeros at their corresponding row.
Default is 0, i.e. no seeds are skipped.
Notes:
Implementation follows scipy.stats.qmc.LatinHypercube v1.8.0.
Following scipy notation, here we assume `centered=False` all the times:
instead of taking `samples=0.5*np.ones(n)`, here we always
draw uniform random samples in order to initialise `samples`.
"""
Nseeds = len(seeds)
rndms = np.zeros((Nseeds, n), dtype='float64')
# define arrays here and re-use them later
samples = np.zeros(n, dtype='float64')
perms = np.zeros(n, dtype='float64')
for seed_i in range(skip_seeds, Nseeds, 1):
# set the seed
np.random.seed(seeds[seed_i])
# draw the random numbers and re-generate permutations array
for i in range(n):
samples[i] = np.random.uniform(0., 1.)
perms[i] = i + 1
# in-place shuffle permutations
np.random.shuffle(perms)
for j in range(n):
rndms[seed_i, j] = (perms[j] - samples[j]) / float(n)
return rndms |
6,831 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetManagedClusterApplicationTypeVersionResult',
'AwaitableGetManagedClusterApplicationTypeVersionResult',
'get_managed_cluster_application_type_version',
'get_managed_cluster_application_type_version_output',
]
@pulumi.output_type
class GetManagedClusterApplicationTypeVersionResult:
"""
An application type version resource for the specified application type name resource.
"""
def __init__(__self__, app_package_url=None, id=None, location=None, METHOD_NAME=None, provisioning_state=None, system_data=None, tags=None, type=None):
if app_package_url and not isinstance(app_package_url, str):
raise TypeError("Expected argument 'app_package_url' to be a str")
pulumi.set(__self__, "app_package_url", app_package_url)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(METHOD_NAME="appPackageUrl")
def app_package_url(self) -> str:
"""
The URL to the application package
"""
return pulumi.get(self, "app_package_url")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location depends on the parent resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(METHOD_NAME="provisioningState")
def provisioning_state(self) -> str:
"""
The current deployment or provisioning state, which only appears in the response
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Azure resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetManagedClusterApplicationTypeVersionResult(GetManagedClusterApplicationTypeVersionResult):
    """Awaitable wrapper so the invoke result can be used with `await`."""

    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            # The dead `yield` makes __await__ a generator, which is all that
            # is needed for the object to be awaitable.
            yield self
        # NOTE(review): METHOD_NAME is a masked identifier (presumably `name`);
        # it is kept as-is so the constructor keyword matches this file.
        return GetManagedClusterApplicationTypeVersionResult(
            app_package_url=self.app_package_url,
            id=self.id,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_managed_cluster_application_type_version(application_type_name: Optional[str] = None,
                                                 cluster_name: Optional[str] = None,
                                                 resource_group_name: Optional[str] = None,
                                                 version: Optional[str] = None,
                                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedClusterApplicationTypeVersionResult:
    """
    Get a Service Fabric managed application type version resource created or in the process of being created in the Service Fabric managed application type name resource.
    Azure REST API version: 2023-03-01-preview.

    :param str application_type_name: The name of the application type name resource.
    :param str cluster_name: The name of the cluster resource.
    :param str resource_group_name: The name of the resource group.
    :param str version: The application type version.
    """
    # Map the Python arguments onto the provider's camelCase invoke arguments.
    __args__ = {
        'applicationTypeName': application_type_name,
        'clusterName': cluster_name,
        'resourceGroupName': resource_group_name,
        'version': version,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:servicefabric:getManagedClusterApplicationTypeVersion', __args__, opts=opts, typ=GetManagedClusterApplicationTypeVersionResult).value

    return AwaitableGetManagedClusterApplicationTypeVersionResult(
        app_package_url=pulumi.get(__ret__, 'app_package_url'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_managed_cluster_application_type_version)
def get_managed_cluster_application_type_version_output(application_type_name: Optional[pulumi.Input[str]] = None,
                                                        cluster_name: Optional[pulumi.Input[str]] = None,
                                                        resource_group_name: Optional[pulumi.Input[str]] = None,
                                                        version: Optional[pulumi.Input[str]] = None,
                                                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedClusterApplicationTypeVersionResult]:
    """
    Get a Service Fabric managed application type version resource created or in the process of being created in the Service Fabric managed application type name resource.
    Azure REST API version: 2023-03-01-preview.

    :param str application_type_name: The name of the application type name resource.
    :param str cluster_name: The name of the cluster resource.
    :param str resource_group_name: The name of the resource group.
    :param str version: The application type version.
    """
    # Body intentionally empty: @_utilities.lift_output_func supplies the
    # implementation by wrapping the plain invoke above.
    # BUG FIX: removed a stray ` |` (dataset-row residue) after `...` that made
    # this line a syntax error.
    ...
import unittest
from pathlib import Path
from robot.errors import DataError
from robot.utils import Importer
from robot.utils.asserts import assert_equal, assert_raises, assert_true
from robot.running import TestSuite, TestSuiteBuilder
# Root of the shared Robot Framework acceptance-test data used by these tests.
DATADIR = (Path(__file__).parent / '../../atest/testdata/misc').resolve()
def build(*paths, **config):
    """Build a TestSuite from data files under DATADIR and sanity-check it."""
    resolved = [Path(DATADIR, path).resolve() for path in paths]
    suite = TestSuiteBuilder(**config).build(*resolved)
    assert_true(isinstance(suite, TestSuite))
    # A single input keeps its source path; multiple inputs have no single source.
    expected_source = resolved[0] if len(resolved) == 1 else None
    assert_equal(suite.source, expected_source)
    return suite
def assert_keyword(kw, assign=(), name='', args=(), type='KEYWORD'):
    """Assert the core attributes of a keyword call model object."""
    expected = {'name': name, 'args': args, 'assign': assign, 'type': type}
    for attr, value in expected.items():
        assert_equal(getattr(kw, attr), value)
class TestBuilding(unittest.TestCase):
    """Checks that TestSuiteBuilder builds the expected model objects from the
    data files under atest/testdata/misc."""

    def test_suite_data(self):
        suite = build('pass_and_fail.robot')
        assert_equal(suite.name, 'Pass And Fail')
        assert_equal(suite.doc, 'Some tests here')
        assert_equal(suite.metadata, {})

    def test_imports(self):
        imp = build('dummy_lib_test.robot').resource.imports[0]
        assert_equal(imp.type, 'LIBRARY')
        assert_equal(imp.name, 'DummyLib')
        assert_equal(imp.args, ())

    def test_variables(self):
        variables = build('pass_and_fail.robot').resource.variables
        assert_equal(variables[0].name, '${LEVEL1}')
        assert_equal(variables[0].value, ('INFO',))
        assert_equal(variables[1].name, '${LEVEL2}')
        assert_equal(variables[1].value, ('DEBUG',))

    def METHOD_NAME(self):
        # NOTE(review): masked name; presumably `test_user_keywords`.
        uk = build('pass_and_fail.robot').resource.keywords[0]
        assert_equal(uk.name, 'My Keyword')
        assert_equal(uk.args, ('${who}',))

    def test_test_data(self):
        test = build('pass_and_fail.robot').tests[1]
        assert_equal(test.name, 'Fail')
        assert_equal(test.doc, 'FAIL Expected failure')
        assert_equal(list(test.tags), ['fail', 'force'])
        assert_equal(test.timeout, None)
        assert_equal(test.template, None)

    def test_test_keywords(self):
        kw = build('pass_and_fail.robot').tests[0].body[0]
        assert_keyword(kw, (), 'My Keyword', ('Pass',))

    def test_assign(self):
        kw = build('non_ascii.robot').tests[1].body[0]
        assert_keyword(kw, ('${msg} =',), 'Evaluate', (r"u'Fran\\xe7ais'",))

    def test_directory_suite(self):
        suite = build('suites')
        assert_equal(suite.name, 'Suites')
        assert_equal(suite.suites[0].name, 'Suite With Prefix')
        assert_equal(suite.suites[2].name, 'Subsuites')
        assert_equal(suite.suites[4].name, 'Suite With Double Underscore')
        assert_equal(suite.suites[4].suites[0].name, 'Tests With Double Underscore')
        assert_equal(suite.suites[-1].name, 'Tsuite3')
        assert_equal(suite.suites[2].suites[1].name, 'Sub2')
        assert_equal(len(suite.suites[2].suites[1].tests), 1)
        assert_equal(suite.suites[2].suites[1].tests[0].id, 's1-s3-s2-t1')

    def test_multiple_inputs(self):
        suite = build('pass_and_fail.robot', 'normal.robot')
        assert_equal(suite.name, 'Pass And Fail & Normal')
        assert_equal(suite.suites[0].name, 'Pass And Fail')
        assert_equal(suite.suites[1].name, 'Normal')
        assert_equal(suite.suites[1].tests[1].id, 's1-s2-t2')

    def test_suite_setup_and_teardown(self):
        suite = build('setups_and_teardowns.robot')
        assert_keyword(suite.setup, name='${SUITE SETUP}', type='SETUP')
        assert_keyword(suite.teardown, name='${SUITE TEARDOWN}', type='TEARDOWN')

    def test_test_setup_and_teardown(self):
        test = build('setups_and_teardowns.robot').tests[0]
        assert_keyword(test.setup, name='${TEST SETUP}', type='SETUP')
        assert_keyword(test.teardown, name='${TEST TEARDOWN}', type='TEARDOWN')
        assert_equal([kw.name for kw in test.body], ['Keyword'])

    def test_test_timeout(self):
        tests = build('timeouts.robot').tests
        assert_equal(tests[0].timeout, '1min 42s')
        assert_equal(tests[1].timeout, '${100}')
        assert_equal(tests[2].timeout, None)

    def test_keyword_timeout(self):
        kw = build('timeouts.robot').resource.keywords[0]
        assert_equal(kw.timeout, '42')

    def test_rpa(self):
        # rpa=True/False must be honored regardless of the number of inputs.
        for paths in [('.',), ('pass_and_fail.robot',),
                      ('pass_and_fail.robot', 'normal.robot')]:
            self._validate_rpa(build(*paths), False)
            self._validate_rpa(build(*paths, rpa=True), True)
        self._validate_rpa(build('../rpa/tasks1.robot'), True)
        self._validate_rpa(build('../rpa/', rpa=False), False)
        suite = build('../rpa/')
        assert_equal(suite.rpa, None)
        for child in suite.suites:
            self._validate_rpa(child, child.name != 'Tests')

    def _validate_rpa(self, suite, expected):
        # Helper: the rpa flag must hold recursively for all child suites.
        assert_equal(suite.rpa, expected, suite.name)
        for child in suite.suites:
            self._validate_rpa(child, expected)

    def test_custom_parser(self):
        path = DATADIR / '../parsing/custom/CustomParser.py'
        for parser in [path, str(path)]:
            suite = build('../parsing/custom/tests.custom', custom_parsers=[parser])
            assert_equal(suite.name, 'Tests')
            assert_equal([t.name for t in suite.tests], ['Passing', 'Failing', 'Empty'])

    def test_custom_parser_with_args(self):
        path = DATADIR / '../parsing/custom/CustomParser.py:custom'
        for parser in [path, str(path)]:
            suite = build('../parsing/custom/tests.custom', custom_parsers=[parser])
            assert_equal(suite.name, 'Tests')
            assert_equal([t.name for t in suite.tests], ['Passing', 'Failing', 'Empty'])

    def test_custom_parser_as_object(self):
        path = DATADIR / '../parsing/custom/CustomParser.py'
        parser = Importer().import_class_or_module(path, instantiate_with_args=())
        suite = build('../parsing/custom/tests.custom', custom_parsers=[parser])
        assert_equal(suite.name, 'Tests')
        assert_equal([t.name for t in suite.tests], ['Passing', 'Failing', 'Empty'])

    def test_failing_parser_import(self):
        err = assert_raises(DataError, build, custom_parsers=['non_existing_mod'])
        assert_true(err.message.startswith("Importing parser 'non_existing_mod' failed:"))

    def test_incompatible_parser_object(self):
        err = assert_raises(DataError, build, custom_parsers=[42])
        assert_equal(err.message, "Importing parser 'integer' failed: "
                                  "'integer' does not have mandatory 'parse' method.")
class TestTemplates(unittest.TestCase):
    """Checks that test templates expand into the expected keyword calls."""

    def test_from_setting_table(self):
        test = build('../running/test_template.robot').tests[0]
        assert_keyword(test.body[0], (), 'Should Be Equal', ('Fail', 'Fail'))
        assert_equal(test.template, 'Should Be Equal')

    def test_from_test_case(self):
        test = build('../running/test_template.robot').tests[3]
        kws = test.body
        assert_keyword(kws[0], (), 'Should Not Be Equal', ('Same', 'Same'))
        assert_keyword(kws[1], (), 'Should Not Be Equal', ('42', '43'))
        assert_keyword(kws[2], (), 'Should Not Be Equal', ('Something', 'Different'))
        assert_equal(test.template, 'Should Not Be Equal')

    def test_no_variable_assign(self):
        # Template arguments that look like assignments must not be treated as such.
        test = build('../running/test_template.robot').tests[8]
        assert_keyword(test.body[0], (), 'Expect Exactly Three Args',
                       ('${SAME VARIABLE}', 'Variable content', '${VARIABLE}'))
        assert_equal(test.template, 'Expect Exactly Three Args')
# BUG FIX: removed a stray ` |` (dataset-row residue) after unittest.main().
if __name__ == '__main__':
    unittest.main()
import os
import numpy as np
from pytacs_analysis_base_test import PyTACSTestCase
from tacs import pytacs, elements, constitutive, functions
"""
This is the same test cases as `test_shell_plate_quad.py`, but the plate is been rotated
about the y-axis by 45 degrees, so that it lies in a slant in the xz plane. This test ensures that the plate solution
is invariant under trivial transformation:
a 10 kN point force at center, a 100kPa pressure applied to the surface, and a 100G gravity load. The
perimeter of the plate is fixed in all 6 degrees of freedom. The plate comprises
100 CQUAD4 elements and test KSFailure, StructuralMass, CenterOfMass, MomentOfInertia,
and Compliance functions and sensitivities
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
bdf_file = os.path.join(base_dir, "./input_files/slanted_plate.bdf")

# Reuse the reference function values and KS weight from the flat-plate test.
from test_shell_plate_quad import ProblemTest as PT, ksweight

# Define rotated coordinate frame axes (plate rotated 45 deg about the y-axis).
x_prime = np.sqrt(0.5) * np.array([1.0, 0.0, 1.0])
y_prime = np.array([0.0, 1.0, 0.0])
z_prime = np.sqrt(0.5) * np.array([-1.0, 0.0, 1.0])
class ProblemTest(PyTACSTestCase.PyTACSTest):
    """Slanted-plate regression test; function values must match the
    flat-plate references (FUNC_REFS) since the geometry differs only by a
    rigid rotation about the y-axis."""

    N_PROCS = 2  # this is how many MPI processes to use for this TestCase.
    FUNC_REFS = PT.FUNC_REFS  # reference values reused from the flat-plate test

    def setup_tacs_problems(self, comm):
        """
        Setup pytacs object for problems we will be testing.
        """
        # Overwrite default check values
        if self.dtype == complex:
            self.rtol = 1e-8
            self.atol = 1e-8
            self.dh = 1e-50
        else:
            self.rtol = 2e-1
            self.atol = 1e-4
            self.dh = 1e-6

        # Instantiate FEA Assembler
        struct_options = {}
        fea_assembler = pytacs.pyTACS(bdf_file, comm, options=struct_options)

        def elem_call_back(
            dv_num, comp_id, comp_descript, elem_descripts, global_dvs, **kwargs
        ):
            # Material properties
            rho = 2500.0  # density kg/m^3
            E = 70e9  # Young's modulus (Pa)
            nu = 0.3  # Poisson's ratio
            ys = 464.0e6  # yield stress

            # Plate geometry
            tplate = 0.005  # 5 mm

            # Set up property and constitutive models
            prop = constitutive.MaterialProperties(rho=rho, E=E, nu=nu, ys=ys)
            con = constitutive.IsoShellConstitutive(prop, t=tplate, tNum=dv_num)
            transform = None
            # Set up element
            elem = elements.Quad4Shell(transform, con)
            scale = [100.0]
            return elem, scale

        # Set up constitutive objects and elements
        fea_assembler.initialize(elem_call_back)

        tacs_probs = []

        # Add point force to node 81 (center of plate)
        sp = fea_assembler.createStaticProblem(name="point_load")
        F = np.zeros(6)
        F[:3] = 1e4 * z_prime
        sp.addLoadToNodes(81, F, nastranOrdering=True)
        tacs_probs.append(sp)

        # Add pressure to entire plate
        sp = fea_assembler.createStaticProblem(name="pressure")
        P = 100e3  # Pa
        compIDs = fea_assembler.selectCompIDs(include="PLATE")
        sp.addPressureToComponents(compIDs, P)
        tacs_probs.append(sp)

        # Add gravity load to entire plate (comment previously said "pressure")
        sp = fea_assembler.createStaticProblem(name="gravity")
        g = -981.0 * z_prime
        sp.addInertialLoad(g)
        tacs_probs.append(sp)

        # Add Functions
        for problem in tacs_probs:
            problem.addFunction("mass", functions.StructuralMass)
            problem.addFunction("ks_vmfailure", functions.KSFailure, ksWeight=ksweight)
            problem.addFunction("compliance", functions.Compliance)
            # Calculate cg and MOI in the rotated coordinate frame
            problem.addFunction("cgx", functions.CenterOfMass, direction=x_prime)
            problem.addFunction("cgy", functions.CenterOfMass, direction=y_prime)
            problem.addFunction("cgz", functions.CenterOfMass, direction=z_prime)
            problem.addFunction(
                "Ixx",
                functions.MomentOfInertia,
                direction1=x_prime,
                direction2=x_prime,
                aboutCM=True,
            )
            problem.addFunction(
                "Ixy",
                functions.MomentOfInertia,
                direction1=x_prime,
                direction2=y_prime,
                aboutCM=True,
            )
            problem.addFunction(
                "Ixz",
                functions.MomentOfInertia,
                direction1=x_prime,
                direction2=z_prime,
                aboutCM=True,
            )
            problem.addFunction(
                "Iyy",
                functions.MomentOfInertia,
                direction1=y_prime,
                direction2=y_prime,
                aboutCM=True,
            )
            problem.addFunction(
                "Iyz",
                functions.MomentOfInertia,
                direction1=y_prime,
                direction2=z_prime,
                aboutCM=True,
            )
            problem.addFunction(
                "Izz",
                functions.MomentOfInertia,
                direction1=z_prime,
                direction2=z_prime,
                aboutCM=True,
            )
        return tacs_probs, fea_assembler

    def METHOD_NAME(self):
        """Test that the load scaling is working correctly for the point and pressure loads."""
        # NOTE(review): masked name; presumably `test_residual_scaling`.
        res = self.fea_assembler.createVec(asBVec=True)
        scaledRes = self.fea_assembler.createVec(asBVec=True)
        for problem in self.tacs_probs:
            with self.subTest(problem=problem.name):
                np.random.seed(1)
                # Check that the residual is zero if the states and load scale are both zero
                problem.loadScale = 0.0
                problem.zeroVariables()
                problem.getResidual(res)
                self.assertEqual(np.real(res.norm()), 0.0)
                # Check that the loadScale does linearly scale the external loads
                fullRes = problem.assembler.createVec()
                problem.setLoadScale(1.0)
                problem.getResidual(fullRes)
                loadScale = np.random.rand()
                problem.setLoadScale(loadScale)
                problem.getResidual(scaledRes)
                # scaledRes -= loadScale*fullRes should = 0
                scaledRes.axpy(-loadScale, fullRes)
                # BUG FIX: assert_almost_equal's third positional argument is
                # `decimal` (an int); passing 1e-12 made the tolerance ~1.5,
                # i.e. the check was vacuous. Compare against zero with an
                # absolute tolerance scaled by the unscaled residual magnitude.
                # Also removed a stray trailing ` |` (dataset-row residue).
                atol = 1e-12 * (1.0 + np.real(fullRes.norm()))
                np.testing.assert_allclose(np.real(scaledRes.norm()), 0.0, atol=atol)
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from legate.core import LEGATE_MAX_DIM
import cunumeric as num
# cunumeric.searchsorted(a: ndarray, v: Union[int, float, ndarray],
# side: Literal['left', 'right'] = 'left',
# sorter: Optional[ndarray] = None) → Union[int, ndarray]
# ndarray.searchsorted(v, side='left', sorter=None)
SIDES = ["left", "right"]
STANDARD_CASES = [
(156, np.uint8),
(123, np.uint16),
(241, np.uint32),
(1, np.uint64),
(21, np.int8),
(5, np.int16),
(34, np.int32),
(11, np.int64),
(31, np.float32),
(11, np.float64),
(422, np.double),
(220, np.double),
(244, np.complex64),
(24, np.complex128),
(220, np.complex128),
(0, np.uint32),
]
DTYPE_CASES = [
(3, np.uint64, np.float32),
(51, np.uint32, np.complex64),
(23, np.uint32, np.float64),
(51, np.complex64, np.float64),
(21, np.complex64, np.int32),
(22, np.complex128, np.float32),
]
class TestSearchSortedErrors(object):
    """Error-path checks for searchsorted; xfail cases document known
    behavior divergences between NumPy and cuNumeric."""

    @pytest.mark.xfail
    def test_arr_none(self):
        expected_exc = AttributeError
        with pytest.raises(expected_exc):
            np.searchsorted(None, 10)
        # Numpy raises ValueError:
        # object of too small depth for desired array
        with pytest.raises(expected_exc):
            num.searchsorted(None, 10)
        # cuNumeric raises AttributeError: 'NoneType' object
        # has no attribute 'searchsorted'

    @pytest.mark.xfail
    def test_val_none(self):
        arr = [2, 3, 10, 9]
        expected_exc = TypeError
        with pytest.raises(expected_exc):
            np.searchsorted(arr, None)
        # Numpy raises TypeError: '<' not supported between
        # instances of 'NoneType' and 'NoneType'
        with pytest.raises(expected_exc):
            num.searchsorted(arr, None)
        # cuNumeric raises AssertionError
        #   if self.deferred is None:
        #       if self.parent is None:
        #           > assert self.runtime.is_supported_type(self.array.dtype)
        #   E AssertionError
        # cunumeric/cunumeric/eager.py:to_deferred_array()

    @pytest.mark.xfail
    def test_side_invalid(self):
        arr = [2, 3, 10, 9]
        expected_exc = ValueError
        with pytest.raises(expected_exc):
            np.searchsorted(arr, 10, "hi")
        # Numpy raises ValueError: search side must be 'left' or 'right'
        # (got 'hi')
        with pytest.raises(expected_exc):
            num.searchsorted(arr, 10, "hi")
        # cuNumeric passed, and the result is the same as that of 'right'.

    def test_ndim_mismatch(self):
        # `a` must be 1-D for searchsorted.
        a = np.random.random((5, 5, 5))
        expected_exc = ValueError
        with pytest.raises(expected_exc):
            num.searchsorted(a, 5)
        with pytest.raises(expected_exc):
            np.searchsorted(a, 5)

    @pytest.mark.xfail
    def test_sorter_ndim_mismatch(self):
        a = np.random.randint(-100, 100, size=100)
        v = np.random.randint(-100, 100, size=10)
        a_argsorted = np.random.random((5, 5, 5))
        expected_exc = ValueError
        with pytest.raises(expected_exc):
            num.searchsorted(a, v, sorter=a_argsorted)
        with pytest.raises(expected_exc):
            # Numpy raises TypeError
            np.searchsorted(a, v, sorter=a_argsorted)

    def test_sorter_shape_mismatch(self):
        # The sorter must have the same length as `a`.
        a = np.random.randint(-100, 100, size=100)
        v = np.random.randint(-100, 100, size=10)
        a_argsorted = np.random.randint(-100, 100, size=10)
        expected_exc = ValueError
        with pytest.raises(expected_exc):
            num.searchsorted(a, v, sorter=a_argsorted)
        with pytest.raises(expected_exc):
            np.searchsorted(a, v, sorter=a_argsorted)

    @pytest.mark.xfail
    def METHOD_NAME(self):
        # NOTE(review): masked name; presumably `test_sorter_dtype_mismatch`
        # (the sorter below is float-valued instead of integer indices).
        a = np.random.randint(-100, 100, size=100)
        v = np.random.randint(-100, 100, size=10)
        a_argsorted = np.random.random(size=100)
        expected_exc = ValueError
        with pytest.raises(expected_exc):
            num.searchsorted(a, v, sorter=a_argsorted)
        with pytest.raises(expected_exc):
            # Numpy raises TypeError
            np.searchsorted(a, v, sorter=a_argsorted)
def generate_random(volume, datatype):
    """Return a 1-D array of `volume` random values of dtype `datatype`.

    Supports integer, floating and complex dtypes.

    Raises:
        TypeError: for any other dtype. (Previously this path was a bare
        `assert False`, which silently disappears under `python -O`.)
    """
    if np.issubdtype(datatype, np.integer):
        info = np.iinfo(datatype)
        return np.array(
            np.random.randint(
                info.min,
                info.max,
                size=volume,
                dtype=datatype,
            ),
            dtype=datatype,
        )
    if np.issubdtype(datatype, np.floating):
        return np.array(np.random.random(size=volume), dtype=datatype)
    if np.issubdtype(datatype, np.complexfloating):
        return np.array(
            np.random.random(size=volume) + np.random.random(size=volume) * 1j,
            dtype=datatype,
        )
    raise TypeError("unsupported dtype for random generation: {}".format(datatype))
def check_api(a, dtype2=None, v=None, side="left"):
    """Check cuNumeric's searchsorted (method and module forms) against NumPy."""
    sorter = np.argsort(a)
    if v is None:
        # Generate search values; use `a`'s dtype unless an override is given.
        v = generate_random(10, dtype2 if dtype2 is not None else a.dtype)

    a_num = num.array(a)
    v_num = num.array(v)
    sorter_num = num.array(sorter)

    # ndarray.searchsorted form
    assert num.array_equal(
        a.searchsorted(v, side=side, sorter=sorter),
        a_num.searchsorted(v_num, side=side, sorter=sorter_num),
    )

    # module-level searchsorted form
    assert num.array_equal(
        np.searchsorted(a, v, side=side, sorter=sorter),
        num.searchsorted(a_num, v_num, side=side, sorter=sorter_num),
    )
@pytest.mark.parametrize("side", SIDES)
def test_empty_v(side):
    # Empty value arrays and empty source arrays are valid inputs.
    check_api(np.arange(25), None, np.arange(0), side)
    check_api(np.array([]), side=side)
    check_api(np.arange(0), None, np.arange(0), side=side)


@pytest.mark.parametrize("volume, dtype1, dtype2", DTYPE_CASES, ids=str)
@pytest.mark.parametrize("side", SIDES)
def test_dtype_conversions(volume, dtype1, dtype2, side):
    # `a` and `v` use different dtypes; promotion must match NumPy.
    check_api(generate_random(volume, dtype1), dtype2, side=side)


@pytest.mark.parametrize("volume, dtype", STANDARD_CASES, ids=str)
@pytest.mark.parametrize("side", SIDES)
def test_standard_cases(volume, dtype, side):
    check_api(generate_random(volume, dtype), side=side)


@pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM + 1))
@pytest.mark.parametrize("side", SIDES)
def test_ndim(ndim, side):
    # `v` may be arbitrary-dimensional; only `a` must stay 1-D.
    a = np.random.randint(-100, 100, size=100)
    v = np.random.randint(-100, 100, size=2**ndim).reshape(
        tuple(2 for i in range(ndim))
    )
    check_api(a, None, v, side)
# BUG FIX: removed a stray ` |` (dataset-row residue) after pytest.main().
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(sys.argv))
from unittest.mock import patch
import pytest
from django.http import HttpResponse
from django.shortcuts import render
from pontoon.test.factories import UserFactory
def _get_sorted_users():
    """Create three users and return them ordered by email address."""
    users = UserFactory.create_batch(size=3)
    users.sort(key=lambda user: user.email)
    return users
@pytest.fixture
def METHOD_NAME():
    # NOTE(review): masked identifier; presumably the `translators` fixture
    # (three users sorted by email, used as locale translators below).
    return _get_sorted_users()


@pytest.fixture
def managers():
    # Three users sorted by email, used as locale managers.
    return _get_sorted_users()


@pytest.mark.no_cover
@pytest.mark.django_db
def test_teams_list(client, locale_a):
    """
    Tests if the teams list is rendered properly.
    """
    response = client.get("/teams/")
    assert response.status_code == 200
    assert response.resolver_match.view_name == "pontoon.teams"


@pytest.mark.django_db
def test_missing_locale(client):
    """
    Tests if the backend is returning an error on the missing locale.
    """
    response = client.get("/missing-locale/")
    assert response.status_code == 404
    assert response.resolver_match.view_name == "pontoon.teams.team"


@pytest.mark.django_db
@patch("pontoon.teams.views.render", wraps=render)
def test_locale_view(mock_render, translation_a, client):
    """
    Check if the locale view finds the right locale and passes it to the template.
    """
    client.get(f"/{translation_a.locale.code}/")
    assert mock_render.call_args[0][2]["locale"] == translation_a.locale


@pytest.mark.django_db
def test_contributors_of_missing_locale(client):
    """
    Tests if the contributors view is returning an error on the missing locale.
    """
    response = client.get("/missing-locale/contributors/")
    assert response.status_code == 404
    assert response.resolver_match.view_name == "pontoon.teams.contributors"
@pytest.mark.django_db
@patch("pontoon.teams.views.render", wraps=render)
def test_ajax_permissions_locale_translators_managers_order(
    render_mock,
    admin_client,
    locale_a,
    METHOD_NAME,
    managers,
):
    """
    Translators and managers of a locale should be sorted by email in
    "Permissions" tab.
    """
    # NOTE(review): METHOD_NAME is the masked `translators` fixture.
    locale_a.translators_group.user_set.add(*METHOD_NAME)
    locale_a.managers_group.user_set.add(*managers)

    admin_client.get("/%s/ajax/permissions/" % locale_a.code)

    response_context = render_mock.call_args[0][2]
    assert list(response_context["translators"]) == METHOD_NAME
    assert list(response_context["managers"]) == managers


@pytest.mark.django_db
@patch("pontoon.teams.views.render", wraps=render)
def test_ajax_permissions_project_locale_translators_order(
    render_mock,
    admin_client,
    locale_a,
    project_locale_a,
    resource_a,  # required for project_locale_a to work
    METHOD_NAME,
):
    """
    Translators and managers of a locale should be sorted by email in
    "Permissions" tab.
    """
    project_locale_a.translators_group.user_set.add(*METHOD_NAME)

    admin_client.get("/%s/ajax/permissions/" % locale_a.code)

    response_context = render_mock.call_args[0][2]
    locale_projects = response_context["locale_projects"]

    # Check project_locale id in the permissions form
    assert locale_projects[0][0] == project_locale_a.pk

    # Check project_locale translators
    translators_list = [
        {"id": u.id, "email": u.email, "first_name": u.first_name} for u in METHOD_NAME
    ]
    assert locale_projects[0][3] == translators_list
@pytest.mark.django_db
def test_users_permissions_for_ajax_permissions_view(
    client,
    locale_a,
    member,
):
    """
    Check if anonymous users and users without permissions can't access
    Permissions Tab.
    """
    # Anonymous users are rejected outright.
    response = client.get(f"/{locale_a.code}/ajax/permissions/")
    assert response.status_code == 403
    assert b"<title>Forbidden page</title>" in response.content

    # Check if users without permissions for the locale can get this tab.
    response = member.client.get(f"/{locale_a.code}/ajax/permissions/")
    assert response.status_code == 403
    assert b"<title>Forbidden page</title>" in response.content

    locale_a.managers_group.user_set.add(member.user)

    # Bump up permissions for user0 and check if the view is accessible.
    response = member.client.get(f"/{locale_a.code}/ajax/permissions/")
    assert response.status_code == 200
    assert b"<title>Forbidden page</title>" not in response.content

    # Remove permissions for user0 and check if the view is not accessible.
    locale_a.managers_group.user_set.clear()
    response = member.client.get(f"/{locale_a.code}/ajax/permissions/")
    assert response.status_code == 403
    assert b"<title>Forbidden page</title>" in response.content

    # All unauthorized attempts to POST data should be blocked
    response = member.client.post(
        f"/{locale_a.code}/ajax/permissions/",
        data={"smth": "smth"},
    )
    assert response.status_code == 403
    assert b"<title>Forbidden page</title>" in response.content

    response = client.post(
        f"/{locale_a.code}/ajax/permissions/",
        data={"smth": "smth"},
    )
    assert response.status_code == 403
    assert b"<title>Forbidden page</title>" in response.content
# BUG FIX: removed a stray trailing ` |` (dataset-row residue) after the last
# assertion, which made the function a syntax error.
@pytest.mark.django_db
@patch(
    "pontoon.teams.views.LocaleContributorsView.render_to_response",
    return_value=HttpResponse(""),
)
def test_locale_top_contributors(mock_render, client, translation_a, locale_b):
    """
    Tests if the view returns top contributors specific for given locale.
    """
    # Locale with one translation: its author is the only contributor.
    client.get(
        f"/{translation_a.locale.code}/ajax/contributors/",
        HTTP_X_REQUESTED_WITH="XMLHttpRequest",
    )
    response_context = mock_render.call_args[0][0]
    assert response_context["locale"] == translation_a.locale
    assert list(response_context["contributors"]) == [translation_a.user]

    # Locale with no translations: no contributors.
    client.get(
        f"/{locale_b.code}/ajax/contributors/",
        HTTP_X_REQUESTED_WITH="XMLHttpRequest",
    )
    response_context = mock_render.call_args[0][0]
    assert response_context["locale"] == locale_b
    assert list(response_context["contributors"]) == []
# Copyright (c) Princeton University.
# This source code is licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree.
# Authors:
# - Alexander Raistrick: AssetFactory, make_asset_collection
# - Lahav Lipson: quickly_resample
import typing
import bpy
import mathutils
import numpy as np
import logging
from tqdm import trange
from util import blender as butil
from util.math import FixedSeed, int_hash
from . import detail
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class AssetFactory:
    """Base class for procedural asset generators.

    Subclasses override the `create_*` hooks; `spawn_placeholder` /
    `spawn_asset` drive them with per-asset deterministic seeding derived from
    `factory_seed` and the asset index `i`.
    """

    def __init__(self, factory_seed=None, coarse=False):
        # Seed shared by all assets of this factory; random when omitted.
        self.factory_seed = factory_seed
        if self.factory_seed is None:
            self.factory_seed = np.random.randint(1e9)
        self.coarse = coarse
        logger.debug(f'{self}.__init__()')

    def __repr__(self):
        return f'{self.__class__.__name__}({self.factory_seed})'

    @staticmethod
    def quickly_resample(obj):
        # Randomize the yaw of a placeholder empty in place.
        assert obj.type == "EMPTY", obj.type
        obj.rotation_euler[2] = np.random.uniform(-np.pi, np.pi)

    def METHOD_NAME(self, **kwargs) -> bpy.types.Object:
        # NOTE(review): masked name; presumably `create_placeholder`.
        # Optionally, override this function to decide what will be used as a
        # placeholder for your asset.
        return butil.spawn_cube(size=2)

    def finalize_placeholders(self, placeholders: typing.List[bpy.types.Object]):
        # Optionally, override this function to perform any operations on all
        # the placeholders at once, e.g. joint space colonization, placing
        # vines between trees.
        pass

    def asset_parameters(self, distance: float, vis_distance: float) -> dict:
        # Optionally, override to determine the **params input of create_asset
        # w.r.t. camera distance.
        return {'face_size': detail.target_face_size(distance), 'distance': distance,
                'vis_distance': vis_distance}

    def create_asset(self, **params) -> bpy.types.Object:
        # Override this function to produce a high detail asset.
        raise NotImplementedError

    def finalize_assets(self, assets):
        # Optionally, override this function to perform any operations on all
        # the assets at once, e.g. cleanup / grouping.
        pass

    def spawn_placeholder(self, i, loc, rot):
        # Not intended to be overridden - override the create hook instead.
        logger.debug(f'{self}.spawn_placeholder({i}...)')

        # Deterministic per-asset randomness: seed from (factory_seed, i).
        with FixedSeed(int_hash((self.factory_seed, i))):
            obj = self.METHOD_NAME(i=i, loc=loc, rot=rot)

        has_sensitive_constraint = any(c.type in ['FOLLOW_PATH'] for c in obj.constraints)
        if not has_sensitive_constraint:
            obj.location = loc
            obj.rotation_euler = rot
        else:
            logger.debug(f'Not assigning placeholder {obj.name=} location due to presence of'
                         'location-sensitive constraint, typically a follow curve')

        obj.name = f'{repr(self)}.spawn_placeholder({i})'

        if obj.parent is not None:
            logger.warning(f'{obj.name=} has no-none parent {obj.parent.name=}, this may cause it not to get populated')

        return obj

    def spawn_asset(self, i, placeholder=None, distance=None, vis_distance=0, loc=(0, 0, 0), rot=(0, 0, 0),
                    **kwargs):
        # Not intended to be overridden - override create_asset instead.
        logger.debug(f'{self}.spawn_asset({i}...)')

        if distance is None:
            distance = detail.scatter_res_distance()

        if self.coarse:
            raise ValueError(f'Attempted to spawn_asset() on an AssetFactory(coarse=True)')

        if placeholder is None:
            # No caller-provided placeholder: make a temporary one.
            placeholder = self.spawn_placeholder(i=i, loc=loc, rot=rot)
            self.finalize_placeholders([placeholder])
            keep_placeholder = False
        else:
            keep_placeholder = True
            assert loc == (0, 0, 0) and rot == (0, 0, 0)

        gc_targets = [bpy.data.meshes, bpy.data.textures, bpy.data.node_groups, bpy.data.materials]

        with FixedSeed(int_hash((self.factory_seed, i))), butil.GarbageCollect(gc_targets, verbose=False):
            params = self.asset_parameters(distance, vis_distance)
            params.update(kwargs)
            obj = self.create_asset(i=i, placeholder=placeholder, **params)

        obj.name = f'{repr(self)}.spawn_asset({i})'

        if keep_placeholder:
            # NOTE(review): nesting of this branch was reconstructed from
            # whitespace-mangled input — confirm against upstream infinigen.
            if obj is not placeholder:
                if obj.parent is None:
                    butil.parent_to(obj, placeholder, no_inverse=True)
            else:
                # create_asset populated the placeholder itself; make it render.
                obj.hide_render = False
        else:
            # Temporary placeholder: copy its transform onto the asset, delete it.
            obj.parent = None
            obj.location = placeholder.location
            obj.rotation_euler = placeholder.rotation_euler
            butil.delete(placeholder)

        return obj

    __call__ = spawn_asset  # for convenience
def make_asset_collection(spawn_fns, n, name=None, weights=None, as_list=False, verbose=True, **kwargs):
    """Spawn `n` assets by sampling from one or more spawn functions.

    Args:
        spawn_fns: a callable or list of callables, each invoked as
            fn(i=..., **kwargs) and returning an object.
        n: number of assets to create.
        name: collection name; defaults to the reprs of the spawn functions.
        weights: optional relative sampling weights, one per spawn function
            (any numeric sequence; normalized internally).
        as_list: if True, return a plain list instead of a hidden collection.
        verbose: log and show a tqdm progress bar.

    Returns:
        A list of spawned objects (as_list=True) or a hidden blender collection.
    """
    if not isinstance(spawn_fns, list):
        spawn_fns = [spawn_fns]

    # BUG FIX: a caller-supplied plain list previously crashed on the in-place
    # `/=`; normalize through an ndarray instead. Defaults to uniform weights.
    if weights is None:
        weights = np.ones(len(spawn_fns))
    else:
        weights = np.asarray(weights, dtype=float)
    weights = weights / weights.sum()

    if name is None:
        name = ','.join([repr(f) for f in spawn_fns])
    if verbose:
        logger.info(f'Generating collection of {n} assets from {name}')

    # One bucket per spawn function so each factory can finalize its own assets.
    objs = [[] for _ in range(len(spawn_fns))]
    r = trange(n) if verbose else range(n)
    for i in r:
        fn_idx = np.random.choice(np.arange(len(spawn_fns)), p=weights)
        obj = spawn_fns[fn_idx](i=i, **kwargs)
        objs[fn_idx].append(obj)

    for os, f in zip(objs, spawn_fns):
        if hasattr(f, 'finalize_assets'):
            f.finalize_assets(os)

    objs = sum(objs, start=[])

    if as_list:
        return objs
    else:
        # BUG FIX: removed a stray trailing ` |` (dataset-row residue) after
        # `return col` that made this a syntax error.
        col = butil.group_in_collection(objs, name=f'assets:{name}', reuse=False)
        col.hide_viewport = True
        col.hide_render = True
        return col
#############################################################################
# Celestica Seastone2
#
# Watchdog contains an implementation of SONiC Platform Base API
#
#############################################################################
import os
import time
try:
from sonic_platform_base.watchdog_base import WatchdogBase
from sonic_platform.common import Common
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
PLATFORM_CPLD_PATH = '/sys/devices/platform/baseboard/'
GETREG_FILE = 'getreg'
SETREG_FILE = 'setreg'
WDT_ENABLE_REG = '0xA181'
WDT_TIMER_L_BIT_REG = '0xA182'
WDT_TIMER_M_BIT_REG = '0xA183'
WDT_TIMER_H_BIT_REG = '0xA184'
WDT_KEEP_ALVIVE_REG = '0xA185'
ENABLE_CMD = '0x1'
DISABLE_CMD = '0x0'
WDT_COMMON_ERROR = -1
class Watchdog(WatchdogBase):
    """Watchdog implementation driven by the baseboard CPLD.

    All hardware access goes through the CPLD sysfs getreg/setreg files; the
    timer counts milliseconds split across three 8-bit registers (L/M/H).
    """

    def __init__(self):
        WatchdogBase.__init__(self)
        self._api_common = Common()
        # Init cpld reg paths.
        self.setreg_path = os.path.join(PLATFORM_CPLD_PATH, SETREG_FILE)
        self.getreg_path = os.path.join(PLATFORM_CPLD_PATH, GETREG_FILE)
        # Start from a known state: disabled, timeout read back from hardware.
        self._disable()
        self.armed = False
        self.timeout = self._gettimeout()

    def _enable(self):
        """Turn on the watchdog timer."""
        # echo 0xA181 0x1 > /sys/devices/platform/baseboard/setreg
        enable_val = '{} {}'.format(WDT_ENABLE_REG, ENABLE_CMD)
        return self._api_common.write_txt_file(self.setreg_path, enable_val)

    def _disable(self):
        """Turn off the watchdog timer."""
        # echo 0xA181 0x0 > /sys/devices/platform/baseboard/setreg
        disable_val = '{} {}'.format(WDT_ENABLE_REG, DISABLE_CMD)
        return self._api_common.write_txt_file(self.setreg_path, disable_val)

    def _keepalive(self):
        """Feed (kick) the watchdog timer so it restarts its countdown."""
        # echo 0xA185 0x1 > /sys/devices/platform/baseboard/setreg
        enable_val = '{} {}'.format(WDT_KEEP_ALVIVE_REG, ENABLE_CMD)
        return self._api_common.write_txt_file(self.setreg_path, enable_val)

    def _get_level_hex(self, sub_hex):
        """Retained for compatibility: convert a 2-char slice of a hex string to a
        hex literal, treating a stray 'x' (from short strings) as zero padding."""
        sub_hex_str = sub_hex.replace("x", "0")
        return hex(int(sub_hex_str, 16))

    def _seconds_to_lmh_hex(self, seconds):
        """Split a timeout in seconds into (low, mid, high) byte hex strings.

        The hardware takes the timeout in milliseconds spread over three 8-bit
        registers. Plain bit masking replaces the former fragile string-slicing
        of hex(ms), which raised ValueError for seconds == 0 and produces the
        same values for every timeout in the supported range.
        """
        ms = seconds * 1000  # timeout in ms, max 0xffffff
        return (hex(ms & 0xFF), hex((ms >> 8) & 0xFF), hex((ms >> 16) & 0xFF))

    def _settimeout(self, seconds):
        """
        Set watchdog timer timeout
        @param seconds - timeout in seconds
        @return is the actual set timeout
        """
        # max = 0xffffff = 16777.215 seconds
        (l, m, h) = self._seconds_to_lmh_hex(seconds)
        set_h_val = '{} {}'.format(WDT_TIMER_H_BIT_REG, h)
        set_m_val = '{} {}'.format(WDT_TIMER_M_BIT_REG, m)
        set_l_val = '{} {}'.format(WDT_TIMER_L_BIT_REG, l)
        self._api_common.write_txt_file(self.setreg_path, set_h_val)  # set high byte
        self._api_common.write_txt_file(self.setreg_path, set_m_val)  # set mid byte
        self._api_common.write_txt_file(self.setreg_path, set_l_val)  # set low byte
        return seconds

    def _gettimeout(self):
        """
        Get watchdog timeout
        @return watchdog timeout in whole seconds
        """
        # NOTE(review): assumes get_reg returns zero-padded two-digit values such
        # as '0x0a'; a single-digit reply would misalign the concatenation below.
        h_bit = self._api_common.get_reg(self.getreg_path, WDT_TIMER_H_BIT_REG)
        m_bit = self._api_common.get_reg(self.getreg_path, WDT_TIMER_M_BIT_REG)
        l_bit = self._api_common.get_reg(self.getreg_path, WDT_TIMER_L_BIT_REG)
        hex_time = '0x{}{}{}'.format(h_bit[2:], m_bit[2:], l_bit[2:])
        ms = int(hex_time, 16)
        return int(float(ms) / 1000)

    #################################################################

    def arm(self, seconds):
        """
        Arm the hardware watchdog with a timeout of <seconds> seconds.
        If the watchdog is currently armed, calling this function will
        simply reset the timer to the provided value. If the underlying
        hardware does not support the value provided in <seconds>, this
        method should arm the watchdog with the *next greater* available
        value.
        Returns:
            An integer specifying the *actual* number of seconds the watchdog
            was armed with. On failure returns -1.
        """
        ret = WDT_COMMON_ERROR
        if seconds < 0:
            return ret
        try:
            if self.timeout != seconds:
                self.timeout = self._settimeout(seconds)
            if self.armed:
                self._keepalive()
            else:
                self._enable()
                self.armed = True
            ret = self.timeout
            self.arm_timestamp = time.time()  # used by get_remaining_time()
        except IOError:
            # Register file unavailable; report failure via the -1 sentinel.
            pass
        return ret

    def disarm(self):
        """
        Disarm the hardware watchdog
        Returns:
            A boolean, True if watchdog is disarmed successfully, False if not
        """
        disarmed = False
        if self.METHOD_NAME():
            try:
                self._disable()
                self.armed = False
                disarmed = True
            except IOError:
                pass
        return disarmed

    def METHOD_NAME(self):
        """
        Retrieves the armed state of the hardware watchdog.
        Returns:
            A boolean, True if watchdog is armed, False if not
        """
        return self.armed

    def get_remaining_time(self):
        """
        If the watchdog is armed, retrieve the number of seconds remaining on
        the watchdog timer
        Returns:
            An integer specifying the number of seconds remaining on the
            watchdog timer. If the watchdog is not armed, returns -1.
        """
        return int(self.timeout - (time.time() - self.arm_timestamp)) if self.armed else WDT_COMMON_ERROR
6,838 | chainer local training job | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy
import pytest
from sagemaker.chainer.estimator import Chainer
from sagemaker.chainer.model import ChainerModel
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
@pytest.fixture(scope="module")
def METHOD_NAME(sagemaker_local_session, chainer_latest_version, chainer_latest_py_version):
    """Module-scoped fixture: one single-instance local-mode Chainer MNIST training job."""
    estimator = _run_mnist_training_job(
        sagemaker_session=sagemaker_local_session,
        instance_type="local",
        instance_count=1,
        chainer_version=chainer_latest_version,
        py_version=chainer_latest_py_version,
    )
    return estimator
@pytest.mark.local_mode
def test_distributed_cpu_training(
    sagemaker_local_session, chainer_latest_version, chainer_latest_py_version
):
    """Two-instance local-mode training should run to completion without raising."""
    _run_mnist_training_job(
        sagemaker_session=sagemaker_local_session,
        instance_type="local",
        instance_count=2,
        chainer_version=chainer_latest_version,
        py_version=chainer_latest_py_version,
    )
@pytest.mark.local_mode
def test_training_with_additional_hyperparameters(
    sagemaker_local_session, chainer_latest_version, chainer_latest_py_version
):
    """Local-mode training with Chainer's MPI distribution options should complete."""
    script_path = os.path.join(DATA_DIR, "chainer_mnist", "mnist.py")
    data_path = os.path.join(DATA_DIR, "chainer_mnist")
    chainer = Chainer(
        entry_point=script_path,
        role="SageMakerRole",
        instance_count=1,
        instance_type="local",
        framework_version=chainer_latest_version,
        py_version=chainer_latest_py_version,
        sagemaker_session=sagemaker_local_session,
        hyperparameters={"epochs": 1},
        # Chainer-specific distribution knobs exercised by this test:
        use_mpi=True,
        num_processes=2,
        process_slots_per_host=2,
        additional_mpi_options="-x NCCL_DEBUG=INFO",
    )
    # Local mode reads training channels straight from the filesystem.
    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    chainer.fit({"train": train_input, "test": test_input})
def test_attach_deploy(
    sagemaker_session, chainer_latest_version, chainer_latest_py_version, cpu_instance_type
):
    """Start a training job asynchronously, re-attach by job name, deploy, predict."""
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        script_path = os.path.join(DATA_DIR, "chainer_mnist", "mnist.py")
        data_path = os.path.join(DATA_DIR, "chainer_mnist")
        chainer = Chainer(
            entry_point=script_path,
            role="SageMakerRole",
            framework_version=chainer_latest_version,
            py_version=chainer_latest_py_version,
            instance_count=1,
            instance_type=cpu_instance_type,
            sagemaker_session=sagemaker_session,
            hyperparameters={"epochs": 1},
        )
        # Non-local mode: training data must be staged in S3 first.
        train_input = sagemaker_session.upload_data(
            path=os.path.join(data_path, "train"), key_prefix="integ-test-data/chainer_mnist/train"
        )
        test_input = sagemaker_session.upload_data(
            path=os.path.join(data_path, "test"), key_prefix="integ-test-data/chainer_mnist/test"
        )
        job_name = unique_name_from_base("test-chainer-training")
        # wait=False: fit returns immediately; Chainer.attach() below blocks
        # until the job finishes, which is what this test exercises.
        chainer.fit({"train": train_input, "test": test_input}, wait=False, job_name=job_name)
    endpoint_name = unique_name_from_base("test-chainer-attach-deploy")
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        estimator = Chainer.attach(
            chainer.latest_training_job.name, sagemaker_session=sagemaker_session
        )
        predictor = estimator.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
        _predict_and_assert(predictor)
@pytest.mark.local_mode
def test_deploy_model(
    METHOD_NAME,
    sagemaker_local_session,
    chainer_latest_version,
    chainer_latest_py_version,
):
    """Build a ChainerModel from the training fixture's artifacts and predict locally."""
    script_path = os.path.join(DATA_DIR, "chainer_mnist", "mnist.py")
    model = ChainerModel(
        METHOD_NAME.model_data,  # model artifact produced by the fixture's training job
        "SageMakerRole",
        entry_point=script_path,
        sagemaker_session=sagemaker_local_session,
        framework_version=chainer_latest_version,
        py_version=chainer_latest_py_version,
    )
    predictor = model.deploy(1, "local")
    try:
        _predict_and_assert(predictor)
    finally:
        # Always tear down the local endpoint container.
        predictor.delete_endpoint()
def _run_mnist_training_job(
    sagemaker_session, instance_type, instance_count, chainer_version, py_version
):
    """Run a Chainer MNIST training job and return the fitted estimator.

    Single-instance jobs use ``mnist.py``; multi-instance jobs use the
    distributed training script.
    """
    # BUG FIX: the original tested `instance_type == 1`, which is never true
    # (instance_type is a string such as "local"), so the distributed script was
    # always selected. The script choice must follow the instance *count*.
    script_path = (
        os.path.join(DATA_DIR, "chainer_mnist", "mnist.py")
        if instance_count == 1
        else os.path.join(DATA_DIR, "chainer_mnist", "distributed_mnist.py")
    )
    data_path = os.path.join(DATA_DIR, "chainer_mnist")
    chainer = Chainer(
        entry_point=script_path,
        role="SageMakerRole",
        framework_version=chainer_version,
        py_version=py_version,
        instance_count=instance_count,
        instance_type=instance_type,
        sagemaker_session=sagemaker_session,
        hyperparameters={"epochs": 1},
        # test output_path without trailing slash
        output_path="s3://{}".format(sagemaker_session.default_bucket()),
    )
    train_input = "file://" + os.path.join(data_path, "train")
    test_input = "file://" + os.path.join(data_path, "test")
    job_name = unique_name_from_base("test-chainer-training")
    chainer.fit({"train": train_input, "test": test_input}, job_name=job_name)
    return chainer
def _predict_and_assert(predictor):
batch_size = 100
data = numpy.zeros((batch_size, 784), dtype="float32")
output = predictor.predict(data)
assert len(output) == batch_size
data = numpy.zeros((batch_size, 1, 28, 28), dtype="float32")
output = predictor.predict(data)
assert len(output) == batch_size
data = numpy.zeros((batch_size, 28, 28), dtype="float32")
output = predictor.predict(data)
assert len(output) == batch_size |
6,839 | content | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "dynatrace monitor list-linkable-environment"
)
class ListLinkableEnvironment(AAZCommand):
    """Get all the dynatrace environments that a user can link a azure resource to

    :example: List-linkable-environment
        az dynatrace monitor list-linkable-environment -g rg --monitor-name monitor --user-principal Alice@microsoft.com --region eastus2euap
    """

    # Generated command metadata: API version and the ARM resource it targets.
    _aaz_info = {
        "version": "2021-09-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/dynatrace.observability/monitors/{}/listlinkableenvironments", "2021-09-01"],
        ]
    }

    def _handler(self, command_args):
        # Paged command: returns a (results, next_link) pager built over
        # _execute_operations/_output.
        super()._handler(command_args)
        return self.build_paging(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.monitor_name = AAZStrArg(
            options=["--monitor-name"],
            help="Monitor resource name",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )

        # define Arg Group "Request" (arguments forwarded in the POST body)
        _args_schema = cls._args_schema
        _args_schema.region = AAZStrArg(
            options=["--region"],
            arg_group="Request",
            help="Azure region in which we want to link the environment",
        )
        _args_schema.tenant_id = AAZStrArg(
            options=["--tenant-id"],
            arg_group="Request",
            help="Tenant Id of the user in which they want to link the environment",
        )
        _args_schema.user_principal = AAZStrArg(
            options=["--user-principal"],
            arg_group="Request",
            help="user principal id of the user",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.MonitorsListLinkableEnvironments(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # Return the current page plus the continuation link for paging.
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class MonitorsListLinkableEnvironments(AAZHttpOperation):
        # Wraps the listLinkableEnvironments POST call against ARM.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/listLinkableEnvironments",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "monitorName", self.ctx.args.monitor_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-09-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def METHOD_NAME(self):
            # Build the JSON request body from the "Request" argument group.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("region", AAZStrType, ".region")
            _builder.set_prop("tenantId", AAZStrType, ".tenant_id")
            _builder.set_prop("userPrincipal", AAZStrType, ".user_principal")
            return self.serialize_content(_content_value)

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
            )
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.environment_id = AAZStrType(
                serialized_name="environmentId",
            )
            _element.environment_name = AAZStrType(
                serialized_name="environmentName",
            )
            _element.plan_data = AAZObjectType(
                serialized_name="planData",
            )

            plan_data = cls._schema_on_200.value.Element.plan_data
            plan_data.billing_cycle = AAZStrType(
                serialized_name="billingCycle",
            )
            plan_data.effective_date = AAZStrType(
                serialized_name="effectiveDate",
            )
            plan_data.plan_details = AAZStrType(
                serialized_name="planDetails",
            )
            plan_data.usage_type = AAZStrType(
                serialized_name="usageType",
            )

            return cls._schema_on_200
__all__ = ["ListLinkableEnvironment"] |
6,840 | create trash folder | from errno import ENOTEMPTY
from os import W_OK, access, mkdir, rmdir, stat, statvfs, walk
from os.path import getsize, isdir, join, realpath, split
from time import time
from enigma import eBackgroundFileEraser, eLabel, iRecordableService, pNavigation
from Components.config import config
from Components.GUIComponent import GUIComponent
from Components.Harddisk import findMountPoint
from Components.Task import Job, PythonTask, job_manager as jobManager
from Components.VariableText import VariableText
from Tools.Conversions import scaleNumber
from Tools.Directories import fileReadLines
MODULE_NAME = __name__.split(".")[-1]
TRASHCAN = ".Trash" # This should this be ".Trashcan" to be consistent with the module.
def getTrashcan(path=None):  # Returns trashcan folder without symbolic links.
    """Resolve the trashcan directory on the filesystem containing *path*.

    Returns "" when the path is unusable; the directory is never created here.
    """
    if path:
        path = realpath(path)  # collapse symlinks so mount-point detection is reliable
    try:
        if path is None or path == "/media/autofs":
            print("[Trashcan] Error: Trashcan path is None or invalid!")
            trashcan = ""
        else:
            # Recordings live under <mount>/movie; keep the trashcan next to them.
            trashcan = join(join(findMountPoint(path), "movie") if "/movie" in path else findMountPoint(path), TRASHCAN)
    except OSError as err:
        print("[Trashcan] Error %d: Unable to locate trashcan folder! (%s)" % (err.errno, err.strerror))
        trashcan = ""
    return trashcan
def createTrashcan(path=None):
    """Return the trashcan directory for *path*, creating it if needed.

    Returns None when the parent is not writable or the directory can't be
    created (or exists but is not a directory).
    """
    trashcan = getTrashcan(path)
    if trashcan and access(split(trashcan)[0], W_OK):
        try:
            # EAFP: attempt the mkdir directly instead of the former
            # isdir()+mkdir() pair, which raced with concurrent creators.
            mkdir(trashcan)
        except FileExistsError:
            if not isdir(trashcan):
                # A non-directory occupies the trashcan path - unusable.
                print("[Trashcan] Error: Trashcan path '%s' exists but is not a directory!" % trashcan)
                trashcan = None
        except OSError as err:
            print("[Trashcan] Error %d: Unable to create trashcan folder '%s'! (%s)" % (err.errno, trashcan, err.strerror))
            trashcan = None
    else:
        trashcan = None
    return trashcan
def METHOD_NAME(path=None):
    """Thin wrapper around createTrashcan(); kept as a separate entry point
    (presumably for legacy callers - confirm before removing)."""
    return createTrashcan(path=path)
def getTrashcanSize(startPath="."):
    """Return the total size in bytes of every file below *startPath*.

    A falsy path yields 0; unreadable files are reported and skipped.
    """
    if not startPath:
        return 0
    totalBytes = 0
    for root, _dirs, files in walk(startPath):
        for name in files:
            filePath = join(root, name)
            try:
                totalBytes += getsize(filePath)
            except OSError as err:
                print("[Trashcan] Error %d: Unable to get directory size for '%s'! (%s)" % (err.errno, filePath, err.strerror))
    return totalBytes
class Trashcan:
    """Session helper that triggers trashcan cleanup when the box is idle."""

    def __init__(self, session):
        self.session = session
        session.nav.record_event.append(self.gotRecordEvent)
        self.gotRecordEvent(None, None)  # prime self.recordings with the current count

    def __del__(self):
        self.destroy()

    def gotRecordEvent(self, service, event):
        # Track how many real recordings are active; clean when one just ended.
        self.recordings = len(self.session.nav.getRecordings(False, pNavigation.isRealRecording))
        if event == iRecordableService.evEnd:
            self.cleanIfIdle()

    def destroy(self):
        # Unhook from the navigation event list; safe to call more than once.
        if self.session is not None:
            self.session.nav.record_event.remove(self.gotRecordEvent)
        self.session = None

    def cleanIfIdle(self):  # RecordTimer calls this when preparing a recording. That is a nice moment to clean up.
        if self.recordings:
            print("[Trashcan] %d recording(s) are in progress." % self.recordings)
            return
        # Files older than the configured number of days are always removed ...
        timeLimit = int(time()) - (config.usage.movielist_trashcan_days.value * 3600 * 24)
        # ... and more may go to keep the configured amount of space (GB) free.
        reserveBytes = 1024 * 1024 * 1024 * config.usage.movielist_trashcan_reserve.value
        clean(timeLimit, reserveBytes)
def clean(timeLimit, reserveBytes):
    """Queue a trashcan cleanup job unless one is already pending or the
    trashcan feature is disabled."""
    cleaningName = _("Cleaning Trashcan")
    alreadyQueued = any(str(job.name).startswith(cleaningName) for job in jobManager.getPendingJobs())
    if alreadyQueued:
        print("[Trashcan] Trashcan cleanup is already running.")
    elif not config.usage.movielist_trashcan.value:
        print("[Trashcan] Trashcan cleanup is disabled.")
    else:
        job = Job(cleaningName)
        task = CleanTrashTask(job, cleaningName)
        task.openFiles(timeLimit, reserveBytes)
        jobManager.AddJob(job)
def cleanAll(path=None):
    """Erase everything inside the trashcan that serves *path*."""
    trashcan = getTrashcan(path)
    if isdir(trashcan):
        # Bottom-up walk so directories are empty by the time we try to remove them.
        for root, dirs, files in walk(trashcan, topdown=False):
            for file in files:
                path = join(root, file)
                try:
                    eBackgroundFileEraser.getInstance().erase(path)
                except Exception as err:
                    print("[Trashcan] Error: Failed to erase '%s'! (%s)" % (path, err))
            for dir in dirs:  # Remove empty directories if possible.
                path = join(root, dir)
                try:
                    rmdir(path)
                except OSError as err:
                    # ENOTEMPTY is expected when an erase above failed; stay quiet.
                    if err.errno != ENOTEMPTY:
                        print("[Trashcan] Error %d: Unable to remove directory '%s'! (%s)" % (err.errno, path, err.strerror))
    else:
        print("[Trashcan] Trashcan '%s' is not a directory!" % trashcan)
def initTrashcan(session):
    """Create the module-level Trashcan singleton bound to *session*."""
    global instance
    instance = Trashcan(session)
class CleanTrashTask(PythonTask):
    """Background job that prunes every trashcan found on the mounted filesystems."""

    def openFiles(self, timeLimit, reserveBytes):
        # timeLimit: ctime threshold - anything older is always deleted.
        # reserveBytes: amount of free space to (re)claim per filesystem.
        self.timeLimit = timeLimit
        self.reserveBytes = reserveBytes

    def work(self):
        print("[Trashcan] Probing for trashcan folders.")
        lines = []
        lines = fileReadLines("/proc/mounts", default=lines, source=MODULE_NAME)
        mounts = []
        for line in lines:
            parts = line.strip().split()
            if parts[1] == "/media/autofs":
                continue  # skip the autofs master mount itself
            # Network mounts are only cleaned when explicitly enabled in config.
            if config.usage.movielist_trashcan_network_clean.value and (parts[1].startswith("/media/net") or parts[1].startswith("/media/autofs")):
                mounts.append(parts[1])
            elif not parts[1].startswith("/media/net") and not parts[1].startswith("/media/autofs"):
                mounts.append(parts[1])
        matches = []
        for mount in mounts:
            # Trashcans can live at the mount root and/or in the movie subdirectory.
            if isdir(join(mount, TRASHCAN)):
                matches.append(join(mount, TRASHCAN))
            if isdir(join(mount, "movie", TRASHCAN)):
                matches.append(join(mount, "movie", TRASHCAN))
        print("[Trashcan] Found the following trashcans '%s'." % "', '".join(matches))
        for trashcan in matches:
            print("[Trashcan] Looking in trashcan '%s'." % trashcan)
            trashcanSize = getTrashcanSize(trashcan)
            try:
                trashcanStatus = statvfs(trashcan)
                freeSpace = trashcanStatus.f_bfree * trashcanStatus.f_bsize
            except OSError as err:
                print("[Trashcan] Error %d: Unable to get status for directory '%s'! (%s)" % (err.errno, trashcan, err.strerror))
                freeSpace = 0
            bytesToRemove = self.reserveBytes - freeSpace  # positive -> must free this much
            print("[Trashcan] Trashcan '%s' size is %d bytes." % (trashcan, trashcanSize))
            candidates = []
            size = 0
            for root, dirs, files in walk(trashcan, topdown=False):
                for file in files:
                    try:
                        path = join(root, file)
                        status = stat(path)
                        if status.st_ctime < self.timeLimit:
                            # Older than the retention window: delete unconditionally.
                            eBackgroundFileEraser.getInstance().erase(path)
                            bytesToRemove -= status.st_size
                        else:
                            # Newer file: keep it as a candidate for space-driven deletion.
                            candidates.append((status.st_ctime, path, status.st_size))
                            size += status.st_size
                    except OSError as err:
                        print("[Trashcan] Error %d: Unable to get status for '%s'! (%s)" % (err.errno, path, err.strerror))
                    except Exception as err:
                        print("[Trashcan] Error: Failed to erase '%s'! (%s)" % (file, err))
                for dir in dirs:  # Remove empty directories if possible.
                    try:
                        path = join(root, dir)
                        rmdir(path)
                    except OSError as err:
                        if err.errno != ENOTEMPTY:
                            print("[Trashcan] Error %d: Unable to remove directory '%s'! (%s)" % (err.errno, path, err.strerror))
            candidates.sort()  # Now we have a list of ctime, candidates, size. Sorted by ctime (deletion time).
            # Delete oldest-first until enough space has been reclaimed.
            for pathTime, path, pathSize in candidates:
                if bytesToRemove < 0:
                    break
                try:  # Sometimes the path doesn't exist. This can happen if trashcan is on a network or other code is emptying the trashcan at same time.
                    eBackgroundFileEraser.getInstance().erase(path)
                except Exception as err:
                    print("[Trashcan] Error: Failed to erase '%s'! (%s)" % (path, err))  # Should we ignore any deletion errors?
                bytesToRemove -= pathSize
                size -= pathSize
            print("[Trashcan] Trashcan '%s' is now using %d bytes." % (trashcan, size))
        if not matches:
            print("[Trashcan] No trashcans found!")
class TrashInfo(VariableText, GUIComponent):
    """Label widget that shows the size of the trashcan serving a given path."""

    GUI_WIDGET = eLabel

    def __init__(self, path):
        # NOTE(review): *path* is accepted but unused here and no initial
        # update() is performed - presumably callers invoke update() themselves;
        # confirm against the skins/screens that instantiate this widget.
        VariableText.__init__(self)
        GUIComponent.__init__(self)

    def update(self, path):
        self.setText("%s: %s" % (_("Trashcan"), scaleNumber(getTrashcanSize(getTrashcan(path)))))
6,841 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWorkloadNetworkPublicIPResult',
'AwaitableGetWorkloadNetworkPublicIPResult',
'get_workload_network_public_ip',
'get_workload_network_public_ip_output',
]
@pulumi.output_type
class GetWorkloadNetworkPublicIPResult:
    """
    NSX Public IP Block
    """
    def __init__(__self__, display_name=None, id=None, name=None, number_of_public_ips=None, provisioning_state=None, public_ip_block=None, METHOD_NAME=None):
        # Defensive type checks: values arrive loosely typed from the Pulumi engine.
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if number_of_public_ips and not isinstance(number_of_public_ips, float):
            raise TypeError("Expected argument 'number_of_public_ips' to be a float")
        pulumi.set(__self__, "number_of_public_ips", number_of_public_ips)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_block and not isinstance(public_ip_block, str):
            raise TypeError("Expected argument 'public_ip_block' to be a str")
        pulumi.set(__self__, "public_ip_block", public_ip_block)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        Display name of the Public IP Block.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="numberOfPublicIPs")
    def number_of_public_ips(self) -> Optional[float]:
        """
        Number of Public IPs requested.
        """
        return pulumi.get(self, "number_of_public_ips")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPBlock")
    def public_ip_block(self) -> str:
        """
        CIDR Block of the Public IP Block.
        """
        return pulumi.get(self, "public_ip_block")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWorkloadNetworkPublicIPResult(GetWorkloadNetworkPublicIPResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Lets the already-resolved result be awaited like a future; the dead
        # `if False: yield` makes this a generator without ever suspending.
        if False:
            yield self
        return GetWorkloadNetworkPublicIPResult(
            display_name=self.display_name,
            id=self.id,
            name=self.name,
            number_of_public_ips=self.number_of_public_ips,
            provisioning_state=self.provisioning_state,
            public_ip_block=self.public_ip_block,
            METHOD_NAME=self.METHOD_NAME)
def get_workload_network_public_ip(private_cloud_name: Optional[str] = None,
                                   public_ip_id: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadNetworkPublicIPResult:
    """
    NSX Public IP Block


    :param str private_cloud_name: Name of the private cloud
    :param str public_ip_id: NSX Public IP Block identifier. Generally the same as the Public IP Block's display name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Keys use the provider's camelCase wire names, not the Python snake_case.
    __args__ = dict()
    __args__['privateCloudName'] = private_cloud_name
    __args__['publicIPId'] = public_ip_id
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider; .value is the raw dict.
    __ret__ = pulumi.runtime.invoke('azure-native:avs/v20230301:getWorkloadNetworkPublicIP', __args__, opts=opts, typ=GetWorkloadNetworkPublicIPResult).value

    return AwaitableGetWorkloadNetworkPublicIPResult(
        display_name=pulumi.get(__ret__, 'display_name'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        number_of_public_ips=pulumi.get(__ret__, 'number_of_public_ips'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        public_ip_block=pulumi.get(__ret__, 'public_ip_block'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_workload_network_public_ip)
def get_workload_network_public_ip_output(private_cloud_name: Optional[pulumi.Input[str]] = None,
                                          public_ip_id: Optional[pulumi.Input[str]] = None,
                                          resource_group_name: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkPublicIPResult]:
    """
    NSX Public IP Block


    :param str private_cloud_name: Name of the private cloud
    :param str public_ip_id: NSX Public IP Block identifier. Generally the same as the Public IP Block's display name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func wraps the plain function above
    # so Output-typed inputs are awaited before the invoke runs.
    ...
6,842 | get api token | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
#
# REQUIREMENTS:
# 1. sudo apt-get install chromium-chromedriver
# 2. pip install selenium
# 3. ../secrets/creds.json with buyers email/password and account client_id/secret
# HOW TO USE:
# python paypal_transaction_generator.py - will generate 3 transactions by default
# python paypal_transaction_generator.py 10 - will generate 10 transactions
import json
import random
import sys
from time import sleep
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# from pprint import pprint
PAYMENT_DATA = {
"intent": "sale",
"payer": {"payment_method": "paypal"},
"transactions": [
{
"amount": {
"total": "30.11",
"currency": "USD",
"details": {
"subtotal": "30.00",
"tax": "0.07",
"shipping": "0.03",
"handling_fee": "1.00",
"shipping_discount": "-1.00",
"insurance": "0.01",
},
},
"description": "This is the payment transaction description.",
"custom": "EBAY_EMS_90048630020055",
"invoice_number": "CHAMGE_IT",
"payment_options": {"allowed_payment_method": "INSTANT_FUNDING_SOURCE"},
"soft_descriptor": "ECHI5786755",
"item_list": {
"items": [
{
"name": "hat",
"description": "Brown color hat",
"quantity": "5",
"price": "3",
"tax": "0.01",
"sku": "1",
"currency": "USD",
},
{
"name": "handbag",
"description": "Black color hand bag",
"quantity": "1",
"price": "15",
"tax": "0.02",
"sku": "product34",
"currency": "USD",
},
],
"shipping_address": {
"recipient_name": "Hello World",
"line1": "4thFloor",
"line2": "unit#34",
"city": "SAn Jose",
"country_code": "US",
"postal_code": "95131",
"phone": "011862212345678",
"state": "CA",
},
},
}
],
"note_to_payer": "Contact us for any questions on your order.",
"redirect_urls": {"return_url": "https://example.com", "cancel_url": "https://example.com"},
}
def read_json(filepath):
    """Load and return the JSON document stored at *filepath*.

    Uses ``json.load`` on the open file object and an explicit UTF-8 encoding
    so the credentials file decodes the same way regardless of locale.
    """
    with open(filepath, encoding="utf-8") as f:
        return json.load(f)
def METHOD_NAME():
    """Fetch an OAuth2 client-credentials access token for the PayPal sandbox API."""
    client_id = CREDS.get("client_id")
    secret = CREDS.get("secret")
    token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
    data = "grant_type=client_credentials"
    headers = {"Accept": "application/json", "Accept-Language": "en_US"}
    # HTTP basic auth with the app credentials, as the token endpoint requires.
    auth = (client_id, secret)
    response = requests.request(method="POST", url=token_refresh_endpoint, data=data, headers=headers, auth=auth)
    response_json = response.json()
    # print(response_json)
    API_TOKEN = response_json["access_token"]
    return API_TOKEN
def random_digits(digits):
    """Return a uniformly random integer with exactly *digits* decimal digits."""
    # randrange(a, b) is equivalent to randint(a, b - 1).
    lowest = 10 ** (digits - 1)
    return random.randrange(lowest, lowest * 10)
def make_payment():
    """Create a sandbox payment and return its (approval_url, execute_url) links."""
    # Fresh invoice number for every payment - PayPal rejects duplicates.
    PAYMENT_DATA["transactions"][0]["invoice_number"] = random_digits(11)
    response = requests.request(
        method="POST", url="https://api-m.sandbox.paypal.com/v1/payments/payment", headers=headers, data=json.dumps(PAYMENT_DATA)
    )
    response_json = response.json()
    # pprint(response_json)
    # Initialize all three up front so a missing link cannot raise NameError
    # in the print/return below (the original left self_url unbound).
    approval_url = ""
    execute_url = ""
    self_url = ""
    for link in response_json["links"]:
        if link["rel"] == "approval_url":
            approval_url = link["href"]
        elif link["rel"] == "execute":
            execute_url = link["href"]
        elif link["rel"] == "self":
            self_url = link["href"]
    print(f"Payment made: {self_url}")
    return approval_url, execute_url
# APPROVE PAYMENT
def login():
    """Open Chrome and sign in to the PayPal sandbox with the buyer account."""
    # NOTE(review): uses the Selenium 3 find_element_by_* API; Selenium 4
    # removed these helpers - confirm the pinned selenium version before upgrading.
    driver = webdriver.Chrome("/usr/bin/chromedriver")
    # SIGN_IN
    driver.get("https://www.sandbox.paypal.com/ua/signin")
    driver.find_element_by_id("email").send_keys(CREDS["buyer_username"])
    driver.find_element_by_id("btnNext").click()
    sleep(2)  # wait for the password step to render
    driver.find_element_by_id("password").send_keys(CREDS["buyer_password"])
    driver.find_element_by_id("btnLogin").click()
    return driver
def approve_payment(driver, url):
    """Approve a pending payment in the browser as the logged-in buyer.
    Accepts the cookie banner once per session (tracked via the global
    ``cookies_accepted`` flag), clicks the pay button, then waits for the
    redirect to the return_url page (titled "Example Domain").
    """
    driver.get(url)
    global cookies_accepted
    sleep(3)
    if not cookies_accepted:
        cookies = driver.find_element_by_id("acceptAllButton")
        if cookies:
            cookies.click()
            cookies_accepted = True
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    element = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, "payment-submit-btn")))
    sleep(1)
    element.click()
    # sleep(5)
    # driver.find_element_by_id("payment-submit-btn").click()
    wait = WebDriverWait(driver, 5)
    # redirect target is https://example.com (see PAYMENT_DATA redirect_urls)
    wait.until(EC.title_is("Example Domain"))
    print(f"Payment approved: {driver.current_url}")
def execute_payment(url):
    """Execute (capture) an approved payment via its execute URL.
    NOTE(review): payer_id is hard-coded to a single sandbox buyer account —
    confirm it matches the account behind CREDS["buyer_username"].
    """
    response = requests.request(method="POST", url=url, data='{"payer_id": "ZE5533HZPGMC6"}', headers=headers)
    response_json = response.json()
    print(f'Payment executed: {url} with STATE: {response_json["state"]}')
# Script entry: create, approve and execute N sandbox payments end-to-end.
# Transaction count comes from argv[1] (default 3).
TOTAL_TRANSACTIONS = int(sys.argv[1]) if len(sys.argv) > 1 else 3
CREDS = read_json("../secrets/creds.json")
# bearer token is fetched once and reused by make_payment/execute_payment
headers = {"Authorization": f"Bearer {METHOD_NAME()}", "Content-Type": "application/json"}
driver = login()
cookies_accepted = False
for i in range(TOTAL_TRANSACTIONS):
    print(f"Payment #{i}")
    approval_url, execute_url = make_payment()
    approve_payment(driver, approval_url)
    execute_payment(execute_url)
driver.quit() |
6,843 | test clone | import os
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from repo2docker.contentproviders import Mercurial
from repo2docker.contentproviders.mercurial import args_enabling_topic
# Skip flag: any value other than "", "0", "false" or "no" disables the
# Mercurial tests (they need the hg binary and its topic extension).
SKIP_HG = os.environ.get("REPO2DOCKER_SKIP_HG_TESTS", "").lower() not in {
    "",
    "0",
    "false",
    "no",
}
# Reusable marker applied to every hg-dependent test below.
skip_if_no_hg_tests = pytest.mark.skipif(
    SKIP_HG,
    reason="REPO2DOCKER_SKIP_HG_TESTS",
)
@skip_if_no_hg_tests
def test_if_mercurial_is_available():
    """
    To skip the tests related to Mercurial repositories (to avoid to install
    Mercurial and hg-evolve), one can use the environment variable
    REPO2DOCKER_SKIP_HG_TESTS.
    """
    # raises CalledProcessError (failing the test) if hg is not installed
    subprocess.check_output(["hg", "version"])
@skip_if_no_hg_tests
def test_if_topic_is_available():
    """Check that the topic extension can be enabled"""
    # getoutput merges stderr, where a failed extension import is reported
    output = subprocess.getoutput("hg version -v --config extensions.topic=")
    assert "failed to import extension topic" not in output
def _add_content_to_hg(repo_dir):
    """Add content to file 'test' in hg repository and commit."""
    # use append mode so this can be called multiple times
    with open(Path(repo_dir) / "test", "a") as f:
        f.write("Hello")
    def check_call(command):
        # every hg invocation needs the topic extension enabled
        subprocess.check_call(command + args_enabling_topic, cwd=repo_dir)
    check_call(["hg", "add", "test"])
    check_call(["hg", "commit", "-m", "Test commit"])
    check_call(["hg", "topic", "test-topic"])
    check_call(["hg", "commit", "-m", "Test commit in topic test-topic"])
    # leave the working copy back on default, off the topic
    check_call(["hg", "up", "default"])
def _get_node_id(repo_dir):
    """Get repository's current commit node ID (currently SHA1).
    Runs ``hg identify -i`` in *repo_dir* (topic extension enabled) and
    returns the stripped stdout.
    """
    # check_output waits for the child and raises on a non-zero exit,
    # unlike the previous bare Popen that was never waited on (leaking a
    # zombie process and silently ignoring hg failures).
    output = subprocess.check_output(
        ["hg", "identify", "-i"] + args_enabling_topic,
        cwd=repo_dir,
    )
    return output.decode().strip()
@pytest.fixture()
def hg_repo():
    """
    Make a dummy hg repo in which user can perform hg operations
    Should be used as a contextmanager, it will delete directory when done
    """
    with TemporaryDirectory() as hgdir:
        subprocess.check_call(["hg", "init"], cwd=hgdir)
        # yield inside the context manager so cleanup runs after the test
        yield hgdir
@pytest.fixture()
def hg_repo_with_content(hg_repo):
    """Create a hg repository with content"""
    _add_content_to_hg(hg_repo)
    node_id = _get_node_id(hg_repo)
    # (repo_path, tip node id) — tests unpack this pair
    yield hg_repo, node_id
@skip_if_no_hg_tests
def test_detect_mercurial(hg_repo_with_content, repo_with_content):
    """detect() matches local hg working copies only — not URLs or git repos."""
    mercurial = Mercurial()
    assert mercurial.detect("this-is-not-a-directory") is None
    assert mercurial.detect("https://github.com/jupyterhub/repo2docker") is None
    git_repo = repo_with_content[0]
    assert mercurial.detect(git_repo) is None
    hg_repo = hg_repo_with_content[0]
    assert mercurial.detect(hg_repo) == {"repo": hg_repo, "ref": None}
@skip_if_no_hg_tests
def METHOD_NAME(hg_repo_with_content):
    """Test simple hg clone to a target dir"""
    upstream, node_id = hg_repo_with_content
    with TemporaryDirectory() as clone_dir:
        spec = {"repo": upstream}
        mercurial = Mercurial()
        # fetch() is a generator of progress messages; drain it fully
        for _ in mercurial.fetch(spec, clone_dir):
            pass
        assert (Path(clone_dir) / "test").exists()
        # content_id must reflect the cloned tip
        assert mercurial.content_id == node_id
@skip_if_no_hg_tests
def test_bad_ref(hg_repo_with_content):
    """
    Test trying to update to a ref that doesn't exist
    """
    upstream, node_id = hg_repo_with_content
    with TemporaryDirectory() as clone_dir:
        spec = {"repo": upstream, "ref": "does-not-exist"}
        # fetch() must surface the unknown ref as a ValueError
        with pytest.raises(ValueError):
            for _ in Mercurial().fetch(spec, clone_dir):
                pass
@skip_if_no_hg_tests
def test_ref_topic(hg_repo_with_content):
    """
    Test trying to update to a topic
    To skip this test (to avoid to install Mercurial and hg-evolve), one can
    use the environment variable REPO2DOCKER_SKIP_HG_TESTS.
    """
    upstream, node_id = hg_repo_with_content
    # re-resolve node_id to the head of the topic via the topic() revset
    node_id = subprocess.Popen(
        ["hg", "identify", "-i", "-r", "topic(test-topic)"] + args_enabling_topic,
        stdout=subprocess.PIPE,
        cwd=upstream,
    )
    node_id = node_id.stdout.read().decode().strip()
    with TemporaryDirectory() as clone_dir:
        spec = {"repo": upstream, "ref": "test-topic"}
        mercurial = Mercurial()
        for _ in mercurial.fetch(spec, clone_dir):
            pass
        assert (Path(clone_dir) / "test").exists()
        assert mercurial.content_id == node_id
6,844 | test float | # A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Exercise the tf_* functions exported by the _ctypes_test module.
    Each tf_<code> function takes one argument of the matching C type,
    records it in a DLL global (read back via S()/U()) and returns the
    value divided by 3; the tf_b<code> variants take a leading byte
    argument that is ignored for the result.
    """
    _dll = CDLL(_ctypes_test.__file__)
    def S(self):
        # last signed argument the DLL saw
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
    def U(self):
        # last unsigned argument the DLL saw
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)
    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)
    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)
    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)
    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)
    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)
    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)
    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_ulong_plus(self):
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        # NOTE(review): c_char with a str argument is Python-2 style; under
        # Python 3 c_char expects bytes (b' ') — confirm target interpreter.
        self.assertEqual(self._dll.tf_bL(' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)
    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)
    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)
    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)
    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)
    def METHOD_NAME(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)
    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)
    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)
    def test_callwithresult(self):
        # restype may be a Python callable: it post-processes the raw int result
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)
    def test_void(self):
        # restype=None maps a void C function to a Python None result
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    # non-Windows: provide a stub so the name exists; the subclass below is
    # then skipped by need_symbol.
    def stdcall_dll(*_): pass
else:
    class stdcall_dll(WinDLL):
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            # resolve the "s_"-prefixed stdcall symbol and cache it on self
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)
            return func
@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
    # same test suite, run against the stdcall exports
    _dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
    unittest.main()
6,845 | object create | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
from ..base import is_data_descriptor
import six
def Object(this, args):
    """Implementation of the JS ``Object(value)`` call.
    Null/undefined arguments yield a fresh empty object; anything else is
    converted with ToObject.
    """
    val = get_arg(args, 0)
    if is_null(val) or is_undefined(val):
        return args.space.NewObject()
    return to_object(val, args.space)
def METHOD_NAME(args, space):
    """Implementation of the JS ``new Object(value)`` constructor.
    Objects are returned unchanged, primitives (number/string/boolean) are
    boxed via ToObject, and no/other arguments yield a fresh empty object.
    """
    if len(args):
        val = get_arg(args, 0)
        if is_object(val):
            # Implementation dependent, but my will simply return :)
            return val
        elif type(val) in (NUMBER_TYPE, STRING_TYPE, BOOLEAN_TYPE):
            return to_object(val, space)
    return space.NewObject()
class ObjectMethods:
    """Static methods of the JS ``Object`` builtin (getPrototypeOf,
    defineProperty, seal/freeze, keys, ...).
    Each method receives *this* and an *args* wrapper; JS property
    descriptors are plain dicts on ``obj.own``.
    """
    def getPrototypeOf(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.getPrototypeOf called on non-object')
        return null if obj.prototype is None else obj.prototype
    def getOwnPropertyDescriptor(this, args):
        obj = get_arg(args, 0)
        prop = get_arg(args, 1)
        if not is_object(obj):
            raise MakeError(
                'TypeError',
                'Object.getOwnPropertyDescriptor called on non-object')
        # missing property -> None, converted to JS undefined downstream
        desc = obj.own.get(to_string(prop))
        return convert_to_js_type(desc, args.space)
    def getOwnPropertyNames(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError(
                'TypeError',
                'Object.getOwnPropertyDescriptor called on non-object')
        return args.space.ConstructArray(obj.own.keys())
    def create(this, args):
        obj = get_arg(args, 0)
        if not (is_object(obj) or is_null(obj)):
            raise MakeError('TypeError',
                            'Object prototype may only be an Object or null')
        temp = args.space.NewObject()
        temp.prototype = None if is_null(obj) else obj
        if len(args) > 1 and not is_undefined(args[1]):
            # delegate the descriptor map to defineProperties; on py2 the
            # unbound method must be unwrapped via __func__
            if six.PY2:
                args.tup = (args[1], )
                ObjectMethods.defineProperties.__func__(temp, args)
            else:
                args.tup = (args[1], )
                ObjectMethods.defineProperties(temp, args)
        return temp
    def defineProperty(this, args):
        obj = get_arg(args, 0)
        prop = get_arg(args, 1)
        attrs = get_arg(args, 2)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.defineProperty called on non-object')
        name = to_string(prop)
        if not obj.define_own_property(name, ToPropertyDescriptor(attrs),
                                       False):
            raise MakeError('TypeError', 'Cannot redefine property: %s' % name)
        return obj
    def defineProperties(this, args):
        obj = get_arg(args, 0)
        properties = get_arg(args, 1)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.defineProperties called on non-object')
        props = to_object(properties, args.space)
        for k, v in props.own.items():
            # only enumerable entries of the descriptor map are applied
            if not v.get('enumerable'):
                continue
            # NOTE(review): `unicode` appears to come from a star import;
            # verify it is defined under Python 3.
            desc = ToPropertyDescriptor(props.get(unicode(k)))
            if not obj.define_own_property(unicode(k), desc, False):
                raise MakeError('TypeError',
                                'Failed to define own property: %s' % k)
        return obj
    def seal(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError', 'Object.seal called on non-object')
        for desc in obj.own.values():
            desc['configurable'] = False
        obj.extensible = False
        return obj
    def freeze(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError', 'Object.freeze called on non-object')
        # freeze = seal + make data properties read-only
        for desc in obj.own.values():
            desc['configurable'] = False
            if is_data_descriptor(desc):
                desc['writable'] = False
        obj.extensible = False
        return obj
    def preventExtensions(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.preventExtensions on non-object')
        obj.extensible = False
        return obj
    def isSealed(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.isSealed called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc.get('configurable'):
                return False
        return True
    def isFrozen(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.isFrozen called on non-object')
        if obj.extensible:
            return False
        for desc in obj.own.values():
            if desc.get('configurable'):
                return False
            if is_data_descriptor(desc) and desc.get('writable'):
                return False
        return True
    def isExtensible(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError',
                            'Object.isExtensible called on non-object')
        return obj.extensible
    def keys(this, args):
        obj = get_arg(args, 0)
        if not is_object(obj):
            raise MakeError('TypeError', 'Object.keys called on non-object')
        return args.space.ConstructArray([
            unicode(e) for e, d in six.iteritems(obj.own)
            if d.get('enumerable')
        ])
# some utility functions:
def ToPropertyDescriptor(obj):  # page 38 (50 absolute)
    """Convert a JS object into an internal property-descriptor dict.
    Copies enumerable/configurable/value/writable/get/set when present,
    validates that getters/setters are callable (or undefined), and
    rejects mixed accessor+data descriptors.
    """
    if not is_object(obj):
        raise MakeError('TypeError',
                        'Can\'t convert non-object to property descriptor')
    desc = {}
    if obj.has_property('enumerable'):
        desc['enumerable'] = to_boolean(obj.get('enumerable'))
    if obj.has_property('configurable'):
        desc['configurable'] = to_boolean(obj.get('configurable'))
    if obj.has_property('value'):
        desc['value'] = obj.get('value')
    if obj.has_property('writable'):
        desc['writable'] = to_boolean(obj.get('writable'))
    if obj.has_property('get'):
        cand = obj.get('get')
        if not (is_undefined(cand) or is_callable(cand)):
            raise MakeError(
                'TypeError',
                'Invalid getter (it has to be a function or undefined)')
        desc['get'] = cand
    if obj.has_property('set'):
        cand = obj.get('set')
        if not (is_undefined(cand) or is_callable(cand)):
            raise MakeError(
                'TypeError',
                'Invalid setter (it has to be a function or undefined)')
        desc['set'] = cand
    # accessor and data attributes are mutually exclusive
    if ('get' in desc or 'set' in desc) and ('value' in desc
                                             or 'writable' in desc):
        raise MakeError(
            'TypeError',
            'Invalid property. A property cannot both have accessors and be writable or have a value.'
        )
    return desc
6,846 | registration view | from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils.translation import gettext_lazy as _
from oioioi.base.utils.query_helpers import Q_always_true
from oioioi.base.utils.redirect import safe_redirect
from oioioi.contests.models import Submission
from oioioi.mp.models import MPRegistration, SubmissionScoreMultiplier
from oioioi.mp.score import FloatScore
from oioioi.participants.controllers import ParticipantsController
from oioioi.participants.models import Participant
from oioioi.participants.utils import is_participant
from oioioi.programs.controllers import ProgrammingContestController
from oioioi.rankings.controllers import DefaultRankingController
CONTEST_RANKING_KEY = 'c'
class MPRegistrationController(ParticipantsController):
    """Participants controller for Master of Programming contests.
    Handles the registration form flow and exposes MP-specific form and
    admin classes (imported lazily to avoid circular imports).
    """
    registration_template = 'mp/registration.html'
    @property
    def form_class(self):
        from oioioi.mp.forms import MPRegistrationForm
        return MPRegistrationForm
    @property
    def participant_admin(self):
        from oioioi.mp.admin import MPRegistrationParticipantAdmin
        return MPRegistrationParticipantAdmin
    @classmethod
    def anonymous_can_enter_contest(self):
        return True
    def allow_login_as_public_name(self):
        return True
    # Redundant because of filter_visible_contests, but saves a db query
    def can_enter_contest(self, request):
        return True
    def visible_contests_query(self, request):
        return Q_always_true()
    def can_register(self, request):
        return super().is_registration_open(request)
    def METHOD_NAME(self, request):
        """Render/handle the registration form (GET shows, POST registers).
        Form data stashed in the session (by an earlier redirect) takes
        precedence over a freshly built form.
        """
        participant = self._get_participant_for_form(request)
        if 'mp_mpregistrationformdata' in request.session:
            # pylint: disable=not-callable
            form = self.form_class(request.session['mp_mpregistrationformdata'])
            del request.session['mp_mpregistrationformdata']
        else:
            form = self.get_form(request, participant)
        form.set_terms_accepted_text(self.get_terms_accepted_phrase())
        if request.method == 'POST':
            # pylint: disable=maybe-no-member
            if form.is_valid():
                participant, created = Participant.objects.get_or_create(
                    contest=self.contest, user=request.user
                )
                self.handle_validated_form(request, form, participant)
                if 'next' in request.GET:
                    return safe_redirect(request, request.GET['next'])
                else:
                    return redirect('default_contest_view', contest_id=self.contest.id)
        can_unregister = False
        if participant:
            can_unregister = self.can_unregister(request, participant)
        context = {
            'form': form,
            'participant': participant,
            'can_unregister': can_unregister,
            'contest_name': self.contest.name,
        }
        return TemplateResponse(request, self.registration_template, context)
    def mixins_for_admin(self):
        from oioioi.participants.admin import TermsAcceptedPhraseAdminMixin
        return super(MPRegistrationController, self).mixins_for_admin() + (
            TermsAcceptedPhraseAdminMixin,
        )
    def can_change_terms_accepted_phrase(self, request):
        # terms may only be edited before anyone has registered under them
        return not MPRegistration.objects.filter(
            participant__contest=request.contest
        ).exists()
class MPContestController(ProgrammingContestController):
    """Contest controller for Master of Programming.
    Allows late submissions scored with a multiplier while a
    SubmissionScoreMultiplier is active for the contest.
    """
    description = _("Master of Programming")
    create_forum = False
    show_email_in_participants_data = True
    def registration_controller(self):
        return MPRegistrationController(self.contest)
    def ranking_controller(self):
        return MPRankingController(self.contest)
    def update_user_result_for_problem(self, result):
        """Submissions sent during the round are scored as normal.
        Submissions sent while the round was over but SubmissionScoreMultiplier was active
        are scored with given multiplier.
        """
        submissions = Submission.objects.filter(
            problem_instance=result.problem_instance,
            user=result.user,
            kind='NORMAL',
            score__isnull=False,
        )
        if submissions:
            best_submission = None
            for submission in submissions:
                ssm = SubmissionScoreMultiplier.objects.filter(
                    contest=submission.problem_instance.contest,
                )
                score = FloatScore(submission.score.value)
                rtimes = self.get_round_times(None, submission.problem_instance.round)
                # in-round: full score; post-round with active multiplier:
                # scaled score; otherwise the submission does not count
                if rtimes.is_active(submission.date):
                    pass
                elif ssm.exists() and ssm[0].end_date >= submission.date:
                    score = score * ssm[0].multiplier
                else:
                    score = None
                # NOTE(review): if the current best has score None, the
                # `best_submission[1] < score` comparison relies on
                # FloatScore handling None — confirm.
                if not best_submission or (
                    score is not None and best_submission[1] < score
                ):
                    best_submission = [submission, score]
            result.score = best_submission[1]
            result.status = best_submission[0].status
    def can_submit(self, request, problem_instance, check_round_times=True):
        """Contest admin can always submit.
        Participant can submit if:
        a. round is active
        OR
        b. SubmissionScoreMultiplier exists and it's end_time is ahead
        """
        if request.user.is_anonymous:
            return False
        if request.user.has_perm('contests.contest_admin', self.contest):
            return True
        if not is_participant(request):
            return False
        rtimes = self.get_round_times(None, problem_instance.round)
        round_over_contest_running = rtimes.is_past(
            request.timestamp
        ) and SubmissionScoreMultiplier.objects.filter(
            contest=problem_instance.contest,
            end_date__gte=request.timestamp,
        )
        return (
            super(MPContestController, self).can_submit(
                request, problem_instance, check_round_times
            )
            or round_over_contest_running
        )
class MPRankingController(DefaultRankingController):
    """Changes to Default Ranking:
    1. Sum column is just after User column
    2. Rounds with earlier start_date are more to the left
    """
    description = _("MP style ranking")
    def _iter_rounds(self, can_see_all, timestamp, partial_key, request=None):
        # yield only rounds whose results the viewer may see, newest first
        ccontroller = self.contest.controller
        queryset = self.contest.round_set.all().order_by("-start_date")
        if partial_key != CONTEST_RANKING_KEY:
            queryset = queryset.filter(id=partial_key).order_by("-start_date")
        for round in queryset:
            times = ccontroller.get_round_times(request, round)
            if can_see_all or times.public_results_visible(timestamp):
                yield round
    def _filter_pis_for_ranking(self, partial_key, queryset):
        return queryset.order_by("-round__start_date")
    def _render_ranking_page(self, key, data, page):
        request = self._fake_request(page)
        data['is_admin'] = self.is_admin_key(key)
        return render_to_string('mp/ranking.html', context=data, request=request)
    def _allow_zero_score(self):
        return False
6,847 | configure loader modules | import pytest
import salt.fileserver.roots as roots
from salt.utils.odict import OrderedDict
from tests.support.mock import patch
pytestmark = [
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="function")
def METHOD_NAME(minion_opts):
    """Map the roots fileserver module to the minion opts for loader setup."""
    return {roots: {"__opts__": minion_opts}}
# nox -e pytest-zeromq-3.8(coverage=False) -- -vvv --run-slow --run-destructive tests\pytests\functional\fileserver\test_roots.py
def test_symlink_list(state_tree):
    """A symlink inside the file root is listed and mapped to its target."""
    with pytest.helpers.temp_file("target", "data", state_tree) as target:
        link = state_tree / "link"
        link.symlink_to(str(target))
        ret = roots.symlink_list({"saltenv": "base"})
        assert ret == {"link": str(target)}
@pytest.mark.parametrize(
    "env",
    ("base", "something-else", "cool_path_123", "__env__"),
)
def test_fileserver_roots_find_file_envs_path_substitution(env, minion_opts, tmp_path):
    """
    Test fileserver access to a dynamic path using __env__
    """
    fn = "test.txt"
    if env == "__env__":
        # __env__ saltenv will pass "dynamic" as saltenv and
        # expect to be routed to the "dynamic" directory
        actual_env = "dynamic"
        leaf_dir = actual_env
    else:
        # any other saltenv will pass saltenv normally and
        # expect to be routed to a static "__env__" directory
        actual_env = env
        leaf_dir = "__env__"
    envpath = tmp_path / leaf_dir
    envpath.mkdir(parents=True, exist_ok=True)
    filepath = envpath / fn
    filepath.touch()
    # Stop using OrderedDict once we drop Py3.5 support
    expected = OrderedDict()
    expected["rel"] = fn
    expected["path"] = str(filepath)
    # Stop using OrderedDict once we drop Py3.5 support
    minion_opts["file_roots"] = OrderedDict()
    minion_opts["file_roots"][env] = [str(tmp_path / leaf_dir)]
    with patch("salt.fileserver.roots.__opts__", minion_opts, create=True):
        ret = roots.find_file(fn, saltenv=actual_env)
        # the stat triple is machine-dependent; drop it before comparing
        ret.pop("stat")
        assert ret == expected
@pytest.mark.parametrize(
    "saltenv", ("base", "something-else", "cool_path_123", "__env__")
)
def test_fileserver_roots__file_lists_envs_path_substitution(
    saltenv, tmp_path, minion_opts
):
    """
    Test fileserver access to a dynamic path using __env__
    """
    # We need our saltenv directory as well as some other env directory.
    # It doesn't really matter what it is - expected saltenv and not expected
    # saltenv
    # The filenames should be different, because cache lists the filenames.
    other_env = "something_completely_different"
    other_filename = "different.txt"
    expected_filename = "test.txt"
    expected = [expected_filename]
    expected_different_ret = [other_filename]
    # __env__ saltenv will pass "dynamic" as saltenv and
    # expect to be routed to the "dynamic" directory
    actual_env = "dynamic" if saltenv == "__env__" else saltenv
    # If `__env__` is in the path and is the file roots (see
    # doc/ref/configuration/master.rst) then `__env__` will be replaced in the
    # file path with the actual saltenv. So we need the file_roots path, as
    # well as both our expected saltenv and our not expected saltenv. We also
    # need some files in the directories.
    file_roots = tmp_path / "__env__" / "cool"
    envpath = tmp_path / actual_env / "cool"
    otherpath = tmp_path / other_env / "cool"
    envpath.mkdir(parents=True, exist_ok=True)
    otherpath.mkdir(parents=True, exist_ok=True)
    (envpath / expected_filename).touch()
    (otherpath / other_filename).touch()
    # Stop using OrderedDict once we drop Py3.5 support
    minion_opts["file_roots"] = OrderedDict()
    minion_opts["file_roots"]["__env__"] = [str(file_roots)]
    with patch("salt.fileserver.roots.__opts__", minion_opts, create=True):
        # actual_env is our target. The other env doesn't really matter, but
        # it should be different than our expected one and also contain its
        # own file(s)
        ret = roots._file_lists({"saltenv": actual_env}, "files")
        different_ret = roots._file_lists({"saltenv": other_env}, "files")
    assert ret == expected
    assert different_ret != ret
    assert different_ret == expected_different_ret
6,848 | apply rst | #!/usr/bin/env python
# encoding: utf-8
# Jérôme Carretero, 2013 (zougloub)
"""
reStructuredText support (experimental)
Example::
def configure(conf):
conf.load('rst')
if not conf.env.RST2HTML:
conf.fatal('The program rst2html is required')
def build(bld):
bld(
features = 'rst',
type = 'rst2html', # rst2html, rst2pdf, ...
source = 'index.rst', # mandatory, the source
deps = 'image.png', # to give additional non-trivial dependencies
)
By default the tool looks for a set of programs in PATH.
The tools are defined in `rst_progs`.
To configure with a special program use::
$ RST2HTML=/path/to/rst2html waf configure
This tool is experimental; don't hesitate to contribute to it.
"""
import re
from waflib import Node, Utils, Task, Errors, Logs
from waflib.TaskGen import feature, before_method
rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split()
def parse_rst_node(task, node, nodes, names, seen, dirs=None):
	"""
	Recursively scan *node* for reStructuredText ``include``/``image``/
	``figure`` directives, appending resolved dependency nodes (as
	``(type, node)`` pairs) to *nodes* and unresolved paths to *names*.
	*seen* guards against include cycles; *dirs* are the directories
	searched (defaults to the node's source and build parents).
	"""
	# TODO add extensibility, to handle custom rst include tags...
	if dirs is None:
		dirs = (node.parent,node.get_bld().parent)
	if node in seen:
		return
	seen.append(node)
	code = node.read()
	# The leading ".." of a directive is escaped (\.\.) so that only real
	# directives match; the previous unescaped ".." matched ANY two
	# characters, wrongly treating e.g. "ab include:: x" as a directive.
	re_rst = re.compile(r'^\s*\.\. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$', re.M)
	for match in re_rst.finditer(code):
		ipath = match.group('file')
		itype = match.group('type')
		Logs.debug('rst: visiting %s: %s', itype, ipath)
		found = False
		for d in dirs:
			Logs.debug('rst: looking for %s in %s', ipath, d.abspath())
			found = d.find_node(ipath)
			if found:
				Logs.debug('rst: found %s as %s', ipath, found.abspath())
				nodes.append((itype, found))
				if itype == 'include':
					parse_rst_node(task, found, nodes, names, seen)
				break
		if not found:
			names.append((itype, ipath))
class docutils(Task.Task):
	"""
	Compile a rst file.
	"""
	def scan(self):
		"""
		A recursive regex-based scanner that finds rst dependencies.
		"""
		nodes = []
		names = []
		seen = []
		node = self.inputs[0]
		if not node:
			return (nodes, names)
		parse_rst_node(self, node, nodes, names, seen)
		Logs.debug('rst: %r: found the following file deps: %r', self, nodes)
		if names:
			Logs.warn('rst: %r: could not find the following file deps: %r', self, names)
		# strip the directive-type tags the parser attached to each entry
		return ([v for (t,v) in nodes], [v for (t,v) in names])
	def check_status(self, msg, retcode):
		"""
		Check an exit status and raise an error with a particular message
		:param msg: message to display if the code is non-zero
		:type msg: string
		:param retcode: condition
		:type retcode: boolean
		"""
		if retcode != 0:
			raise Errors.WafError('%r command exit status %r' % (msg, retcode))
	def run(self):
		"""
		Runs the rst compilation using docutils
		"""
		# subclasses (rst2html, rst2pdf, ...) provide the concrete command
		raise NotImplementedError()
class rst2html(docutils):
	"""Task converting rst to HTML via the configured RST2HTML program.
	Stylesheet attributes on the task generator become both scanner
	dependencies and ``--<attribute>`` command-line options.
	"""
	color = 'BLUE'
	def __init__(self, *args, **kw):
		docutils.__init__(self, *args, **kw)
		self.command = self.generator.env.RST2HTML
		self.attributes = ['stylesheet']
	def scan(self):
		nodes, names = docutils.scan(self)
		for attribute in self.attributes:
			stylesheet = getattr(self.generator, attribute, None)
			if stylesheet is not None:
				ssnode = self.generator.to_nodes(stylesheet)[0]
				nodes.append(ssnode)
				Logs.debug('rst: adding dep to %s %s', attribute, stylesheet)
		return nodes, names
	def run(self):
		# paths are made relative to the output directory, where we run
		cwdn = self.outputs[0].parent
		src = self.inputs[0].path_from(cwdn)
		dst = self.outputs[0].path_from(cwdn)
		cmd = self.command + [src, dst]
		cmd += Utils.to_list(getattr(self.generator, 'options', []))
		for attribute in self.attributes:
			stylesheet = getattr(self.generator, attribute, None)
			if stylesheet is not None:
				stylesheet = self.generator.to_nodes(stylesheet)[0]
				cmd += ['--%s' % attribute, stylesheet.path_from(cwdn)]
		return self.exec_command(cmd, cwd=cwdn.abspath())
class rst2s5(rst2html):
	"""rst2html variant driving the RST2S5 program (S5 slide shows)."""
	def __init__(self, *args, **kw):
		rst2html.__init__(self, *args, **kw)
		self.command = self.generator.env.RST2S5
		self.attributes = ['stylesheet']
class rst2latex(rst2html):
	"""rst2html variant driving the RST2LATEX program."""
	def __init__(self, *args, **kw):
		rst2html.__init__(self, *args, **kw)
		self.command = self.generator.env.RST2LATEX
		self.attributes = ['stylesheet']
class rst2xetex(rst2html):
	"""rst2html variant driving the RST2XETEX program."""
	def __init__(self, *args, **kw):
		rst2html.__init__(self, *args, **kw)
		self.command = self.generator.env.RST2XETEX
		self.attributes = ['stylesheet']
class rst2pdf(docutils):
	"""Task converting rst to PDF; rst2pdf uses ``-o`` for the output file."""
	color = 'BLUE'
	def run(self):
		cwdn = self.outputs[0].parent
		src = self.inputs[0].path_from(cwdn)
		dst = self.outputs[0].path_from(cwdn)
		cmd = self.generator.env.RST2PDF + [src, '-o', dst]
		cmd += Utils.to_list(getattr(self.generator, 'options', []))
		return self.exec_command(cmd, cwd=cwdn.abspath())
@feature('rst')
@before_method('process_source')
def METHOD_NAME(self):
	"""
	Create :py:class:`rst` or other rst-related task objects
	"""
	# normalize the target into a Node (or None when only `type` is given)
	if self.target:
		if isinstance(self.target, Node.Node):
			tgt = self.target
		elif isinstance(self.target, str):
			tgt = self.path.get_bld().make_node(self.target)
		else:
			self.bld.fatal("rst: Don't know how to build target name %s which is not a string or Node for %s" % (self.target, self))
	else:
		tgt = None
	tsk_type = getattr(self, 'type', None)
	src = self.to_nodes(self.source)
	assert len(src) == 1
	src = src[0]
	# derive the missing half of (type, target) from the one provided:
	# type "rst2<ext>" -> target "<src>.<ext>", or target extension -> type
	if tsk_type is not None and tgt is None:
		if tsk_type.startswith('rst2'):
			ext = tsk_type[4:]
		else:
			self.bld.fatal("rst: Could not detect the output file extension for %s" % self)
		tgt = src.change_ext('.%s' % ext)
	elif tsk_type is None and tgt is not None:
		out = tgt.name
		ext = out[out.rfind('.')+1:]
		self.type = 'rst2' + ext
	elif tsk_type is not None and tgt is not None:
		# the user knows what he wants
		pass
	else:
		self.bld.fatal("rst: Need to indicate task type or target name for %s" % self)
	# resolve the extra dependencies declared via the `deps` attribute
	deps_lst = []
	if getattr(self, 'deps', None):
		deps = self.to_list(self.deps)
		for filename in deps:
			n = self.path.find_resource(filename)
			if not n:
				self.bld.fatal('Could not find %r for %r' % (filename, self))
			if not n in deps_lst:
				deps_lst.append(n)
	try:
		task = self.create_task(self.type, src, tgt)
	except KeyError:
		self.bld.fatal("rst: Task of type %s not implemented (created by %s)" % (self.type, self))
	task.env = self.env
	# add the manual dependencies
	if deps_lst:
		try:
			lst = self.bld.node_deps[task.uid()]
			for n in deps_lst:
				if not n in lst:
					lst.append(n)
		except KeyError:
			self.bld.node_deps[task.uid()] = deps_lst
	inst_to = getattr(self, 'install_path', None)
	if inst_to:
		self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:])
	# prevent process_source from compiling the .rst file a second time
	self.source = []
def configure(self):
    """
    Try to find the rst programs.

    Do not raise any error if they are not found.
    You'll have to use additional code in configure() to die
    if programs were not found.
    """
    for p in rst_progs:
        self.find_program(p, mandatory=False)
|
6,849 | update message flags | from typing import List, Optional
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.actions.message_flags import (
do_mark_all_as_read,
do_mark_stream_messages_as_read,
do_update_message_flags,
)
from zerver.lib.exceptions import ErrorCode, JsonableError
from zerver.lib.narrow import (
OptionalNarrowListT,
fetch_messages,
narrow_parameter,
parse_anchor_value,
)
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.response import json_partial_success, json_success
from zerver.lib.streams import access_stream_by_id
from zerver.lib.timeout import TimeoutExpiredError, timeout
from zerver.lib.topic import user_message_exists_for_topic
from zerver.lib.validator import check_bool, check_int, check_list, to_non_negative_int
from zerver.models import UserActivity, UserProfile
def get_latest_update_message_flag_activity(user_profile: UserProfile) -> Optional[UserActivity]:
    """
    Return the user's most recent flag-update activity record, or None.

    Both the plain and the narrow-based flag-update endpoints are counted.
    """
    flag_update_queries = ["update_message_flags", "update_message_flags_for_narrow"]
    activities = UserActivity.objects.filter(
        user_profile=user_profile,
        query__in=flag_update_queries,
    )
    return activities.order_by("last_visit").last()
# NOTE: If this function name is changed, add the new name to the
# query in get_latest_update_message_flag_activity
@has_request_variables
def METHOD_NAME(
    request: HttpRequest,
    user_profile: UserProfile,
    messages: List[int] = REQ(json_validator=check_list(check_int)),
    operation: str = REQ("op"),
    flag: str = REQ(),
) -> HttpResponse:
    """Add or remove a flag on an explicit list of message IDs."""
    request_notes = RequestNotes.get_notes(request)
    assert request_notes.log_data is not None

    count = do_update_message_flags(user_profile, operation, flag, messages)

    # Log "<op> <flag>/<requested> actually <changed>" for the request line.
    target_count_str = str(len(messages))
    log_data_str = f"[{operation} {flag}/{target_count_str}] actually {count}"
    request_notes.log_data["extra"] = log_data_str

    return json_success(
        request,
        data={
            "messages": messages,  # Useless, but included for backwards compatibility.
        },
    )
# Upper bound on how many messages one narrow-based flag update may touch.
MAX_MESSAGES_PER_UPDATE = 5000


# NOTE: If this function name is changed, add the new name to the
# query in get_latest_update_message_flag_activity
@has_request_variables
def update_message_flags_for_narrow(
    request: HttpRequest,
    user_profile: UserProfile,
    anchor_val: str = REQ("anchor"),
    include_anchor: bool = REQ(json_validator=check_bool, default=True),
    num_before: int = REQ(converter=to_non_negative_int),
    num_after: int = REQ(converter=to_non_negative_int),
    narrow: OptionalNarrowListT = REQ("narrow", converter=narrow_parameter),
    operation: str = REQ("op"),
    flag: str = REQ(),
) -> HttpResponse:
    """Add or remove a flag on the messages in an anchored range of a narrow."""
    anchor = parse_anchor_value(anchor_val, use_first_unread_anchor=False)
    if num_before > 0 and num_after > 0 and not include_anchor:
        raise JsonableError(_("The anchor can only be excluded at an end of the range"))

    # Clamp such that num_before + num_after <= MAX_MESSAGES_PER_UPDATE.
    num_before = min(
        num_before, max(MAX_MESSAGES_PER_UPDATE - num_after, MAX_MESSAGES_PER_UPDATE // 2)
    )
    num_after = min(num_after, MAX_MESSAGES_PER_UPDATE - num_before)

    query_info = fetch_messages(
        narrow=narrow,
        user_profile=user_profile,
        realm=user_profile.realm,
        is_web_public_query=False,
        anchor=anchor,
        include_anchor=include_anchor,
        num_before=num_before,
        num_after=num_after,
    )

    # The first column of each result row is the message ID.
    messages = [row[0] for row in query_info.rows]
    updated_count = do_update_message_flags(user_profile, operation, flag, messages)

    return json_success(
        request,
        data={
            "processed_count": len(messages),
            "updated_count": updated_count,
            "first_processed_id": messages[0] if messages else None,
            "last_processed_id": messages[-1] if messages else None,
            "found_oldest": query_info.found_oldest,
            "found_newest": query_info.found_newest,
        },
    )
@has_request_variables
def mark_all_as_read(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Mark every message as read; partial success if the 50s timeout expires."""
    request_notes = RequestNotes.get_notes(request)

    try:
        count = timeout(50, lambda: do_mark_all_as_read(user_profile))
    except TimeoutExpiredError:
        # Client is expected to retry; some messages may already be marked.
        return json_partial_success(request, data={"code": ErrorCode.REQUEST_TIMEOUT.name})

    log_data_str = f"[{count} updated]"
    assert request_notes.log_data is not None
    request_notes.log_data["extra"] = log_data_str

    return json_success(request)
@has_request_variables
def mark_stream_as_read(
    request: HttpRequest, user_profile: UserProfile, stream_id: int = REQ(json_validator=check_int)
) -> HttpResponse:
    """Mark all messages in the given stream as read for the user."""
    # access_stream_by_id also validates the user's access to the stream.
    stream, sub = access_stream_by_id(user_profile, stream_id)
    assert stream.recipient_id is not None
    count = do_mark_stream_messages_as_read(user_profile, stream.recipient_id)

    log_data_str = f"[{count} updated]"
    log_data = RequestNotes.get_notes(request).log_data
    assert log_data is not None
    log_data["extra"] = log_data_str

    return json_success(request)
@has_request_variables
def mark_topic_as_read(
    request: HttpRequest,
    user_profile: UserProfile,
    stream_id: int = REQ(json_validator=check_int),
    topic_name: str = REQ(),
) -> HttpResponse:
    """Mark all messages in the given stream topic as read for the user."""
    stream, sub = access_stream_by_id(user_profile, stream_id)
    assert stream.recipient_id is not None

    if topic_name:
        # Reject topics the user has no messages in, to surface typos.
        topic_exists = user_message_exists_for_topic(
            user_profile=user_profile,
            recipient_id=stream.recipient_id,
            topic_name=topic_name,
        )
        if not topic_exists:
            raise JsonableError(_("No such topic '{topic}'").format(topic=topic_name))
    count = do_mark_stream_messages_as_read(user_profile, stream.recipient_id, topic_name)

    log_data_str = f"[{count} updated]"
    log_data = RequestNotes.get_notes(request).log_data
    assert log_data is not None
    log_data["extra"] = log_data_str

    return json_success(request)
6,850 | state name | import socket
from pyroute2.netlink.nfnetlink.nfctsocket import (
IP_CT_TCP_FLAG_TO_NAME,
IPSBIT_TO_NAME,
TCP_CONNTRACK_TO_NAME,
NFCTAttrTuple,
NFCTSocket,
)
class NFCTATcpProtoInfo(object):
    """TCP-specific conntrack protocol information (state, window scales, flags)."""

    __slots__ = (
        'state',
        'wscale_orig',
        'wscale_reply',
        'flags_orig',
        'flags_reply',
    )

    def __init__(
        self,
        state,
        wscale_orig=None,
        wscale_reply=None,
        flags_orig=None,
        flags_reply=None,
    ):
        self.state = state
        self.wscale_orig = wscale_orig
        self.wscale_reply = wscale_reply
        self.flags_orig = flags_orig
        self.flags_reply = flags_reply

    def METHOD_NAME(self):
        """Return the human readable name of the TCP conntrack state."""
        return TCP_CONNTRACK_TO_NAME.get(self.state, "UNKNOWN")

    def flags_name(self, flags):
        """Return a comma separated list of the flag bit names set in *flags*."""
        if flags is None:
            return ''
        set_names = [
            name for bit, name in IP_CT_TCP_FLAG_TO_NAME.items() if flags & bit
        ]
        return ','.join(set_names)

    @classmethod
    def from_netlink(cls, ndmsg):
        """Build an instance from a CTA_PROTOINFO netlink attribute."""
        cta_tcp = ndmsg.get_attr('CTA_PROTOINFO_TCP')
        state = cta_tcp.get_attr('CTA_PROTOINFO_TCP_STATE')
        # second tuple member is the mask returned by the kernel, unused here
        flags_orig, _ = cta_tcp.get_attr('CTA_PROTOINFO_TCP_FLAGS_ORIGINAL')
        flags_reply, _ = cta_tcp.get_attr('CTA_PROTOINFO_TCP_FLAGS_REPLY')
        return cls(state=state, flags_orig=flags_orig, flags_reply=flags_reply)

    def __repr__(self):
        return 'TcpInfo(state={}, orig_flags={}, reply_flags={})'.format(
            self.METHOD_NAME(),
            self.flags_name(self.flags_orig),
            self.flags_name(self.flags_reply),
        )
class ConntrackEntry(object):
    """One conntrack table entry decoded from a netlink dump message."""

    __slots__ = (
        'tuple_orig',
        'tuple_reply',
        'status',
        'timeout',
        'protoinfo',
        'mark',
        'id',
        'use',
    )

    def __init__(
        self,
        family,
        tuple_orig,
        tuple_reply,
        cta_status,
        cta_timeout,
        cta_protoinfo,
        cta_mark,
        cta_id,
        cta_use,
    ):
        self.tuple_orig = NFCTAttrTuple.from_netlink(family, tuple_orig)
        self.tuple_reply = NFCTAttrTuple.from_netlink(family, tuple_reply)
        self.status = cta_status
        self.timeout = cta_timeout
        # protocol info is only decoded for TCP entries
        if self.tuple_orig.proto == socket.IPPROTO_TCP:
            self.protoinfo = NFCTATcpProtoInfo.from_netlink(cta_protoinfo)
        else:
            self.protoinfo = None
        self.mark = cta_mark
        self.id = cta_id
        self.use = cta_use

    def status_name(self):
        """Return a comma separated list of the IPS_* status bit names."""
        s = ''
        for bit, name in IPSBIT_TO_NAME.items():
            if self.status & bit:
                s += '{},'.format(name)
        # drop the trailing comma
        return s[:-1]

    def __repr__(self):
        s = 'Entry(orig={}, reply={}, status={}'.format(
            self.tuple_orig, self.tuple_reply, self.status_name()
        )
        if self.protoinfo is not None:
            s += ', protoinfo={}'.format(self.protoinfo)
        s += ')'
        return s
class Conntrack(NFCTSocket):
    """
    High level conntrack functions
    """

    def __init__(self, nlm_generator=True, **kwargs):
        super(Conntrack, self).__init__(nlm_generator=nlm_generator, **kwargs)

    def stat(self):
        """Return current statistics per CPU

        Same result than conntrack -S command but a list of dictionaries
        """
        stats = []
        for msg in super(Conntrack, self).stat():
            stats.append({'cpu': msg['res_id']})
            # strip the 'CTA_STATS_' prefix and lowercase the attribute names
            stats[-1].update(
                (k[10:].lower(), v)
                for k, v in msg['attrs']
                if k.startswith('CTA_STATS_')
            )
        return stats

    def count(self):
        """Return current number of conntrack entries

        Same result than /proc/sys/net/netfilter/nf_conntrack_count file
        or conntrack -C command
        """
        for ndmsg in super(Conntrack, self).count():
            return ndmsg.get_attr('CTA_STATS_GLOBAL_ENTRIES')

    def conntrack_max_size(self):
        """
        Return the max size of connection tracking table
        /proc/sys/net/netfilter/nf_conntrack_max
        """
        for ndmsg in super(Conntrack, self).conntrack_max_size():
            return ndmsg.get_attr('CTA_STATS_GLOBAL_MAX_ENTRIES')

    def delete(self, entry):
        """Delete the entry matching the original tuple of *entry*."""
        if isinstance(entry, ConntrackEntry):
            tuple_orig = entry.tuple_orig
        elif isinstance(entry, NFCTAttrTuple):
            tuple_orig = entry
        else:
            raise NotImplementedError()
        for ndmsg in self.entry('del', tuple_orig=tuple_orig):
            return ndmsg

    def entry(self, cmd, **kwargs):
        # return only the first response message
        for res in super(Conntrack, self).entry(cmd, **kwargs):
            return res

    def dump_entries(
        self,
        mark=None,
        mark_mask=0xFFFFFFFF,
        tuple_orig=None,
        tuple_reply=None,
    ):
        """
        Dump all entries from conntrack table with filters

        Filters can be only part of a conntrack tuple

        :param NFCTAttrTuple tuple_orig: filter on original tuple
        :param NFCTAttrTuple tuple_reply: filter on reply tuple

        Examples::
            # Filter only on tcp connections
            for entry in ct.dump_entries(tuple_orig=NFCTAttrTuple(
                    proto=socket.IPPROTO_TCP)):
                print("This entry is tcp: {}".format(entry))

            # Filter only on icmp message to 8.8.8.8
            for entry in ct.dump_entries(tuple_orig=NFCTAttrTuple(
                    proto=socket.IPPROTO_ICMP,
                    daddr='8.8.8.8')):
                print("This entry is icmp to 8.8.8.8: {}".format(entry))
        """
        for ndmsg in self.dump(
            mark=mark,
            mark_mask=mark_mask,
            tuple_orig=tuple_orig,
            tuple_reply=tuple_reply,
        ):
            # post-filter client-side: the kernel dump may return more
            # entries than the partial tuples actually match
            if tuple_orig is not None and not tuple_orig.nla_eq(
                ndmsg['nfgen_family'], ndmsg.get_attr('CTA_TUPLE_ORIG')
            ):
                continue
            if tuple_reply is not None and not tuple_reply.nla_eq(
                ndmsg['nfgen_family'], ndmsg.get_attr('CTA_TUPLE_REPLY')
            ):
                continue
            yield ConntrackEntry(
                ndmsg['nfgen_family'],
                ndmsg.get_attr('CTA_TUPLE_ORIG'),
                ndmsg.get_attr('CTA_TUPLE_REPLY'),
                ndmsg.get_attr('CTA_STATUS'),
                ndmsg.get_attr('CTA_TIMEOUT'),
                ndmsg.get_attr('CTA_PROTOINFO'),
                ndmsg.get_attr('CTA_MARK'),
                ndmsg.get_attr('CTA_ID'),
                ndmsg.get_attr('CTA_USE'),
            )
6,851 | run | import logging
from abc import abstractmethod
from typing import Dict, List, Optional, Tuple, Any
from haystack.nodes.base import BaseComponent, Document
logger = logging.getLogger(__name__)
DEFAULT_LANGUAGES = ["en", "de", "es", "cs", "nl"]
class BaseDocumentLanguageClassifier(BaseComponent):
    """
    Abstract class for Document Language Classifiers.

    Subclasses implement predict()/predict_batch() to set
    doc.meta["language"]; run()/run_batch() then optionally route the
    documents to a per-language output edge.
    """

    outgoing_edges = len(DEFAULT_LANGUAGES)

    @classmethod
    def _calculate_outgoing_edges(cls, component_params: Dict[str, Any]) -> int:
        # One edge per routed language, or a single edge when routing is disabled.
        route_by_language = component_params.get("route_by_language", True)
        if route_by_language is False:
            return 1
        languages_to_route = component_params.get("languages_to_route", DEFAULT_LANGUAGES)
        return len(languages_to_route)

    def __init__(self, route_by_language: bool = True, languages_to_route: Optional[List[str]] = None):
        """
        :param route_by_language: Routes Documents to a different output edge depending on their language.
        :param languages_to_route: A list of languages in ISO code, each corresponding to a different output edge (see [langdetect documentation](https://github.com/Mimino666/langdetect#languages)).
        """
        super().__init__()

        if languages_to_route is None:
            languages_to_route = DEFAULT_LANGUAGES
            if route_by_language is True:
                logger.info(
                    "The languages_to_route list is not defined. The default list will be used: %s", languages_to_route
                )

        if len(set(languages_to_route)) != len(languages_to_route):
            duplicates = {lang for lang in languages_to_route if languages_to_route.count(lang) > 1}
            raise ValueError(f"The languages_to_route parameter can't contain duplicate values ({duplicates}).")

        self.route_by_language = route_by_language
        self.languages_to_route = languages_to_route

    @abstractmethod
    def predict(self, documents: List[Document], batch_size: Optional[int] = None) -> List[Document]:
        """Detect and attach doc.meta["language"] for each document."""
        pass

    @abstractmethod
    def predict_batch(self, documents: List[List[Document]], batch_size: Optional[int] = None) -> List[List[Document]]:
        """Batched variant of predict()."""
        pass

    def _get_edge_from_language(self, language: str) -> str:
        # Edges are 1-indexed in the order of languages_to_route.
        return f"output_{self.languages_to_route.index(language) + 1}"

    def run(self, documents: List[Document]) -> Tuple[Dict[str, List[Document]], str]:  # type: ignore
        """
        Run language document classifier on a list of documents.

        :param documents: A list of documents whose language you want to detect.
        """
        docs_with_languages = self.predict(documents=documents)
        output = {"documents": docs_with_languages}

        if self.route_by_language is False:
            return output, "output_1"

        # self.route_by_language is True
        languages = [doc.meta["language"] for doc in docs_with_languages]
        unique_languages = list(set(languages))
        if len(unique_languages) > 1:
            raise ValueError(
                f"If the route_by_language parameter is True, Documents of multiple languages ({unique_languages}) are not allowed together. "
                "If you want to route documents by language, you can call Pipeline.run() once for each Document."
            )
        language = unique_languages[0]
        if language is None:
            logger.warning(
                "The model cannot detect the language of any of the documents."
                "The first language in the list of supported languages will be used to route the document: %s",
                self.languages_to_route[0],
            )
            language = self.languages_to_route[0]
        if language not in self.languages_to_route:
            raise ValueError(
                f"'{language}' is not in the list of languages to route ({', '.join(self.languages_to_route)})."
                f"You should specify them when initializing the node, using the parameter languages_to_route."
            )

        return output, self._get_edge_from_language(str(language))

    def run_batch(self, documents: List[List[Document]], batch_size: Optional[int] = None) -> Tuple[Dict, str]:  # type: ignore
        """
        Run language document classifier on batches of documents.

        :param documents: A list of lists of documents whose language you want to detect.
        """
        docs_lists_with_languages = self.predict_batch(documents=documents, batch_size=batch_size)

        if self.route_by_language is False:
            output = {"documents": docs_lists_with_languages}
            return output, "output_1"

        # self.route_by_language is True
        split: Dict[str, Dict[str, List[List[Document]]]] = {
            f"output_{pos}": {"documents": []} for pos in range(1, len(self.languages_to_route) + 1)
        }

        for docs_list in docs_lists_with_languages:
            languages = [doc.meta["language"] for doc in docs_list]
            unique_languages = list(set(languages))
            if len(unique_languages) > 1:
                raise ValueError(
                    f"If the route_by_language parameter is True, Documents of multiple languages ({unique_languages}) are not allowed together. "
                    "If you want to route documents by language, you can call Pipeline.run() once for each Document."
                )
            # BUGFIX: assign the detected language first and only then apply
            # the fallback; previously the fallback was overwritten by a
            # second assignment, so a None language always reached the
            # membership check below (unlike run(), which handles this case).
            language: Optional[str] = unique_languages[0]
            if language is None:
                logger.warning(
                    "The model cannot detect the language of some of the documents."
                    "The first language in the list of supported languages will be used to route the documents: %s",
                    self.languages_to_route[0],
                )
                language = self.languages_to_route[0]
            if language not in self.languages_to_route:
                raise ValueError(
                    f"'{language}' is not in the list of languages to route ({', '.join(self.languages_to_route)})."
                    f"Specify them when initializing the node, using the parameter languages_to_route."
                )
            edge_name = self._get_edge_from_language(str(language))
            split[edge_name]["documents"].append(docs_list)
        return split, "split"
6,852 | datetime never | # -*- coding: utf-8
# utility
# *******
#
# Utility Functions
import re
import uuid
from datetime import datetime, timedelta
from twisted.internet import reactor
from twisted.internet.defer import Deferred
def get_distribution_codename():
    """
    Return the distribution codename (e.g. "bookworm") from /etc/os-release.

    Returns the empty string when the file is missing, unreadable, or does
    not define VERSION_CODENAME.
    """
    try:
        with open("/etc/os-release", "r") as fd:
            for line in fd:
                # partition() tolerates values that themselves contain '='
                # (e.g. PRETTY_NAME="x=y"); a two-way unpack of split("=")
                # would raise there and silently abort the whole scan.
                key, _, value = line.partition("=")
                if key == "VERSION_CODENAME":
                    return value.strip().strip("\"")
    except Exception:
        # best-effort: any I/O or parsing problem yields the empty string
        pass

    return ""
def uuid4():
    """
    Generate a random version-4 UUID and return it as a string.
    """
    new_id = uuid.uuid4()
    return str(new_id)
def sum_dicts(*dicts):
    """
    Merge the given dicts into a new dict; later dicts win on key clashes.
    """
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def deferred_sleep(timeout):
    """
    Return a Deferred that fires with True after *timeout* seconds.

    Non-blocking replacement for time.sleep() inside the twisted reactor.
    """
    d = Deferred()
    reactor.callLater(timeout, d.callback, True)
    return d
def msdos_encode(s):
    """
    Normalize every line ending in *s* to CRLF ("\r\n").

    Bare "\n" endings gain a carriage return; existing "\r\n" pairs are
    left unchanged.
    """
    return re.sub(r'\r?\n', '\r\n', s)
def iso_strf_time(d):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    return "{:%Y-%m-%d %H:%M:%S.%f}".format(d)
def datetime_null():
    """
    :return: a utc datetime object representing a null date (the epoch)
    """
    epoch = datetime(year=1970, month=1, day=1)
    return epoch
def datetime_now():
    """
    :return: a naive utc datetime object representing the current time

    (The previous docstring wrongly described this as a "null date".)
    """
    return datetime.utcnow()
def METHOD_NAME():
    """
    :return: a utc datetime object representing the 1st January 3000
    """
    far_future = datetime(year=3000, month=1, day=1)
    return far_future
def get_expiration(days):
    """
    :return: a utc datetime representing midnight of today plus (days + 1) days
    """
    today = datetime.utcnow()
    midnight = datetime(today.year, today.month, today.day)
    return midnight + timedelta(days=days + 1)
def is_expired(check_date, seconds=0, minutes=0, hours=0, days=0):
    """
    Return True when *check_date* plus the given offsets lies in the past (UTC).
    """
    offset = timedelta(seconds=seconds, minutes=minutes, hours=hours + days * 24)
    deadline = check_date + offset
    return datetime.utcnow() > deadline
def datetime_to_ISO8601(date):
    """
    Convert a datetime into an ISO8601 string with a trailing "Z" (UTC marker).

    A None date is rendered as the epoch (1970-01-01).
    """
    if date is None:
        date = datetime(1970, 1, 1)

    return "{}Z".format(date.isoformat())
def datetime_to_pretty_str(date):
    """
    Format a datetime like 'Wednesday 01 January 2020 12:30 (UTC)'.
    """
    return "{:%A %d %B %Y %H:%M (UTC)}".format(date)
def datetime_to_day_str(date, tz=0):
    """
    Format a datetime as 'DD/MM/YYYY', optionally shifted by a UTC offset.

    *tz* encodes the offset with hours in the integer part and minutes in
    the two fractional digits (e.g. 5.30 means +05:30).
    """
    if tz != 0:
        offset_hours, fraction = divmod(tz, 1)
        offset_minutes, _ = divmod(fraction * 100, 1)
        date = date + timedelta(hours=offset_hours, minutes=offset_minutes)

    return date.strftime("%d/%m/%Y")
def ISO8601_to_pretty_str(isodate, tz=0):
    """
    Convert an ISO8601 string into a pretty formatted string, optionally
    shifted by a UTC offset (*tz* uses the hours.minutes encoding described
    in datetime_to_day_str).

    A None input is treated as the epoch (1970-01-01).
    """
    if isodate is None:
        isodate = datetime(1970, 1, 1).isoformat()

    # parse the fixed-position ISO8601 fields directly
    date = datetime(year=int(isodate[0:4]),
                    month=int(isodate[5:7]),
                    day=int(isodate[8:10]),
                    hour=int(isodate[11:13]),
                    minute=int(isodate[14:16]),
                    second=int(isodate[17:19]))

    if tz != 0:
        tz_i, tz_d = divmod(tz, 1)
        tz_d, _ = divmod(tz_d * 100, 1)
        date += timedelta(hours=tz_i, minutes=tz_d)

    return date.strftime("%A %d %B %Y %H:%M")
    # NOTE: an unreachable second return (datetime_to_pretty_str(date))
    # that followed the statement above has been removed as dead code.
def iso_year_start(iso_year):
    """Returns the gregorian calendar date of the first day of the given ISO year"""
    # ISO week 1 always contains January 4th; back up to its Monday.
    fourth_jan = datetime(iso_year, 1, 4)
    return fourth_jan - timedelta(days=fourth_jan.isoweekday() - 1)
def iso_to_gregorian(iso_year, iso_week, iso_day):
    """Returns gregorian calendar date for the given ISO year, week and day"""
    # Monday of ISO week 1: back up from January 4th, which week 1 contains.
    fourth_jan = datetime(iso_year, 1, 4)
    week_one_monday = fourth_jan - timedelta(days=fourth_jan.isoweekday() - 1)
    return week_one_monday + timedelta(days=iso_day - 1, weeks=iso_week - 1)
def bytes_to_pretty_str(b):
    """
    Render a byte count (int or numeric string) as a truncated GB/MB/KB string.
    """
    count = int(b) if isinstance(b, str) else b

    if count >= 1000000000:
        return "%dGB" % int(count / 1000000000)
    if count >= 1000000:
        return "%dMB" % int(count / 1000000)
    return "%dKB" % int(count / 1000)
6,853 | test num data | import pytest
import numpy as np
import numpy.testing as npt
import copy
import unittest
from lenstronomy.Data.imaging_data import ImageData
import lenstronomy.Util.util as util
class TestData(object):
    """Tests for lenstronomy ImageData: pixel counts, coordinate shifts, updates."""

    def setup_method(self):
        # shared 10x10 blank image with a unit noise map
        self.numPix = 10
        kwargs_data = {
            "image_data": np.zeros((self.numPix, self.numPix)),
            "noise_map": np.ones((self.numPix, self.numPix)),
        }
        self.Data = ImageData(**kwargs_data)

    def METHOD_NAME(self):
        # num_pixel counts every pixel of the 2D grid
        assert self.Data.num_pixel == self.numPix**2

    def test_shift_coords(self):
        """Shifting via ra_shift/dec_shift kwargs moves the pixel<->sky mapping."""
        numPix = 10
        deltaPix = 0.05
        (
            x_grid,
            y_grid,
            ra_at_xy_0,
            dec_at_xy_0,
            x_at_radec_0,
            y_at_radec_0,
            Mpix2coord,
            Mcoord2pix,
        ) = util.make_grid_with_coordtransform(
            numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True
        )
        # mask (1= model this pixel, 0= leave blanck)
        kwargs_data = {
            "ra_at_xy_0": ra_at_xy_0,
            "dec_at_xy_0": dec_at_xy_0,
            "transform_pix2angle": Mpix2coord,
            "image_data": np.ones((numPix, numPix)),
        }
        data = ImageData(**kwargs_data)

        ra_shift = 0.05
        dec_shift = 0.0
        kwargs_data["ra_shift"] = ra_shift
        kwargs_data["dec_shift"] = dec_shift
        data_shift = ImageData(**kwargs_data)

        # same pixel maps to coordinates offset by exactly the shift
        ra, dec = data.map_pix2coord(1, 1)
        ra_new, dec_new = data_shift.map_pix2coord(1, 1)
        npt.assert_almost_equal(ra_new - ra, ra_shift, decimal=10)
        npt.assert_almost_equal(dec_new - dec, dec_shift, decimal=10)

        # the shift equals one pixel here, so the neighbouring pixel matches
        ra_2, dec_2 = data_shift.map_pix2coord(2, 1)
        npt.assert_almost_equal(ra, ra_2, decimal=10)
        npt.assert_almost_equal(dec, dec_2, decimal=10)

        # inverse mapping is shifted consistently
        x, y = data.map_coord2pix(0, 0)
        x_new, y_new = data_shift.map_coord2pix(ra_shift, dec_shift)
        npt.assert_almost_equal(x, x_new, decimal=10)
        npt.assert_almost_equal(y, y_new, decimal=10)

    def test_shift_coordinate_system(self):
        """shift_coordinate_system() must match constructing with a shift."""
        x_shift = 0.05
        y_shift = 0

        numPix = 10
        deltaPix = 0.05
        (
            x_grid,
            y_grid,
            ra_at_xy_0,
            dec_at_xy_0,
            x_at_radec_0,
            y_at_radec_0,
            Mpix2coord,
            Mcoord2pix,
        ) = util.make_grid_with_coordtransform(
            numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True
        )
        kwargs_data = {
            "ra_at_xy_0": ra_at_xy_0,
            "dec_at_xy_0": dec_at_xy_0,
            "transform_pix2angle": Mpix2coord,
            "image_data": np.ones((numPix, numPix)),
        }
        data = ImageData(**kwargs_data)
        data_new = copy.deepcopy(data)
        data_new.shift_coordinate_system(x_shift, y_shift, pixel_unit=False)
        ra, dec = 0, 0
        x, y = data.map_coord2pix(ra, dec)
        x_new, y_new = data_new.map_coord2pix(ra + x_shift, dec + y_shift)
        npt.assert_almost_equal(x, x_new, decimal=10)
        npt.assert_almost_equal(y, y_new, decimal=10)

        ra, dec = data.map_pix2coord(x, y)
        ra_new, dec_new = data_new.map_pix2coord(x, y)
        npt.assert_almost_equal(ra, ra_new - x_shift, decimal=10)
        npt.assert_almost_equal(dec, dec_new - y_shift, decimal=10)

        x_coords, y_coords = data.pixel_coordinates
        x_coords_new, y_coords_new = data_new.pixel_coordinates
        npt.assert_almost_equal(x_coords[0], x_coords_new[0] - x_shift, decimal=10)
        npt.assert_almost_equal(y_coords[0], y_coords_new[0] - y_shift, decimal=10)

    def test_update_data(self):
        """Updating the image data must refresh the noise covariance C_D."""
        kwargs_data = {
            "image_data": np.zeros((self.numPix, self.numPix)),
            "noise_map": None,
            "exposure_time": 1,
            "background_rms": 1,
        }
        data = ImageData(**kwargs_data)
        C_D = data.C_D
        data.update_data(image_data=np.ones((self.numPix, self.numPix)))
        C_D_new = data.C_D
        # Poisson term grows with the (now non-zero) image counts
        assert C_D_new[0, 0] > C_D[0, 0]
        data_new = data.data
        npt.assert_almost_equal(data_new, np.ones((self.numPix, self.numPix)))
class TestRaise(unittest.TestCase):
    """Error-path tests for ImageData."""

    def test_raise(self):
        kwargs_data = {"image_data": np.zeros((10, 10))}
        Data = ImageData(**kwargs_data)
        # updating with a mismatched shape must fail
        image_data_new = np.zeros((5, 5))
        with self.assertRaises(ValueError):
            out = Data.update_data(image_data_new)
        # unknown likelihood methods must be rejected at construction
        with self.assertRaises(ValueError):
            ImageData(**kwargs_data, likelihood_method="WRONG")


if __name__ == "__main__":
    pytest.main()
6,854 | run model | from runtime.segmented_run.prepare_config import HighLevelConfig
from runtime.segmented_run.api import create, append
from runtime.steppers.radiation import RadiationStepper
from runtime.types import State
import cftime
import yaml
import xarray as xr
import numpy as np
import pytest
import dataclasses
import os
@dataclasses.dataclass
class RadiationDiagnostic:
    """A radiation diagnostic variable and how the test suite treats it."""

    name: str
    validate: bool = False
    is_flux: bool = True

    @property
    def python_name(self):
        """Name of the diagnostic as emitted by the python radiation port."""
        return self.name + "_python"
# Diagnostics produced by both the fortran model and the python radiation
# port; only entries with validate=True are numerically compared, and
# is_flux distinguishes fluxes from heating-rate profiles.
RADIATION_DIAGNOSTICS = [
    RadiationDiagnostic("clear_sky_downward_longwave_flux_at_surface"),
    RadiationDiagnostic("clear_sky_downward_shortwave_flux_at_surface"),
    RadiationDiagnostic("clear_sky_upward_longwave_flux_at_surface"),
    RadiationDiagnostic("clear_sky_upward_shortwave_flux_at_surface"),
    RadiationDiagnostic("clear_sky_upward_longwave_flux_at_top_of_atmosphere"),
    RadiationDiagnostic("clear_sky_upward_shortwave_flux_at_top_of_atmosphere"),
    RadiationDiagnostic("total_sky_downward_longwave_flux_at_surface"),
    RadiationDiagnostic("total_sky_downward_shortwave_flux_at_surface"),
    RadiationDiagnostic("total_sky_upward_longwave_flux_at_surface"),
    RadiationDiagnostic("total_sky_upward_shortwave_flux_at_surface"),
    RadiationDiagnostic(
        "total_sky_downward_shortwave_flux_at_top_of_atmosphere", validate=True,
    ),
    RadiationDiagnostic("total_sky_upward_longwave_flux_at_top_of_atmosphere"),
    RadiationDiagnostic("total_sky_upward_shortwave_flux_at_top_of_atmosphere"),
    RadiationDiagnostic("total_sky_longwave_heating_rate", is_flux=False),
    RadiationDiagnostic("clear_sky_longwave_heating_rate", is_flux=False),
    RadiationDiagnostic("total_sky_shortwave_heating_rate", is_flux=False),
    RadiationDiagnostic("clear_sky_shortwave_heating_rate", is_flux=False),
]
base_config = r"""
base_version: v0.7
initial_conditions: gs://vcm-fv3config/data/initial_conditions/c12_restart_initial_conditions/v1.0
namelist:
coupler_nml:
minutes: 30
current_date:
- 2016
- 8
- 1
- 0
- 0
- 0
gfdl_cloud_microphysics_nml:
fast_sat_adj: false
gfs_physics_nml:
fhlwr: 1800.0
fhswr: 1800.0
hybedmf: true
satmedmf: false
fv_core_nml:
npx: 13
npy: 13
npz: 63
fortran_diagnostics:
- name: sfc_dt_atmos.zarr
chunks:
time: 2
times:
frequency: 900
kind: interval
variables:
- {module_name: dynamics, field_name: grid_lont, output_name: lon}
- {module_name: dynamics, field_name: grid_latt, output_name: lat}
- {module_name: dynamics, field_name: grid_lon, output_name: lonb}
- {module_name: dynamics, field_name: grid_lat, output_name: latb}
- {module_name: dynamics, field_name: area, output_name: area}
- {module_name: gfs_phys, field_name: dusfci, output_name: uflx}
- {module_name: gfs_phys, field_name: dvsfci, output_name: vflx}
- {module_name: gfs_phys, field_name: cnvprcpb_ave, output_name: CPRATsfc}
- {module_name: gfs_phys, field_name: totprcpb_ave, output_name: PRATEsfc}
- {module_name: gfs_phys, field_name: DSWRF, output_name: DSWRFsfc}
- {module_name: gfs_phys, field_name: USWRF, output_name: USWRFsfc}
- {module_name: gfs_phys, field_name: DSWRFtoa, output_name: DSWRFtoa}
- {module_name: gfs_phys, field_name: USWRFtoa, output_name: USWRFtoa}
- {module_name: gfs_phys, field_name: ULWRFtoa, output_name: ULWRFtoa}
- {module_name: gfs_phys, field_name: ULWRF, output_name: ULWRFsfc}
- {module_name: gfs_phys, field_name: DLWRF, output_name: DLWRFsfc}
- {module_name: gfs_phys, field_name: lhtfl_ave, output_name: LHTFLsfc}
- {module_name: gfs_phys, field_name: shtfl_ave, output_name: SHTFLsfc}
""" # noqa: 501
def get_fv3config():
    """Parse base_config into a plain fv3config dict ready for yaml dumping."""
    config = HighLevelConfig.from_dict(yaml.safe_load(base_config))
    fv3config_dict = config.to_fv3config()
    # can't call normal dump on representation of the data table without this
    fv3config_dict["diag_table"] = fv3config_dict["diag_table"].asdict()
    return fv3config_dict
def radiation_scheme_config():
    """Return the base config extended with the python radiation scheme
    and zarr diagnostics for both the fortran fluxes and the python port."""
    config = get_fv3config()
    config["radiation_scheme"] = {"kind": "python"}
    # fortran-side flux diagnostics
    diagnostics = [
        {
            "name": "state_after_timestep.zarr",
            "chunks": {"time": 2},
            "times": {"frequency": 900, "kind": "interval"},
            "variables": [
                diagnostic.name
                for diagnostic in RADIATION_DIAGNOSTICS
                if diagnostic.is_flux
            ],
        }
    ]
    # python-port diagnostics (fluxes and heating rates)
    diagnostics.append(
        {
            "name": "radiation_diagnostics.zarr",
            "chunks": {"time": 2},
            "times": {"frequency": 900, "kind": "interval"},
            "variables": [
                diagnostic.python_name for diagnostic in RADIATION_DIAGNOSTICS
            ],
        }
    )
    config["diagnostics"] = diagnostics
    return config
def METHOD_NAME(config, rundir):
    """Create a run directory from *config* and execute one segment in it."""
    create(rundir, config)
    append(rundir)
@pytest.fixture(scope="module")
def completed_rundir(tmpdir_factory):
    """Run the radiation-scheme configuration once and share the run dir
    across all tests in the module (module-scoped: the run is expensive)."""
    config = radiation_scheme_config()
    rundir = tmpdir_factory.mktemp("rundir").join("subdir")
    METHOD_NAME(config, str(rundir))
    return rundir
def get_zarr(rundir, zarrname):
    """Open the named zarr store inside the run directory as an xarray Dataset."""
    return xr.open_zarr(os.path.join(rundir, zarrname))
def test_radiation_diagnostics_output(completed_rundir):
    """Every python-port diagnostic must be present in the output store."""
    ds = get_zarr(completed_rundir, "radiation_diagnostics.zarr")
    for diagnostic in RADIATION_DIAGNOSTICS:
        assert diagnostic.python_name in ds.data_vars
def test_radiation_diagnostics_validate(completed_rundir):
    """Compare the python port against fortran for the validate=True fluxes."""
    rtol = 1.0e-7
    python_radiation = get_zarr(completed_rundir, "radiation_diagnostics.zarr")
    fortran_radiation = get_zarr(completed_rundir, "state_after_timestep.zarr")
    to_validate = [
        diagnostic for diagnostic in RADIATION_DIAGNOSTICS if diagnostic.validate
    ]
    for diagnostic in to_validate:
        python_radiation_diagnostic = python_radiation[diagnostic.python_name]
        fortran_radiation_diagnostic = fortran_radiation[diagnostic.name]
        try:
            xr.testing.assert_allclose(
                python_radiation_diagnostic, fortran_radiation_diagnostic, rtol=rtol
            )
        except AssertionError as err:
            # re-raise with the offending flux named for easier triage
            raise AssertionError(
                f"Port failed to validate at relative tolerance {rtol}"
                f" for flux {diagnostic.name}."
            ) from err
class MockRadiation:
    """Stand-in radiation scheme: flux is simply x + y from the state."""

    input_variables = ["x", "y"]

    def __init__(self):
        pass

    def init_driver(self):
        # real drivers do setup here; nothing needed for the mock
        pass

    def __call__(self, time, state: State):
        return {"mock_rad_flux": state["x"] + state["y"]}
class MockInputGenerator:
    """Stand-in input generator: bumps the listed state variables by 1."""

    def __init__(self, output_variables):
        self.output_variables = output_variables

    def __call__(self, time, state: State):
        state_updates: State = {var: state[var] + 1.0 for var in self.output_variables}
        # (tendencies, diagnostics, state_updates) like a real prescriber
        return {}, {}, state_updates
def get_data_array():
    """Return a random 10x1 DataArray with (lat, lon) dimensions."""
    values = np.random.random((10, 1))
    return xr.DataArray(values, dims=["lat", "lon"])
@pytest.mark.parametrize(
    ["generated_names", "offset"],
    [
        pytest.param(["x"], 1.0, id="add_to_one"),
        pytest.param(["x", "y"], 2.0, id="add_to_both"),
        pytest.param(["z"], 0.0, id="add_to_none"),
    ],
)
def test_input_generator_changes_fluxes(generated_names, offset):
    """The generator's +1 updates must flow into the radiation flux;
    updating a variable the radiation scheme ignores ("z") changes nothing."""
    radiation = MockRadiation()
    input_generator = MockInputGenerator(generated_names)
    stepper = RadiationStepper(radiation, input_generator)
    state: State = {"x": get_data_array(), "y": get_data_array(), "z": get_data_array()}
    time = cftime.DatetimeJulian(2016, 8, 1, 0, 0, 0)
    _, diags, _ = stepper(time, state)
    expected = state["x"] + state["y"] + offset
    xr.testing.assert_allclose(diags["mock_rad_flux"], expected)
6,855 | run | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
Load example input data for an scd2 template test.
"""
from vdk.api.job_input import IJobInput
__author__ = "VMware, Inc."
__copyright__ = (
"Copyright 2019 VMware, Inc. All rights reserved. -- VMware Confidential"
)
def METHOD_NAME(job_input: IJobInput) -> None:
    """Load example input data for an scd2 template test.

    Creates and populates three tables: the current state
    (`{target_schema}`.`{target_table}`), the delta to be applied
    (`{source_schema}`.`{source_view}`) and the expected post-merge state
    (`{expect_schema}`.`{expect_table}`).
    """
    # Step 1: the table that represents the current state.
    target_ddl = """
        CREATE TABLE IF NOT EXISTS `{target_schema}`.`{target_table}` (
            `{sk_column}` STRING,
            `{active_from_column}` TIMESTAMP,
            `{active_to_column}` TIMESTAMP,
            `{id_column}` INT,
            `updated_by_user_id` INT,
            `state` STRING,
            `is_nsxt` BOOLEAN,
            `cloud_vendor` STRING,
            `version` SMALLINT
        ) STORED AS PARQUET
    """
    target_rows = """
        INSERT OVERWRITE TABLE `{target_schema}`.`{target_table}` VALUES (
            ("sddc01-v01", "2019-01-01", "9999-12-31", 1, 7, "RUNNING" , false, 'Azure', 498),
            ("sddc02-v01", "2019-02-01", "9999-12-31", 2, 9, "STOPPED" , false, 'AWS' , 500),
            ("sddc03-v01", "2019-03-01", "9999-12-31", 3, 3, "PROVISIONING", false, 'Azure', 497),
            ("sddc04-v01", "2019-04-01", "9999-12-31", 4, 5, "PROVISIONING", true , 'Azure', 498),
            ("sddc05-v01", "2019-05-01", "2019-05-02", 5, 9, "STARTING" , true , 'AWS' , 500),
            ("sddc05-v02", "2019-05-02", "2019-05-03", 5, 2, "STARTING" , true , 'AWS' , 500),
            ("sddc05-v03", "2019-05-03", "9999-12-31", 5, 3, "STARTING" , true , 'AWS' , 500)
        )
    """
    # Step 2: the table that represents the delta to be applied.
    source_ddl = """
        CREATE TABLE IF NOT EXISTS `{source_schema}`.`{source_view}` (
            `{updated_at_column}` TIMESTAMP,
            `{id_column}` INT,
            `updated_by_user_id` INT,
            `state` STRING,
            `is_nsxt` BOOLEAN,
            `cloud_vendor` STRING,
            `version` SMALLINT
        ) STORED AS PARQUET
    """
    source_rows = """
        INSERT OVERWRITE TABLE `{source_schema}`.`{source_view}` VALUES (
            ("2019-02-02", 2, 1, "STARTING" , false, 'AWS' , 500), -- Update (1) - new time, new values
            ("2019-03-01", 3, 4, "RUNNING" , false, 'Azure', 497), -- Update (2) - same time, new values
            ("2019-04-02", 4, 5, "PROVISIONING", true , 'Azure', 498), -- Update (3) - new time, same values
            ("2019-05-01", 5, 9, "STARTING" , true , 'AWS' , 500), -- Update (4) - same time, same values
            ("2019-05-02", 5, 9, "STARTING" , true , 'AWS' , 500), -- Update (5) - same time, prev values
            ("2019-05-04", 5, 9, "STARTING" , true , 'AWS' , 500), -- Update (1) - new time, new values
            ("2019-06-01", 6, 9, "STARTING" , true , 'AWS' , 499) -- Insert
        )
    """
    # Step 3: the state expected after applying the delta to the current state.
    expect_ddl = """
        CREATE TABLE IF NOT EXISTS `{expect_schema}`.`{expect_table}` (
            `{sk_column}` STRING,
            `{active_from_column}` TIMESTAMP,
            `{active_to_column}` TIMESTAMP,
            `{id_column}` INT,
            `updated_by_user_id` INT,
            `state` STRING,
            `is_nsxt` BOOLEAN,
            `cloud_vendor` STRING,
            `version` SMALLINT
        ) STORED AS PARQUET
    """
    expect_rows = """
        INSERT OVERWRITE TABLE `{expect_schema}`.`{expect_table}` VALUES (
            ("sddc01-v01", "2019-01-01", "9999-12-31", 1, 7, "RUNNING" , false, 'Azure', 498),
            ("sddc02-v01", "2019-02-01", "2019-02-02", 2, 9, "STOPPED" , false, 'AWS' , 500),
            ("sddc02-v02", "2019-02-02", "9999-12-31", 2, 1, "STARTING" , false, 'AWS' , 500),
            ("sddc03-v01", "2019-03-01", "9999-12-31", 3, 4, "RUNNING" , false, 'Azure', 497),
            ("sddc04-v01", "2019-04-01", "9999-12-31", 4, 5, "PROVISIONING", true , 'Azure', 498),
            ("sddc05-v01", "2019-05-01", "2019-05-03", 5, 9, "STARTING" , true , 'AWS' , 500),
            ("sddc05-v03", "2019-05-03", "2019-05-04", 5, 3, "STARTING" , true , 'AWS' , 500),
            ("sddc05-v04", "2019-05-04", "9999-12-31", 5, 9, "STARTING" , true , 'AWS' , 500),
            ("sddc06-v01", "2019-06-01", "9999-12-31", 6, 9, "STARTING" , true , 'AWS' , 499)
        )
    """
    for statement in (
        target_ddl,
        target_rows,
        source_ddl,
        source_rows,
        expect_ddl,
        expect_rows,
    ):
        job_input.execute_query(statement)
#!/usr/bin/env python3
import logging
import time
import sys
import os
import shutil
from pathlib import Path
import requests
from compress_files import decompress_fast, compress_fast
DOWNLOAD_RETRIES_COUNT = 5
def dowload_file_with_progress(url, path):
    """Download *url* to *path*, drawing a progress bar on interactive stdout.

    Retries up to DOWNLOAD_RETRIES_COUNT times, removing any partial file
    between attempts, and raises when all attempts fail.
    NOTE(review): the name keeps its historical "dowload" typo — callers
    elsewhere depend on it.
    """
    logging.info("Downloading from %s to temp path %s", url, path)
    for i in range(DOWNLOAD_RETRIES_COUNT):
        try:
            with open(path, "wb") as f:
                response = requests.get(url, stream=True)
                response.raise_for_status()
                total_length = response.headers.get("content-length")
                if total_length is None or int(total_length) == 0:
                    # Server did not say how big the file is: no progress bar.
                    logging.info(
                        "No content-length, will download file without progress"
                    )
                    f.write(response.content)
                else:
                    dl = 0  # bytes received so far
                    total_length = int(total_length)
                    logging.info("Content length is %ld bytes", total_length)
                    for data in response.iter_content(chunk_size=4096):
                        dl += len(data)
                        f.write(data)
                        if sys.stdout.isatty():
                            # 50-char bar plus percentage, redrawn in place with \r.
                            done = int(50 * dl / total_length)
                            percent = int(100 * float(dl) / total_length)
                            eq_str = "=" * done
                            space_str = " " * (50 - done)
                            sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%")
                            sys.stdout.flush()
            break
        except Exception as ex:
            sys.stdout.write("\n")
            time.sleep(3)
            logging.info("Exception while downloading %s, retry %s", ex, i + 1)
            if os.path.exists(path):
                # Drop the partial download so the next attempt starts clean.
                os.remove(path)
    else:
        # for/else: the loop never hit `break`, i.e. every retry failed.
        raise Exception(f"Cannot download dataset from {url}, all retries exceeded")
    sys.stdout.write("\n")
    logging.info("Downloading finished")
def get_ccache_if_not_exists(
    path_to_ccache_dir, s3_helper, current_pr_number, temp_path
) -> int:
    """returns: number of PR for downloaded PR. -1 if ccache not found"""
    ccache_name = os.path.basename(path_to_ccache_dir)
    cache_found = False
    prs_to_check = [current_pr_number]
    ccache_pr = -1
    # Fall back to the master cache (pr 0) when this PR has none yet.
    if current_pr_number != 0:
        prs_to_check.append(0)
    for pr_number in prs_to_check:
        logging.info("Searching cache for pr %s", pr_number)
        s3_path_prefix = str(pr_number) + "/ccaches"
        objects = s3_helper.list_prefix(s3_path_prefix)
        logging.info("Found %s objects for pr", len(objects))
        for obj in objects:
            if ccache_name in obj:
                logging.info("Found ccache on path %s", obj)
                url = "https://s3.amazonaws.com/clickhouse-builds/" + obj
                compressed_cache = os.path.join(temp_path, os.path.basename(obj))
                dowload_file_with_progress(url, compressed_cache)
                # Unpack into the parent dir so the archive recreates the
                # ccache directory itself.
                path_to_decompress = str(Path(path_to_ccache_dir).parent)
                if not os.path.exists(path_to_decompress):
                    os.makedirs(path_to_decompress)
                if os.path.exists(path_to_ccache_dir):
                    # A stale local cache would mix with the downloaded one.
                    shutil.rmtree(path_to_ccache_dir)
                    logging.info("Ccache already exists, removing it")
                logging.info("Decompressing cache to path %s", path_to_decompress)
                decompress_fast(compressed_cache, path_to_decompress)
                logging.info("Files on path %s", os.listdir(path_to_decompress))
                cache_found = True
                ccache_pr = pr_number
                break
        if cache_found:
            break
    if not cache_found:
        logging.info("ccache not found anywhere, cannot download anything :(")
        if os.path.exists(path_to_ccache_dir):
            logging.info("But at least we have some local cache")
    else:
        logging.info("ccache downloaded")
    return ccache_pr
def METHOD_NAME(path_to_ccache_dir, s3_helper, current_pr_number, temp_path):
    """Compress the local ccache directory and upload it under the PR's S3 prefix."""
    logging.info("Uploading cache %s for pr %s", path_to_ccache_dir, current_pr_number)
    archive_path = os.path.join(
        temp_path, os.path.basename(path_to_ccache_dir) + ".tar.gz"
    )
    compress_fast(path_to_ccache_dir, archive_path)
    s3_path = f"{current_pr_number}/ccaches/{os.path.basename(archive_path)}"
    logging.info("Will upload %s to path %s", archive_path, s3_path)
    s3_helper.upload_build_file_to_s3(archive_path, s3_path)
    logging.info("Upload finished")
from django.db import models
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from django.urls import reverse
class AbstractValidation(models.Model):
    """Simple Abstract Validation model

    Adds a validate/reject/in-review workflow to a record, plus email
    notifications to the record's owner when the status changes.
    """
    # Subjects and templates used by the notification helpers below.
    EMAIL_REJECTION_SUBJECT = 'Your data has been rejected'
    EMAIL_APPROVED_SUBJECT = 'Your data has been approved'
    EMAIL_REJECTION_TEMPLATE = 'email/data_rejection'
    EMAIL_APPROVED_TEMPLATE = 'email/data_validated'

    # Keys of the context dict passed to the email templates.
    EMAIL_DATA_SITE_NAME = 'site_name'
    EMAIL_DATA_COLLECTION_NAME = 'data_name'
    EMAIL_DATA_REASONS = 'reason'
    EMAIL_DATA_URL = 'data_url'

    # Related users. All optional; kept as NULL when the user is deleted.
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        models.SET_NULL,
        blank=True,
        null=True,
    )
    collector_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        help_text='The user object of the actual capturer/collector '
                  'of this data',
        null=True,
        blank=True,
        related_name='%(class)s_collector_user',
        on_delete=models.SET_NULL
    )
    analyst = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        help_text='The person that did the analysis',
        null=True,
        blank=True,
        related_name='%(class)s_analyst',
        on_delete=models.SET_NULL
    )

    # Workflow flags. The validate()/reject()/METHOD_NAME() methods keep
    # them mutually consistent.
    validated = models.BooleanField(
        default=False
    )
    rejected = models.BooleanField(
        default=False
    )
    ready_for_validation = models.BooleanField(
        default=False
    )
    # Free-text reason recorded on rejection, rendered into the email.
    validation_message = models.TextField(
        null=True,
        blank=True
    )

    class Meta:
        abstract = True

    def _is_rejected(self):
        return not self.validated and self.rejected
    is_rejected = property(_is_rejected)

    def _is_validated(self):
        return self.validated and not self.rejected
    is_validated = property(_is_validated)

    @property
    def data_name(self):
        # Concrete models must supply a human-readable name for the record.
        raise NotImplementedError

    @property
    def validation_status(self):
        """HTML badge for the current workflow state.

        NOTE(review): a rejected record renders as "Unvalidated" here —
        confirm that is intended.
        """
        if self.ready_for_validation:
            return '<span class="badge badge-warning">In Review</span>'
        else:
            if self.validated:
                return (
                    '<span class="badge badge-success">Validated</span>'
                )
            else:
                return (
                    '<span class="badge badge-secondary">Unvalidated</span>'
                )
def validate(self, show_redirect_url=True):
self.validated = True
self.rejected = False
self.ready_for_validation = False
self.save()
self.send_notification_email(
subject=self.EMAIL_APPROVED_SUBJECT,
email_template=self.EMAIL_APPROVED_TEMPLATE,
show_redirect_url=show_redirect_url
)
    def reject(self, rejection_message, show_redirect_url=True, **kwargs):
        """Mark the record as rejected, persist it and email the owner, if any.

        When there is no owner, only the flags are saved and no mail is sent.
        NOTE(review): due to the if/elif, when an owner exists but
        *rejection_message* is falsy, validation_message is left untouched
        (flags are still saved below) — confirm this is intended.
        """
        self.validated = False
        self.rejected = True
        self.ready_for_validation = False
        if self.owner is None:
            self.save()
            return
        elif rejection_message:
            self.validation_message = rejection_message
        self.save()
        self.send_notification_email(
            subject=self.EMAIL_REJECTION_SUBJECT,
            email_template=self.EMAIL_REJECTION_TEMPLATE,
            show_redirect_url=show_redirect_url
        )
def METHOD_NAME(self):
self.validated = False
self.rejected = False
self.ready_for_validation = True
self.save()
def send_notification_email(self,
subject='',
email_template='',
show_redirect_url=True):
site_domain_name = Site.objects.get_current().domain
subject_email = '[%s]%s' % (
site_domain_name,
subject)
if show_redirect_url:
data_update_url = '%s%s' % (
site_domain_name,
reverse('site-visit-detail', args=(self.pk, ))
)
else:
data_update_url = ''
msg_data = {
self.EMAIL_DATA_SITE_NAME: site_domain_name,
self.EMAIL_DATA_REASONS: self.validation_message,
self.EMAIL_DATA_URL: data_update_url,
self.EMAIL_DATA_COLLECTION_NAME: self.data_name
}
msg_plain = render_to_string(
email_template + '.txt',
msg_data
)
send_mail(
subject=subject_email,
message=msg_plain,
from_email=settings.SERVER_EMAIL,
recipient_list=[self.owner.email]
) |
import asyncio
import logging
import random
import threading
import time
import uuid
from typing import Awaitable, Callable, Dict, List, Optional
from antarest.core.interfaces.eventbus import Event, EventType, IEventBus
from antarest.eventbus.business.interfaces import IEventBusBackend
logger = logging.getLogger(__name__)
class EventBusService(IEventBus):
    """Event bus that broadcasts events to listeners and hands queued events
    to a single consumer per queue, on top of a pluggable backend."""

    def __init__(self, backend: IEventBusBackend, autostart: bool = True) -> None:
        self.backend = backend
        # One registry per event type: listener id -> async callback.
        self.listeners: Dict[EventType, Dict[str, Callable[[Event], Awaitable[None]]]] = {
            ev_type: {} for ev_type in EventType
        }
        # queue name -> {consumer id -> async callback}
        self.consumers: Dict[str, Dict[str, Callable[[Event], Awaitable[None]]]] = {}
        # Guards listeners/consumers against concurrent (de)registration.
        self.lock = threading.Lock()
        if autostart:
            self.start()

    def push(self, event: Event) -> None:
        # Broadcast: every matching listener will receive the event.
        self.backend.push_event(event)

    def queue(self, event: Event, queue: str) -> None:
        # Work-sharing: one randomly chosen consumer of *queue* receives it.
        self.backend.queue_event(event, queue)
def add_queue_consumer(self, listener: Callable[[Event], Awaitable[None]], queue: str) -> str:
with self.lock:
listener_id = str(uuid.uuid4())
if queue not in self.consumers:
self.consumers[queue] = {}
self.consumers[queue][listener_id] = listener
return listener_id
def remove_queue_consumer(self, listener_id: str) -> None:
with self.lock:
for queue in self.consumers:
if listener_id in self.consumers[queue]:
del self.consumers[queue][listener_id]
def add_listener(
self,
listener: Callable[[Event], Awaitable[None]],
type_filter: Optional[List[EventType]] = None,
) -> str:
with self.lock:
listener_id = str(uuid.uuid4())
types = type_filter or [EventType.ANY]
for listener_type in types:
self.listeners[listener_type][listener_id] = listener
return listener_id
def METHOD_NAME(self, listener_id: str) -> None:
with self.lock:
for listener_type in self.listeners:
if listener_id in self.listeners[listener_type]:
del self.listeners[listener_type][listener_id]
async def _run_loop(self) -> None:
while True:
time.sleep(0.2)
try:
await self._on_events()
except Exception as e:
logger.error("Unexpected error when processing events", exc_info=e)
async def _on_events(self) -> None:
with self.lock:
for queue in self.consumers:
if len(self.consumers[queue]) > 0:
event = self.backend.pull_queue(queue)
while event is not None:
try:
await list(self.consumers[queue].values())[
random.randint(0, len(self.consumers[queue]) - 1)
](event)
except Exception as ex:
logger.error(
f"Failed to process queue event {event.type}",
exc_info=ex,
)
event = self.backend.pull_queue(queue)
for e in self.backend.get_events():
if e.type in self.listeners:
responses = await asyncio.gather(
*[
listener(e)
for listener in list(self.listeners[e.type].values())
+ list(self.listeners[EventType.ANY].values())
]
)
for res in responses:
if isinstance(res, Exception):
logger.error(
f"Failed to process event {e.type}",
exc_info=res,
)
self.backend.clear_events()
def _async_loop(self, new_loop: bool = True) -> None:
loop = asyncio.new_event_loop() if new_loop else asyncio.get_event_loop()
loop.run_until_complete(self._run_loop())
def start(self, threaded: bool = True) -> None:
if threaded:
t = threading.Thread(
target=self._async_loop,
name=self.__class__.__name__,
daemon=True,
)
logger.info("Starting event bus")
t.start()
else:
self._async_loop(new_loop=False) |
# coding: utf-8
"""
LanguagesApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, unicode_literals
from six import iteritems
from ..api_client import ApiClient
from ..configuration import Configuration
class LanguagesApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Use the supplied client, otherwise fall back to (and lazily create)
        # the shared client held by Configuration.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
def METHOD_NAME(self, **kwargs):
"""
All available languages.
These language abbreviations can be used in the `Accept-Language` header
for routes that return translation records.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.languages_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: LanguageData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['callback']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method languages_get" % key
)
params[key] = val
del params['kwargs']
resource_path = '/languages'
method = 'GET'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['jwtToken']
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='LanguageData',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def languages_id_get(self, id, **kwargs):
"""
Information about a particular language, given the language ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.languages_id_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: ID of the language (required)
:return: Language
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'callback']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method languages_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `languages_id_get`")
resource_path = '/languages/{id}'
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client. \
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client. \
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['jwtToken']
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='Language',
auth_settings=auth_settings,
callback=params.get('callback'))
return response |
# Created By: Virgil Dupras
# Created On: 2010-06-23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# Doing i18n with GNU gettext for the core text gets complicated, so what I do is that I make the
# GUI layer responsible for supplying a tr() function.
import locale
import logging
import os
import os.path as op
from typing import Callable, Union
from hscommon.plat import ISLINUX
_trfunc = None
_trget = None
installed_lang = None
def tr(s: str, context: Union[str, None] = None) -> str:
    """Translate *s* through the installed tr function; identity when none is set."""
    if _trfunc is None:
        return s
    return _trfunc(s, context) if context else _trfunc(s)
def trget(domain: str) -> Callable[[str], str]:
    """Return a tr() specialized for *domain* (used as translation context)."""
    if _trget is not None:
        return _trget(domain)
    return lambda s: tr(s, domain)
def METHOD_NAME(
    new_tr: Callable[[str, Union[str, None]], str],
    new_trget: Union[Callable[[str], Callable[[str], str]], None] = None,
) -> None:
    """Install the GUI-supplied translation function(s) used by tr()/trget()."""
    global _trfunc, _trget
    _trfunc = new_tr
    if new_trget is None:
        return
    _trget = new_trget
def get_locale_name(lang: str) -> Union[str, None]:
    """Map a UI language code to a system locale name, or None if unsupported."""
    # Windows appears to accept these names directly, so the old per-platform
    # conversion code is gone.
    lang_to_locale = {
        "cs": "cs_CZ", "de": "de_DE", "el": "el_GR", "en": "en",
        "es": "es_ES", "fr": "fr_FR", "hy": "hy_AM", "it": "it_IT",
        "ja": "ja_JP", "ko": "ko_KR", "ms": "ms_MY", "nl": "nl_NL",
        "pl_PL": "pl_PL", "pt_BR": "pt_BR", "ru": "ru_RU", "tr": "tr_TR",
        "uk": "uk_UA", "vi": "vi_VN", "zh_CN": "zh_CN",
    }
    locale_name = lang_to_locale.get(lang)
    if locale_name is None:
        return None
    # Linux needs the encoding suffix to pick the UTF-8 variant.
    return locale_name + ".UTF-8" if ISLINUX else locale_name
# --- Qt
def install_qt_trans(lang: str = None) -> None:
    """Install Qt-based translations for *lang*, defaulting to the system
    language and falling back to English when unsupported."""
    from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale

    if not lang:
        # Two-letter code of the system language.
        lang = str(QLocale.system().name())[:2]
    localename = get_locale_name(lang)
    if localename is not None:
        try:
            locale.setlocale(locale.LC_ALL, localename)
        except locale.Error:
            logging.warning("Couldn't set locale %s", localename)
    else:
        lang = "en"
    # Load Qt's own catalog and the application catalog from Qt resources.
    qtr1 = QTranslator(QCoreApplication.instance())
    qtr1.load(":/qt_%s" % lang)
    QCoreApplication.installTranslator(qtr1)
    qtr2 = QTranslator(QCoreApplication.instance())
    qtr2.load(":/%s" % lang)
    QCoreApplication.installTranslator(qtr2)

    def qt_tr(s: str, context: Union[str, None] = "core") -> str:
        # Route translations through Qt; "core" is the default context.
        if context is None:
            context = "core"
        return str(QCoreApplication.translate(context, s, None))

    METHOD_NAME(qt_tr)
# --- gettext
def install_gettext_trans(base_folder: os.PathLike, lang: str) -> None:
    """Install gettext-based translation functions that load catalogs for
    *lang* from *base_folder*."""
    import gettext

    def gettext_trget(domain: str) -> Callable[[str], str]:
        # A missing language or catalog degrades to identity (untranslated).
        if not lang:
            return lambda s: s
        try:
            return gettext.translation(domain, localedir=base_folder, languages=[lang]).gettext
        except OSError:
            return lambda s: s

    default_gettext = gettext_trget("core")

    def gettext_tr(s: str, context: Union[str, None] = None) -> str:
        # No context means the "core" domain loaded above.
        if not context:
            return default_gettext(s)
        else:
            trfunc = gettext_trget(context)
            return trfunc(s)

    METHOD_NAME(gettext_tr, gettext_trget)
    global installed_lang
    installed_lang = lang
def install_gettext_trans_under_qt(base_folder: os.PathLike, lang: str = None) -> None:
    """Install gettext translations and additionally load Qt's qt_*.qm catalog
    so strings coming from Qt itself are also in the right language."""
    from PyQt5.QtCore import QCoreApplication, QTranslator, QLocale, QLibraryInfo

    if not lang:
        lang = str(QLocale.system().name())[:2]
    localename = get_locale_name(lang)
    if localename is None:
        lang = "en"
        localename = get_locale_name(lang)
    try:
        locale.setlocale(locale.LC_ALL, localename)
    except locale.Error:
        logging.warning("Couldn't set locale %s", localename)
    qmname = "qt_%s" % lang
    # Linux systems ship Qt's .qm catalogs with the system Qt install;
    # elsewhere they are bundled next to our own translation files.
    if ISLINUX:
        qm_dir = QLibraryInfo.location(QLibraryInfo.TranslationsPath)
    else:
        qm_dir = base_folder
    translator = QTranslator(QCoreApplication.instance())
    translator.load(op.join(qm_dir, qmname))
    QCoreApplication.installTranslator(translator)
    install_gettext_trans(base_folder, lang)
from collections import defaultdict
import numpy as np
from faker import Faker
class DataAnonymizer(object):
    """Replaces identifying vessel attributes with consistent fake values.

    Each anonymize_* method memoizes its mapping in a defaultdict, so the
    same real value always maps to the same fake value within one instance.
    Falsy inputs (None, "") pass through unchanged.
    """
    # Shared Faker instance (name-mangled to _DataAnonymizer__faker).
    # NOTE(review): Faker("FR") — presumably meant as the French locale
    # ("fr_FR"); confirm this is the argument Faker expects.
    __faker = Faker("FR")

    def __init__(self, data_type: str = "cfr"):
        # NOTE(review): data_type is currently unused — confirm whether it
        # can be dropped or is kept for interface compatibility.
        # Each defaultdict lazily creates a fake value the first time a real
        # value is looked up, then reuses it.
        self.__cfr_dict = defaultdict(self.make_fake_cfr)
        self.__external_immatriculation_dict = defaultdict(
            self.make_fake_external_immatriculation
        )
        self.__mmsi_dict = defaultdict(self.make_fake_mmsi)
        self.__ircs_dict = defaultdict(self.make_fake_ircs)
        self.__vessel_name_dict = defaultdict(self.make_fake_vessel_name)
        self.__registry_port_dict = defaultdict(self.make_fake_port)
        self.__district_dict = defaultdict(self.make_fake_port)
        self.__district_code_dict = defaultdict(self.make_fake_district_code)

    # CFR
    def anonymize_cfr(self, value):
        return self.__cfr_dict[value] if value else value

    def anonymize_cfr_arr(self, values):
        return np.array([self.anonymize_cfr(value) for value in values])

    # External immatriculation
    def METHOD_NAME(self, value):
        return self.__external_immatriculation_dict[value] if value else value

    def anonymize_external_immatriculation_arr(self, values):
        return np.array(
            [self.METHOD_NAME(value) for value in values]
        )

    # MMSI
    def anonymize_mmsi(self, value):
        return self.__mmsi_dict[value] if value else value

    def anonymize_mmsi_arr(self, values):
        return np.array([self.anonymize_mmsi(value) for value in values])

    # IRCS
    def anonymize_ircs(self, value):
        return self.__ircs_dict[value] if value else value

    def anonymize_ircs_arr(self, values):
        return np.array([self.anonymize_ircs(value) for value in values])

    # Vessel name
    def anonymize_vessel_name(self, value):
        return self.__vessel_name_dict[value] if value else value

    def anonymize_vessel_name_arr(self, values):
        return np.array([self.anonymize_vessel_name(value) for value in values])

    # Registry port
    def anonymize_registry_port(self, value):
        return self.__registry_port_dict[value] if value else value

    def anonymize_registry_port_arr(self, values):
        return np.array([self.anonymize_registry_port(value) for value in values])

    # District
    def anonymize_district(self, value):
        return self.__district_dict[value] if value else value

    def anonymize_district_arr(self, values):
        return np.array([self.anonymize_district(value) for value in values])

    # District code
    def anonymize_district_code(self, value):
        return self.__district_code_dict[value] if value else value

    def anonymize_district_code_arr(self, values):
        return np.array([self.anonymize_district_code(value) for value in values])

    @classmethod
    def make_random_upper_case_letters(cls, n: int) -> str:
        return "".join([cls.__faker.random_uppercase_letter() for x in range(n)])

    @classmethod
    def make_fake_cfr(cls):
        # "ABC" followed by a number zero-padded to 9 digits.
        fake_cfr = "ABC" + str(np.random.randint(0, 999999)).zfill(9)
        return fake_cfr

    @classmethod
    def make_fake_external_immatriculation(cls):
        # Two letters followed by 6 zero-padded digits.
        fake_external_immatriculation = cls.make_random_upper_case_letters(2) + str(
            np.random.randint(0, 999999)
        ).zfill(6)
        return fake_external_immatriculation

    @classmethod
    def make_fake_mmsi(cls):
        fake_mmsi = str(np.random.randint(0, 999999999)).zfill(9)
        return fake_mmsi

    @classmethod
    def make_fake_ircs(cls):
        # 2-4 letters; a 4-letter call sign carries no digits.
        n_letters = cls.__faker.random_element(elements=(2, 3, 4))
        letters = cls.make_random_upper_case_letters(n_letters)
        digits = "" if n_letters == 4 else str(np.random.randint(0, 9999)).zfill(4)
        fake_ircs = letters + digits
        return fake_ircs

    @classmethod
    def make_fake_vessel_name(cls):
        return " ".join(cls.__faker.words(3)).upper()

    @classmethod
    def make_fake_port(cls):
        return cls.__faker.city()
@classmethod
def make_fake_district_code(cls):
return cls.make_random_upper_case_letters(2) |
# pylint:disable=protected-access
# pylint:disable=redefined-outer-name
# pylint:disable=unused-argument
from collections.abc import AsyncIterator
from unittest.mock import AsyncMock
import pytest
from fastapi import FastAPI
from pytest_mock.plugin import MockerFixture
from pytest_simcore.helpers.utils_envs import EnvVarsDict, setenvs_from_dict
from simcore_service_director_v2.core.settings import AppSettings
from simcore_service_director_v2.models.dynamic_services_scheduler import SchedulerData
from simcore_service_director_v2.modules.dynamic_sidecar.api_client import (
setup,
shutdown,
)
from simcore_service_director_v2.modules.dynamic_sidecar.scheduler import (
DynamicSidecarsScheduler,
setup_scheduler,
shutdown_scheduler,
)
from simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer import (
_apply_observation_cycle,
)
@pytest.fixture
def disable_observation(mocker: MockerFixture) -> None:  # noqa: PT004
    # Prevent the scheduler's background observation loop from starting.
    mocker.patch(
        "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._task.DynamicSidecarsScheduler.start",
        autospec=True,
    )


@pytest.fixture
def mock_are_sidecar_and_proxy_services_present(  # noqa: PT004
    mocker: MockerFixture,
) -> None:
    # Pretend no sidecar/proxy services exist so the removal path is exercised.
    mocker.patch(
        "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._observer.are_sidecar_and_proxy_services_present",
        autospec=True,
        return_value=False,
    )


@pytest.fixture
def mock_events(mocker: MockerFixture) -> None:  # noqa: PT004
    # Neutralize every scheduler event action so observation cycles are fast
    # and side-effect free.
    for event_to_mock in (
        "CreateSidecars",
        "WaitForSidecarAPI",
        "UpdateHealth",
        "GetStatus",
        "PrepareServicesEnvironment",
        "CreateUserServices",
        "AttachProjectsNetworks",
        "RemoveUserCreatedServices",
    ):
        mocker.patch(
            f"simcore_service_director_v2.modules.dynamic_sidecar.scheduler._core._events.{event_to_mock}.action",
            autospec=True,
            return_value=True,
        )


@pytest.fixture
def mock_env(  # noqa: PT004
    disable_postgres: None,
    docker_swarm: None,
    mock_env: EnvVarsDict,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    # Minimal environment needed for AppSettings.create_from_envs() below.
    setenvs_from_dict(
        monkeypatch,
        {
            "SIMCORE_SERVICES_NETWORK_NAME": "test_network",
            "DIRECTOR_HOST": "mocked_out",
            "DIRECTOR_V2_DYNAMIC_SCHEDULER_ENABLED": "true",
            "S3_ENDPOINT": "endpoint",
            "S3_ACCESS_KEY": "access_key",
            "S3_SECRET_KEY": "secret_key",
            "S3_BUCKET_NAME": "bucket_name",
            "S3_SECURE": "false",
        },
    )


@pytest.fixture
def mocked_app(mock_env: None) -> FastAPI:
    # Bare FastAPI app with real settings and a stubbed rabbitmq client.
    app = FastAPI()
    app.state.settings = AppSettings.create_from_envs()
    app.state.rabbitmq_client = AsyncMock()
    return app


@pytest.fixture
async def dynamic_sidecar_scheduler(
    mocked_app: FastAPI,
) -> AsyncIterator[DynamicSidecarsScheduler]:
    # Set up scheduler + api client, yield the scheduler, then tear both down.
    await setup_scheduler(mocked_app)
    await setup(mocked_app)
    yield mocked_app.state.dynamic_sidecar_scheduler
    await shutdown_scheduler(mocked_app)
    await shutdown(mocked_app)
def METHOD_NAME(
    dynamic_sidecar_scheduler,
    scheduler_data_from_http_request,
) -> bool:
    """True when the scheduler currently tracks an observation task for the service."""
    tracked = dynamic_sidecar_scheduler._scheduler._service_observation_task  # noqa: SLF001
    return scheduler_data_from_http_request.service_name in tracked
@pytest.mark.parametrize("can_save", [False, True])
async def test_regression_break_endless_loop_cancellation_edge_case(
    disable_observation: None,
    mock_are_sidecar_and_proxy_services_present: None,
    mock_events: None,
    dynamic_sidecar_scheduler: DynamicSidecarsScheduler,
    scheduler_data_from_http_request: SchedulerData,
    can_save: bool | None,
):
    """Regression: in this situation the scheduler would loop forever and
    never finish removing the service."""
    scheduler = dynamic_sidecar_scheduler
    service_data = scheduler_data_from_http_request

    await scheduler._scheduler._add_service(service_data)  # noqa: SLF001
    # Simulate the edge case: containers already reported as created.
    service_data.dynamic_sidecar.were_containers_created = True
    assert METHOD_NAME(scheduler, service_data) is False

    # NOTE: this also creates the observation task! It simulates a user
    # action like going back to the dashboard.
    await scheduler.mark_service_for_removal(
        service_data.node_uuid, can_save=can_save
    )
    assert METHOD_NAME(scheduler, service_data) is True

    # Requires an extra pass (or two) of the observation cycle to actually
    # remove the service.
    for _ in range(3):
        await _apply_observation_cycle(scheduler, service_data)
    assert METHOD_NAME(scheduler, service_data) is False
import asyncio
from collections.abc import Sequence
from typing import Any
from jsonpointer import set_pointer
import reactpy
from reactpy.core.layout import Layout
from reactpy.core.serve import serve_layout
from reactpy.core.types import LayoutUpdateMessage
from reactpy.testing import StaticEventHandler
from tests.tooling.common import event_message
EVENT_NAME = "on_event"
STATIC_EVENT_HANDLER = StaticEventHandler()
def make_send_recv_callbacks(events_to_inject):
    """Build (changes, send, recv) callbacks that alternate via a semaphore.

    send() records each outgoing patch; recv() hands back one injected event
    per patch sent. The semaphore makes send() and recv() trade off control,
    simulating an event received after each update — otherwise we could not
    tell whether the final updates were actually sent rather than merely
    pending, since the two callbacks run in separate loops. When the injected
    events run out, send() raises Stop to halt the server loop.
    """
    turnstile = asyncio.Semaphore(0)
    changes = []

    async def send(patch):
        changes.append(patch)
        turnstile.release()
        if not events_to_inject:
            raise reactpy.Stop()

    async def recv():
        await turnstile.acquire()
        try:
            return events_to_inject.pop(0)
        except IndexError:
            # No events left: block forever.
            await asyncio.Event().wait()

    return changes, send, recv
def make_events_and_expected_model():
    """Return four injected click events and the layout model they produce."""
    click = event_message(STATIC_EVENT_HANDLER.target)
    events = [click] * 4  # the same event object is dispatched four times

    handler_spec = {
        "target": STATIC_EVENT_HANDLER.target,
        "preventDefault": False,
        "stopPropagation": False,
    }
    expected_model = {
        "tagName": "",
        "children": [
            {
                "tagName": "div",
                # four events, each incrementing by one -> final count of 4
                "attributes": {"count": 4},
                "eventHandlers": {EVENT_NAME: handler_spec},
            }
        ],
    }
    return events, expected_model
def METHOD_NAME(
    changes: Sequence[LayoutUpdateMessage],
    expected_model: Any,
) -> None:
    """Replay *changes* onto an empty model and assert the final result."""
    replayed: dict = {}
    for update in changes:
        path = update["path"]
        if path:
            # Non-root updates apply at their JSON-pointer location.
            replayed = set_pointer(replayed, path, update["model"])
        else:
            # Root updates merge directly into the top-level model.
            replayed.update(update["model"])
    assert replayed == expected_model
@reactpy.component
def Counter():
    """A div whose ``count`` attribute increments once per handled event."""
    count, change_count = reactpy.hooks.use_reducer(
        lambda current, step: current + step,
        initial_value=0,
    )

    def increment():
        change_count(1)

    handler = STATIC_EVENT_HANDLER.use(increment)
    return reactpy.html.div({EVENT_NAME: handler, "count": count})
async def test_dispatch():
    """Serving the Counter layout over the callbacks yields the expected model."""
    events, expected_model = make_events_and_expected_model()
    changes, send, recv = make_send_recv_callbacks(events)
    layout = Layout(Counter())
    # The serve loop ends via reactpy.Stop once all events are consumed;
    # one second is a generous upper bound.
    await asyncio.wait_for(serve_layout(layout, send, recv), timeout=1)
    METHOD_NAME(changes, expected_model)
async def test_dispatcher_handles_more_than_one_event_at_a_time():
    """One blocked event handler must not prevent other events from running."""
    block_and_never_set = asyncio.Event()
    will_block = asyncio.Event()
    second_event_did_execute = asyncio.Event()

    blocked_handler = StaticEventHandler()
    non_blocked_handler = StaticEventHandler()

    @reactpy.component
    def ComponentWithTwoEventHandlers():
        @blocked_handler.use
        async def block_forever():
            # Signal that this handler started, then wait on an event
            # that is deliberately never set.
            will_block.set()
            await block_and_never_set.wait()

        @non_blocked_handler.use
        async def handle_event():
            second_event_did_execute.set()

        return reactpy.html.div(
            reactpy.html.button({"on_click": block_forever}),
            reactpy.html.button({"on_click": handle_event}),
        )

    send_queue = asyncio.Queue()
    recv_queue = asyncio.Queue()

    # Serve the layout in the background, fed by the queues above.
    task = asyncio.create_task(
        serve_layout(
            reactpy.Layout(ComponentWithTwoEventHandlers()),
            send_queue.put,
            recv_queue.get,
        )
    )

    # The first event blocks forever inside its handler ...
    await recv_queue.put(event_message(blocked_handler.target))
    await will_block.wait()

    # ... yet the second event still executes, proving the dispatcher
    # processes events concurrently rather than serially.
    await recv_queue.put(event_message(non_blocked_handler.target))
    await second_event_did_execute.wait()

    task.cancel()
6,864 | test delete job |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from turbogears.database import session
from bkr.inttest import data_setup, with_transaction
from bkr.inttest.client import run_client, create_client_config, ClientError, \
ClientTestCase
class JobDeleteTest(ClientTestCase):
    """Tests for the ``bkr job-delete`` client command.

    Uses ``assertTrue``/``assertIn``/``assertRaises`` instead of the
    ``assert_`` alias, which has been deprecated since Python 2.7 and was
    removed in Python 3.12.
    """

    @with_transaction
    def setUp(self):
        # One completed job owned by a fresh user, plus a client config
        # authenticated as that user.
        self.user = data_setup.create_user(password=u'asdf')
        self.job = data_setup.create_completed_job(owner=self.user)
        self.client_config = create_client_config(username=self.user.user_name,
                password='asdf')

    def test_delete_group_job(self):
        # Any member of the job's group may delete a declared group job,
        # even when another member owns it.
        with session.begin():
            group = data_setup.create_group()
            user = data_setup.create_user(password='password')
            user2 = data_setup.create_user()
            group.add_member(user)
            group.add_member(user2)
            self.job.group = group
            self.job.owner = user2
        client_config = create_client_config(username=user.user_name,
                password='password')
        out = run_client(['bkr', 'job-delete', self.job.t_id],
                config=client_config)
        self.assertTrue(out.startswith('Jobs deleted:'), out)
        self.assertIn(self.job.t_id, out)

    def METHOD_NAME(self):
        # Owners can delete their own jobs.
        out = run_client(['bkr', 'job-delete', self.job.t_id],
                config=self.client_config)
        self.assertTrue(out.startswith('Jobs deleted:'), out)
        self.assertIn(self.job.t_id, out)

    def test_delete_others_job(self):
        # A regular user may not delete a job they do not own.
        with session.begin():
            other_user = data_setup.create_user(password=u'asdf')
            other_job = data_setup.create_completed_job(owner=other_user)
        with self.assertRaises(ClientError) as assertion:
            run_client(['bkr', 'job-delete', other_job.t_id],
                    config=self.client_config)
        self.assertIn("don't have permission", assertion.exception.stderr_output)

    def test_cant_delete_group_mates_job(self):
        # The test_delete_group_job case above is similar, but here the job is
        # *not* declared as a group job, therefore we don't have permission to
        # delete it.
        with session.begin():
            group = data_setup.create_group()
            mate = data_setup.create_user(password=u'asdf')
            test_job = data_setup.create_completed_job(owner=mate)
            group.add_member(self.user)
            group.add_member(mate)
        with self.assertRaises(ClientError) as assertion:
            run_client(['bkr', 'job-delete', test_job.t_id],
                    config=self.client_config)
        self.assertIn("You don't have permission to delete job %s" %
                test_job.t_id, assertion.exception.stderr_output)

    def test_delete_job_with_admin(self):
        with session.begin():
            other_user = data_setup.create_user(password=u'asdf')
            tag = data_setup.create_retention_tag(name=u'myblahtag')
            job1 = data_setup.create_completed_job(owner=other_user)
            job2 = data_setup.create_completed_job(owner=other_user,
                    retention_tag=tag.tag)
        # As the default admin user:
        # Admin can delete other's job with job ID
        out = run_client(['bkr', 'job-delete', job1.t_id])
        self.assertTrue(out.startswith('Jobs deleted:'), out)
        self.assertIn(job1.t_id, out)
        # Admin can not delete other's job with tags
        out = run_client(['bkr', 'job-delete', '-t%s' % tag.tag])
        self.assertTrue(out.startswith('Jobs deleted:'), out)
        self.assertNotIn(job2.t_id, out)

    # https://bugzilla.redhat.com/show_bug.cgi?id=595512
    def test_invalid_taskspec(self):
        with self.assertRaises(ClientError) as assertion:
            run_client(['bkr', 'job-delete', '12345'])
        self.assertIn('Invalid taskspec', assertion.exception.stderr_output)

    # https://bugzilla.redhat.com/show_bug.cgi?id=990943
    def test_zero_value_completeDays(self):
        with self.assertRaises(ClientError) as assertion:
            run_client(['bkr', 'job-delete', '--completeDays', '0'])
        self.assertIn('Please pass a positive integer to completeDays',
                assertion.exception.stderr_output)

    # https://bugzilla.redhat.com/show_bug.cgi?id=990943
    def test_negative_value_completeDays(self):
        with self.assertRaises(ClientError) as assertion:
            run_client(['bkr', 'job-delete', '--completeDays', '-1'])
        self.assertIn('Please pass a positive integer to completeDays',
                assertion.exception.stderr_output)
6,865 | get literal string | import collections
from _typeshed import Incomplete
from types import TracebackType
from typing import Any
from typing_extensions import Literal
# Text codec helpers for PDF string objects (type stubs; the
# implementation lives in PIL.PdfParser).
def encode_text(s: str) -> bytes: ...

# Code-point -> character table for PDFDocEncoding.
PDFDocEncoding: dict[int, str]

def decode_text(b: bytes) -> str: ...

# Error raised for malformed PDF input.
class PdfFormatError(RuntimeError): ...

# NOTE(review): presumably raises PdfFormatError(error_message) when
# ``condition`` is falsy -- stub only; confirm against the implementation.
def check_format_condition(condition, error_message) -> None: ...
# Reference to an indirect PDF object; hashable and comparable so it can
# serve as a dictionary key (e.g. in the xref table).
class IndirectReference:
    def __bytes__(self) -> bytes: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __hash__(self) -> int: ...

# The definition form of an indirect reference (same data, different
# byte serialization).
class IndirectObjectDef(IndirectReference): ...
# Cross-reference table: tracks entries read from the file, new entries
# to be written, and deletions, with mapping-style access by object id.
class XrefTable:
    existing_entries: Incomplete
    new_entries: Incomplete
    deleted_entries: Incomplete
    reading_finished: bool
    def __init__(self) -> None: ...
    def __setitem__(self, key, value) -> None: ...
    def __getitem__(self, key): ...
    def __delitem__(self, key) -> None: ...
    def __contains__(self, key): ...
    def __len__(self) -> int: ...
    def keys(self): ...
    # Serialize the table to file object ``f``.
    def write(self, f): ...
# A PDF name object; hashable and comparable by its underlying name.
class PdfName:
    name: Incomplete
    def __init__(self, name) -> None: ...
    def name_as_str(self): ...
    def __eq__(self, other): ...
    def __hash__(self) -> int: ...
    # Parse a name token out of raw PDF stream data.
    @classmethod
    def from_pdf_stream(cls, data): ...
    # Characters that may appear unescaped in the serialized form.
    allowed_chars: Incomplete
    def __bytes__(self) -> bytes: ...
# PDF array: a plain list that knows its PDF byte serialization.
class PdfArray(list[Any]):
    def __bytes__(self) -> bytes: ...

# PDF dictionary keyed by bytes; values are also reachable as attributes
# via __getattr__/__setattr__.
class PdfDict(collections.UserDict[bytes, Any]):
    def __setattr__(self, key: str, value) -> None: ...
    def __getattr__(self, key: str): ...
    def __bytes__(self) -> bytes: ...

# Wrapper marking raw binary data for PDF serialization.
class PdfBinary:
    data: Incomplete
    def __init__(self, data) -> None: ...
    def __bytes__(self) -> bytes: ...

# A PDF stream object: its dictionary plus the (possibly encoded) buffer,
# with decode() to recover the payload.
class PdfStream:
    dictionary: Incomplete
    buf: Incomplete
    def __init__(self, dictionary, buf) -> None: ...
    def decode(self): ...

# Serialize an arbitrary value to its PDF byte representation.
def pdf_repr(x: Incomplete) -> bytes: ...
# Reader/writer for a PDF file's object structure (type stubs; the
# implementation lives in PIL.PdfParser).
class PdfParser:
    # --- open file / buffer state ---
    filename: Incomplete
    buf: Incomplete
    f: Incomplete
    start_offset: Incomplete
    should_close_buf: bool
    should_close_file: bool
    cached_objects: Incomplete
    file_size_total: int
    # --- document structure discovered while reading ---
    root: Incomplete
    root_ref: Incomplete
    info: Incomplete
    info_ref: Incomplete
    page_tree_root: Incomplete
    pages: Incomplete
    orig_pages: Incomplete
    pages_ref: Incomplete
    last_xref_section_offset: Incomplete
    trailer_dict: Incomplete
    xref_table: Incomplete
    def __init__(
        self,
        filename: Incomplete | None = None,
        f: Incomplete | None = None,
        buf: Incomplete | None = None,
        start_offset: int = 0,
        mode: str = "rb",
    ) -> None: ...
    # Context-manager protocol; __exit__ returns False, so exceptions are
    # never suppressed.
    def __enter__(self): ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
    ) -> Literal[False]: ...
    # --- writing ---
    def start_writing(self) -> None: ...
    def close_buf(self) -> None: ...
    def close(self) -> None: ...
    def seek_end(self) -> None: ...
    def write_header(self) -> None: ...
    def write_comment(self, s) -> None: ...
    def write_catalog(self): ...
    def rewrite_pages(self) -> None: ...
    def write_xref_and_trailer(self, new_root_ref: Incomplete | None = None) -> None: ...
    def write_page(self, ref, *objs, **dict_obj): ...
    def write_obj(self, ref, *objs, **dict_obj): ...
    def del_root(self) -> None: ...
    @staticmethod
    def get_buf_from_file(f): ...
    file_size_this: Incomplete
    # --- reading ---
    def read_pdf_info(self) -> None: ...
    def next_object_id(self, offset: Incomplete | None = None): ...
    # Character-class byte sets and compiled patterns used when tokenizing
    # the PDF object grammar.
    delimiter: bytes
    delimiter_or_ws: bytes
    whitespace: bytes
    whitespace_or_hex: bytes
    whitespace_optional: Incomplete
    whitespace_mandatory: Incomplete
    whitespace_optional_no_nl: bytes
    newline_only: bytes
    newline: Incomplete
    re_trailer_end: Incomplete
    re_trailer_prev: Incomplete
    def read_trailer(self) -> None: ...
    def read_prev_trailer(self, xref_section_offset) -> None: ...
    re_whitespace_optional: Incomplete
    re_name: Incomplete
    re_dict_start: Incomplete
    re_dict_end: Incomplete
    @classmethod
    def interpret_trailer(cls, trailer_data): ...
    re_hashes_in_name: Incomplete
    @classmethod
    def interpret_name(cls, raw, as_text: bool = False): ...
    re_null: Incomplete
    re_true: Incomplete
    re_false: Incomplete
    re_int: Incomplete
    re_real: Incomplete
    re_array_start: Incomplete
    re_array_end: Incomplete
    re_string_hex: Incomplete
    re_string_lit: Incomplete
    re_indirect_reference: Incomplete
    re_indirect_def_start: Incomplete
    re_indirect_def_end: Incomplete
    re_comment: Incomplete
    re_stream_start: Incomplete
    re_stream_end: Incomplete
    @classmethod
    def get_value(cls, data, offset, expect_indirect: Incomplete | None = None, max_nesting: int = -1): ...
    re_lit_str_token: Incomplete
    escaped_chars: Incomplete
    # NOTE(review): METHOD_NAME appears to be a literal-string parser
    # starting at ``offset`` (upstream name: get_literal_string) -- stub
    # only; confirm against the implementation.
    @classmethod
    def METHOD_NAME(cls, data, offset): ...
    re_xref_section_start: Incomplete
    re_xref_subsection_start: Incomplete
    re_xref_entry: Incomplete
    def read_xref_table(self, xref_section_offset): ...
    def read_indirect(self, ref, max_nesting: int = -1): ...
    def linearize_page_tree(self, node: Incomplete | None = None): ...
6,866 | unset verbose files | # -*- coding: utf-8 -*-
r"""
Verbosity System and Logging in SageMath
Howto: Logging
==============
Using Python's Logging Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Import it::
sage: import logging
sage: logging.basicConfig() # only needed once
Setting the level::
sage: logging.getLogger().setLevel(logging.INFO)
Log something::
sage: logger = logging.getLogger(__name__)
sage: logger.info('Hello. I am talking to you.')
INFO:__main__:Hello. I am talking to you.
If we haven't set the logging level to ``logging.INFO``, then the previous
wouldn't have been shown.
::
sage: logger.debug('Hello. I am really talking a lot.')
The latter is not shown as the current logging level is only
``logging.INFO`` and not ``logging.DEBUG``.
Reset the level::
sage: logging.getLogger().setLevel(logging.WARNING)
Warnings are still shown at this default level (``logging.WARNING``)::
sage: logger.warning('Hello. I am warning you.')
WARNING:__main__:Hello. I am warning you.
And that's all.
There are a lot more features, see
:python:`Logging facility for Python<library/logging.html>`.
Using SageMath's Verbosity System
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Alternatively, this module provides
:func:`verbose`, :func:`set_verbose`, :func:`get_verbose` which can
be used as follows::
sage: from sage.misc.verbose import verbose, set_verbose, get_verbose
sage: set_verbose(1)
sage: t = verbose("This is SageMath.", level=0)
verbose 0 (<module>) This is SageMath.
sage: t = verbose("This is SageMath.", level=1)
verbose 1 (<module>) This is SageMath.
sage: t = verbose("This is SageMath.", level=2)
Logging Levels of SageMath and Python
=====================================
.. csv-table::
:class: contentstable
:widths: 20, 20
:delim: |
SageMath | Python
`-2` | ``logging.CRITICAL``
`-1` | ``logging.ERROR``
`0` | ``logging.WARNING``
`1` | ``logging.INFO``
`2` | ``logging.DEBUG``
Various
=======
AUTHORS:
- Daniel Krenn (2016)
Functions
=========
"""
#*****************************************************************************
# Copyright (C) 2006, 2007 William Stein <wstein@gmail.com>
# Copyright (C) 2006 Gonzalo Tornaria
# Copyright (C) 2008 John H. Palmieri
# Copyright (C) 2009 Mike Hansen
# Copyright (C) 2016 Daniel Krenn <dev@danielkrenn.at>
# Copyright (C) 2018 Frédéric Chapoton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
import sys
import os
# Global verbosity threshold: verbose() suppresses messages whose level
# exceeds this value.
LEVEL = 0  # default
# File-name substrings for which verbose output is shown; the special
# entry 'all' enables every file (see verbose()).
verbose_files = []
def verbose(mesg="", t=0, level=1, caller_name=None):
    """
    Print a message if the current verbosity is at least level.

    INPUT:

    - ``mesg`` -- str, a message to print

    - ``t`` -- int, optional; if included, will also print ``cputime(t)``,
      which is the time since time ``t``. Thus ``t`` should have been
      obtained with ``t = cputime()``.

    - ``level`` -- int (default: 1); the verbosity level of
      what we are printing

    - ``caller_name`` -- string (default: None); the name
      of the calling function. In most cases Python can deduce this, so
      it need not be provided.

    OUTPUT: possibly prints a message to stdout; also returns
    ``cputime()``.

    EXAMPLES::

        sage: set_verbose(1)
        sage: t = cputime()
        sage: t = verbose("This is Sage.", t, level=1, caller_name="william") # not tested
        VERBOSE1 (william): This is Sage. (time = 0.0)
        sage: set_verbose(0)
    """
    from sage.misc.timing import cputime
    # Below the global threshold: print nothing, just return the clock.
    if level > LEVEL:
        return cputime()
    # Inspect the caller's frame (depth 1) to report where the message
    # originated; this is why the logic must stay inside this function.
    frame = sys._getframe(1).f_code
    file_name = frame.co_filename
    lineno = frame.co_firstlineno
    # 'all' enables every file, and level <= 0 messages are always shown;
    # otherwise only files whose path contains a registered substring.
    if 'all' in verbose_files or level <= 0:
        show = True
    else:
        show = False
        for X in verbose_files:
            if file_name.find(X) != -1:
                show = True
                break
    if not show:
        return cputime()
    # A bare timing call with no message gets a default message.
    if t != 0 and mesg == "":
        mesg = "Finished."
    # see recipe 14.7 in Python Cookbook
    if caller_name is None:
        caller_name = frame.co_name
        if caller_name == "?: ":
            caller_name = ""
    short_file_name = os.path.split(frame.co_filename)[1]
    # Interactive sources (e.g. "<stdin>") get a shorter prefix without
    # file/line information.
    if '<' in short_file_name and '>' in short_file_name:
        s = "verbose %s (%s) %s" % (level, caller_name, mesg)
    else:
        s = "verbose %s (%s: %s, %s) %s" % (level, lineno,
                                            short_file_name, caller_name, mesg)
    if t != 0:
        s = s + " (time = %s)" % cputime(t)
    print(s)
    sys.stdout.flush()  # ensure the message appears immediately
    return cputime()
def set_verbose(level, files='all'):
    """
    Set the global Sage verbosity level.

    INPUT:

    - ``level`` -- an integer between 0 and 2, inclusive

    - ``files`` (default: ``'all'``) -- list of files to make verbose, or
      ``'all'`` to make ALL files verbose (the default)

    OUTPUT: changes the state of the verbosity flag and possibly
    appends to the list of files that are verbose.

    EXAMPLES::

        sage: set_verbose(2)
        sage: verbose("This is Sage.", level=1)  # not tested
        VERBOSE1 (?): This is Sage.
        sage: verbose("This is Sage.", level=2)  # not tested
        VERBOSE2 (?): This is Sage.
        sage: verbose("This is Sage.", level=3)  # not tested
        [no output]
        sage: set_verbose(0)
    """
    global LEVEL
    if level is None:
        level = -1
    # NOTE(review): a string ``level`` is registered as a verbose file
    # here but is immediately overwritten by the ``files`` registration
    # below, and LEVEL then ends up holding a string -- this looks like a
    # legacy quirk; confirm before relying on it.
    if isinstance(level, str):
        set_verbose_files([level])
    LEVEL = level
    set_verbose_files([files] if isinstance(files, str) else files)
def set_verbose_files(file_name):
    """
    Set the list of file-name substrings for which :func:`verbose`
    output is shown.  A non-list argument is wrapped in a list.
    """
    global verbose_files
    verbose_files = file_name if isinstance(file_name, list) else [file_name]
def get_verbose_files():
    """
    Return the list of file-name substrings currently marked as verbose.
    """
    return verbose_files
def METHOD_NAME(file_name):
    """
    Remove the given file name(s) from the list of verbose files.

    A ``ValueError`` propagates from ``list.remove`` if a name was not
    previously registered.
    """
    names = file_name if isinstance(file_name, list) else [file_name]
    for name in names:
        verbose_files.remove(name)
def get_verbose():
    """
    Return the global Sage verbosity level.

    OUTPUT: an integer between 0 and 2, inclusive.

    EXAMPLES::

        sage: get_verbose()
        0
        sage: set_verbose(2)
        sage: get_verbose()
        2
        sage: set_verbose(0)
    """
    # Reading a module global needs no ``global`` declaration.
    return LEVEL
6,867 | check | import numpy as np
import pyarrow as pa
import numpy as np
import sys, os
import tempfile
import random
from numpy.testing import assert_array_equal
# ************************************************************************** #
# Data generators #
# ************************************************************************** #
# python 2 vs 3 compatibility
if sys.hexversion >= 0x3000000:
    # Python 3: chr() covers the full Unicode range.
    getchr = chr
else:
    # Python 2: unichr() is needed to produce a unicode character.
    getchr = unichr
def gen_chr(max, printable=False):
    """Return one random character with code point in ``[1, max)``.

    NUL (0x0) is excluded because the key API cannot hold embedded NULs.
    With ``printable=True``, redraw until a printable character appears.
    """
    while True:
        candidate = getchr(random.randrange(1, max))
        if printable and not candidate.isprintable():
            continue
        # Always a single character here; guard kept from the original.
        if candidate:
            return candidate
def rand_datetime64_array(size, start=None, stop=None, dtype=None):
if not dtype:
dtype = np.dtype('M8[ns]')
# generate randint inbounds on the range of the dtype
units = np.datetime_data(dtype)[0]
intmin, intmax = np.iinfo(np.int64).min, np.iinfo(np.int64).max
if start is None:
start = np.datetime64(intmin + 1, units)
else:
start = np.datetime64(start)
if stop is None:
stop = np.datetime64(intmax, units)
else:
stop = np.datetime64(stop)
arr = np.random.randint(
start.astype(dtype).astype(np.int64), stop.astype(dtype).astype(np.int64),
size=size, dtype=np.int64
)
arr.sort()
return arr.astype(dtype)
def rand_utf8(size=5):
    """Return a random unicode string of ``size`` characters below 0xD7FF."""
    return u''.join(gen_chr(0xD7FF) for _ in range(size))
def rand_ascii_bytes(size=5, printable=False):
    """Return ``size`` random ASCII characters (< 127) as UTF-8 bytes."""
    return b''.join(
        gen_chr(127, printable).encode('utf-8') for _ in range(size)
    )
# ************************************************************************** #
# Test class #
# ************************************************************************** #
class DataFactory():
    """Generates one column of random test data per supported dtype and
    verifies round-tripped results against the originals.

    The fix here: the ``check`` method's final statement was truncated to
    ``return Tru`` (a ``NameError`` at runtime); restored to ``return True``.
    """

    def __init__(self, col_size):
        # name -> pyarrow Array imported back through the C Data Interface
        self.results = {}
        self.col_size = col_size
        self.create()

    def __len__(self):
        if not self.data:
            raise ValueError("Uninitialized data")
        return len(self.data)

    def create(self):
        """Populate ``self.data`` with one random column per dtype."""
        # generate test data for all columns
        col_size = self.col_size
        self.data = {}
        # Fixed-width integers spanning each dtype's full range.
        for dt in (np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64):
            key = np.dtype(dt).name
            dtinfo = np.iinfo(dt)
            self.data[key] = np.random.randint(dtinfo.min, dtinfo.max, size=col_size, dtype=dt)
        # Floating point in [0, 1).
        for dt in (np.float32, np.float64):
            key = np.dtype(dt).name
            self.data[key] = np.random.rand(col_size).astype(dt)
        # var-len (strings)
        self.data['tiledb_char'] = np.array([rand_ascii_bytes(np.random.randint(1, 100))
                                             for _ in range(col_size)]).astype("S1")
        self.data['utf_string1'] = np.array([rand_utf8(np.random.randint(1, 100))
                                             for _ in range(col_size)]).astype("U0")
        # another version with some important cells set to empty
        self.data['utf_string2'] = np.array([rand_utf8(np.random.randint(0, 100))
                                             for _ in range(col_size)]).astype("U0")
        utf_string2 = self.data['utf_string2']
        # NOTE(review): this first loop blanks *every* cell, which makes
        # the reverse loop below redundant -- looks like only a subset was
        # meant to be emptied; confirm the intended pattern.
        for i in range(len(utf_string2)):
            self.data['utf_string2'][i] = ''
        range_start = len(utf_string2) - 1
        range_end = len(utf_string2) % 3
        for i in range(range_start, range_end, -1):
            self.data['utf_string2'][i] = ''
        self.data['datetime_ns'] = rand_datetime64_array(col_size)
        ##########################################################################
        self.arrays = [pa.array(val) for val in self.data.values()]
        self.names = list(self.data.keys())

    def import_result(self, name, c_array, c_schema):
        """Import an Arrow array from the C Data Interface under ``name``."""
        self.results[name] = pa.Array._import_from_c(c_array, c_schema)

    def METHOD_NAME(self):
        """Assert every generated column has a matching, equal result."""
        for key, val in self.data.items():
            assert (key in self.results), "Expected key '{}' not found in results!".format(key)
            res_val = self.results[key]
            assert_array_equal(val, res_val)
        return True  # fixed: source was truncated to ``return Tru``
6,868 | library | import pytest
from align.schema.model import Model
from align.schema.instance import Instance
from align.schema.subcircuit import Circuit
from align.schema.types import set_context, List
@pytest.fixture()
def METHOD_NAME():
    """A model library containing a single four-terminal TESTMOS device."""
    models = List[Model]()
    with set_context(models):
        testmos = Model(
            name='TESTMOS',
            pins=['D', 'G', 'S', 'B'],
            parameters={'PARAM1': '1.0', 'PARAM2': '2'},
        )
        models.append(testmos)
    return models
@pytest.fixture()
def circuit(METHOD_NAME):
    """An empty circuit whose element context is the model library fixture."""
    with set_context(METHOD_NAME):
        subckt = Circuit()
    return subckt
def test_instance_model(circuit):
    """Instance construction requires a valid, registered model name."""
    pins = {'D': 'NET01', 'G': 'NET02', 'S': 'NET03', 'B': 'NET04'}
    with set_context(circuit.elements):
        # No arguments at all.
        with pytest.raises(Exception):
            Instance()
        # Missing model.
        with pytest.raises(Exception):
            Instance(name='M1', pins=dict(pins))
        # Still missing model, even with parameters supplied.
        with pytest.raises(Exception):
            Instance(
                name='M1',
                pins=dict(pins),
                parameters={'PARAM1': '12', 'PARAM2': '13'},
            )
        # Unknown model name.
        with pytest.raises(Exception):
            Instance(
                name='M1',
                model='undefinedmos',
                pins=dict(pins),
                parameters={'PARAM1': '12', 'PARAM2': '13'},
            )
        # Registered model: construction succeeds.
        Instance(
            name='M1',
            model='testmos',
            pins=dict(pins),
            parameters={'PARAM1': '12', 'PARAM2': '13'},
        )
def test_instance_name(circuit):
    """Name, model, and pins are all mandatory when creating an Instance."""
    with set_context(circuit.elements):
        with pytest.raises(Exception):
            Instance(model='testmos')  # no name
        with pytest.raises(Exception):
            Instance(name='M1')  # no model
        with pytest.raises(Exception):
            Instance(name='M1', model='testmos')  # no pins
def test_instance_pins(circuit):
    """Pins must cover the model's terminals; unknown parameters are rejected."""
    full_pins = {'D': 'NET01', 'G': 'NET02', 'S': 'NET03', 'B': 'NET04'}
    with set_context(circuit.elements):
        # Incomplete pin map.
        with pytest.raises(Exception):
            Instance(name='M1', model='testmos', pins={'D': 'NET01'})
        # Parameter name not declared on the model.
        with pytest.raises(Exception):
            Instance(
                name='M1',
                model='testmos',
                pins=dict(full_pins),
                parameters={'garbage': 'NET05'},
            )
        # Same again with a different bogus value.
        with pytest.raises(Exception):
            Instance(
                name='M1',
                model='testmos',
                pins=dict(full_pins),
                parameters={'garbage': 'dfddfd'},
            )
        # Valid construction: defaults come from the model.
        inst = Instance(name='M1', model='testmos', pins=dict(full_pins))
        assert inst.name == 'M1'
        assert inst.model == 'TESTMOS'
        assert inst.pins == full_pins
        assert inst.parameters == {'PARAM1': "1.0", 'PARAM2': "2"}
def test_instance_init_parameters(circuit):
    """Unspecified parameters fall back to the model's defaults."""
    pins = {'D': 'NET01', 'G': 'NET02', 'S': 'NET03', 'B': 'NET04'}
    with set_context(circuit.elements):
        partial = Instance(
            name='M1',
            model='testmos',
            pins=dict(pins),
            parameters={'PARAM1': 'NF*4'},
        )
        # PARAM2 is filled in from the model default.
        assert partial.parameters == {'PARAM1': 'NF*4', 'PARAM2': "2"}
    with set_context(circuit.elements):
        explicit = Instance(
            name='M1',
            model='testmos',
            pins=dict(pins),
            parameters={'PARAM1': '12', 'PARAM2': '13'},
        )
        # Explicit values override the defaults.
        assert explicit.parameters == {'PARAM1': "12", 'PARAM2': "13"}
def test_model_instantiation(circuit):
    """Two instances of one model share the model but differ otherwise."""
    with set_context(circuit.elements):
        first = Instance(
            name='m1',
            model='testmos',
            pins={'d': 'net01', 'G': 'Net02', 's': 'NET03', 'B': 'NeT04'},
            parameters={'PARAM1': 'nf*4', 'param2': '2.0'},
        )
        second = Instance(
            name='m2',
            model='testmos',
            pins={'d': 'net03', 'G': 'Net02', 's': 'NET01', 'B': 'NeT04'},
            parameters={'PARAM1': '2.0', 'param2': 'nf*4'},
        )
        assert first != second
        assert first.name != second.name
        assert first.pins != second.pins
        assert first.parameters != second.parameters
        assert first.model == second.model
def test_instance_case_insensitivity(circuit):
    """
    Everything should be converted to uppercase internally
    (SPICE is case-insensitive).
    """
    with set_context(circuit.elements):
        inst = Instance(
            name='m1',
            model='testmos',
            pins={'d': 'net01', 'G': 'Net02', 's': 'NET03', 'B': 'NeT04'},
            parameters={'PARAM1': 'nf*4', 'param2': '2.0'},
        )
        # Mixed-case inputs come back normalized to uppercase.
        assert inst.name == 'M1'
        assert inst.pins == {'D': 'NET01', 'G': 'NET02', 'S': 'NET03', 'B': 'NET04'}
        assert inst.parameters == {'PARAM1': 'NF*4', 'PARAM2': "2.0"}
def test_instance_json(circuit):
    """Serialization yields normalized fields in a fixed key order."""
    expected = (
        '{"model": "TESTMOS", "name": "M1", '
        '"pins": {"D": "NET01", "G": "NET02", "S": "NET03", "B": "NET04"}, '
        '"parameters": {"PARAM1": "NF*4", "PARAM2": "2"}, '
        '"abstract_name": null}'
    )
    with set_context(circuit.elements):
        inst = Instance(
            name='M1',
            model='testmos',
            pins={'D': 'NET01', 'G': 'NET02', 'S': 'NET03', 'B': 'NET04'},
            parameters={'PARAM1': 'NF*4'},
        )
        assert inst.json() == expected
6,869 | connect | # -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
import asyncio
from decimal import Decimal
from typing import ClassVar, Optional
import pandas as pd
from nautilus_trader.backtest.exchange import SimulatedExchange
from nautilus_trader.backtest.execution_client import BacktestExecClient
from nautilus_trader.backtest.models import FillModel
from nautilus_trader.backtest.models import LatencyModel
from nautilus_trader.cache.cache import Cache
from nautilus_trader.common.clock import LiveClock
from nautilus_trader.common.clock import TestClock
from nautilus_trader.common.logging import Logger
from nautilus_trader.common.providers import InstrumentProvider
from nautilus_trader.core.data import Data
from nautilus_trader.execution.reports import OrderStatusReport
from nautilus_trader.execution.reports import PositionStatusReport
from nautilus_trader.execution.reports import TradeReport
from nautilus_trader.live.execution_client import LiveExecutionClient
from nautilus_trader.model.currency import Currency
from nautilus_trader.model.data import Bar
from nautilus_trader.model.data import OrderBookDelta
from nautilus_trader.model.data import OrderBookDeltas
from nautilus_trader.model.data import QuoteTick
from nautilus_trader.model.data import TradeTick
from nautilus_trader.model.enums import AccountType
from nautilus_trader.model.enums import OmsType
from nautilus_trader.model.identifiers import ClientId
from nautilus_trader.model.identifiers import ClientOrderId
from nautilus_trader.model.identifiers import InstrumentId
from nautilus_trader.model.identifiers import Venue
from nautilus_trader.model.identifiers import VenueOrderId
from nautilus_trader.model.instruments import Instrument
from nautilus_trader.model.objects import AccountBalance
from nautilus_trader.model.objects import Money
from nautilus_trader.msgbus.bus import MessageBus
class SandboxExecutionClient(LiveExecutionClient):
    """
    Provides a sandboxed execution client for testing against.

    Parameters
    ----------
    loop : asyncio.AbstractEventLoop
        The event loop for the client.
    msgbus : MessageBus
        The message bus for the client.
    cache : Cache
        The cache for the client.
    clock : LiveClock
        The clock for the client.
    logger : Logger
        The logger for the client.
    """

    # Instruments handed to the simulated exchange (empty by default;
    # presumably populated before instantiation -- confirm with callers).
    INSTRUMENTS: ClassVar[list[Instrument]] = []

    def __init__(
        self,
        loop: asyncio.AbstractEventLoop,
        msgbus: MessageBus,
        cache: Cache,
        clock: LiveClock,
        logger: Logger,
        venue: str,
        currency: str,
        balance: int,
        oms_type: OmsType = OmsType.NETTING,
        account_type: AccountType = AccountType.MARGIN,
    ) -> None:
        # Starting account balance: entirely free, nothing locked.
        self._currency = Currency.from_str(currency)
        money = Money(value=balance, currency=self._currency)
        self.balance = AccountBalance(total=money, locked=Money(0, money.currency), free=money)
        # The simulated exchange runs on its own test clock, advanced by
        # the timestamps of incoming data (see on_data).
        self.test_clock = TestClock()
        self._account_type = account_type
        sandbox_venue = Venue(venue)
        super().__init__(
            loop=loop,
            client_id=ClientId(venue),
            venue=sandbox_venue,
            oms_type=oms_type,
            account_type=account_type,
            base_currency=self._currency,
            instrument_provider=InstrumentProvider(venue=sandbox_venue, logger=logger),
            msgbus=msgbus,
            cache=cache,
            clock=clock,
            logger=logger,
            config=None,
        )
        # Backtest-style simulated exchange that fills orders against the
        # market data routed in via on_data().
        self.exchange = SimulatedExchange(
            venue=sandbox_venue,
            oms_type=oms_type,
            account_type=self._account_type,
            base_currency=self._currency,
            starting_balances=[self.balance.free],
            default_leverage=Decimal(10),
            leverages={},
            instruments=self.INSTRUMENTS,
            modules=[],
            msgbus=self._msgbus,
            cache=cache,
            fill_model=FillModel(),
            latency_model=LatencyModel(0),
            clock=self.test_clock,
            logger=logger,
            frozen_account=True,  # <-- Freezing account
        )
        # All order commands below are delegated to this backtest client.
        self._client = BacktestExecClient(
            exchange=self.exchange,
            msgbus=msgbus,
            cache=self._cache,
            clock=self.test_clock,
            logger=logger,
        )
        self.exchange.register_client(self._client)

    def METHOD_NAME(self) -> None:
        """
        Connect the client.

        Subscribes to all data topics so market updates reach the
        simulated exchange via ``on_data``.
        """
        self._log.info("Connecting...")
        self._msgbus.subscribe("data.*", handler=self.on_data)
        self._client._set_connected(True)
        self._set_connected(True)
        self._log.info("Connected.")

    def disconnect(self) -> None:
        """
        Disconnect the client.
        """
        self._log.info("Disconnecting...")
        self._set_connected(False)
        self._log.info("Disconnected.")

    # The sandbox has no external venue state to query, so the report
    # generators below return empty results.
    async def generate_order_status_report(
        self,
        instrument_id: InstrumentId,
        client_order_id: Optional[ClientOrderId] = None,
        venue_order_id: Optional[VenueOrderId] = None,
    ) -> Optional[OrderStatusReport]:
        return None

    async def generate_order_status_reports(
        self,
        instrument_id: Optional[InstrumentId] = None,
        start: Optional[pd.Timestamp] = None,
        end: Optional[pd.Timestamp] = None,
        open_only: bool = False,
    ) -> list[OrderStatusReport]:
        return []

    async def generate_trade_reports(
        self,
        instrument_id: Optional[InstrumentId] = None,
        venue_order_id: Optional[VenueOrderId] = None,
        start: Optional[pd.Timestamp] = None,
        end: Optional[pd.Timestamp] = None,
    ) -> list[TradeReport]:
        return []

    async def generate_position_status_reports(
        self,
        instrument_id: Optional[InstrumentId] = None,
        start: Optional[pd.Timestamp] = None,
        end: Optional[pd.Timestamp] = None,
    ) -> list[PositionStatusReport]:
        return []

    # Order commands: pass through to the backtest execution client.
    def submit_order(self, command):
        return self._client.submit_order(command)

    def modify_order(self, command):
        return self._client.modify_order(command)

    def cancel_order(self, command):
        return self._client.cancel_order(command)

    def cancel_all_orders(self, command):
        return self._client.cancel_all_orders(command)

    def on_data(self, data: Data) -> None:
        # Taken from main backtest loop of BacktestEngine
        if isinstance(data, (OrderBookDelta)):
            self.exchange.process_order_book_delta(data)
        elif isinstance(data, (OrderBookDeltas)):
            self.exchange.process_order_book_deltas(data)
        elif isinstance(data, QuoteTick):
            self.exchange.process_quote_tick(data)
        elif isinstance(data, TradeTick):
            self.exchange.process_trade_tick(data)
        elif isinstance(data, Bar):
            self.exchange.process_bar(data)
        # Advance the exchange's test clock to the data's init timestamp.
        self.exchange.process(data.ts_init)
6,870 | test array of array | # -*- coding: utf-8 -*-
import copy
import datetime
import pytest
from bravado_core.exception import SwaggerMappingError
from bravado_core.spec import Spec
from bravado_core.unmarshal import unmarshal_array
@pytest.fixture
def int_array_spec():
    """Swagger spec for an array whose items are integers."""
    return {'type': 'array', 'items': {'type': 'integer'}}
def test_primitive_array(empty_swagger_spec, int_array_spec):
    """A list of ints unmarshals unchanged."""
    assert unmarshal_array(empty_swagger_spec, int_array_spec, [1, 2, 3]) == [1, 2, 3]
def test_empty_array(empty_swagger_spec, int_array_spec):
result = unmarshal_array(empty_swagger_spec, int_array_spec, [])
assert [] == result
def test_default_with_format(empty_swagger_spec):
result = unmarshal_array(
empty_swagger_spec,
{
'type': 'array',
'items': {
'type': 'string',
'format': 'date',
},
'default': ['2019-05-22'],
},
None,
)
assert [datetime.date(2019, 5, 22)] == result
def test_with_no_items_schema_defined(empty_swagger_spec):
value = [1, 2.3, '4', ['5'], {'6': 7}]
result = unmarshal_array(
empty_swagger_spec,
{
'type': 'array',
},
value,
)
assert result == value
def test_type_not_array_raises_error(empty_swagger_spec, int_array_spec):
with pytest.raises(SwaggerMappingError) as excinfo:
unmarshal_array(empty_swagger_spec, int_array_spec, 'not a list')
assert 'Expected list like type' in str(excinfo.value)
def METHOD_NAME(empty_swagger_spec):
array_of_array_spec = {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'string',
},
},
}
input = [
['one'],
['two', 'three'],
['four', 'five', 'six'],
]
expected = copy.deepcopy(input)
result = unmarshal_array(empty_swagger_spec, array_of_array_spec, input)
assert expected == result
def test_array_of_objects(empty_swagger_spec):
    # Objects inside an array unmarshal to equal dicts.
    array_of_addresses_spec = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                'number': {
                    'type': 'number',
                },
                'street_name': {
                    'type': 'string',
                },
                'street_type': {
                    'type': 'string',
                    'enum': [
                        'Street',
                        'Avenue',
                        'Boulevard',
                    ],
                },
            },
        },
    }
    input = [
        {
            'number': 1600,
            'street_name': 'Pennsylvania',
            'street_type': 'Avenue',
        },
        {
            'number': 1700,
            'street_name': 'Main',
            'street_type': 'Street',
        },
        {
            'number': 1800,
            'street_name': 'Yelpy',
            'street_type': 'Boulevard',
        },
    ]
    # deepcopy guards against unmarshal_array mutating its input.
    expected = copy.deepcopy(input)
    result = unmarshal_array(empty_swagger_spec, array_of_addresses_spec, input)
    assert expected == result


def test_array_of_models(petstore_dict):
    # Model dicts unmarshal to typed model instances (Pet/Category/Tag),
    # including nested models and model lists.
    petstore_spec = Spec.from_dict(petstore_dict)
    Pet = petstore_spec.definitions['Pet']
    Category = petstore_spec.definitions['Category']
    Tag = petstore_spec.definitions['Tag']
    array_of_pets_spec = {
        'type': 'array',
        'items': petstore_spec.spec_dict['definitions']['Pet'],
    }
    fido_dict = {
        'id': 1,
        'name': 'Fido',
        'status': 'sold',
        'photoUrls': ['wagtail.png', 'bark.png'],
        'category': {
            'id': 200,
            'name': 'friendly',
        },
        'tags': [
            {
                'id': 99,
                'name': 'mini',
            },
            {
                'id': 100,
                'name': 'brown',
            },
        ],
    }
    pet_dicts = [fido_dict]
    pets = unmarshal_array(petstore_spec, array_of_pets_spec, pet_dicts)
    assert isinstance(pets, list)
    assert 1 == len(pets)
    fido = pets[0]
    assert isinstance(fido, Pet)
    assert isinstance(fido.category, Category)
    assert isinstance(fido.tags[0], Tag)
6,871 | spec for sensitive tests | # don't import any costly modules
import sys
import os
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
    """Warn when stdlib ``distutils`` was imported before Setuptools."""
    if 'distutils' not in sys.modules:
        # Nothing was imported yet; no conflict to report.
        return
    # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
    # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
    if is_pypy and sys.version_info < (3, 7):
        return
    import warnings

    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )
def clear_distutils():
    """Drop ``distutils`` and every ``distutils.*`` submodule from sys.modules."""
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    # Snapshot the names first: deleting while iterating sys.modules is unsafe.
    stale = [
        mod
        for mod in sys.modules
        if mod == "distutils" or mod.startswith("distutils.")
    ]
    for mod in stale:
        del sys.modules[mod]
def enabled():
    """Return True when the environment selects the local distutils copy.

    Controlled by the SETUPTOOLS_USE_DISTUTILS environment variable;
    anything other than 'local' (default 'stdlib') disables the override.
    """
    return os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') == 'local'
def ensure_local_distutils():
    """Force ``distutils`` in sys.modules to be setuptools' bundled copy."""
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()
class _TrivialRe:
def __init__(self, *patterns):
self._patterns = patterns
def match(self, string):
return all(pat in string for pat in self._patterns)
class DistutilsMetaFinder:
    """sys.meta_path finder that redirects ``import distutils`` to
    ``setuptools._distutils``, with special cases for pip and select
    CPython test modules via the ``spec_for_*`` dispatch in find_spec."""

    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        # Dispatch by module name: spec_for_distutils, spec_for_pip, and the
        # spec_for_test_* aliases installed after the class definition.
        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                # Present the already-imported local package under the
                # stdlib name.
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        # pybuilddir.txt only exists in a CPython source build tree.
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if sys.version_info >= (3, 12) or self.pip_imported_during_build():
            return

        clear_distutils()
        # Disable the distutils redirect for the rest of this process.
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback
        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def METHOD_NAME(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    # CPython test modules whose imports must see stdlib distutils;
    # each is aliased to METHOD_NAME by the loop after this class.
    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )
# Install a spec_for_<test module> alias for every sensitive CPython test
# module so find_spec's name-based dispatch reaches METHOD_NAME for them.
for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.METHOD_NAME,
    )


# Module-level singleton placed on sys.meta_path by insert_shim()/add_shim().
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
    """Install DISTUTILS_FINDER on sys.meta_path if not already present.

    Idempotent. (Rewritten from the original ``x in y or f()``
    short-circuit, which hid a side effect inside a boolean expression.)
    """
    if DISTUTILS_FINDER not in sys.meta_path:
        insert_shim()
class shim:
    """Context manager that temporarily installs DISTUTILS_FINDER on
    sys.meta_path and removes it on exit."""

    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        _remove_shim()


def insert_shim():
    # Prepend so this finder wins over the regular path-based finders.
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def _remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        # Already removed (or never inserted) -- nothing to do.
        pass


if sys.version_info < (3, 12):
    # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
    remove_shim = _remove_shim
6,872 | main | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import shutil
import sys
import pwnlib.args
pwnlib.args.free_form = False
from pwn import *
from pwnlib.commandline import common
# Top-level 'libcdb' subcommand, attached to the shared pwnlib commandline
# parser; dispatch between lookup/hash/file is via args.libc_command.
parser = common.parser_commands.add_parser(
    'libcdb',
    help = 'Print various information about a libc binary',
    description = 'Print various information about a libc binary'
)

libc_commands = parser.add_subparsers(
    dest = 'libc_command'
)

# 'lookup': find libc versions matching symbol/offset pairs.
lookup_parser = libc_commands.add_parser(
    'lookup',
    help = 'Lookup a libc version by function offsets',
    description = 'Lookup a libc version by function offsets'
)

lookup_parser.add_argument(
    'symbol_offset_pairs',
    metavar = 'symbol_offset_pairs',
    nargs = '+',
    help = 'Symbol and offset pairs to lookup matching libc version. Can be any number of pairs to narrow the search. Example: "read 3e0 write 520"'
)

lookup_parser.add_argument(
    '--download-libc',
    action = 'store_true',
    default = False,
    help = 'Attempt to download the matching libc.so'
)

# --unstrip defaults to True; --no-unstrip flips the same dest back off.
lookup_parser.add_argument(
    '--unstrip',
    action = 'store_true',
    default = True,
    help = 'Attempt to unstrip the libc binary with debug symbols from a debuginfod server'
)

lookup_parser.add_argument(
    '--no-unstrip',
    action = 'store_false',
    dest = 'unstrip',
    help = 'Do NOT attempt to unstrip the libc binary with debug symbols from a debuginfod server'
)

# 'hash': look up a libc by one of its known hashes.
hash_parser = libc_commands.add_parser(
    'hash',
    help = 'Display information of a libc version given an unique hash',
    description = 'Display information of a libc version given an unique hash'
)

hash_parser.add_argument(
    'hash_value',
    metavar = 'hash_value',
    nargs = '+',
    help = 'Hex encoded hash value'
)

hash_parser.add_argument(
    '-t', '--hash_type',
    nargs = '?',
    type = str,
    choices = ['id', 'buildid', 'md5', 'sha1', 'sha256'],
    default = 'buildid',
    help = 'The type of the provided hash value. Supported hashtypes: id, buildid, md5, sha1, sha256'
)

hash_parser.add_argument(
    '--download-libc',
    action = 'store_true',
    default = False,
    help = 'Attempt to download the matching libc.so'
)

hash_parser.add_argument(
    '--unstrip',
    action = 'store_true',
    default = True,
    help = 'Attempt to unstrip the libc binary with debug symbols from a debuginfod server'
)

hash_parser.add_argument(
    '--no-unstrip',
    action = 'store_false',
    dest = 'unstrip',
    help = 'Do NOT attempt to unstrip the libc binary with debug symbols from a debuginfod server'
)

# 'file': dump hashes/version/symbol offsets of a local libc binary.
file_parser = libc_commands.add_parser(
    'file',
    help = 'Dump information about a libc binary',
    description = 'Dump information about a libc binary'
)

file_parser.add_argument(
    'files',
    metavar = 'files',
    nargs = '+',
    help = 'Libc binary to dump'
)

file_parser.add_argument(
    '-s', '--symbols',
    metavar = 'symbols',
    nargs = '*',
    help = 'List of symbol offsets to dump in addition to the common ones'
)

file_parser.add_argument(
    '-o', '--offset',
    metavar = 'offset',
    type = str,
    help = 'Display all offsets relative to this symbol'
)

file_parser.add_argument(
    '--unstrip',
    action = 'store_true',
    default = False,
    help = 'Attempt to unstrip the libc binary inplace with debug symbols from a debuginfod server'
)

# Symbols always dumped by the 'file' subcommand.
common_symbols = ['dup2', 'printf', 'puts', 'read', 'system', 'write']
def find_libc(params):
    """POST *params* to the libc.rip search API.

    Returns the list of matching libc descriptors, or [] on a non-200
    response or an empty result set.
    """
    import requests

    url = "https://libc.rip/api/find"
    result = requests.post(url, json=params, timeout=20)
    log.debug('Request: %s', params)
    # NOTE(review): result.json() is parsed up to three times below;
    # could be cached in a local, left as-is here.
    log.debug('Result: %s', result.json())
    if result.status_code != 200 or len(result.json()) == 0:
        log.failure("Could not find libc for %s on libc.rip", params)
        return []
    return result.json()
def print_libc(libc):
    """Pretty-print one libc descriptor: id, hashes, then symbol offsets."""
    log.info('%s', text.red(libc['id']))
    hash_fields = (
        ('BuildID:', 'buildid'),
        ('MD5:', 'md5'),
        ('SHA1:', 'sha1'),
        ('SHA256:', 'sha256'),
    )
    for label, key in hash_fields:
        log.indented('\t%-20s %s', text.green(label), libc[key])
    log.indented('\t%s', text.green('Symbols:'))
    for name, offset in libc['symbols'].items():
        log.indented('\t%25s = %s', name, offset)
def handle_remote_libc(args, libc):
    """Print a matched libc and, when requested, download (and unstrip) it."""
    print_libc(libc)
    if args.download_libc:
        path = libcdb.search_by_build_id(libc['buildid'], args.unstrip)
        if path:
            if args.unstrip:
                libcdb.unstrip_libc(path)
            # Copy out of the pwntools cache into the working directory.
            shutil.copy(path, './{}.so'.format(libc['id']))
def translate_offset(offs, args, exe):
    """Rebase *offs* relative to the symbol named by ``args.offset``.

    Returns *offs* unchanged when no base symbol was requested, or when the
    requested symbol is not present in ``exe.symbols``.
    """
    base = args.offset
    if not base:
        return offs
    if base not in exe.symbols:
        log.info_once('offset symbol %s not found. ignoring.', base)
        return offs
    return offs - exe.symbols[base]
def collect_synthetic_symbols(exe):
    """Add synthetic entries to *exe*'s symbol table and return their names.

    Always adds ``str_bin_sh`` (address of the first "/bin/sh" string);
    adds ``__libc_start_main_ret`` only when the return address is known
    (non-zero).
    """
    exe.symbols['str_bin_sh'] = next(exe.search(b'/bin/sh\x00'))
    added = ['str_bin_sh']

    ret_addr = exe.libc_start_main_return
    if ret_addr > 0:
        exe.symbols['__libc_start_main_ret'] = ret_addr
        added.append('__libc_start_main_ret')
    return added
def METHOD_NAME(args):
    """Entry point for the ``libcdb`` subcommands (lookup / hash / file)."""
    if len(sys.argv) < 3:
        # No subcommand given: show usage and bail out.
        parser.print_usage()
        sys.exit()

    if args.libc_command == 'lookup':
        pairs = args.symbol_offset_pairs
        if len(pairs) % 2 != 0:
            log.failure('Uneven number of arguments. Please provide "symbol offset" pairs')
            return

        symbols = {pairs[i]: pairs[i + 1] for i in range(0, len(pairs), 2)}
        matched_libcs = find_libc({'symbols': symbols})
        for libc in matched_libcs:
            handle_remote_libc(args, libc)
    elif args.libc_command == 'hash':
        for hash_value in args.hash_value:
            matched_libcs = find_libc({args.hash_type: hash_value})
            for libc in matched_libcs:
                handle_remote_libc(args, libc)
    elif args.libc_command == 'file':
        from hashlib import md5, sha1, sha256
        for file in args.files:
            if not os.path.exists(file) or not os.path.isfile(file):
                # Fix: previously referenced the nonexistent ``args.file``
                # attribute (the option is ``args.files``), which raised
                # AttributeError instead of reporting the bad path.
                log.failure('File does not exist %s', file)
                continue

            if args.unstrip:
                libcdb.unstrip_libc(file)

            exe = ELF(file, checksec=False)
            log.info('%s', text.red(os.path.basename(file)))

            # Raw bytes pattern: '\d' in a non-raw bytes literal is an
            # invalid escape sequence (DeprecationWarning on modern Python).
            libc_version = re.search(rb'libc[ -](\d+\.\d+)', exe.data)
            if libc_version:
                log.indented('%-20s %s', text.green('Version:'), libc_version.group(1).decode())

            if exe.buildid:
                log.indented('%-20s %s', text.green('BuildID:'), enhex(exe.buildid))

            log.indented('%-20s %s', text.green('MD5:'), md5(exe.data).hexdigest())
            log.indented('%-20s %s', text.green('SHA1:'), sha1(exe.data).hexdigest())
            log.indented('%-20s %s', text.green('SHA256:'), sha256(exe.data).hexdigest())

            # Always dump the basic list of common symbols
            log.indented('%s', text.green('Symbols:'))
            synthetic_symbols = collect_synthetic_symbols(exe)
            symbols = common_symbols + (args.symbols or []) + synthetic_symbols
            symbols.sort()
            for symbol in symbols:
                if symbol not in exe.symbols:
                    log.indented('%25s = %s', symbol, text.red('not found'))
                else:
                    log.indented('%25s = %#x', symbol, translate_offset(exe.symbols[symbol], args, exe))
if __name__ == '__main__':
    # Run via the shared pwnlib commandline entry point.
    pwnlib.commandline.common.METHOD_NAME(__file__)
6,873 | alternative id | ######################################################################################################################
# Copyright (C) 2017-2023 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""Classes to represent items in scenario tree."""
from PySide6.QtCore import Qt
from .tree_item_utility import (
BoldTextMixin,
EditableMixin,
EmptyChildMixin,
FetchMoreMixin,
GrayIfLastMixin,
LeafItem,
StandardDBItem,
)
_SCENARIO_ICON = "\uf008" # film
class ScenarioDBItem(EmptyChildMixin, FetchMoreMixin, StandardDBItem):
    """A root item representing a db."""

    @property
    def item_type(self):
        return "db"

    @property
    def fetch_item_type(self):
        # Children of the db root are scenarios.
        return "scenario"

    def empty_child(self):
        """Return the placeholder child shown after the real scenarios."""
        return ScenarioItem()

    def _make_child(self, id_):
        """Return a child item for the scenario with the given db id."""
        return ScenarioItem(id_)
class ScenarioItem(GrayIfLastMixin, EditableMixin, EmptyChildMixin, FetchMoreMixin, BoldTextMixin, LeafItem):
    """A scenario leaf item."""

    @property
    def item_type(self):
        return "scenario"

    @property
    def fetch_item_type(self):
        # Children of a scenario are scenario_alternative rows.
        return "scenario_alternative"

    @property
    def icon_code(self):
        return _SCENARIO_ICON

    @property
    def tool_tip(self):
        # Only the placeholder (id-less) row carries a tool tip; the
        # implicit None return is intentional for real scenarios.
        if not self.id:
            return "<p><b>Note</b>: Scenario names longer than 20 characters might appear shortened in generated files.</p>"

    def _do_set_up(self):
        """Doesn't add children to the last row."""
        if not self.id:
            return
        super()._do_set_up()

    def add_item_to_db(self, db_item):
        """Add the scenario to the database via the db manager."""
        self.db_mngr.add_scenarios({self.db_map: [db_item]})

    def update_item_in_db(self, db_item):
        """Update the scenario in the database via the db manager."""
        self.db_mngr.update_scenarios({self.db_map: [db_item]})

    def handle_updated_in_db(self):
        super().handle_updated_in_db()
        self.update_alternative_id_list()

    def flags(self, column):
        # Real scenarios accept drops (alternative reordering); the
        # placeholder row can never have children.
        if self.id is not None:
            return super().flags(column) | Qt.ItemFlag.ItemIsDropEnabled
        return super().flags(column) | Qt.ItemFlag.ItemNeverHasChildren

    @property
    def alternative_id_list(self):
        return self.db_mngr.get_scenario_alternative_id_list(self.db_map, self.id)

    def update_alternative_id_list(self):
        """Grow or shrink the child rows to match the alternative id list."""
        alt_count = len(self.alternative_id_list)
        curr_alt_count = len(self.non_empty_children)
        if alt_count > curr_alt_count:
            added_count = alt_count - curr_alt_count
            children = [ScenarioAlternativeItem() for _ in range(added_count)]
            self.insert_children(curr_alt_count, children)
        elif curr_alt_count > alt_count:
            removed_count = curr_alt_count - alt_count
            self.remove_children(alt_count, removed_count)

    def handle_items_added(self, _db_map_data):
        self.update_alternative_id_list()

    def handle_items_removed(self, _db_map_data):
        self.update_alternative_id_list()

    def handle_items_updated(self, _db_map_data):
        self.update_alternative_id_list()

    def empty_child(self):
        """See base class."""
        return ScenarioAlternativeItem()

    def _make_child(self, id_):
        """Not needed - we don't quite add children here, but rather update them in update_alternative_id_list."""
class ScenarioAlternativeItem(GrayIfLastMixin, EditableMixin, LeafItem):
    """A scenario alternative leaf item."""

    @property
    def item_type(self):
        return "scenario_alternative"

    @property
    def tool_tip(self):
        return "<p>Drag and drop this item to reorder scenario alternatives</p>"

    def _make_item_data(self):
        # Placeholder row shown for the empty (last) child.
        return {"name": "Type scenario alternative name here...", "description": ""}

    @property
    def item_data(self):
        if self.METHOD_NAME is None:
            return self._make_item_data()
        return self.db_mngr.get_item(self.db_map, "alternative", self.METHOD_NAME)

    @property
    def METHOD_NAME(self):
        # The alternative id is positional: this child's index into the
        # parent scenario's alternative id list; None for the empty row.
        try:
            return self.parent_item.alternative_id_list[self.child_number()]
        except IndexError:
            return None

    def add_item_to_db(self, db_item):
        """Not supported; scenario alternatives are written via set_data."""
        raise NotImplementedError()

    def update_item_in_db(self, db_item):
        """Not supported; scenario alternatives are written via set_data."""
        raise NotImplementedError()

    def flags(self, column):
        flags = super().flags(column) | Qt.ItemFlag.ItemNeverHasChildren
        # Fix: use the fully qualified Qt.ItemFlag enum members for
        # consistency with the rest of this file (the short Qt.* aliases
        # are deprecated in Qt 6).
        if self.METHOD_NAME is not None:
            flags |= Qt.ItemFlag.ItemIsDragEnabled
        else:
            flags |= Qt.ItemFlag.ItemIsEditable
        return flags

    def set_data(self, column, value, role=Qt.ItemDataRole.EditRole):
        """Append a new alternative id to the parent scenario.

        Only the empty (id-less) row in column 0 is editable this way.
        """
        if role != Qt.ItemDataRole.EditRole or value == self.data(column, role):
            return False
        if self.METHOD_NAME is not None:
            return False
        if column == 0:
            alternative_id_list = list(self.parent_item.alternative_id_list)
            alternative_id_list.append(value)
            db_item = {"id": self.parent_item.id, "alternative_id_list": alternative_id_list}
            self.db_mngr.set_scenario_alternatives({self.db_map: [db_item]})
        return True
6,874 | test non doi uri to doi | # -*- coding: utf-8 -*-
import unittest
from .DataTestTemplate import _DataTest
from owmeta_core.graph_object import IdentifierMissingException
from owmeta.document import (Document,
_doi_uri_to_doi,
WormbaseRetrievalException)
import pytest
class DocumentTest(_DataTest):
    """Unit tests for Document construction and identifier derivation."""
    ctx_classes = (Document,)

    def test_bibtex_init(self):
        """Author names parsed from a BibTeX entry are queryable."""
        bibtex = u"""@ARTICLE{Cesar2013,
author = {Jean César},
title = {An amazing title},
year = {2013},
month = jan,
volume = {12},
pages = {12--23},
journal = {Nice Journal},
abstract = {This is an abstract. This line should be long enough to test
multilines...},
comments = {A comment},
keywords = {keyword1, keyword2},
}
"""
        self.assertIn(u'Jean César', self.ctx.Document(bibtex=bibtex).author())

    def test_doi_param_sets_id(self):
        """Providing a DOI alone is enough to derive an identifier."""
        doc = Document(doi='blah')
        self.assertIsNotNone(doc.identifier)

    def test_doi_uri_param_sets_id(self):
        """A doi.org URI and the bare DOI yield the same identifier."""
        doc1 = Document(doi='http://doi.org/blah')
        doc2 = Document(doi='blah')
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(doc2.identifier, doc1.identifier)

    def METHOD_NAME(self):
        """A non-doi.org URI still yields some identifier."""
        doc = Document(doi='http://example.org/blah')
        self.assertIsNotNone(doc.identifier)
class DOIURITest(unittest.TestCase):
    """Unit tests for the _doi_uri_to_doi() helper."""

    def test_match(self):
        """A doi.org URI yields the bare DOI suffix."""
        self.assertEqual('blah', _doi_uri_to_doi('http://doi.org/blah'))

    def test_nomatch(self):
        """A URI on a different host yields None."""
        self.assertIsNone(_doi_uri_to_doi('http://example.org/blah'))

    def test_not_a_uri(self):
        """A bare DOI (no scheme/host) yields None."""
        self.assertIsNone(_doi_uri_to_doi('10.1098/rstb.1952.0012'))

    def test_not_doi(self):
        """Arbitrary text yields None."""
        self.assertIsNone(_doi_uri_to_doi('blahblah'))
@pytest.mark.inttest
class DocumentElaborationTest(_DataTest):
    '''
    Tests for Document 'elaboration', the process of looking up documents from external
    resources by using their identifiers and setting those values on the object

    Use a Pubmed API key when running all of them at once to avoid hitting the public
    request rate limit
    '''
    ctx_classes = (Document,)

    def test_pubmed_init1(self):
        """
        A pubmed uri
        """
        uri = 'http://www.ncbi.nlm.nih.gov/pubmed/24098140?dopt=abstract'
        doc = self.ctx.Document(pubmed=uri)
        doc.update_from_pubmed()
        self.assertIn(u'Frédéric MY', list(doc.author()))

    def test_pmid_init1(self):
        """
        A pubmed uri doesn't work
        """
        # Passing a full URI where a bare PMID is expected yields no authors.
        uri = 'http://www.ncbi.nlm.nih.gov/pubmed/24098140?dopt=abstract'
        doc = self.ctx.Document(pmid=uri)
        doc.update_from_pubmed()
        self.assertEqual([], list(doc.author()))

    def test_pmid_init2(self):
        """
        A pubmed id
        """
        pmid = "24098140"
        doc = self.ctx.Document(pmid=pmid)
        doc.update_from_pubmed()
        self.assertIn(u'Frédéric MY', list(doc.author()))

    def test_pubmed_multiple_authors_list(self):
        """
        When multiple authors are on a paper, all of their names should be
        returned in an iterator. Publication order not necessarily preserved
        """
        pmid = "24098140"
        alist = [
            u"Frédéric MY",
            "Lundin VF",
            "Whiteside MD",
            "Cueva JG",
            "Tu DK",
            "Kang SY",
            "Singh H",
            "Baillie DL",
            "Hutter H",
            "Goodman MB",
            "Brinkman FS",
            "Leroux MR"]
        doc = self.ctx.Document(pmid=pmid)
        doc.update_from_pubmed()
        # Sets: order of authors is not guaranteed by the API.
        self.assertEqual(set(alist), set(doc.author()))

    def test_wormbase_init(self):
        """ Initialize with wormbase source """
        doc = self.ctx.Document(wormbase="WBPaper00044287")
        doc.update_from_wormbase()
        self.assertIn(u'Frederic MY', list(doc.author()))

    def test_wormbase_year(self):
        # Smoke test: year lookup succeeds for a range of wormbase ids.
        for i in range(600, 610):
            wbid = 'WBPaper00044' + str(i)
            doc = self.ctx.Document(wormbase=wbid)
            doc.update_from_wormbase()
            doc.year()

    def test_no_wormbase_id(self):
        # Without a wormbase id, elaboration raises instead of silently
        # doing nothing.
        doc = self.ctx.Document()
        with self.assertRaises(WormbaseRetrievalException):
            doc.update_from_wormbase()
6,875 | get oauth client | import logging
from collections.abc import Collection
from typing import Any, NamedTuple
from airflow.exceptions import AirflowSkipException
from airflow.models import Variable
from requests_oauthlib import OAuth2Session
log = logging.getLogger(__name__)
class OauthProvider(NamedTuple):
    """Representation of the information needed to define an OAuth2 provider."""

    # Provider key used in the Airflow Variable dictionaries below.
    name: str
    # Endpoint for exchanging an authorization code for tokens.
    auth_url: str
    # Endpoint for exchanging a refresh token for new tokens.
    refresh_url: str


# Airflow Variable keys for token/secret storage.
OAUTH2_TOKEN_KEY = "OAUTH2_ACCESS_TOKENS"
OAUTH2_AUTH_KEY = "OAUTH2_AUTH_KEYS"
OAUTH2_PROVIDERS_KEY = "OAUTH2_PROVIDER_SECRETS"

# All providers this module knows how to authorize/refresh.
OAUTH_PROVIDERS = [
    OauthProvider(
        name="freesound",
        auth_url="https://freesound.org/apiv2/oauth2/access_token/",
        refresh_url="https://freesound.org/apiv2/oauth2/access_token/",
    ),
]
def _var_get(key: str) -> dict[str, Any]:
    """Shortcut for Variable retrieval with deserialization and dictionary default."""
    return Variable.get(key, default_var={}, deserialize_json=True)


def _update_tokens(
    provider_name: str,
    tokens: dict[str, str],
) -> None:
    """
    Update the access/refresh tokens for a provider in the Airflow Variable store.

    This update does not affect the tokens for any other existing providers.
    """
    log.info(f"Updating tokens for provider: {provider_name}")
    current_tokens = _var_get(OAUTH2_TOKEN_KEY)
    # Only the two token fields are persisted; extra response fields are dropped.
    current_tokens[provider_name] = {
        "access_token": tokens["access_token"],
        "refresh_token": tokens["refresh_token"],
    }
    Variable.set(OAUTH2_TOKEN_KEY, current_tokens, serialize_json=True)
def _get_provider_secrets(
name: str, provider_secrets: dict[str, dict] = None
) -> dict[str, str]:
"""
Retrieve provider secrets from the Airflow Variable store.
Optionally provide a previously retrieved Variable value for improved performance.
Providers are expected to *at least* have a `client_id`, and may have more
information defined as necessary.
"""
if provider_secrets is None:
provider_secrets = _var_get(OAUTH2_PROVIDERS_KEY)
secrets = provider_secrets.get(name)
if secrets is None or "client_id" not in secrets:
raise ValueError(
f"Authorization requested for provider {name} but no secrets "
f"were provided! Add secrets to the {OAUTH2_PROVIDERS_KEY} Variable and"
f" ensure the provider has a client_id."
)
return secrets
def METHOD_NAME(provider_name: str) -> OAuth2Session:
    """
    Create an OAuth2 client.

    This client behaves like a `requests.Session` instance, but will automatically add
    the authorization necessary for a particular provider.

    Raises KeyError when no access token is stored for the provider.
    """
    secrets = _get_provider_secrets(provider_name)
    tokens = _var_get(OAUTH2_TOKEN_KEY)
    if provider_name not in tokens:
        raise KeyError(f"Access token not found for provider {provider_name}")
    return OAuth2Session(
        client_id=secrets["client_id"],
        # Stored tokens lack a token_type, so supply "Bearer" explicitly.
        token={**tokens[provider_name], "token_type": "Bearer"},
    )
def authorize_providers(providers: Collection[OauthProvider]) -> None:
    """
    Iterate through all the specified providers and authorize those that may need it.

    The authorization flow will only be attempted if a provider has an authorization
    key defined in the Airflow Variable store.
    """
    provider_secrets = _var_get(OAUTH2_PROVIDERS_KEY)
    auth_tokens = _var_get(OAUTH2_AUTH_KEY)
    for provider in providers:
        # Only authorize if a token was provided
        if provider.name not in auth_tokens:
            continue
        auth_token = auth_tokens[provider.name]
        log.info(f"Attempting to authorize provider: {provider.name}")
        secrets = _get_provider_secrets(provider.name, provider_secrets)
        client = OAuth2Session(secrets["client_id"])
        # NOTE: It's possible that the secrets being stored might not all be needed
        # here, and may in fact be rejected. We won't know until we add more providers.
        tokens = client.fetch_token(provider.auth_url, code=auth_token, **secrets)
        _update_tokens(provider.name, tokens)
        # Remove the auth token since it is no longer needed nor accurate
        auth_tokens.pop(provider.name)
        Variable.set(OAUTH2_AUTH_KEY, auth_tokens, serialize_json=True)


def refresh(provider: OauthProvider) -> None:
    """
    Refresh the tokens for a given provider.

    This will use the stored refresh token to attempt a fetch of a new access/refresh
    token pair. The new tokens will be updated in the Airflow Variable store. Raises
    an AirflowSkipException if no tokens are defined for the provider.
    """
    current_tokens = _var_get(OAUTH2_TOKEN_KEY)
    if provider.name not in current_tokens:
        raise AirflowSkipException(
            f"Provider {provider.name} had no stored tokens, it may need to be "
            f"authorized first."
        )
    refresh_token = current_tokens[provider.name]["refresh_token"]
    secrets = _get_provider_secrets(provider.name)
    client = OAuth2Session(secrets["client_id"])
    log.info(f"Attempting token refresh for provider: {provider.name}")
    # NOTE: Same as above, **secrets may be too much info for some requests.
    new_tokens = client.refresh_token(
        provider.refresh_url, refresh_token=refresh_token, **secrets
    )
    _update_tokens(provider.name, new_tokens)
6,876 | getsize | """Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with an EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
pass
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
import warnings
warnings._deprecated(__name__, remove=(3, 13))
class Chunk:
def __init__(self, file, align=True, bigendian=True, inclheader=False):
import struct
self.closed = False
self.align = align # whether to align to word (2-byte) boundaries
if bigendian:
strflag = '>'
else:
strflag = '<'
self.file = file
self.chunkname = file.read(4)
if len(self.chunkname) < 4:
raise EOFError
try:
self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0]
except struct.error:
raise EOFError from None
if inclheader:
self.chunksize = self.chunksize - 8 # subtract header
self.size_read = 0
try:
self.offset = self.file.tell()
except (AttributeError, OSError):
self.seekable = False
else:
self.seekable = True
def getname(self):
"""Return the name (ID) of the current chunk."""
return self.chunkname
def METHOD_NAME(self):
"""Return the size of the current chunk."""
return self.chunksize
def close(self):
if not self.closed:
try:
self.skip()
finally:
self.closed = True
def isatty(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return False
def seek(self, pos, whence=0):
"""Seek to specified position into the chunk.
Default position is 0 (start of chunk).
If the file is not seekable, this will result in an error.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if not self.seekable:
raise OSError("cannot seek")
if whence == 1:
pos = pos + self.size_read
elif whence == 2:
pos = pos + self.chunksize
if pos < 0 or pos > self.chunksize:
raise RuntimeError
self.file.seek(self.offset + pos, 0)
self.size_read = pos
def tell(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return self.size_read
def read(self, size=-1):
"""Read at most size bytes from the chunk.
If size is omitted or negative, read until the end
of the chunk.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if self.size_read >= self.chunksize:
return b''
if size < 0:
size = self.chunksize - self.size_read
if size > self.chunksize - self.size_read:
size = self.chunksize - self.size_read
data = self.file.read(size)
self.size_read = self.size_read + len(data)
if self.size_read == self.chunksize and \
self.align and \
(self.chunksize & 1):
dummy = self.file.read(1)
self.size_read = self.size_read + len(dummy)
return data
    def skip(self):
        """Skip the rest of the chunk.
        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.seekable:
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except OSError:
                # Seek failed at runtime; fall back to the read loop below.
                pass
        # Non-seekable file: drain the remaining payload in 8 KiB reads.
        # read() also consumes the alignment pad byte on the last read.
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError
6,877 | get settings | from .utils import NamespacedClient, query_params, _make_path
class ClusterClient(NamespacedClient):
    """Namespaced client exposing the Elasticsearch ``_cluster`` APIs."""

    @query_params('level', 'local', 'master_timeout', 'timeout',
        'wait_for_active_shards', 'wait_for_nodes',
        'wait_for_relocating_shards', 'wait_for_status')
    def health(self, index=None, params=None):
        """
        Report a simple status on the health of the cluster, optionally
        scoped to a single index.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html>`_

        :arg index: limit the report to a specific index
        :arg level: detail level, one of 'cluster' (default), 'indices',
            'shards'
        :arg local: answer from the local node instead of the master
            (default: false)
        :arg master_timeout: timeout for the connection to the master node
        :arg timeout: explicit operation timeout
        :arg wait_for_active_shards: block until this many shards are active
        :arg wait_for_nodes: block until this many nodes are available
        :arg wait_for_relocating_shards: block until this many relocating
            shards have finished
        :arg wait_for_status: block until the cluster reaches this status,
            one of 'green', 'yellow', 'red' (default: None)
        """
        path = _make_path('_cluster', 'health', index)
        _, body = self.transport.perform_request('GET', path, params=params)
        return body

    @query_params('local', 'master_timeout')
    def pending_tasks(self, params=None):
        """
        List cluster-level changes (index creation, mapping updates, shard
        allocation or failure) that are queued but not yet executed.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html>`_

        :arg local: answer from the local node instead of the master
            (default: false)
        :arg master_timeout: timeout for the connection to the master node
        """
        _, body = self.transport.perform_request('GET',
            '/_cluster/pending_tasks', params=params)
        return body

    @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings',
        'ignore_unavailable', 'local', 'master_timeout')
    def state(self, metric=None, index=None, params=None):
        """
        Retrieve comprehensive state information for the whole cluster.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html>`_

        :arg metric: limit the response to the specified metrics
        :arg index: comma-separated list of index names; `_all` or the empty
            string targets every index
        :arg allow_no_indices: ignore wildcard expressions that resolve to no
            concrete indices (including `_all` / no indices given)
        :arg expand_wildcards: which index states wildcards expand to,
            one of 'open' (default), 'closed', 'none', 'all'
        :arg flat_settings: return settings in flat format (default: false)
        :arg ignore_unavailable: skip concrete indices that are missing or
            closed
        :arg local: answer from the local node instead of the master
            (default: false)
        :arg master_timeout: timeout for the connection to the master node
        """
        # The URL only accepts an index segment after a metric segment, so
        # filtering by index alone requires the explicit '_all' metric.
        if index and not metric:
            metric = '_all'
        path = _make_path('_cluster', 'state', metric, index)
        _, body = self.transport.perform_request('GET', path, params=params)
        return body

    @query_params('flat_settings', 'human')
    def stats(self, node_id=None, params=None):
        """
        Retrieve cluster-wide statistics: basic index metrics and information
        about the nodes that currently form the cluster.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_

        :arg node_id: comma-separated node IDs or names to restrict the
            response; `_local` means the connected node, empty means all nodes
        :arg flat_settings: return settings in flat format (default: false)
        :arg human: return time and byte values in human-readable format
            (default: False)
        """
        if node_id:
            url = _make_path('_cluster/stats/nodes', node_id)
        else:
            url = '/_cluster/stats'
        _, body = self.transport.perform_request('GET', url, params=params)
        return body

    @query_params('dry_run', 'explain', 'master_timeout', 'metric', 'timeout')
    def reroute(self, body=None, params=None):
        """
        Explicitly execute cluster reroute allocation commands.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_

        :arg body: the `commands` definition (`move`, `cancel`, `allocate`)
        :arg dry_run: simulate only and return the resulting state
        :arg explain: explain why the commands can or cannot be executed
        :arg master_timeout: timeout for the connection to the master node
        :arg metric: limit the response to the specified metrics; defaults to
            all but metadata, valid choices: '_all', 'blocks', 'metadata',
            'nodes', 'routing_table', 'master_node', 'version'
        :arg timeout: explicit operation timeout
        """
        _, data = self.transport.perform_request('POST', '/_cluster/reroute',
            params=params, body=body)
        return data

    @query_params('flat_settings', 'master_timeout', 'timeout')
    def METHOD_NAME(self, params=None):
        """
        Retrieve the cluster-wide settings.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_

        :arg flat_settings: return settings in flat format (default: false)
        :arg master_timeout: timeout for the connection to the master node
        :arg timeout: explicit operation timeout
        """
        _, data = self.transport.perform_request('GET', '/_cluster/settings',
            params=params)
        return data

    @query_params('flat_settings', 'master_timeout', 'timeout')
    def put_settings(self, body=None, params=None):
        """
        Update cluster-wide settings.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_

        :arg body: the settings to update, either `transient` or `persistent`
            (persistent settings survive a cluster restart)
        :arg flat_settings: return settings in flat format (default: false)
        :arg master_timeout: timeout for the connection to the master node
        :arg timeout: explicit operation timeout
        """
        _, data = self.transport.perform_request('PUT', '/_cluster/settings',
            params=params, body=body)
        return data
|
6,878 | validate | # Copyright (c) 2017, Frappe Technologies and contributors
# License: MIT. See LICENSE
import json
import frappe
import frappe.utils
from frappe import _
from frappe.model.document import Document
from frappe.utils.jinja import validate_template
from frappe.utils.weasyprint import download_pdf, get_html
class PrintFormat(Document):
	"""Controller for the Print Format doctype (Jinja/JS print templates)."""
	# begin: auto-generated types
	# This code is auto-generated. Do not modify anything in this block.
	from typing import TYPE_CHECKING
	if TYPE_CHECKING:
		from frappe.types import DF
		absolute_value: DF.Check
		align_labels_right: DF.Check
		css: DF.Code | None
		custom_format: DF.Check
		default_print_language: DF.Link | None
		disabled: DF.Check
		doc_type: DF.Link
		font: DF.Data | None
		font_size: DF.Int
		format_data: DF.Code | None
		html: DF.Code | None
		line_breaks: DF.Check
		margin_bottom: DF.Float
		margin_left: DF.Float
		margin_right: DF.Float
		margin_top: DF.Float
		module: DF.Link | None
		page_number: DF.Literal[
			"Hide", "Top Left", "Top Center", "Top Right", "Bottom Left", "Bottom Center", "Bottom Right"
		]
		print_format_builder: DF.Check
		print_format_builder_beta: DF.Check
		print_format_type: DF.Literal["Jinja", "JS"]
		raw_commands: DF.Code | None
		raw_printing: DF.Check
		show_section_headings: DF.Check
		standard: DF.Literal["No", "Yes"]
	# end: auto-generated types
	def onload(self):
		# Ship the field templates available for this doctype to the client.
		templates = frappe.get_all(
			"Print Format Field Template",
			fields=["template", "field", "name"],
			filters={"document_type": self.doc_type},
		)
		self.set_onload("print_templates", templates)
	def get_html(self, docname, letterhead=None):
		# Render the given document with this format via WeasyPrint helpers.
		return get_html(self.doc_type, docname, self.name, letterhead)
	def download_pdf(self, docname, letterhead=None):
		return download_pdf(self.doc_type, docname, self.name, letterhead)
	def METHOD_NAME(self):
		# Standard (code-shipped) formats may only change in developer mode,
		# during fixture import, or in tests.
		if (
			self.standard == "Yes"
			and not frappe.local.conf.get("developer_mode")
			and not (frappe.flags.in_import or frappe.flags.in_test)
		):
			frappe.throw(frappe._("Standard Print Format cannot be updated"))
		# old_doc_type is required for clearing item cache
		self.old_doc_type = frappe.db.get_value("Print Format", self.name, "doc_type")
		self.extract_images()
		if not self.module:
			# Default the module from the target doctype.
			self.module = frappe.db.get_value("DocType", self.doc_type, "module")
		if self.html and self.print_format_type != "JS":
			validate_template(self.html)
		# Raw-printing formats need raw commands; HTML formats need HTML.
		if self.custom_format and self.raw_printing and not self.raw_commands:
			frappe.throw(
				_("{0} are required").format(frappe.bold(_("Raw Commands"))), frappe.MandatoryError
			)
		if self.custom_format and not self.html and not self.raw_printing:
			frappe.throw(_("{0} is required").format(frappe.bold(_("HTML"))), frappe.MandatoryError)
	def extract_images(self):
		# Move inline base64 images out of HTML fields into File documents.
		from frappe.core.doctype.file.utils import extract_images_from_html
		if self.print_format_builder_beta:
			return
		if self.format_data:
			data = json.loads(self.format_data)
			for df in data:
				if df.get("fieldtype") and df["fieldtype"] in ("HTML", "Custom HTML") and df.get("options"):
					df["options"] = extract_images_from_html(self, df["options"])
			self.format_data = json.dumps(data)
	def on_update(self):
		# Clear cached metadata for both the previous and current doctype.
		if hasattr(self, "old_doc_type") and self.old_doc_type:
			frappe.clear_cache(doctype=self.old_doc_type)
		if self.doc_type:
			frappe.clear_cache(doctype=self.doc_type)
		self.export_doc()
	def after_rename(self, old: str, new: str, *args, **kwargs):
		if self.doc_type:
			frappe.clear_cache(doctype=self.doc_type)
		# update property setter default_print_format if set
		frappe.db.set_value(
			"Property Setter",
			{
				"doctype_or_field": "DocType",
				"doc_type": self.doc_type,
				"property": "default_print_format",
				"value": old,
			},
			"value",
			new,
		)
	def export_doc(self):
		# Write standard formats to disk as module JSON (developer mode).
		from frappe.modules.utils import export_module_json
		return export_module_json(self, self.standard == "Yes", self.module)
	def on_trash(self):
		if self.doc_type:
			frappe.clear_cache(doctype=self.doc_type)
@frappe.whitelist()
def make_default(name):
	"""Set the print format *name* as the default for its doctype.

	:param name: name of an existing Print Format document
	:raises frappe.PermissionError: if the session user lacks write
		permission on Print Format
	"""
	# BUG FIX: the boolean returned by has_permission() was previously
	# discarded, so this check was a no-op; throw=True actually enforces it.
	frappe.has_permission("Print Format", "write", throw=True)
	print_format = frappe.get_doc("Print Format", name)
	doctype = frappe.get_doc("DocType", print_format.doc_type)
	if doctype.custom:
		# Custom doctypes can be edited directly.
		doctype.default_print_format = name
		doctype.save()
	else:
		# Standard doctypes are adjusted via a Property Setter, the same
		# mechanism used by "Customize Form".
		frappe.make_property_setter(
			{
				"doctype_or_field": "DocType",
				"doctype": print_format.doc_type,
				"property": "default_print_format",
				"value": name,
			}
		)
	frappe.msgprint(
		frappe._("{0} is now default print format for {1} doctype").format(
			frappe.bold(name), frappe.bold(print_format.doc_type)
		)
	)
6,879 | parse kv | import os
import pathlib
import subprocess
import sys
import urllib
from typing import Dict, List, Optional, Tuple
# Path component is a node in a tree.
# It's the equivalent of a short file/directory name in a file system.
# In our abstraction, it's represented as arbitrary bag of attributes
TestPathComponent = Dict[str, str]
# TestPath is a full path to a node in a tree from the root
# It's the equivalent of an absolute file name in a file system
TestPath = List[TestPathComponent]
def parse_test_path(tp_str: str) -> TestPath:
    """Parse a string representation of TestPath.

    Components are separated by '#'; within a component, 'key=value' pairs
    are separated by '&', with the first pair encoding the type/name.
    """
    if tp_str == '':
        return []
    ret: TestPath = []
    for component_str in tp_str.split('#'):
        if component_str == '&':
            # Technically, this should be mapped to {None:None}. But because the
            # TestPath definition is now Dict[str, str], not Dict[Optional[str],
            # Optional[str]], we cannot add it. Fixing this definition needs to
            # fix callers not to assume they are always str. In practice, this
            # is a rare case. Do not append {None: None} now...
            # ret.append({None: None})
            continue
        first = True
        component = {}
        for kv in component_str.split('&'):
            if first:
                # The leading token is the 'type=name' pair (may be empty).
                first = False
                if kv:
                    (component['type'], component['name']) = METHOD_NAME(kv)
            else:
                (k, v) = METHOD_NAME(kv)
                component[k] = v
        ret.append(component)
    return ret
def METHOD_NAME(kv: str) -> Tuple[str, str]:
    """Split a single 'key=value' token and percent-decode both halves."""
    parts = kv.split('=')
    if len(parts) != 2:
        raise ValueError('Malformed TestPath component: ' + kv)
    key, value = parts
    return (_decode_str(key), _decode_str(value))
def unparse_test_path(tp: TestPath) -> str:
    """Create a string representation of TestPath.

    Inverse of parse_test_path: components joined by '#', key=value pairs
    (percent-encoded) joined by '&', type/name emitted first when present.
    """
    ret = []
    for component in tp:
        s = ''
        pairs = []
        if component.get('type', None) and component.get('name', None):
            # Emit the canonical 'type=name' head, then the remaining pairs.
            s += _encode_str(component['type']) + '=' + _encode_str(component['name'])
            for k, v in component.items():
                if k not in ('type', 'name'):
                    pairs.append((k, v))
        else:
            # No type/name head: every non-empty pair is emitted as-is.
            for k, v in component.items():
                if not k or not v:
                    continue
                pairs.append((k, v))
            if len(pairs) == 0:
                # Empty component round-trips as the '&' placeholder.
                s = '&'
        # Sort for a deterministic, comparable representation.
        pairs = sorted(pairs, key=lambda p: p[0])
        for (k, v) in pairs:
            s += '&'
            s += _encode_str(k) + '=' + _encode_str(v)
        ret.append(s)
    return '#'.join(ret)
def _decode_str(s: str) -> str:
return urllib.parse.unquote(s)
def _encode_str(s: str) -> str:
return s.replace('%', '%25').replace('=', '%3D').replace('#', '%23').replace('&', '%26')
def _relative_to(p: pathlib.Path, base: str) -> pathlib.Path:
if sys.version_info[0:2] >= (3, 6):
return p.resolve(strict=False).relative_to(base)
else:
try:
resolved = p.resolve()
except BaseException:
resolved = p
return resolved.relative_to(base)
class FilePathNormalizer:
    """Normalize file paths based on the Git repository root

    Some test runners output absolute file paths. This is not preferable when
    making statistical data on tests as the absolute paths can vary per machine
    or per run. FilePathNormalizer guesses the relative paths based on the Git
    repository root.
    """
    def __init__(self, base_path: Optional[str] = None, no_base_path_inference: bool = False):
        # Explicit base path; when None, the Git root may be inferred lazily.
        self._base_path = base_path
        # When True, absolute paths are returned unchanged if no base_path.
        self._no_base_path_inference = no_base_path_inference
        self._inferred_base_path = None  # type: Optional[str]
    def relativize(self, p: str) -> str:
        """Return *p* relative to the (given or inferred) base, as a string."""
        # normpath first so '..' segments don't defeat relative_to().
        return str(self._relativize(pathlib.Path(os.path.normpath(p))))
    def _relativize(self, p: pathlib.Path) -> pathlib.Path:
        # Relative paths are already in the desired form.
        if not p.is_absolute():
            return p
        if self._base_path:
            return _relative_to(p, self._base_path)
        if self._no_base_path_inference:
            return p
        if not self._inferred_base_path:
            # Cache the inference: only the first absolute path probes Git.
            self._inferred_base_path = self._auto_infer_base_path(p)
        if self._inferred_base_path:
            return _relative_to(p, self._inferred_base_path)
        return p
    def _auto_infer_base_path(self, p: pathlib.Path) -> Optional[str]:
        """Ask Git for the working-tree root nearest to *p*, or None."""
        p = p.parent
        # Walk up to the nearest existing directory so `git` can cwd there.
        while p != p.root and not p.exists():
            p = p.parent
        try:
            # Prefer the superproject root when *p* is inside a submodule.
            toplevel = subprocess.check_output(
                ['git', 'rev-parse', '--show-superproject-working-tree'],
                cwd=str(p),
                stderr=subprocess.DEVNULL,
                universal_newlines=True).strip()
            if toplevel:
                return toplevel
            return subprocess.check_output(
                ['git', 'rev-parse', '--show-toplevel'],
                cwd=str(p),
                stderr=subprocess.DEVNULL,
                universal_newlines=True).strip()
        except subprocess.CalledProcessError:
            # Cannot infer the Git repo. Continue with the abs path...
            return None
6,880 | forward in | import struct
import logging
import asyncio
from amaranth import *
from ....support.bits import *
from ....support.logging import *
from ....support.endpoint import *
from ....gateware.pads import *
from ....database.jedec import *
from ....arch.jtag import *
from ... import *
from ..jtag_probe import JTAGProbeBus
class JTAGOpenOCDSubtarget(Elaboratable):
    # Gateware that interprets OpenOCD remote-bitbang command bytes arriving
    # on out_fifo, drives the JTAG pins accordingly, and returns sampled TDO
    # values through in_fifo.
    def __init__(self, pads, out_fifo, in_fifo, period_cyc):
        self.pads = pads
        self.out_fifo = out_fifo      # host -> gateware command bytes
        self.in_fifo = in_fifo        # gateware -> host TDO sample bytes
        self.period_cyc = period_cyc  # sys-clock cycles between commands
    def elaborate(self, platform):
        m = Module()
        out_fifo = self.out_fifo
        in_fifo = self.in_fifo
        m.submodules.bus = bus = JTAGProbeBus(self.pads)
        m.d.comb += [
            bus.trst_z.eq(0),
        ]
        blink = Signal()
        # Countdown timer paces processing to one command per period_cyc.
        timer = Signal(range(self.period_cyc))
        with m.If(timer != 0):
            m.d.sync += timer.eq(timer - 1)
        with m.Else():
            with m.If(out_fifo.r_rdy):
                with m.Switch(out_fifo.r_data):
                    m.d.comb += out_fifo.r_en.eq(1)
                    # remote_bitbang_write(int tck, int tms, int tdi)
                    with m.Case(*b"01234567"):
                        # ASCII '0'..'7': low three bits carry tdi/tms/tck.
                        m.d.sync += Cat(bus.tdi, bus.tms, bus.tck).eq(out_fifo.r_data[:3])
                    # remote_bitbang_reset(int trst, int srst)
                    with m.Case(*b"rs"):
                        m.d.sync += Cat(bus.trst_o).eq(0b0)
                    with m.Case(*b"tu"):
                        m.d.sync += Cat(bus.trst_o).eq(0b1)
                    # remote_bitbang_sample(void)
                    with m.Case(*b"R"):
                        # Only consume the command once the reply FIFO has
                        # room for the sampled TDO byte ('0' or '1').
                        m.d.comb += out_fifo.r_en.eq(in_fifo.w_rdy)
                        m.d.comb += in_fifo.w_en.eq(1)
                        m.d.comb += in_fifo.w_data.eq(b"0"[0] | Cat(bus.tdo))
                    # remote_bitbang_blink(int on)
                    with m.Case(*b"Bb"):
                        m.d.sync += blink.eq(~out_fifo.r_data[5])
                    # remote_bitbang_quit(void)
                    with m.Case(*b"Q"):
                        pass
                    with m.Default():
                        m.d.comb += out_fifo.r_en.eq(0)
                # Re-arm the pacing timer after a command was consumed.
                with m.If(out_fifo.r_en):
                    m.d.sync += timer.eq(self.period_cyc - 1)
        return m
class JTAGOpenOCDApplet(GlasgowApplet):
    # Bridges a Glasgow JTAG interface to an OpenOCD "remote bitbang" socket.
    logger = logging.getLogger(__name__)
    help = "expose JTAG via OpenOCD remote bitbang interface"
    description = """
    Expose JTAG via a socket using the OpenOCD remote bitbang protocol.
    Usage with TCP sockets:
    ::
        glasgow run jtag-openocd tcp:localhost:2222
        openocd -c 'interface remote_bitbang; remote_bitbang_port 2222'
    Usage with Unix domain sockets:
    ::
        glasgow run jtag-openocd unix:/tmp/jtag.sock
        openocd -c 'interface remote_bitbang; remote_bitbang_host /tmp/jtag.sock'
    """
    __pins = ("tck", "tms", "tdi", "tdo", "trst")
    @classmethod
    def add_build_arguments(cls, parser, access):
        super().add_build_arguments(parser, access)
        # The four mandatory JTAG signals default to enabled; TRST is optional.
        for pin in ("tck", "tms", "tdi", "tdo"):
            access.add_pin_argument(parser, pin, default=True)
        access.add_pin_argument(parser, "trst")
        parser.add_argument(
            "-f", "--frequency", metavar="FREQ", type=int, default=100,
            help="set TCK frequency to FREQ kHz (default: %(default)s)")
    def build(self, target, args):
        self.mux_interface = iface = target.multiplexer.claim_interface(self, args)
        iface.add_subtarget(JTAGOpenOCDSubtarget(
            pads=iface.get_pads(args, pins=self.__pins),
            out_fifo=iface.get_out_fifo(),
            in_fifo=iface.get_in_fifo(),
            # Cycles per processed command, derived from the kHz argument.
            period_cyc=int(target.sys_clk_freq // (args.frequency * 1000)),
        ))
    async def run(self, device, args):
        return await device.demultiplexer.claim_interface(self, self.mux_interface, args)
    @classmethod
    def add_interact_arguments(cls, parser):
        ServerEndpoint.add_argument(parser, "endpoint")
    async def interact(self, device, args, iface):
        endpoint = await ServerEndpoint("socket", self.logger, args.endpoint)
        # Socket -> device: forward protocol bytes from OpenOCD to gateware.
        async def forward_out():
            while True:
                try:
                    data = await endpoint.recv()
                    await iface.write(data)
                    await iface.flush()
                except asyncio.CancelledError:
                    # NOTE(review): swallowing CancelledError inside `while
                    # True` prevents this task from ever being cancelled —
                    # confirm this is intentional.
                    pass
        # Device -> socket: forward sampled TDO bytes back to OpenOCD.
        async def METHOD_NAME():
            while True:
                try:
                    data = await iface.read()
                    await endpoint.send(data)
                except asyncio.CancelledError:
                    pass
        forward_out_fut = asyncio.ensure_future(forward_out())
        forward_in_fut = asyncio.ensure_future(METHOD_NAME())
        # Run both pumps until one of them raises.
        await asyncio.wait([forward_out_fut, forward_in_fut],
                           return_when=asyncio.FIRST_EXCEPTION)
# -------------------------------------------------------------------------------------------------
class JTAGOpenOCDAppletTestCase(GlasgowAppletTestCase, applet=JTAGOpenOCDApplet):
    # Smoke test: the applet's gateware must synthesize without errors.
    @synthesis_test
    def test_build(self):
        self.assertBuilds()
6,881 | remove tmp file | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: gunicorn
short_description: Run gunicorn with various settings
description:
- Starts gunicorn with the parameters specified. Common settings for gunicorn
configuration are supported. For additional configuration use a config file
See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
options. It's recommended to always use the chdir option to avoid problems
with the location of the app.
requirements: [gunicorn]
author:
- "Alejandro Gomez (@agmezr)"
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
app:
type: str
required: true
aliases: ['name']
description:
- The app module. A name refers to a WSGI callable that should be found in the specified module.
venv:
type: path
aliases: ['virtualenv']
description:
- 'Path to the virtualenv directory.'
config:
type: path
description:
- 'Path to the gunicorn configuration file.'
aliases: ['conf']
chdir:
type: path
description:
- 'Chdir to specified directory before apps loading.'
pid:
type: path
description:
- 'A filename to use for the PID file. If not set and not found on the configuration file a tmp
pid file will be created to check a successful run of gunicorn.'
worker:
type: str
choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']
description:
- 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
user:
type: str
description:
- 'Switch worker processes to run as this user.'
notes:
- If not specified on config file, a temporary error log will be created on /tmp dir.
Please make sure you have write access in /tmp dir. Not needed but will help you to
identify any problem with configuration.
'''
EXAMPLES = '''
- name: Simple gunicorn run example
community.general.gunicorn:
app: 'wsgi'
chdir: '/workspace/example'
- name: Run gunicorn on a virtualenv
community.general.gunicorn:
app: 'wsgi'
chdir: '/workspace/example'
venv: '/workspace/example/venv'
- name: Run gunicorn with a config file
community.general.gunicorn:
app: 'wsgi'
chdir: '/workspace/example'
conf: '/workspace/example/gunicorn.cfg'
- name: Run gunicorn as ansible user with specified pid and config file
community.general.gunicorn:
app: 'wsgi'
chdir: '/workspace/example'
conf: '/workspace/example/gunicorn.cfg'
venv: '/workspace/example/venv'
pid: '/workspace/example/gunicorn.pid'
user: 'ansible'
'''
RETURN = '''
gunicorn:
description: process id of gunicorn
returned: changed
type: str
sample: "1234"
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
def search_existing_config(config, option):
    ''' Return the first line of the config file mentioning option, else None. '''
    if not (config and os.path.isfile(config)):
        return None
    with open(config, 'r') as config_file:
        for line in config_file:
            if option in line:
                return line
    return None
def METHOD_NAME(file_path):
    ''' Best-effort removal of a temporary file; missing files are ignored. '''
    if not os.path.isfile(file_path):
        return
    os.remove(file_path)
def main():
    """Build the gunicorn command line from module parameters, start it as a
    daemon, and report the PID of the started master process."""
    # available gunicorn options on module
    gunicorn_options = {
        'config': '-c',
        'chdir': '--chdir',
        'worker': '-k',
        'user': '-u',
    }
    module = AnsibleModule(
        argument_spec=dict(
            app=dict(required=True, type='str', aliases=['name']),
            venv=dict(type='path', aliases=['virtualenv']),
            config=dict(type='path', aliases=['conf']),
            chdir=dict(type='path'),
            pid=dict(type='path'),
            user=dict(type='str'),
            worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']),
        )
    )
    # temporary files in case no option provided
    tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
    tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
    # remove temp file if exists
    METHOD_NAME(tmp_pid_file)
    METHOD_NAME(tmp_error_log)
    # obtain app name and venv
    params = module.params
    app = params['app']
    venv = params['venv']
    pid = params['pid']
    # use venv path if exists
    if venv:
        gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
    else:
        gunicorn_command = module.get_bin_path('gunicorn')
    # to daemonize the process
    options = ["-D"]
    # fill options
    for option in gunicorn_options:
        param = params[option]
        if param:
            options.append(gunicorn_options[option])
            options.append(param)
    # Only add an error-log flag when the config file does not already set one.
    error_log = search_existing_config(params['config'], 'errorlog')
    if not error_log:
        # place error log somewhere in case of fail
        options.append("--error-logfile")
        options.append(tmp_error_log)
    # Fall back to a temporary PID file so a successful start can be detected.
    pid_file = search_existing_config(params['config'], 'pid')
    if not params['pid'] and not pid_file:
        pid = tmp_pid_file
    # add option for pid file if not found on config file
    if not pid_file:
        options.append('--pid')
        options.append(pid)
    # put args together
    args = [gunicorn_command] + options + [app]
    # NOTE(review): rc is ignored and success is judged by empty stderr below;
    # a benign gunicorn warning on stderr would be treated as a failure —
    # confirm this is the intended behavior.
    rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
    if not err:
        # wait for gunicorn to dump to log
        time.sleep(0.5)
        if os.path.isfile(pid):
            with open(pid, 'r') as f:
                result = f.readline().strip()
            if not params['pid']:
                os.remove(pid)
            module.exit_json(changed=True, pid=result, debug=" ".join(args))
        else:
            # if user defined own error log, check that
            if error_log:
                error = 'Please check your {0}'.format(error_log.strip())
            else:
                if os.path.isfile(tmp_error_log):
                    with open(tmp_error_log, 'r') as f:
                        error = f.read()
                    # delete tmp log
                    os.remove(tmp_error_log)
                else:
                    error = "Log not found"
            module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
    else:
        module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err)
6,882 | test checkout customer attach by id | import graphene
from .....checkout.error_codes import CheckoutErrorCode
from ....tests.utils import assert_no_permission, get_graphql_content
CHECKOUT_CUSTOMER_ATTACH_MUTATION = """
mutation checkoutCustomerAttach($checkoutId: ID, $token: UUID) {
checkoutCustomerAttach(checkoutId: $checkoutId, token: $token) {
checkout {
token
}
errors {
field
message
code
}
}
}
"""
def METHOD_NAME(
    api_client, user_api_client, checkout_with_item, customer_user
):
    """Attaching a customer by global ID works only for the authenticated user."""
    checkout = checkout_with_item
    checkout.email = "old@email.com"
    checkout.save()
    assert checkout.user is None
    query = CHECKOUT_CUSTOMER_ATTACH_MUTATION
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)
    # NOTE(review): the mutation only declares $checkoutId/$token; customerId
    # is passed here but never referenced by the query — confirm intentional.
    variables = {"checkoutId": checkout_id, "customerId": customer_id}
    # Mutation should fail for unauthenticated customers
    response = api_client.post_graphql(query, variables)
    assert_no_permission(response)
    # Mutation should succeed for authenticated customer
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutCustomerAttach"]
    assert not data["errors"]
    checkout.refresh_from_db()
    # Ownership and email are taken over by the attached customer.
    assert checkout.user == customer_user
    assert checkout.email == customer_user.email
def test_checkout_customer_attach_by_token(
    api_client, user_api_client, checkout_with_item, customer_user
):
    """Attaching a customer via the checkout token works only when authenticated."""
    checkout = checkout_with_item
    checkout.email = "old@email.com"
    checkout.save()
    assert checkout.user is None
    query = CHECKOUT_CUSTOMER_ATTACH_MUTATION
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)
    variables = {"token": checkout.token, "customerId": customer_id}
    # Mutation should fail for unauthenticated customers
    response = api_client.post_graphql(query, variables)
    assert_no_permission(response)
    # Mutation should succeed for authenticated customer
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutCustomerAttach"]
    assert not data["errors"]
    checkout.refresh_from_db()
    # Ownership and email are taken over by the attached customer.
    assert checkout.user == customer_user
    assert checkout.email == customer_user.email
def test_checkout_customer_attach_neither_token_and_id_given(
    user_api_client, checkout_with_item, customer_user
):
    """Omitting both checkoutId and token must yield a GRAPHQL_ERROR."""
    checkout = checkout_with_item
    checkout.email = "old@email.com"
    checkout.save()
    assert checkout.user is None
    query = CHECKOUT_CUSTOMER_ATTACH_MUTATION
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)
    variables = {"customerId": customer_id}
    # Mutation should succeed for authenticated customer
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutCustomerAttach"]
    # Neither identifier given: the checkout cannot be resolved.
    assert len(data["errors"]) == 1
    assert not data["checkout"]
    assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
def test_checkout_customer_attach_both_token_and_id_given(
    user_api_client, checkout_with_item, customer_user
):
    """Passing both checkoutId and token must yield a GRAPHQL_ERROR."""
    checkout = checkout_with_item
    checkout.email = "old@email.com"
    checkout.save()
    assert checkout.user is None
    query = CHECKOUT_CUSTOMER_ATTACH_MUTATION
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    customer_id = graphene.Node.to_global_id("User", customer_user.pk)
    variables = {
        "checkoutId": checkout_id,
        "token": checkout.token,
        "customerId": customer_id,
    }
    # Mutation should succeed for authenticated customer
    response = user_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutCustomerAttach"]
    # Both identifiers given: the request is ambiguous and must be rejected.
    assert len(data["errors"]) == 1
    assert not data["checkout"]
    assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
6,883 | parse x name | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ dataset.py ]
# Synopsis [ the phone dataset ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import random
#-------------#
import pandas as pd
#-------------#
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import Dataset
#-------------#
import torchaudio
HALF_BATCHSIZE_TIME = 2000
#################
# Phone Dataset #
#################
class PhoneDataset(Dataset):
def __init__(self, split, bucket_size, libri_root, phone_path, bucket_file, sample_rate=16000, train_dev_seed=1337, **kwargs):
super(PhoneDataset, self).__init__()
self.libri_root = libri_root
self.phone_path = phone_path
self.sample_rate = sample_rate
self.class_num = 41 # NOTE: pre-computed, should not need change
self.Y = {}
phone_file = open(os.path.join(phone_path, 'converted_aligned_phones.txt')).readlines()
for line in phone_file:
line = line.strip('\n').split(' ')
self.Y[line[0]] = [int(p) for p in line[1:]]
if split == 'train' or split == 'dev':
usage_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()
random.seed(train_dev_seed)
random.shuffle(usage_list)
percent = int(len(usage_list)*0.9)
usage_list = usage_list[:percent] if split == 'train' else usage_list[percent:]
elif split == 'test':
usage_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()
else:
raise ValueError('Invalid \'split\' argument for dataset: PhoneDataset!')
usage_list = {line.strip('\n'):None for line in usage_list}
print('[Dataset] - # phone classes: ' + str(self.class_num) + ', number of data for ' + split + ': ' + str(len(usage_list)))
# Read table for bucketing
assert os.path.isdir(bucket_file), 'Please first run `preprocess/generate_len_for_bucket.py to get bucket file.'
table = pd.read_csv(os.path.join(bucket_file, 'train-clean-100.csv')).sort_values(by=['length'], ascending=False)
X = table['file_path'].tolist()
X_lens = table['length'].tolist()
# Use bucketing to allow different batch sizes at run time
self.X = []
batch_x, batch_len = [], []
for x, x_len in zip(X, X_lens):
if self.METHOD_NAME(x) in usage_list:
batch_x.append(x)
batch_len.append(x_len)
# Fill in batch_x until batch is full
if len(batch_x) == bucket_size:
# Half the batch size if seq too long
if (bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME):
self.X.append(batch_x[:bucket_size//2])
self.X.append(batch_x[bucket_size//2:])
else:
self.X.append(batch_x)
batch_x, batch_len = [], []
# Gather the last batch
if len(batch_x) > 1:
if self.METHOD_NAME(x) in usage_list:
self.X.append(batch_x)
def METHOD_NAME(self, x):
return x.split('/')[-1].split('.')[0]
    def _load_wav(self, wav_path):
        """Load the waveform at *wav_path* (relative to the LibriSpeech root)
        and flatten it to a 1-D tensor."""
        wav, sr = torchaudio.load(os.path.join(self.libri_root, wav_path))
        # assert sr == self.sample_rate, f'Sample rate mismatch: real {sr}, config {self.sample_rate}'
        return wav.view(-1)
    def __len__(self):
        # Length is the number of buckets (pre-batched groups), not the
        # number of individual utterances.
        return len(self.X)
    def __getitem__(self, index):
        """Return one whole bucket: (list of waveforms, list of label tensors)."""
        # Load acoustic feature and pad
        wav_batch = [self._load_wav(x_file) for x_file in self.X[index]]
        label_batch = [torch.LongTensor(self.Y[self.METHOD_NAME(x_file)]) for x_file in self.X[index]]
        return wav_batch, label_batch  # bucketing, return ((wavs, labels))
def collate_fn(self, items):
return items[0][0], items[0][1] # hack bucketing, return (wavs, labels) |
6,884 | visualize | import glob
import itertools
import math
from copy import deepcopy
from itertools import combinations
import cv2
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from numba import jit, prange
from scipy.spatial import ConvexHull
class EgocentricalAlignmentFeaturizer(object):
    """Egocentric pose featurizer.

    Reads pose-estimation CSVs (DeepLabCut-style 3-level headers), recentres
    all body-parts on a chosen anchor point, optionally renders the result,
    and computes geometric features (convex hulls, angles, pairwise
    distances) plus rolling-window statistics.
    """

    def __init__(
        self,
        data_path: str,
        anchor: str = "Centroid",
        fps: int = 30,
        METHOD_NAME: bool = False,
    ):
        # data_path: directory of input CSVs; anchor: body-part to centre on;
        # METHOD_NAME: visualization flag (see NOTE in run()).
        self.data_files = glob.glob(data_path + "/*.csv")
        self.anchor = (f"{anchor}_x", f"{anchor}_y")
        self.fps = fps
        # NOTE(review): this bool attribute shadows the method of the same
        # name below; calling self.METHOD_NAME() in run() would then invoke a
        # bool and raise TypeError when the flag is True — confirm upstream.
        self.METHOD_NAME = METHOD_NAME
        self.img_size = (500, 500)
        # Rolling windows in frames for several second-scale spans.
        self.rolling_window_sizes = {}
        for i in [1, 1.5, 3]:
            self.rolling_window_sizes[f"{str(i)}s"] = int(fps * i)
        for i in [2, 4, 10]:
            self.rolling_window_sizes[f"{str(1/i)}s"] = int(fps / i)
        self.run()

    def run(self):
        # Process every CSV: flatten headers, recentre on the anchor point,
        # then featurize.
        for file_path in self.data_files:
            df = pd.read_csv(file_path, header=[0, 1, 2], index_col=0)
            df.columns = df.columns.droplevel().map("_".join)
            self.bp_headers, self.bp_dict = {}, {}
            for i, j in zip(["_x", "_y", "_p"], ["x", "y", "p"]):
                self.bp_headers[j] = [x for x in df.columns if x.endswith(i)]
            df = df[self.bp_headers["x"] + self.bp_headers["y"]]
            self.scaled_df = deepcopy(df)
            for bp in self.bp_headers["x"]:
                # NOTE(review): rstrip("_x") strips any trailing '_'/'x'
                # characters, not the literal suffix — a body-part name ending
                # in 'x' (e.g. "box") would be mangled; verify upstream.
                self.bp_dict[bp.rstrip("_x")] = (bp, bp.rstrip("_x") + "_y")
            # Shift every frame so the anchor sits at the image centre.
            df["correction_x"] = df[self.anchor[0]] - (self.img_size[0] / 2)
            df["correction_y"] = df[self.anchor[1]] - (self.img_size[1] / 2)
            for c in self.bp_dict.values():
                self.scaled_df[c[0]] = self.scaled_df[c[0]] - df["correction_x"]
                self.scaled_df[c[1]] = self.scaled_df[c[1]] - df["correction_y"]
            # Missing points are placed at the image centre.
            self.scaled_df = self.scaled_df.fillna((self.img_size[0] / 2))
            if self.METHOD_NAME:
                self.METHOD_NAME()
            self.featurize()

    def METHOD_NAME(self):
        """Render each aligned frame with body-part dots and skeleton lines."""
        max_x, max_y = np.nanmax(
            self.scaled_df[self.bp_headers["x"]].values
        ), np.nanmax(self.scaled_df[self.bp_headers["y"]].values)
        img = np.zeros(shape=[int(max_x), int(max_y), 3], dtype=np.uint8)
        for frm in range(len(self.scaled_df)):
            frm_data = self.scaled_df.iloc[frm].astype(int)
            frm_img = deepcopy(img)
            for bp_name, bp in self.bp_dict.items():
                x, y = frm_data[bp[0]], frm_data[bp[1]]
                cv2.circle(frm_img, (int(x), int(y)), 0, (255, 255, 0), 8)
            # Connect every pair of body-parts with a line.
            for bp_c in combinations(list(self.bp_dict.keys()), 2):
                bp_1_x, bp_1_y = self.bp_dict[bp_c[0]][0], self.bp_dict[bp_c[0]][1]
                bp_2_x, bp_2_y = self.bp_dict[bp_c[1]][0], self.bp_dict[bp_c[1]][1]
                point_one, point_two = (frm_data[bp_1_x], frm_data[bp_1_y]), (
                    frm_data[bp_2_x],
                    frm_data[bp_2_y],
                )
                cv2.line(frm_img, point_one, point_two, (255, 255, 0), 1)
            cv2.imshow("img", frm_img)
            cv2.waitKey(33)  # ~30 fps playback

    @staticmethod
    @jit(nopython=True, cache=True, fastmath=True)
    def three_point_angles(data: np.array):
        """Angle (degrees, 0-360) at the middle point of each 3-point row.

        Rows are laid out as [x1, y1, x2, y2, x3, y3].
        NOTE(review): results is an int array (np.full(..., 0)), so the float
        angle is truncated on assignment — presumably intentional; confirm.
        """
        results = np.full((data.shape[0]), 0)
        for i in prange(data.shape[0]):
            angle = math.degrees(
                math.atan2(data[i][5] - data[i][3], data[i][4] - data[i][2])
                - math.atan2(data[i][1] - data[i][3], data[i][0] - data[i][2])
            )
            if angle < 0:
                angle += 360
            results[i] = angle
        return results

    @staticmethod
    def subhull_calculator(data: np.array):
        """Convex-hull perimeter per frame for a subset of body-parts."""
        results = np.full((len(data)), np.nan)
        data = np.reshape(data.values, (len(data), -1, 2))
        for cnt, i in enumerate(data):
            results[cnt] = ConvexHull(i).area
        return results.astype(int)

    @staticmethod
    @jit(nopython=True)
    def euclidean_distance(bp_1_x_vals, bp_2_x_vals, bp_1_y_vals, bp_2_y_vals):
        # Vectorized per-frame distance between two body-parts.
        return np.sqrt(
            (bp_1_x_vals - bp_2_x_vals) ** 2 + (bp_1_y_vals - bp_2_y_vals) ** 2
        )

    @staticmethod
    def convex_hull_calculator_mp(data: np.array):
        """Whole-animal convex-hull perimeter per frame (parallel worker)."""
        results = np.full((data.shape[0]), np.nan)
        data = np.reshape(data.values, (len(data), -1, 2))
        for cnt, i in enumerate(data):
            results[cnt] = ConvexHull(i).area
        return results.astype(int)

    def featurize(self):
        """Compute hulls, angles, distances and rolling statistics into
        ``self.results``."""
        three_point_combinations = np.array(
            list(combinations(list(self.bp_dict.keys()), 3))
        )
        four_point_combinations = np.array(
            list(combinations(list(self.bp_dict.keys()), 4))
        )
        two_point_combinations = np.array(
            list(combinations(list(self.bp_dict.keys()), 2))
        )
        results = pd.DataFrame()
        # Whole-body hull, computed in 100 parallel chunks.
        split_data = np.array_split(self.scaled_df, 100)
        hull_area = Parallel(n_jobs=-1, verbose=0, backend="threading")(
            delayed(self.convex_hull_calculator_mp)(x) for x in split_data
        )
        results["hull_area"] = np.concatenate(hull_area).ravel().tolist()
        # Hull + angle for every 3-body-part combination.
        for c in three_point_combinations:
            col_names = list(sum([(x + "_x", y + "_y") for (x, y) in zip(c, c)], ()))
            split_data = np.array_split(self.scaled_df[col_names], 100)
            three_point_hull = Parallel(n_jobs=-1, verbose=0, backend="threading")(
                delayed(self.subhull_calculator)(x) for x in split_data
            )
            results[f"hull_{c[0]}_{c[1]}_{c[2]}"] = (
                np.concatenate(three_point_hull).ravel().tolist()
            )
            results[f"angle_{c[0]}_{c[1]}_{c[2]}"] = self.three_point_angles(
                data=self.scaled_df[col_names].values
            )
        # Hull for every 4-body-part combination.
        for c in four_point_combinations:
            col_names = list(sum([(x + "_x", y + "_y") for (x, y) in zip(c, c)], ()))
            split_data = np.array_split(self.scaled_df[col_names], 100)
            four_point_hull = Parallel(n_jobs=-1, verbose=0, backend="threading")(
                delayed(self.subhull_calculator)(x) for x in split_data
            )
            results[f"hull_{c[0]}_{c[1]}_{c[2]}_{c[3]}"] = (
                np.concatenate(four_point_hull).ravel().tolist()
            )
        # Pairwise distances.
        for c in two_point_combinations:
            col_names = list(sum([(x + "_x", y + "_y") for (x, y) in zip(c, c)], ()))
            results[f"distance_{c[0]}_{c[1]}"] = self.euclidean_distance(
                self.scaled_df[col_names[0]].values,
                self.scaled_df[col_names[2]].values,
                self.scaled_df[col_names[1]].values,
                self.scaled_df[col_names[3]].values,
            )
        # Rolling mean/std of every feature over every configured window.
        for c, t in list(
            itertools.product(results.columns, self.rolling_window_sizes.keys())
        ):
            results[f"{c}_rolling_{t}_window_mean"] = (
                results[c]
                .rolling(int(self.rolling_window_sizes[t]), min_periods=1)
                .mean()
            )
            results[f"{c}_rolling_{t}_window_stdev"] = (
                results[c]
                .rolling(int(self.rolling_window_sizes[t]), min_periods=1)
                .std()
            )
        self.results = results.fillna(-1)
# NOTE(review): runs at import time against a hard-coded developer path —
# presumably left over from testing; consider an `if __name__ == "__main__":`
# guard before shipping.
aligner = EgocentricalAlignmentFeaturizer(
    data_path="/Users/simon/Desktop/envs/simba_dev/simba/features_scripts/misc/test_data_mouse_OF",
    METHOD_NAME=False,
)
6,885 | download xbuildenv | import json
import shutil
import subprocess
from pathlib import Path
from urllib.error import HTTPError
from urllib.request import urlopen, urlretrieve
from pyodide_lock import PyodideLockSpec
from . import build_env
from .common import exit_with_stdio
from .create_pypa_index import create_pypa_index
from .logger import logger
def METHOD_NAME(
    version: str, xbuildenv_path: Path, *, url: str | None = None
) -> None:
    """Download and unpack the cross-build environment into *xbuildenv_path*.

    Any pre-existing directory at that path is removed first. If *url* is not
    given, the matching GitHub release tarball for *version* is used.
    """
    from shutil import rmtree, unpack_archive
    from tempfile import NamedTemporaryFile

    logger.info("Downloading xbuild environment")
    rmtree(xbuildenv_path, ignore_errors=True)

    if url is not None:
        xbuildenv_url = url
    else:
        xbuildenv_url = (
            "https://github.com/pyodide/pyodide/releases/download/"
            f"{version}/xbuildenv-{version}.tar.bz2"
        )

    with NamedTemporaryFile(suffix=".tar") as tmp:
        # unpack_archive infers the format from the name; tarfile's "r:*"
        # auto-detects the bz2 compression despite the plain .tar suffix.
        urlretrieve(xbuildenv_url, tmp.name)
        unpack_archive(tmp.name, xbuildenv_path)
def install_xbuildenv(version: str, xbuildenv_path: Path) -> Path:
    """Install the downloaded cross-build environment in place.

    Installs the host requirements into the host site-packages, overlays the
    site-packages-extras, fetches/loads the pyodide lockfile and builds a
    PyPA-style index. Idempotent: a ``.installed`` marker short-circuits.
    Returns the Pyodide root directory of the environment.
    """
    logger.info("Installing xbuild environment")
    xbuildenv_path = xbuildenv_path / "xbuildenv"
    xbuildenv_root = xbuildenv_path / "pyodide-root"
    if (xbuildenv_path / ".installed").exists():
        # Already installed previously; nothing to do.
        return xbuildenv_root
    # TODO: use a separate configuration file for variables that are used only in package building
    host_site_packages = Path(
        build_env._get_make_environment_vars(pyodide_root=xbuildenv_root)[
            "HOSTSITEPACKAGES"
        ]
    )
    host_site_packages.mkdir(exist_ok=True, parents=True)
    result = subprocess.run(
        [
            "pip",
            "install",
            "--no-user",
            "-t",
            host_site_packages,
            "-r",
            xbuildenv_path / "requirements.txt",
        ],
        capture_output=True,
        encoding="utf8",
    )
    if result.returncode != 0:
        # Surface pip's output and abort.
        exit_with_stdio(result)
    # Copy the site-packages-extras (coming from the cross-build-files meta.yaml
    # key) over the site-packages directory with the newly installed packages.
    shutil.copytree(
        xbuildenv_path / "site-packages-extras", host_site_packages, dirs_exist_ok=True
    )
    cdn_base = f"https://cdn.jsdelivr.net/pyodide/v{version}/full/"
    lockfile_path = xbuildenv_root / "dist" / "pyodide-lock.json"
    if lockfile_path.exists():
        lockfile = PyodideLockSpec.from_json(lockfile_path)
    else:
        # No bundled lockfile: fetch from the CDN, falling back to the
        # pre-rename "repodata.json" for older releases.
        try:
            with urlopen(cdn_base + "pyodide-lock.json") as response:
                lockfile_bytes = response.read()
        except HTTPError:
            # Try again with old url
            with urlopen(cdn_base + "repodata.json") as response:
                lockfile_bytes = response.read()
        lockfile = PyodideLockSpec(**json.loads(lockfile_bytes))
    create_pypa_index(lockfile.packages, xbuildenv_root, cdn_base)
    (xbuildenv_path / ".installed").touch()
    return xbuildenv_root
def install(path: Path, *, download: bool = True, url: str | None = None) -> Path:
    """
    Install cross-build environment.

    Parameters
    ----------
    path
        A path to the cross-build environment.
    download
        Whether to download the cross-build environment before installing it.
    url
        URL to download the cross-build environment from. This is only used
        if `download` is True. The URL should point to a tarball containing
        the cross-build environment. If not specified, the corresponding
        release on GitHub is used.

        Warning: if you are downloading from a version that is not the same
        as the current version of pyodide-build, make sure that the cross-build
        environment is compatible with the current version of Pyodide.

    Returns
    -------
    Path to the Pyodide root directory for the cross-build environment.
    """
    from . import __version__

    version = __version__

    if not download and not path.exists():
        # BUG FIX: error message was ungrammatical ("not exists") and did not
        # say where the environment was expected.
        logger.error("xbuild environment does not exist at %s", path)
        raise FileNotFoundError(path)

    if download and path.exists():
        # Avoid clobbering an environment that is already on disk.
        logger.warning("xbuild environment already exists, skipping download")
    elif download:
        METHOD_NAME(version, path, url=url)

    return install_xbuildenv(version, path)
6,886 | sanitize text | import os
import logging
from hashlib import sha1
from babel import Locale
from gettext import translation
from threading import local
from typing import cast, Dict, Any, List, Optional, TypeVar, Union, Sequence
from normality import stringify
from normality.cleaning import compose_nfc
from normality.cleaning import remove_unsafe_chars
from normality.encoding import DEFAULT_ENCODING
from banal import is_mapping, unique_list, ensure_list
MEGABYTE = 1024 * 1024
DEFAULT_LOCALE = "en"
T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")
try:
# Work-around for Python 3.8 backward compat:
PathLike = Union[str, os.PathLike[str]]
except TypeError:
PathLike = Union[str, os.PathLike] # type: ignore
i18n_path = os.path.join(os.path.dirname(__file__), "translations")
state = local()
log = logging.getLogger(__name__)
def gettext(*args: Optional[str], **kwargs: Dict[str, str]) -> str:
    """Translate via the thread-local gettext catalog.

    Lazily initialises the catalog to the default locale on first use in
    the current thread.
    """
    if not hasattr(state, "translation"):
        set_model_locale(Locale.parse(DEFAULT_LOCALE))
    return cast(str, state.translation.gettext(*args, **kwargs))
def defer(text: str) -> str:
    """Mark *text* for deferred translation; returns it unchanged.

    Conventionally aliased so string-extraction tools pick the literal up
    without translating it at definition time.
    """
    return text
def set_model_locale(locale: Locale) -> None:
    """Install *locale* and its gettext catalog into thread-local state."""
    state.locale = locale
    # Fall back to the untranslated strings when no catalog exists.
    state.translation = translation(
        "followthemoney", i18n_path, [str(locale)], fallback=True
    )
def get_locale() -> Locale:
    """Return the current thread-local locale, defaulting to English."""
    if not hasattr(state, "locale"):
        return Locale.parse(DEFAULT_LOCALE)
    return Locale.parse(state.locale)
def get_env_list(name: str, default: List[str] = []) -> List[str]:
    """Read a colon-separated list from environment variable *name*.

    Returns *default* when the variable is unset or blank. The mutable
    default argument is safe here because it is never mutated.
    """
    value = stringify(os.environ.get(name))
    if value is not None:
        # str.split(":") always yields at least one element, so the
        # original `if len(values):` guard was always true — removed.
        return value.split(":")
    return default
def METHOD_NAME(text: Any, encoding: str = DEFAULT_ENCODING) -> Optional[str]:
    """Normalise *text* for safe storage: stringify, NFC-compose, strip
    unsafe characters and force it through the default encoding.

    Returns None when the input cannot be turned into usable text.
    """
    text = stringify(text, encoding_default=encoding)
    if text is None:
        return None

    try:
        text = compose_nfc(text)
    except Exception as ex:
        # FIX: `except (SystemError, Exception)` was redundant — Exception
        # already covers SystemError. Broad catch kept intentionally: any
        # failure to normalise means the text is unusable.
        log.warning("Cannot NFC text: %s", ex)
        return None

    text = remove_unsafe_chars(text)
    if text is None:
        return None

    # Round-trip through the default encoding, replacing anything that
    # cannot be represented.
    byte_text = text.encode(DEFAULT_ENCODING, "replace")
    return cast(str, byte_text.decode(DEFAULT_ENCODING, "replace"))
def value_list(value: Union[T, Sequence[T]]) -> List[T]:
    """Coerce *value* into a list.

    Sequences (other than str/bytes) are expanded element-wise; scalars,
    strings and bytes become single-element lists.
    """
    if isinstance(value, (str, bytes)):
        return [cast(T, value)]
    try:
        return list(cast(Sequence[T], value))
    except TypeError:
        # Not iterable: wrap the scalar.
        return [cast(T, value)]
def key_bytes(key: Any) -> bytes:
    """Convert the given data to a value appropriate for hashing."""
    if isinstance(key, bytes):
        return key
    text = stringify(key)
    # stringify returns None for empty/None-like input.
    return b"" if text is None else text.encode("utf-8")
def join_text(*parts: Any, sep: str = " ") -> Optional[str]:
    """Join all the non-null arguments using *sep*; None when nothing remains."""
    texts = [t for t in (stringify(p) for p in parts) if t is not None]
    if not texts:
        return None
    return sep.join(texts)
def get_entity_id(obj: Any) -> Optional[str]:
    """Given an entity-ish object, try to get the ID.

    Accepts a mapping with an ``id`` key, an object with an ``id``
    attribute, or a raw ID value; returns it stringified.
    """
    if is_mapping(obj):
        obj = obj.get("id")
    else:
        try:
            obj = obj.id
        except AttributeError:
            # Plain value: pass through to stringify.
            pass
    return stringify(obj)
def make_entity_id(*parts: Any, key_prefix: Optional[str] = None) -> Optional[str]:
    """Derive a deterministic SHA1 entity ID from the given parts.

    Returns None when no part contributed any bytes (i.e. the digest is
    unchanged from the prefix-only baseline).
    """
    digest = sha1()
    if key_prefix:
        digest.update(key_bytes(key_prefix))
    base = digest.digest()
    for part in parts:
        digest.update(key_bytes(part))
    if digest.digest() == base:
        # All parts were empty/None — no usable ID.
        return None
    return digest.hexdigest()
def merge_context(left: Dict[K, V], right: Dict[K, V]) -> Dict[K, List[V]]:
    """When merging two entities, make lists of all the duplicate context
    keys."""
    combined = {}
    keys = [*left.keys(), *right.keys()]
    for key in set(keys):
        if key in ("caption",):
            # captions are singular; do not merge them into lists.
            continue
        lval: List[V] = [i for i in ensure_list(left.get(key)) if i is not None]
        rval: List[V] = [i for i in ensure_list(right.get(key)) if i is not None]
        # De-duplicate while preserving first-seen order.
        combined[key] = unique_list([*lval, *rval])
    return combined
def dampen(short: int, long: int, text: str) -> float:
    """Map the length of *text* linearly into [0, 1] between the bounds
    *short* (score 0) and *long* (score 1)."""
    span = max(1.0, float(long - short))
    score = (len(text) - short) / span
    if score < 0:
        return 0
    return min(1.0, score)
def shortest(*texts: str) -> str:
    """Return the shortest of the given strings (first one wins on ties)."""
    result = texts[0]
    for candidate in texts[1:]:
        if len(candidate) < len(result):
            result = candidate
    return result
def longest(*texts: str) -> str:
    """Return the longest of the given strings (first one wins on ties)."""
    result = texts[0]
    for candidate in texts[1:]:
        if len(candidate) > len(result):
            result = candidate
    return result
6,887 | list | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing the Service Bus REST operations."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit api-version / accept values win over the pinned defaults.
    api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.ServiceBus/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.servicebus.v2021_11_01.ServiceBusManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Client/config/(de)serializer are passed positionally by the
        # generated client or by keyword for direct construction.
        # NOTE(review): METHOD_NAME here presumably resolves to builtin
        # `list` in the original generated file — confirm.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists all of the available ServiceBus REST API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicebus.v2021_11_01.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        # Map HTTP status codes to the exceptions callers should see.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # service-provided next_link with the client's api-version re-applied.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to ItemPaged.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    METHOD_NAME.metadata = {"url": "/providers/Microsoft.ServiceBus/operations"}
6,888 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConfigurationProfileAssignmentResult',
'AwaitableGetConfigurationProfileAssignmentResult',
'get_configuration_profile_assignment',
'get_configuration_profile_assignment_output',
]
@pulumi.output_type
class GetConfigurationProfileAssignmentResult:
    """
    Configuration profile assignment is an association between a VM and automanage profile configuration.
    """
    def __init__(__self__, id=None, managed_by=None, METHOD_NAME=None, properties=None, system_data=None, type=None):
        # Validate-and-store each field; pulumi.set is the output_type
        # storage mechanism (attributes are read back via pulumi.get).
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if managed_by and not isinstance(managed_by, str):
            raise TypeError("Expected argument 'managed_by' to be a str")
        pulumi.set(__self__, "managed_by", managed_by)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(METHOD_NAME="managedBy")
    def managed_by(self) -> str:
        """
        Azure resource id. Indicates if this resource is managed by another Azure resource.
        """
        return pulumi.get(self, "managed_by")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConfigurationProfileAssignmentPropertiesResponse':
        """
        Properties of the configuration profile assignment.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(METHOD_NAME="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetConfigurationProfileAssignmentResult(GetConfigurationProfileAssignmentResult):
    # Makes the result awaitable; the unreachable `yield` turns __await__
    # into a generator as the await protocol requires, and the plain result
    # is returned immediately.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetConfigurationProfileAssignmentResult(
            id=self.id,
            managed_by=self.managed_by,
            METHOD_NAME=self.METHOD_NAME,
            properties=self.properties,
            system_data=self.system_data,
            type=self.type)
def get_configuration_profile_assignment(configuration_profile_assignment_name: Optional[str] = None,
                                         resource_group_name: Optional[str] = None,
                                         vm_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationProfileAssignmentResult:
    """
    Get information about a configuration profile assignment


    :param str configuration_profile_assignment_name: The configuration profile assignment name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str vm_name: The name of the virtual machine.
    """
    # Marshal arguments and invoke the provider; the typed result is
    # reconstructed field by field from the raw invoke response.
    __args__ = dict()
    __args__['configurationProfileAssignmentName'] = configuration_profile_assignment_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['vmName'] = vm_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:automanage/v20220504:getConfigurationProfileAssignment', __args__, opts=opts, typ=GetConfigurationProfileAssignmentResult).value

    return AwaitableGetConfigurationProfileAssignmentResult(
        id=pulumi.get(__ret__, 'id'),
        managed_by=pulumi.get(__ret__, 'managed_by'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_configuration_profile_assignment)
def get_configuration_profile_assignment_output(configuration_profile_assignment_name: Optional[pulumi.Input[str]] = None,
                                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                                vm_name: Optional[pulumi.Input[str]] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationProfileAssignmentResult]:
    """
    Get information about a configuration profile assignment


    :param str configuration_profile_assignment_name: The configuration profile assignment name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str vm_name: The name of the virtual machine.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # above into an Output-returning variant.
    ...
6,889 | split lora model | # Convert LoRA to different rank approximation (should only be used to go to lower rank)
# This code is based off the extract_lora_from_models.py file which is based on https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py
# Thanks to cloneofsimo
import argparse
import math
import os
import torch
from safetensors.torch import load_file, save_file, safe_open
from tqdm import tqdm
from library import train_util, model_util
import numpy as np
def load_state_dict(file_name):
    """Load a checkpoint and its metadata.

    Safetensors files also yield their embedded metadata dict; plain torch
    checkpoints have no metadata (None).
    """
    if model_util.is_safetensors(file_name):
        sd = load_file(file_name)
        with safe_open(file_name, framework="pt") as f:
            metadata = f.metadata()
    else:
        sd = torch.load(file_name, map_location="cpu")
        metadata = None

    return sd, metadata
def save_to_file(file_name, model, metadata):
    """Persist a state dict, choosing the format by the target extension."""
    if model_util.is_safetensors(file_name):
        save_file(model, file_name, metadata)
        return
    # Plain torch checkpoint: metadata cannot be embedded.
    torch.save(model, file_name)
def METHOD_NAME(lora_sd, unit):
    """Split a DyLoRA state dict into fixed-rank sub-models.

    Args:
        lora_sd: DyLoRA state dict (keys containing "lora_down"/"lora_up"
            plus per-module alpha tensors).
        unit: rank step; one sub-model is produced for every multiple of
            `unit` strictly below the maximum rank found in `lora_sd`.

    Returns:
        (max_rank, split_models) where split_models is a list of
        (state_dict, rank, new_alpha) tuples. new_alpha is always None —
        alpha is passed through unchanged (see note below).
    """
    max_rank = 0

    # Extract loaded lora dim and alpha
    for key, value in lora_sd.items():
        if "lora_down" in key:
            rank = value.size()[0]
            if rank > max_rank:
                max_rank = rank
    print(f"Max rank: {max_rank}")

    rank = unit
    split_models = []
    new_alpha = None
    while rank < max_rank:
        print(f"Splitting rank {rank}")
        new_sd = {}
        for key, value in lora_sd.items():
            if "lora_down" in key:
                # Keep only the first `rank` rows of the down-projection.
                new_sd[key] = value[:rank].contiguous()
            elif "lora_up" in key:
                # Keep only the first `rank` columns of the up-projection.
                new_sd[key] = value[:, :rank].contiguous()
            else:
                # Alpha is deliberately NOT rescaled: the original author
                # noted (in Japanese) that scaling it produced wrong results,
                # so it is passed through unchanged.
                new_sd[key] = value

        split_models.append((new_sd, rank, new_alpha))
        rank += unit

    return max_rank, split_models
def split(args):
    """Split a DyLoRA checkpoint into one file per rank multiple of args.unit."""
    print("loading Model...")
    lora_sd, metadata = load_state_dict(args.model)

    print("Splitting Model...")
    original_rank, split_models = METHOD_NAME(lora_sd, args.unit)
    # BUG FIX: metadata is None for plain .ckpt checkpoints; the original
    # `metadata.get(...)` would raise AttributeError.
    comment = (metadata or {}).get("ss_training_comment", "")

    for state_dict, new_rank, new_alpha in split_models:
        # update metadata
        if metadata is None:
            new_metadata = {}
        else:
            new_metadata = metadata.copy()

        new_metadata["ss_training_comment"] = f"split from DyLoRA, rank {original_rank} to {new_rank}; {comment}"
        new_metadata["ss_network_dim"] = str(new_rank)
        # new_metadata["ss_network_alpha"] = str(new_alpha.float().numpy())

        # BUG FIX: the hashes were computed from the stale `metadata` and
        # written back into `metadata` instead of the per-model
        # `new_metadata`, so the saved files never carried correct hashes.
        model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, new_metadata)
        new_metadata["sshs_model_hash"] = model_hash
        new_metadata["sshs_legacy_hash"] = legacy_hash

        filename, ext = os.path.splitext(args.save_to)
        model_file_name = filename + f"-{new_rank:04d}{ext}"

        print(f"saving model to: {model_file_name}")
        save_to_file(model_file_name, state_dict, new_metadata)
def setup_parser() -> argparse.ArgumentParser:
    """Construct the command-line parser for the DyLoRA split script."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--unit", type=int, default=None, help="size of rank to split into / rankを分割するサイズ")
    ap.add_argument(
        "--save_to",
        type=str,
        default=None,
        help="destination base file name: ckpt or safetensors file / 保存先のファイル名のbase、ckptまたはsafetensors",
    )
    ap.add_argument(
        "--model",
        type=str,
        default=None,
        help="DyLoRA model to resize at to new rank: ckpt or safetensors file / 読み込むDyLoRAモデル、ckptまたはsafetensors",
    )
    return ap
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the split.
    parser = setup_parser()

    args = parser.parse_args()
    split(args)
6,890 | test log prob | # Copyright 2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Type
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from _pytest.fixtures import SubRequest
from check_shapes import ShapeChecker
import gpflow
from gpflow.base import AnyNDArray, TensorType
from gpflow.likelihoods import HeteroskedasticTFPConditional
tf.random.set_seed(99012)
EquivalentLikelihoods = Tuple[
gpflow.likelihoods.ScalarLikelihood, gpflow.likelihoods.HeteroskedasticTFPConditional
]
class Data:
    """Shared fixture data: a homoskedastic noise level and an equivalent
    two-output parameterisation for the heteroskedastic likelihood."""

    cs = ShapeChecker().check_shape

    g_var = 0.345  # fixed observation-noise variance for the Gaussian case
    rng = np.random.RandomState(123)
    N = 5
    X = cs(rng.randn(N, 2), "[N, D]")
    Y = cs(rng.randn(N, 1), "[N, P]")

    # single "GP" (for the mean):
    f_mean = cs(rng.randn(N, 1), "[N, Q]")
    f_var: AnyNDArray = cs(rng.randn(N, 1) ** 2, "[N, Q]")  # ensure positivity

    # Second "GP" pinned to log(scale) that reproduces g_var exactly,
    # with zero variance, so the heteroskedastic model is effectively
    # homoskedastic.
    equivalent_f2 = cs(np.log(g_var) / 2, "[]")
    f2_mean = cs(np.full((N, 1), equivalent_f2), "[N, Q]")
    f2_var = cs(np.zeros((N, 1)), "[N, Q]")

    F2_mean = cs(np.c_[f_mean, f2_mean], "[N, Q2]")
    F2_var = cs(np.c_[f_var, f2_var], "[N, Q2]")
def student_t_class_factory(df: int = 3) -> Type[tfp.distributions.StudentT]:
    r"""
    Returns tfp.distributions.StudentT class (not instance!)
    where df (degrees of freedom) is pre-specified.

    This class allows to instantiate a StudentT object by passing
    loc and scale at initialisation for a given degree-of-freedom.
    """

    class _StudentT(tfp.distributions.StudentT):
        def __init__(self, loc: TensorType, scale: TensorType) -> None:
            # df is captured from the enclosing factory call.
            super().__init__(df, loc=loc, scale=scale)

    return _StudentT
@pytest.fixture(name="equivalent_likelihoods", params=["studentt", "gaussian"])
def _equivalent_likelihoods_fixture(
    request: SubRequest,
) -> EquivalentLikelihoods:
    """Yield a (scalar likelihood, equivalent heteroskedastic likelihood)
    pair for each supported distribution family."""
    if request.param == "studentt":
        return (
            gpflow.likelihoods.StudentT(scale=Data.g_var ** 0.5, df=3.0),
            HeteroskedasticTFPConditional(distribution_class=student_t_class_factory(df=3)),
        )
    elif request.param == "gaussian":
        return (
            gpflow.likelihoods.Gaussian(variance=Data.g_var),
            HeteroskedasticTFPConditional(distribution_class=tfp.distributions.Normal),
        )
    # Unreachable unless params above get out of sync with the branches.
    assert False, f"Unknown likelihood {request.param}."
def METHOD_NAME(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """
    heteroskedastic likelihood where the variance parameter is always constant
    giving the same answers for variational_expectations, predict_mean_and_var,
    etc as the regular Gaussian likelihood
    """
    homoskedastic_likelihood, heteroskedastic_likelihood = equivalent_likelihoods
    # Same data, equivalent parameterisations: log densities must agree.
    np.testing.assert_array_almost_equal(
        homoskedastic_likelihood.log_prob(Data.X, Data.f_mean, Data.Y),
        heteroskedastic_likelihood.log_prob(Data.X, Data.F2_mean, Data.Y),
    )
def test_variational_expectations(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """variational_expectations must agree between the equivalent likelihoods."""
    homoskedastic, heteroskedastic = equivalent_likelihoods
    expected = homoskedastic.variational_expectations(Data.X, Data.f_mean, Data.f_var, Data.Y)
    actual = heteroskedastic.variational_expectations(Data.X, Data.F2_mean, Data.F2_var, Data.Y)
    # student-t case has a max absolute difference of 0.0034
    np.testing.assert_array_almost_equal(expected, actual, decimal=2)
def test_predict_mean_and_var(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """predict_mean_and_var must agree between the equivalent likelihoods."""
    homoskedastic, heteroskedastic = equivalent_likelihoods
    expected = homoskedastic.predict_mean_and_var(Data.X, Data.f_mean, Data.f_var)
    actual = heteroskedastic.predict_mean_and_var(Data.X, Data.F2_mean, Data.F2_var)
    np.testing.assert_allclose(expected, actual)
def test_conditional_mean(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """conditional_mean must agree between the equivalent likelihoods."""
    homoskedastic, heteroskedastic = equivalent_likelihoods
    expected = homoskedastic.conditional_mean(Data.X, Data.f_mean)
    actual = heteroskedastic.conditional_mean(Data.X, Data.F2_mean)
    np.testing.assert_allclose(expected, actual)
def test_conditional_variance(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """conditional_variance must agree between the equivalent likelihoods."""
    homoskedastic, heteroskedastic = equivalent_likelihoods
    expected = homoskedastic.conditional_variance(Data.X, Data.f_mean)
    actual = heteroskedastic.conditional_variance(Data.X, Data.F2_mean)
    np.testing.assert_allclose(expected, actual)
def test_predict_log_density(equivalent_likelihoods: EquivalentLikelihoods) -> None:
    """predict_log_density must agree between the equivalent likelihoods."""
    homoskedastic, heteroskedastic = equivalent_likelihoods
    expected = homoskedastic.predict_log_density(Data.X, Data.f_mean, Data.f_var, Data.Y)
    actual = heteroskedastic.predict_log_density(Data.X, Data.F2_mean, Data.F2_var, Data.Y)
    # student-t has a max absolute difference of 0.025
    np.testing.assert_array_almost_equal(expected, actual, decimal=1)
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb(Command):
    """Create a "dumb" built distribution: install into a temporary tree
    and archive that tree as-is."""

    description = "create a \"dumb\" built distribution"

    # NOTE(review): 'bdist-dir=' and 'dist-dir=' both declare the short
    # option 'd'.  This mirrors upstream distutils, but looks like a
    # long-standing clash -- confirm before relying on '-d'.
    user_options = [('bdist-dir=', 'd',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('format=', 'f',
                     "archive format to create (tar, gztar, bztar, xztar, "
                     "ztar, zip)"),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('relative', None,
                     "build the archive using relative paths "
                     "(default: false)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                    ]

    boolean_options = ['keep-temp', 'skip-build', 'relative']

    # Archive format used when --format is not given, keyed by os.name.
    default_format = { 'posix': 'gztar',
                       'nt': 'zip' }

    def initialize_options(self):
        """Set every option to its 'undefined' marker; METHOD_NAME fills them in."""
        self.bdist_dir = None
        self.plat_name = None
        self.format = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = None
        self.relative = 0
        self.owner = None
        self.group = None

    def METHOD_NAME(self):
        """Resolve unset options from the parent 'bdist' command and defaults."""
        if self.bdist_dir is None:
            # Default build dir: <bdist_base>/dumb
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'dumb')

        if self.format is None:
            try:
                self.format = self.default_format[os.name]
            except KeyError:
                raise DistutilsPlatformError(
                    "don't know how to create dumb built distributions "
                    "on platform %s" % os.name)

        # Inherit remaining options from the 'bdist' umbrella command.
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                   ('skip_build', 'skip_build'))

    def run(self):
        """Build (unless skipped), pseudo-install, then archive the tree."""
        if not self.skip_build:
            self.run_command('build')

        # Install into the temporary bdist tree rather than the real prefix.
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0

        log.info("installing to %s", self.bdist_dir)
        self.run_command('install')

        # And make an archive relative to the root of the
        # pseudo-installation tree.
        archive_basename = "%s.%s" % (self.distribution.get_fullname(),
                                      self.plat_name)

        pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            # Relative archives only make sense when base == platbase;
            # otherwise the tree cannot be unpacked under a single prefix.
            if (self.distribution.has_ext_modules() and
                (install.install_base != install.install_platbase)):
                raise DistutilsPlatformError(
                    "can't make a dumb built distribution where "
                    "base and platbase are different (%s, %s)"
                    % (repr(install.install_base),
                       repr(install.install_platbase)))
            else:
                archive_root = os.path.join(self.bdist_dir,
                                            ensure_relative(install.install_base))

        # Make the archive
        filename = self.make_archive(pseudoinstall_root,
                                     self.format, root_dir=archive_root,
                                     owner=self.owner, group=self.group)
        # Tag the dist file with the Python version only when it contains
        # compiled extensions; pure distributions work on 'any' version.
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_dumb', pyversion,
                                             filename))

        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activation layer which applies emulates quantization during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils
activations = tf.keras.activations
logger = common_utils.VAILogger
register_keras_serializable = tf.keras.utils.register_keras_serializable
@register_keras_serializable(package='Vitis', name='NoQuantizeActivation')
class NoQuantizeActivation(object):
  """No quantize activation which simply returns the incoming tensor.

  This activation is required to distinguish between `keras.activations.linear`
  which does the same thing. The main difference is that NoQuantizeActivation
  should not have any quantize operation applied to it.
  """

  def __call__(self, x):
    # Identity: pass the tensor through untouched.
    return x

  def get_config(self):
    # Stateless, so there is nothing to serialize.
    return {}

  def __eq__(self, other):
    # Any (truthy) NoQuantizeActivation instance compares equal to another.
    return bool(other) and isinstance(other, NoQuantizeActivation)

  def __ne__(self, other):
    """Ensure this works on Python2."""
    return not self.__eq__(other)
@register_keras_serializable(package='Vitis', name='QuantizeAwareActivation')
class QuantizeAwareActivation(object):
  """Activation wrapper for quantization aware training.

  The goal of this class is to apply quantize operations during training such
  that the training network mimics quantization loss experienced in activations
  during inference.

  It introduces quantization loss before and after activations as required to
  mimic inference loss. The layer has built-in knowledge of how quantized
  activations are laid out during inference to emulate exact behavior.

  For example, ReLU activations are typically fused into their parent layer
  such as Conv/Dense. Hence, loss is introduced only after the activation has
  been applied. For Softmax on the other hand quantization loss is experienced
  both before and after the activation.

  Input shape:
    Arbitrary.

  Output shape:
    Same shape as input.
  """

  # TODO(pulkitb): Other activations such as elu, tanh etc., should just work
  # on inclusion. Verify in TFLite before enabling.

  # These activations should be quantized prior to the activation being applied.
  _PRE_QUANT_ACTIVATIONS = frozenset({
      'softmax', 'elu', 'selu', 'softplus', 'softsign', 'swish', 'gelu', 'tanh',
      'sigmoid', 'exponential', 'hard_sigmoid'
  })

  # These activations should be quantized after the activation has been applied.
  _POST_QUANT_ACTIVATIONS = frozenset(
      {'linear', 'relu', 'ReLU', 'relu6', 'LeakyReLU', 'PReLU'})

  # Don't take any quantize operations for these activations.
  _NO_QUANT_ACTIVATIONS = frozenset({'NoQuantizeActivation'})

  _CUSTOM_ACTIVATION_ERR_MSG = (
      'Only some Keras activations under `tf.keras.activations` are supported. '
      'For other activations, use `Quantizer` directly, and update layer '
      'config using `QuantizeConfig`.')

  def __init__(self, activation, quantizer, mode, step, quantize_wrapper):
    """Constructs object, and initializes weights for quantization.

    Args:
      activation: Activation function to use.
      quantizer: `Quantizer` to be used to quantize the activation.
      mode: Quantization mode forwarded to the quantizer on every call.
      step: Variable which tracks optimizer step.
      quantize_wrapper: `QuantizeWrapper` which owns this activation.
    """
    self.activation = activation
    self.quantizer = quantizer
    self._mode = mode
    self.step = step
    self.quantize_wrapper = quantize_wrapper

    if not self._is_supported_activation(self.activation):
      logger.error(self._CUSTOM_ACTIVATION_ERR_MSG)

    # Build quantizer state eagerly so variables exist before the first call.
    if self.METHOD_NAME():
      self._pre_activation_vars = quantizer.build(None, 'pre_activation',
                                                  quantize_wrapper)

    if self._should_post_quantize():
      self._post_activation_vars = quantizer.build(None, 'post_activation',
                                                   quantize_wrapper)

  @staticmethod
  def _name(activation):
    # Functions carry __name__; layer/callable objects use their class name.
    if hasattr(activation, '__name__'):
      return activation.__name__
    return activation.__class__.__name__

  def _is_supported_activation(self, activation):
    """Return True if the activation belongs to any known quantize category."""
    activation_name = self._name(activation)
    return activation_name in self._PRE_QUANT_ACTIVATIONS \
        or activation_name in self._POST_QUANT_ACTIVATIONS \
        or activation_name in self._NO_QUANT_ACTIVATIONS

  def METHOD_NAME(self):
    """Return True if quantization must be applied before the activation."""
    return self._name(self.activation) in self._PRE_QUANT_ACTIVATIONS

  def _should_post_quantize(self):
    """Return True if quantization must be applied after the activation."""
    return self._name(self.activation) in self._POST_QUANT_ACTIVATIONS

  def _should_not_quantize(self):
    """Return True if no quantize op should be applied at all."""
    return self._name(self.activation) in self._NO_QUANT_ACTIVATIONS

  def get_quantize_info(self):
    """Return {'type': ..., 'info': ...} describing the active quantizer."""
    if self._should_not_quantize():
      return {}

    quantize_info = {}
    if self.METHOD_NAME():
      quantize_info['type'] = 'pre_activation'
    if self._should_post_quantize():
      quantize_info['type'] = 'post_activation'
    quantize_info['info'] = self.quantizer.get_quantize_info()
    return quantize_info

  def set_quantize_info(self, new_quantize_info):
    """Push externally supplied quantize state into the quantizer."""
    if not self._should_not_quantize():
      self.quantizer.set_quantize_info(new_quantize_info['info'])

  @property
  def training(self):
    # NOTE(review): __call__ reads self._training, which only exists after
    # this setter has been used -- presumably the owning QuantizeWrapper
    # always sets it first; confirm.
    return self._training

  @training.setter
  def training(self, value):
    self._training = value

  @property
  def mode(self):
    return self._mode

  @mode.setter
  def mode(self, value):
    self._mode = value

  def _dict_vars(self, min_var, max_var):
    return {'min_var': min_var, 'max_var': max_var}

  def __call__(self, inputs, *args, **kwargs):

    def make_quantizer_fn(quantizer, x, training, mode, quantizer_vars):
      """Use currying to return True/False specialized fns to the cond."""

      def quantizer_fn():
        return quantizer(x, training, mode, weights=quantizer_vars)

      return quantizer_fn

    x = inputs
    # Pre-quantize (e.g. softmax-like activations) before applying it.
    if self.METHOD_NAME():
      x = common_utils.smart_cond(
          self._training,
          make_quantizer_fn(self.quantizer, x, True, self.mode,
                            self._pre_activation_vars),
          make_quantizer_fn(self.quantizer, x, False, self.mode,
                            self._pre_activation_vars))

    x = self.activation(x, *args, **kwargs)

    # Post-quantize (e.g. relu-like activations) after applying it.
    if self._should_post_quantize():
      x = common_utils.smart_cond(
          self._training,
          make_quantizer_fn(self.quantizer, x, True, self.mode,
                            self._post_activation_vars),
          make_quantizer_fn(self.quantizer, x, False, self.mode,
                            self._post_activation_vars))

    return x

  # `QuantizeAwareActivation` wraps the activation within a layer to perform
  # quantization. In the process, the layer's activation is replaced with
  # `QuantizeAwareActivation`.
  # However, when the layer is serialized and deserialized, we want the original
  # activation to be reconstructed. This ensures that when `QuantizeWrapper`
  # wraps the layer, it can again replace the original activation.

  @classmethod
  def from_config(cls, config):
    return activations.deserialize(config['activation'])

  def get_config(self):
    return {'activation': activations.serialize(self.activation)}
""" Base class for integrators. """
# Third-party
import numpy as np
# This project
from gala.units import UnitSystem, DimensionlessUnitSystem
__all__ = ["Integrator"]
class Integrator(object):
    """Abstract base class for orbit integrators.

    Subclasses implement ``run()``; this base class handles validating the
    derivative function, unit bookkeeping, output-array allocation, and
    packaging results into an Orbit.
    """

    def __init__(
        self,
        func,
        func_args=(),
        func_units=None,
        progress=False,
        store_all=True,
    ):
        """
        Parameters
        ----------
        func : callable
            Function that computes phase-space derivatives.
        func_args : tuple, optional
            Extra positional arguments passed through to ``func``.
        func_units : unit system, optional
            Units in which ``func`` works; dimensionless if not given.
        progress : bool, optional
            Show a tqdm progress bar while integrating.
        store_all : bool, optional
            Keep every timestep (True) or only the final state (False).
        """
        if not hasattr(func, "__call__"):
            raise ValueError(
                "func must be a callable object, e.g., a function."
            )

        self.F = func
        self._func_args = func_args

        # Normalize to a UnitSystem; anything falsy/dimensionless collapses
        # to DimensionlessUnitSystem.
        if func_units is not None and not isinstance(
            func_units, DimensionlessUnitSystem
        ):
            func_units = UnitSystem(func_units)
        else:
            func_units = DimensionlessUnitSystem()
        self._func_units = func_units

        self.progress = bool(progress)
        self.store_all = store_all

    def METHOD_NAME(self):
        """Return the range-like callable used for the timestep loop.

        ``tqdm.trange`` when ``progress=True`` (raising if tqdm is missing),
        otherwise the builtin ``range``.
        """
        if self.progress:
            try:
                from tqdm import trange

                return trange
            except ImportError:
                raise ImportError(
                    "tqdm must be installed to use progress=True when running "
                    f"{self.__class__.__name__}"
                )

        return range

    def _prepare_ws(self, w0, mmap, n_steps):
        """
        Decide how to make the return array. If ``mmap`` is False, this returns a full
        array of zeros, but with the correct shape as the output. If ``mmap`` is True,
        return a pointer to a memory-mapped array. The latter is particularly useful for
        integrating a large number of orbits or integrating a large number of time
        steps.
        """
        from ..dynamics import PhaseSpacePosition

        if not isinstance(w0, PhaseSpacePosition):
            w0 = PhaseSpacePosition.from_w(w0)

        # arr_w0 has shape (2*ndim, norbits): positions stacked on velocities.
        arr_w0 = w0.w(self._func_units)
        self.ndim, self.norbits = arr_w0.shape
        self.ndim = self.ndim // 2

        if self.store_all:
            return_shape = (2 * self.ndim, n_steps + 1, self.norbits)
        else:
            return_shape = (2 * self.ndim, self.norbits)

        if mmap is None:
            # create the return arrays
            ws = np.zeros(return_shape, dtype=float)

        else:
            # Caller supplied a memory-mapped array: validate it before use.
            if mmap.shape != return_shape:
                raise ValueError(
                    "Shape of memory-mapped array doesn't match expected shape of "
                    f"return array ({mmap.shape} vs {return_shape})"
                )

            if not mmap.flags.writeable:
                raise TypeError(
                    f"Memory-mapped array must be a writable mode, not '{mmap.mode}'"
                )

            ws = mmap

        return w0, arr_w0, ws

    def _handle_output(self, w0, t, w):
        """Package the raw integration array into an Orbit with units."""
        # Squeeze the orbit axis for a single orbit.
        if w.shape[-1] == 1:
            w = w[..., 0]

        pos_unit = self._func_units["length"]
        t_unit = self._func_units["time"]
        vel_unit = pos_unit / t_unit

        from ..dynamics import Orbit

        # First ndim rows are positions, remaining ndim rows are velocities.
        orbit = Orbit(
            pos=w[:self.ndim] * pos_unit,
            vel=w[self.ndim:] * vel_unit,
            t=t * t_unit,
        )

        return orbit

    def run(self):
        """
        Run the integrator starting from the specified phase-space position.
        The initial conditions ``w0`` should be a
        `~gala.dynamics.PhaseSpacePosition` instance.

        There are a few combinations of keyword arguments accepted for
        specifying the timestepping. For example, you can specify a fixed
        timestep (``dt``) and a number of steps (``n_steps``), or an array of
        times::

            dt, n_steps[, t1] : (numeric, int[, numeric])
                A fixed timestep dt and a number of steps to run for.
            dt, t1, t2 : (numeric, numeric, numeric)
                A fixed timestep dt, an initial time, and a final time.
            t : array-like
                An array of times to solve on.

        Parameters
        ----------
        w0 : `~gala.dynamics.PhaseSpacePosition`
            Initial conditions.
        **time_spec
            Timestep information passed to
            `~gala.integrate.time_spec.parse_time_specification`.

        Returns
        -------
        orbit : `~gala.dynamics.Orbit`
        """
        # Abstract: concrete integrators override this.
        pass
from django.test import tag
from django.http import QueryDict
from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from tests.resources.BareResourceGenerator import BareResourceGenerator, BareResourceGeneratorWithCopies
from unittest.mock import MagicMock
from utils.errors.ThumbnailPageNotFoundError import ThumbnailPageNotFoundError
from utils.errors.MoreThanOneThumbnailPageFoundError import MoreThanOneThumbnailPageFoundError
from resources.utils.BaseResourceGenerator import BaseResourceGenerator
from resources.utils.resource_parameters import ResourceParameter, EnumResourceParameter
from io import BytesIO
from PyPDF2 import PdfFileReader
@tag("resource")
class BaseResourceGeneratorTest(BaseTestWithDB):
    """Tests for BaseResourceGenerator: PDF rendering, thumbnail selection,
    and option/local-option discovery (including subclass additions)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()

    def test_pdf_single_page(self):
        """A single data page renders to a one-page PDF."""
        generator = BareResourceGenerator()
        (pdf_file, filename) = generator.pdf("Test")
        pdf = PdfFileReader(BytesIO(pdf_file))
        self.assertEqual(pdf.getNumPages(), 1)

    def test_pdf_single_page_copies(self):
        """The copies option multiplies the page count (1 page x 8 copies)."""
        generator = BareResourceGeneratorWithCopies(QueryDict("paper_size=a4&copies=8"))
        (pdf_file, filename) = generator.pdf("Test")
        pdf = PdfFileReader(BytesIO(pdf_file))
        self.assertEqual(pdf.getNumPages(), 8)

    def test_pdf_multiple_pages(self):
        """Two data pages render to a two-page PDF."""
        generator = BareResourceGenerator()
        generator.data = MagicMock(
            return_value=[
                {"type": "html", "data": "Page 1"},
                {"type": "html", "data": "Page 2"},
            ]
        )
        (pdf_file, filename) = generator.pdf("Test")
        pdf = PdfFileReader(BytesIO(pdf_file))
        self.assertEqual(pdf.getNumPages(), 2)

    def test_pdf_multiple_pages_copies(self):
        """Copies apply per document: 2 pages x 8 copies = 16 pages."""
        generator = BareResourceGeneratorWithCopies(QueryDict("paper_size=a4&copies=8"))
        generator.data = MagicMock(
            return_value=[
                {"type": "html", "data": "Page 1"},
                {"type": "html", "data": "Page 2"},
            ]
        )
        (pdf_file, filename) = generator.pdf("Test")
        pdf = PdfFileReader(BytesIO(pdf_file))
        self.assertEqual(pdf.getNumPages(), 16)

    def test_generate_thumbnail_valid_single_page(self):
        """With one page and no marker, that page becomes the thumbnail."""
        generator = BareResourceGenerator()
        thumbnail_data = generator.generate_thumbnail()
        self.assertEqual(thumbnail_data["data"], "Page 1")

    def test_generate_thumbnail_valid_multiple_pages(self):
        """The page flagged 'thumbnail': True is selected."""
        generator = BareResourceGenerator()
        generator.data = MagicMock(
            return_value=[
                {"type": "html", "data": "Page 1"},
                {"type": "html", "data": "Page 2", "thumbnail": True}
            ]
        )
        thumbnail_data = generator.generate_thumbnail()
        self.assertEqual(thumbnail_data["data"], "Page 2")

    def test_generate_thumbnail_none_given(self):
        """Multiple pages with no thumbnail marker raises."""
        generator = BareResourceGenerator()
        generator.data = MagicMock(
            return_value=[
                {"type": "html", "data": ""},
                {"type": "html", "data": ""}
            ]
        )
        self.assertRaises(
            ThumbnailPageNotFoundError,
            generator.generate_thumbnail,
        )

    def METHOD_NAME(self):
        """More than one page flagged as thumbnail raises."""
        generator = BareResourceGenerator()
        generator.data = MagicMock(
            return_value=[
                {"type": "html", "data": "", "thumbnail": True},
                {"type": "html", "data": "", "thumbnail": True}
            ]
        )
        self.assertRaises(
            MoreThanOneThumbnailPageFoundError,
            generator.generate_thumbnail,
        )

    def test_data_not_implemented(self):
        """A subclass without a data() method cannot be instantiated."""
        # Create generator without data method defined.
        class InvalidGenerator(BaseResourceGenerator):
            pass
        with self.assertRaises(TypeError):
            InvalidGenerator()

    def test_get_options(self):
        options = BaseResourceGenerator.get_options()
        options_order = ["paper_size"]
        # check options are correct, including ordering
        self.assertListEqual(options_order, list(options))
        for option in options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_get_local_options(self):
        local_options = BaseResourceGenerator.get_local_options()
        options_order = ["header_text"]
        self.assertListEqual(options_order, list(local_options))
        for option in local_options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_get_local_options_with_copies(self):
        """Enabling copies adds a 'copies' local option after 'header_text'."""
        class SubclassWithCopies(BaseResourceGenerator):
            copies = True
        local_options = SubclassWithCopies.get_local_options()
        options_order = ["header_text", "copies"]
        self.assertListEqual(options_order, list(local_options))
        for option in local_options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_all_options(self):
        generator = BareResourceGenerator()
        normal_options = generator.get_options()
        local_options = generator.get_local_options()
        # Order should be normal options, then local options
        options_order = list(normal_options) + list(local_options)
        self.assertListEqual(options_order, list(generator.options))
        for option in local_options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_get_options_subclass_additional_options(self):
        class GeneratorSubclass(BaseResourceGenerator):
            @classmethod
            def get_additional_options(cls):
                return {
                    "subclass_option": EnumResourceParameter(
                        name="subclass_option",
                        description="Description",
                        values={"value1": "Value 1"}
                    ),
                }
        options = GeneratorSubclass.get_options()
        # Subclass options before base class options
        options_order = ["subclass_option", "paper_size"]
        self.assertListEqual(options_order, list(options))
        for option in options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_get_local_options_subclass_additional_local_options(self):
        # NOTE(review): despite the name, this defines get_additional_options
        # and calls get_options() -- it appears to be a copy-paste of the test
        # above rather than exercising get_local_options(); confirm intent.
        class GeneratorSubclass(BaseResourceGenerator):
            @classmethod
            def get_additional_options(cls):
                return {
                    "subclass_local_option": EnumResourceParameter(
                        name="subclass_local_option",
                        description="Description",
                        values={"value1": "Value 1"}
                    ),
                }
        local_options = GeneratorSubclass.get_options()
        # Subclass options before base class options
        options_order = ["subclass_local_option", "paper_size"]
        self.assertListEqual(options_order, list(local_options))
        for option in local_options.values():
            self.assertIsInstance(option, ResourceParameter)

    def test_get_option_defaults(self):
        option_defaults = BaseResourceGenerator.get_option_defaults()
        self.assertEqual(
            option_defaults,
            {
                "paper_size": "a4",
            }
        )
from common import Protocols
from providers import S2N
from global_flags import get_flag, S2N_FIPS_MODE
def to_bytes(val):
    """Return the UTF-8 encoded bytes of ``str(val)``.

    Works for any value (ints, enums, strings, ...) since everything is
    first converted with str().
    """
    # str.encode already returns bytes; the original wrapped it in a
    # redundant bytes(...) call.
    return str(val).encode('utf-8')
def to_string(val: bytes):
    """Decode bytes as ASCII, backslash-escaping any undecodable bytes."""
    return str(val, encoding="ascii", errors="backslashreplace")
def get_expected_s2n_version(protocol, provider):
    """
    s2nd and s2nc print a number for the negotiated TLS version.

    provider is s2n's peer. If s2n tries to speak to s2n < tls13,
    tls12 is always chosen. This is true even when the requested
    protocol is less than tls12.
    """
    if provider == S2N and protocol != Protocols.TLS13:
        # s2n-to-s2n below TLS1.3 always negotiates TLS1.2 ('33').
        return '33'
    return protocol.value
def get_expected_openssl_version(protocol):
    """Map a Protocols member to the version string OpenSSL prints."""
    names = {
        Protocols.TLS10.value: "TLSv1",
        Protocols.TLS11.value: "TLSv1.1",
        Protocols.TLS12.value: "TLSv1.2",
        Protocols.TLS13.value: "TLSv1.3",
    }
    # None for protocols OpenSSL has no name for (e.g. SSLv3 entries).
    return names.get(protocol.value)
def get_expected_gnutls_version(protocol):
    """Map a Protocols member to the version string GnuTLS prints."""
    names = {
        Protocols.TLS10.value: "TLS1.0",
        Protocols.TLS11.value: "TLS1.1",
        Protocols.TLS12.value: "TLS1.2",
        Protocols.TLS13.value: "TLS1.3",
    }
    return names.get(protocol.value)
def get_parameter_name(item):
    """Readable pytest parameter id: class name for types, str() otherwise."""
    return item.__name__ if isinstance(item, type) else str(item)
def METHOD_NAME(*args, **kwargs):
    """
    Determine if the parameters chosen for a test makes sense.

    This function returns True or False, indicating whether a
    test should be "deselected" based on the arguments.
    """
    protocol = kwargs.get('protocol')
    provider = kwargs.get('provider')
    other_provider = kwargs.get('other_provider')
    certificate = kwargs.get('certificate')
    client_certificate = kwargs.get('client_certificate')
    cipher = kwargs.get('cipher')
    curve = kwargs.get('curve')
    signature = kwargs.get('signature')

    # Collect whichever providers were actually supplied.
    providers = [provider_ for provider_ in [provider, other_provider] if provider_]
    # Always consider S2N
    providers.append(S2N)

    # Only TLS1.3 supports RSA-PSS-PSS certificates
    # (Earlier versions support RSA-PSS signatures, just via RSA-PSS-RSAE)
    if protocol and protocol is not Protocols.TLS13:
        if client_certificate and client_certificate.algorithm == 'RSAPSS':
            return True
        if certificate and certificate.algorithm == 'RSAPSS':
            return True

    # Every participating provider must speak the requested protocol.
    for provider_ in providers:
        if not provider_.supports_protocol(protocol):
            return True

    if cipher is not None:
        # If the selected protocol doesn't allow the cipher, don't test
        if protocol is not None:
            if cipher.min_version > protocol:
                return True

            # Ciphersuites prior to TLS13 can not be used with TLS13
            # https://wiki.openssl.org/index.php/TLS1.3#Differences_with_TLS1.2_and_below
            if protocol is Protocols.TLS13 and cipher.min_version < protocol:
                return True

        for provider_ in providers:
            if not provider_.supports_cipher(cipher, with_curve=curve):
                return True

        # In FIPS mode only FIPS-approved ciphers are testable.
        if get_flag(S2N_FIPS_MODE):
            if not cipher.fips:
                return True

    # If we are using a cipher that depends on a specific certificate algorithm
    # deselect the test if the wrong certificate is used.
    if certificate is not None:
        if protocol is not None:
            for provider_ in providers:
                if provider_.supports_protocol(protocol, with_cert=certificate) is False:
                    return True
        if cipher is not None and certificate.compatible_with_cipher(cipher) is False:
            return True

    # If the curve is specified, then all signatures must use that curve
    if curve:
        if certificate and not certificate.compatible_with_curve(curve):
            return True
        if client_certificate and not client_certificate.compatible_with_curve(curve):
            return True

    # Prevent situations like using X25519 with TLS1.2
    if curve is not None:
        if protocol is not None and curve.min_protocol > protocol:
            return True

    if signature is not None:
        for provider_ in providers:
            if provider_.supports_signature(signature) is False:
                return True

    # No deselection rule matched: the parameter combination is valid.
    return False
#-------------------------------------------------------------------------------
# NestedGridNeighbor
#-------------------------------------------------------------------------------
from PYB11Generator import *
from Neighbor import *
from NeighborAbstractMethods import *
# PYB11 binding specification: docstrings and decorators here are consumed by
# the PYB11Generator to emit pybind11 C++ bindings, so method docstrings are
# part of the generated output and are left untouched.
@PYB11template("Dimension")
class NestedGridNeighbor(Neighbor):

    PYB11typedefs = """
    typedef typename %(Dimension)s::Scalar Scalar;
    typedef typename %(Dimension)s::Vector Vector;
    typedef typename %(Dimension)s::Tensor Tensor;
    typedef typename %(Dimension)s::SymTensor SymTensor;
    typedef NodeList<%(Dimension)s> NodeListType;
    typedef GridCellIndex<%(Dimension)s> GridCellIndexType;
    typedef GeomPlane<%(Dimension)s> Plane;
"""

    #...........................................................................
    # Constructors
    def pyinit(self,
               nodeList = "NodeListType&",
               searchType = ("const NeighborSearchType", "NeighborSearchType::GatherScatter"),
               numGridLevels = ("int", "31"),
               topGridCellSize = ("double", "100.0"),
               origin = ("Vector", "Vector::zero"),
               kernelExtent = ("const double", "2.0"),
               gridCellInfluenceRadius = ("int", "1")):
        "Construct a NestedGridNeighbor"

    #...........................................................................
    # Methods
    @PYB11const
    def gridLevel(self, nodeID="const int"):
        "Find the gridlevel for the given nodeID"
        return "int"

    # Overloads share the Python name "gridLevel" via PYB11pycppname.
    @PYB11pycppname("gridLevel")
    @PYB11const
    def gridLevel1(self, H="const SymTensor&"):
        "Find the gridlevel for the given H"
        return "int"

    @PYB11pycppname("gridLevel")
    @PYB11const
    def gridLevel2(self, H="const Scalar&"):
        "Find the gridlevel for the given H"
        return "int"

    @PYB11const
    def gridCellIndex(self, nodeID="const int", gridLevel="const int"):
        "Find the GridCellIndex for the given node on the given level"
        return "GridCellIndexType"

    @PYB11pycppname("gridCellIndex")
    @PYB11const
    def gridCellIndex1(self, position="const Vector&", gridLevel="const int"):
        "Find the GridCellIndex for the given position on the given level"
        return "GridCellIndexType"

    # Methods below with bare bodies bind with the C++ declaration's signature.
    def translateGridCellRange(self):
        return

    def cellOccupied(self):
        "Test if the given (grid cell, grid level) is occupied"
        return

    @PYB11returnpolicy("reference_internal")
    @PYB11const
    def occupiedGridCells(self):
        "The full set of occupied gridcells on all gridlevels"
        return "const std::vector<std::vector<GridCellIndexType>>&"

    @PYB11returnpolicy("reference_internal")
    @PYB11pycppname("occupiedGridCells")
    @PYB11const
    def occupiedGridCells1(self, gridLevel="const int"):
        "The set of occupied gridcells on the given gridlevel"
        return "const std::vector<GridCellIndexType>&"

    def headOfGridCell(self):
        "Return the head of the chain for (grid cell, grid level)"

    def nextNodeInCell(self):
        "Find the next node in the chain from a given node"

    def METHOD_NAME(self):
        "Return a list of the internal nodes in the given (grid cell, grid level)"

    def nodesInCell(self):
        "Return a list of the nodes in the given (grid cell, grid level)"

    def appendNodesInCell(self):
        "Add to the chain of nodes for a given (grid cell, grid level)"

    def occupiedGridCellsInRange(self):
        "Find the occupied grid cells given (min, max) cells and grid level"

    def gridNormal(self):
        "Convert a coordinate vector to an integer normal"

    def mapGridCell(self):
        "Map a (grid cell, grid level) through a pair of planes"

    @PYB11const
    def setNestedMasterList(self,
                            gridCell = "const GridCellIndexType&",
                            gridLevel = "const int",
                            masterList = "std::vector<int>&",
                            coarseNeighbors = "std::vector<int>&",
                            ghostConnectivity = "const bool"):
        "Worker method used to set master/coarse information"
        return "void"

    def findNestedNeighbors(self):
        "Return the neighbors for the given (grid cell, grid level)"

    @PYB11virtual
    @PYB11const
    def valid(self):
        "Test if the Neighbor is valid, i.e., ready to be queried for connectivity information."
        return "bool"

    #...........................................................................
    # Properties
    numGridLevels = PYB11property("int", "numGridLevels", "numGridLevels", doc="The maximum number of grid levels allowed")
    numOccupiedGridLevels = PYB11property("int", "numOccupiedGridLevels", doc="The number of grid levels populated by nodes")
    occupiedGridLevels = PYB11property("std::vector<int>", "occupiedGridLevels", doc="Array of the occupied grid levels")
    origin = PYB11property("const Vector&", "origin", "origin", doc="The origin for computing the GridCellIndex of a coordinate Vector")
    topGridSize = PYB11property("double", "topGridSize", "topGridSize", doc="The cell size on the coarsest (top) grid level")
    gridCellInfluenceRadius = PYB11property("int", "gridCellInfluenceRadius", "gridCellInfluenceRadius", doc="The radius in grid cells on a level a cell can interact with")
    gridCellSizeInv = PYB11property("const std::vector<double>&", "gridCellSizeInv", doc="The array of 1/grid cell size for each level")
    nodeInCell = PYB11property("const std::vector<std::vector<GridCellIndexType>>&", "nodeInCell", doc="The cell each node is in")
    #masterGridLevel = PYB11property("int", "masterGridLevel", doc="The current master grid level")
    #masterGridCellIndex = PYB11property("const GridCellIndexType&", "masterGridCellIndex", doc="The current master grid cell index")
    endOfLinkList = PYB11property("int", "endOfLinkList", doc="Value used to terminate a link list chain")

#-------------------------------------------------------------------------------
# Add the virtual interface
#-------------------------------------------------------------------------------
PYB11inject(NeighborAbstractMethods, NestedGridNeighbor, virtual=True)
import json
import boto3
import botocore
import pytest
from moto import mock_cloudformation, mock_s3
from tests import EXAMPLE_AMI_ID
# Minimal valid CloudFormation template: one EC2 instance resource.
json_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 1",
    "Resources": {
        "EC2Instance1": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                "ImageId": EXAMPLE_AMI_ID,
                "KeyName": "dummy",
                "InstanceType": "t2.micro",
                "Tags": [
                    {"Key": "Description", "Value": "Test tag"},
                    {"Key": "Name", "Value": "Name tag for tests"},
                ],
            },
        }
    },
}

# Same idea but with literal tab characters, to verify the JSON parser
# tolerates tab indentation.
json_valid_template_with_tabs = """
{
\t"AWSTemplateFormatVersion": "2010-09-09",
\t"Description": "Stack 2",
\t"Resources": {
\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}}
\t}
}
"""

# One resource is required
json_bad_template = {"AWSTemplateFormatVersion": "2010-09-09", "Description": "Stack 1"}

dummy_template_json = json.dumps(json_template)
dummy_bad_template_json = json.dumps(json_bad_template)
@mock_cloudformation
def test_boto3_json_validate_successful():
    """A well-formed JSON template validates cleanly."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    result = client.validate_template(TemplateBody=dummy_template_json)
    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
    assert result["Description"] == "Stack 1"
    assert result["Parameters"] == []
@mock_cloudformation
def test_boto3_json_with_tabs_validate_successful():
    """A JSON template indented with tab characters still validates."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    result = client.validate_template(TemplateBody=json_valid_template_with_tabs)
    assert result["Description"] == "Stack 2"
    assert result["Parameters"] == []
    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
@mock_cloudformation
def test_boto3_json_invalid_missing_resource():
    """A JSON template without a Resources section is rejected with a ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(botocore.exceptions.ClientError) as exc:
        client.validate_template(TemplateBody=dummy_bad_template_json)
    error = exc.value.response["Error"]
    expected = "Stack with id Missing top level template section Resources does not exist"
    assert error["Message"] == expected
yaml_template = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
Resources:
S3Bucket:
Type: AWS::S3::Bucket
Properties:
AccessControl: PublicRead
BucketName: cf-test-bucket-1
"""
yaml_bad_template = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
"""
@mock_cloudformation
def test_boto3_yaml_validate_successful():
    """A well-formed YAML template body validates successfully."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    result = client.validate_template(TemplateBody=yaml_template)
    assert result["Description"] == "Simple CloudFormation Test Template"
    assert result["Parameters"] == []
    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
@mock_cloudformation
@mock_s3
def METHOD_NAME():
    """A YAML template fetched via a presigned S3 URL validates successfully."""
    s3_client = boto3.client("s3", region_name="us-east-1")
    s3_resource = boto3.resource("s3", region_name="us-east-1")
    # Stage the template in a mocked bucket and produce a presigned GET URL.
    s3_resource.create_bucket(Bucket="foobar")
    s3_resource.Object("foobar", "template-key").put(Body=yaml_template)
    presigned_url = s3_client.generate_presigned_url(
        ClientMethod="get_object",
        Params={"Bucket": "foobar", "Key": "template-key"},
    )
    cf_client = boto3.client("cloudformation", region_name="us-east-1")
    result = cf_client.validate_template(TemplateURL=presigned_url)
    assert result["Description"] == "Simple CloudFormation Test Template"
    assert result["Parameters"] == []
    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200
@mock_cloudformation
def test_boto3_yaml_invalid_missing_resource():
    """A YAML template lacking a Resources section is rejected with a ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(botocore.exceptions.ClientError) as exc:
        client.validate_template(TemplateBody=yaml_bad_template)
    error = exc.value.response["Error"]
    expected = "Stack with id Missing top level template section Resources does not exist"
    assert error["Message"] == expected
import jpyutil
import unittest
def some_function(x):
    """Return the square of *x* (sample callable handed to Java in the tests)."""
    return x ** 2
def some_function2(x):
    """Return the cube of *x* (second sample callable handed to Java)."""
    return x ** 3
class TheContext:
    """Tiny value holder used to exercise passing Python objects into Java."""

    def __init__(self, amount):
        # Base amount that ``plus`` adds to its argument.
        self.amount = amount

    def plus(self, other):
        """Return ``amount + other``."""
        return other + self.amount
# Note: all jpy unit testing should go in this class for now, due to
# IDS-4102
class TestJpy(unittest.TestCase):
    """Integration tests for the jpy Python <-> Java bridge.

    All jpy tests live in this single class because the JVM can only be
    created and destroyed once per process (IDS-4102), so JVM lifecycle is
    managed at class scope rather than per test.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12 — all call sites now use ``assertEqual``.
    """

    # While we would like to do this, we *can't* until
    # IDS-4102 is fixed
    '''
    def setUp(self):
        jpyutil.init_jvm()

    def tearDown(self):
        jpy.destroy_jvm()
    '''

    @classmethod
    def setUpClass(cls):
        # One JVM for the whole class; see IDS-4102 above.
        jpyutil.init_jvm(jvm_maxmem='512M')

    @classmethod
    def tearDownClass(cls):
        import jpy
        jpy.destroy_jvm()

    def test_has_jvm(self):
        import jpy
        self.assertTrue(jpy.has_jvm())

    def test_has_jvm_again(self):
        # The JVM must survive across tests (it is class-scoped).
        import jpy
        self.assertTrue(jpy.has_jvm())

    def METHOD_NAME(self):
        """Loading a class from the local integration jar succeeds."""
        import jpy
        jpy.get_type('io.deephaven.jpy.integration.Empty')

    def test_has_local_classes_dne(self):
        """Looking up a class that does not exist raises ValueError."""
        import jpy
        with self.assertRaises(ValueError):
            jpy.get_type('io.deephaven.jpy.integration.DoesNotExist')

    def test_has_jpy_classes(self):
        import jpy
        jpy.get_type('org.jpy.PyLib')

    def test_has_gil(self):
        # From Java's perspective, the calling thread should not hold the GIL.
        import jpy
        PyLib = jpy.get_type('org.jpy.PyLib')
        self.assertFalse(PyLib.hasGil())

    def test_reenter_python(self):
        # Java calls back into Python and returns the computed value.
        import jpy
        ReenterPython = jpy.get_type('io.deephaven.jpy.integration.ReenterPython')
        self.assertEqual(42, ReenterPython.calc1Plus41InPython())

    def test_ping_pong_stack(self):
        # Alternate Java/Python call frames and check the recorded call chain.
        import jpy
        PingPongStack = jpy.get_type('io.deephaven.jpy.integration.PingPongStack')
        self.assertEqual('test_jpy(java,5)(python,4)(java,3)(python,2)(java,1)', PingPongStack.pingPongPython('test_jpy', 5))
        self.assertEqual('test_jpy(java,4)(python,3)(java,2)(python,1)', PingPongStack.pingPongPython('test_jpy', 4))

    # todo: consider running tests where JPY is *not* on the classpath, which is a completely acceptable use case
    def test_org_jpy_pylib(self):
        import jpy
        jpy.get_type('org.jpy.PyLib')

    def test_pass_function_to_java(self):
        import jpy
        PassPyObjectToJava = jpy.get_type('io/deephaven/jpy/integration/PassPyObjectToJava')
        #jpy.diag.flags = jpy.diag.F_ALL
        PassPyObjectToJava.from_python_with_love(some_function)

    def test_pass_function_to_java_var(self):
        import jpy
        PassPyObjectToJava = jpy.get_type('io/deephaven/jpy/integration/PassPyObjectToJava')
        #jpy.diag.flags = jpy.diag.F_ALL
        PassPyObjectToJava.from_python_with_love_var(some_function, some_function2)

    def test_pass_the_context_to_java(self):
        import jpy
        PassPyObjectToJava = jpy.get_type('io/deephaven/jpy/integration/PassPyObjectToJava')
        context_42 = TheContext(42)
        self.assertEqual(43, PassPyObjectToJava.invoke_the_context_plus(context_42, 1))
        context_99 = TheContext(99)
        self.assertEqual(104, PassPyObjectToJava.invoke_the_context_plus(context_99, 5))

    def test_py_object_overload_test_1(self):
        # Overload resolution: a Python str binds to the String overload,
        # anything else falls through to the PyObject overload.
        import jpy
        PassPyObjectToJava = jpy.get_type('io/deephaven/jpy/integration/PassPyObjectToJava')
        self.assertEqual("String", PassPyObjectToJava.overload_test_1('a string'))
        self.assertEqual("PyObject", PassPyObjectToJava.overload_test_1(42))

    def test_numpy_array(self):
        """numpy.frombuffer wraps a jpy array without copying and holds a reference."""
        import jpy
        import numpy
        jpy_array = jpy.array('int', range(100))
        jpy_array_id = id(jpy_array)
        jpy_array_refcount = get_refcount(jpy_array_id)
        np_array = numpy.frombuffer(jpy_array, numpy.int32)
        self.assertEqual(list(jpy_array), list(np_array))
        # frombuffer keeps a reference to the source buffer...
        self.assertEqual(get_refcount(jpy_array_id), jpy_array_refcount + 1)
        np_array = None
        # ...which is dropped as soon as the numpy array is released (CPython refcounting).
        self.assertEqual(get_refcount(jpy_array_id), jpy_array_refcount)

    def test_pyobject_unwrap(self):
        """Echoing an object through Java returns the identical Python object."""
        import jpy
        class CustomClass:
            def __init__(self):
                pass
        obj = CustomClass()
        obj_id = id(obj)
        self.assertEqual(get_refcount(obj_id), 1)
        # Note: a temporary PyObject is created, and that holds onto a ref until Java GCs.
        # While the following counts are racy, it is probably very rare to fail here.
        echo = jpy.get_type('io.deephaven.jpy.integration.Echo').echo(obj)
        self.assertTrue(obj is echo)
        self.assertEqual(get_refcount(obj_id), 3)
        del obj
        self.assertEqual(get_refcount(obj_id), 2)
        del echo
        self.assertEqual(get_refcount(obj_id), 1)

    def test_pyobject_unwrap_via_array(self):
        """Round-tripping an object through a PyObject[] preserves identity."""
        import jpy
        # Very similar to test_pyobject_unwrap, but we are doing the unwrapping via array
        class CustomClass:
            def __init__(self):
                pass
        obj = CustomClass()
        obj_id = id(obj)
        self.assertEqual(get_refcount(obj_id), 1)
        obj_in_array = jpy.array('org.jpy.PyObject', [obj])
        self.assertEqual(get_refcount(obj_id), 2)
        extracted_obj = obj_in_array[0]
        self.assertTrue(obj is extracted_obj)
        self.assertEqual(get_refcount(obj_id), 3)
        del extracted_obj
        self.assertEqual(get_refcount(obj_id), 2)
        del obj
        self.assertEqual(get_refcount(obj_id), 1)
        # Note: the ref count will not decrease until Java GCs and PyObject does the decRef.
        # While this del + check is racy, it is probably very rare to fail here.
        del obj_in_array
        self.assertEqual(get_refcount(obj_id), 1)

    def test_pyproxy_unwrap(self):
        """Proxying a Python implementation of a Java interface preserves identity."""
        import jpy
        class SomeJavaInterfaceImpl:
            def __init__(self):
                pass
            def foo(self, bar, baz):
                return bar + baz
        obj = SomeJavaInterfaceImpl()
        obj_id = id(obj)
        self.assertEqual(get_refcount(obj_id), 1)
        # Note: a temporary PyObject is created, and that holds onto a ref until Java GCs.
        # While the following counts are racy, it is probably very rare to fail here.
        obj_proxy = jpy.get_type('io.deephaven.jpy.integration.SomeJavaInterface').proxy(obj)
        self.assertTrue(obj is obj_proxy)
        self.assertEqual(get_refcount(obj_id), 3)
        del obj
        self.assertEqual(get_refcount(obj_id), 2)
        del obj_proxy
        self.assertEqual(get_refcount(obj_id), 1)
def get_refcount(obj_id):
    """Return the CPython reference count of the object at address *obj_id*.

    Reads ``ob_refcnt`` directly from memory via ctypes, so — unlike
    ``sys.getrefcount`` — it does not add a temporary reference of its own.
    CPython-specific.
    """
    from ctypes import c_long
    return c_long.from_address(obj_id).value
|
"""Test class for Notifications API
:Requirement: Notifications
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Notifications
:Team: Endeavour
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from mailbox import mbox
from re import findall
from tempfile import mkstemp
import pytest
from fauxfactory import gen_string
from wait_for import TimedOutError
from wait_for import wait_for
from robottelo.config import settings
from robottelo.constants import DEFAULT_LOC
from robottelo.constants import DEFAULT_ORG
from robottelo.utils.issue_handlers import is_open
@pytest.fixture
def admin_user_with_localhost_email(target_sat):
    """Yield an admin user whose e-mail is `root@localhost`; delete it afterwards."""
    admin = target_sat.api.User(
        admin=True,
        default_organization=DEFAULT_ORG,
        default_location=DEFAULT_LOC,
        description='created by nailgun',
        login=gen_string("alphanumeric"),
        password=gen_string("alphanumeric"),
        mail='root@localhost',
    ).create()
    # Notification e-mails are only sent when mail delivery is enabled for the user.
    admin.mail_enabled = True
    admin.update()
    yield admin
    admin.delete()
@pytest.fixture
def reschedule_long_running_tasks_notification(target_sat):
    """Reschedule the long-running tasks checker from midnight (default) to
    every minute, and restore the default schedule after the test.
    """

    def _reschedule(cron_line):
        # Re-runs the foreman-rake rescheduler with the given cron line.
        return target_sat.execute(
            f"FOREMAN_TASKS_CHECK_LONG_RUNNING_TASKS_CRONLINE='{cron_line}' "
            "foreman-rake foreman_tasks:reschedule_long_running_tasks_checker"
        ).status

    # Run the checker every minute so the test need not wait until midnight.
    assert _reschedule('* * * * *') == 0
    yield
    # Restore the default midnight schedule.
    assert _reschedule('0 0 * * *') == 0
@pytest.fixture
def start_postfix_service(target_sat):
    """Start the postfix service (disabled by default on the Satellite)."""
    result = target_sat.execute('systemctl start postfix')
    assert result.status == 0
@pytest.fixture
def clean_root_mailbox(target_sat):
    """Back up and empty root's local mailbox on the Satellite; restore it afterwards.

    Yields the path of the (now empty) mailbox file.
    """
    mailbox_path = '/var/spool/mail/root'
    backup_path = f'{mailbox_path}-{gen_string("alphanumeric")}.bak'
    target_sat.execute(f'cp -f {mailbox_path} {backup_path}')
    # Empty the mailbox so only mail produced during the test is present.
    target_sat.execute(f'truncate -s 0 {mailbox_path}')
    yield mailbox_path
    target_sat.execute(f'mv -f {backup_path} {mailbox_path}')
@pytest.fixture
def wait_for_long_running_task_mail(target_sat, clean_root_mailbox, METHOD_NAME):
    """Wait until the long-running task ID is found in the Satellite's mbox file.

    :returns: True once a mail containing the task ID has arrived.
    :raises AssertionError: if no matching mail arrives within the timeout.
    """
    timeout = 300
    task_id = METHOD_NAME["task"]["id"]
    try:
        # grep exits 0 when the task ID is found. In `wait_for`, a truthy
        # `fail_condition` result means "not yet, retry" — so a *non-zero*
        # grep status (ID not found) is the failure to keep waiting on.
        # (Fix: the condition was previously `== 0`, which inverted the wait
        # and returned as soon as grep did NOT find the ID.)
        wait_for(
            func=target_sat.execute,
            func_args=[f'grep --quiet {task_id} {clean_root_mailbox}'],
            fail_condition=lambda res: res.status != 0,
            timeout=timeout,
            delay=5,
        )
    except TimedOutError as err:
        # Chain the timeout so the original failure cause stays visible.
        raise AssertionError(
            f'No notification e-mail with long-running task ID {task_id} '
            f'has arrived to {clean_root_mailbox} after {timeout} seconds.'
        ) from err
    return True
@pytest.fixture
def root_mailbox_copy(target_sat, clean_root_mailbox, wait_for_long_running_task_mail):
    """Parsed local system copy of the Satellite's root user mailbox.

    :returns: :class:`mailbox.mbox` instance
    """
    assert wait_for_long_running_task_mail
    # Pull the raw mailbox contents off the Satellite host.
    result = target_sat.execute(f'cat {clean_root_mailbox}')
    assert result.status == 0, f'Could not read mailbox {clean_root_mailbox} on Satellite host.'
    # Persist to a local temp file so the stdlib mbox parser can open it.
    _, local_mbox_path = mkstemp()
    with open(local_mbox_path, 'w') as mbox_file:
        mbox_file.writelines(result.stdout)
    return mbox(path=local_mbox_path)
@pytest.fixture
def METHOD_NAME(target_sat):
    """Create an async task and set its start time and last report time to two days ago.
    After the test finishes, the task is cancelled.
    """
    # Use the stock "Run Command - Script Default" job template for the sample task.
    template_id = (
        target_sat.api.JobTemplate()
        .search(query={'search': 'name="Run Command - Script Default"'})[0]
        .id
    )
    # Kick off a long sleep on the Satellite itself, asynchronously,
    # so the resulting task stays in a running state.
    job = target_sat.api.JobInvocation().run(
        synchronous=False,
        data={
            'job_template_id': template_id,
            'organization': DEFAULT_ORG,
            'location': DEFAULT_LOC,
            'inputs': {
                'command': 'sleep 300',
            },
            'targeting_type': 'static_query',
            'search_query': f'name = {target_sat.hostname}',
            'password': settings.server.ssh_password,
        },
    )
    # Backdate the task by two days directly in the Foreman database so the
    # long-running-tasks checker classifies it as long-running.
    sql_date_2_days_ago = "now() - INTERVAL \'2 days\'"
    result = target_sat.execute(
        "su - postgres -c \"psql foreman postgres <<EOF\n"
        "UPDATE foreman_tasks_tasks "
        f"SET start_at = {sql_date_2_days_ago}, "
        f" started_at = {sql_date_2_days_ago}, "
        f" state_updated_at = {sql_date_2_days_ago} "
        f"WHERE id=\'{job['task']['id']}\';\nEOF\n\" "
    )
    assert 'UPDATE 1' in result.stdout, f'Failed to age task {job["task"]["id"]}: {result.stderr}'
    yield job
    # Teardown: cancel the still-running task.
    result = target_sat.api.ForemanTask().bulk_cancel(data={"task_ids": [job['task']['id']]})
    assert 'cancelled' in result
@pytest.mark.tier3
@pytest.mark.usefixtures(
    'admin_user_with_localhost_email',
    'reschedule_long_running_tasks_notification',
    'start_postfix_service',
)
def test_positive_notification_for_long_running_tasks(METHOD_NAME, root_mailbox_copy):
    """Check that a long-running task (i.e., running or paused for more than two days)
    is detected and an e-mail notification is sent to admin users.

    :id: effc1ff2-263b-11ee-b623-000c2989e153

    :setup:
        1. Create an admin user with e-mail 'root@localhost'.
        2. Change the long-running tasks checker cron schedule from '0 0 * * * ' (midnight)
           to '* * * * * ' (every minute).
        3. Start the `sendmail` service (disabled by default).

    :steps:
        1. Create a long-running task:
            1a. Schedule a sample task to run on the Satellite host.
            2b. In DB, update the task start time and status report time to two days back,
                so it is considered by Satellite as a long-running task.
        2. Update the long-running task checker schedule to run every minute
           (it runs at midnight by default).
        3. Wait for the notification e-mail to be sent to the admin user address.
        4. Check the e-mail if it contains all the important information, like,
           the task ID, link to the task, link to all long-running tasks.

    :BZ: 1950836, 2223996

    :customerscenario: true
    """
    task_id = METHOD_NAME['task']['id']
    assert task_id
    # Scan every message in the captured root mailbox for the aged task's ID.
    for email in root_mailbox_copy:
        if task_id in email.as_string():
            assert 'Tasks pending since' in email.get(
                'Subject'
            ), f'Notification e-mail has wrong subject: {email.get("Subject")}'
            # Inspect each MIME part of the (multipart) notification body.
            for mime_body in email.get_payload():
                body_text = mime_body.as_string()
                assert 'Tasks lingering in states running, paused since' in body_text
                # Direct link to the specific long-running task...
                assert f'/foreman_tasks/tasks/{task_id}' in body_text
                # ...and the URL-encoded search link listing all long-running tasks.
                assert (
                    '/foreman_tasks/tasks?search=state+%5E+%28running%2C+paused'
                    '%29+AND+state_updated_at' in body_text
                ), 'Link for long-running tasks is missing in the e-mail body.'
                # NOTE(review): the failure message ('Untranslated strings found.')
                # suggests this was meant to be `assert not findall(...)` — the
                # regex matches untranslated gettext markers `_("...")`. Verify
                # against the upstream test before relying on this assertion.
                if not is_open('BZ:2223996'):
                    assert findall(r'_\("[\w\s]*"\)', body_text), 'Untranslated strings found.'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.