id
int64
0
300k
label
stringlengths
1
74
text
stringlengths
4k
8k
299,100
set up
#!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# Copyright (C) 2023 Fortra. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
import unittest

from impacket.dot11 import Dot11, Dot11Types, Dot11DataFrame


class TestDot11DataFrames(unittest.TestCase):
    """Tests parsing and mutation of an 802.11 data frame via Dot11DataFrame."""

    def setUp(self):
        # NOTE(review): renamed from the METHOD_NAME placeholder. Every test
        # below reads self.data, so this must be unittest's setUp hook.
        # 802.11 Data Frame
        self.frame_orig = b'\x08\x01\x30\x00\x00\x08\x54\xac\x2f\x85\x00\x23\x4d\x09\x86\xfe\x00\x08\x54\xac\x2f\x85\x40\x44\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x72\x37\x40\x00\x80\x06\x6c\x22\xc0\xa8\x01\x02\xc3\x7a\x97\x51\xd7\xa0\x00\x50\xa5\xa5\xb1\xe0\x12\x1c\xa9\xe1\x50\x10\x4e\x75\x59\x74\x00\x00\xed\x13\x22\x91'

        d = Dot11(self.frame_orig)

        # `type` renamed to `frame_type` to avoid shadowing the builtin.
        frame_type = d.get_type()
        self.assertEqual(frame_type, Dot11Types.DOT11_TYPE_DATA)

        subtype = d.get_subtype()
        self.assertEqual(subtype, Dot11Types.DOT11_SUBTYPE_DATA)

        typesubtype = d.get_type_n_subtype()
        self.assertEqual(typesubtype, Dot11Types.DOT11_TYPE_DATA_SUBTYPE_DATA)

        self.data = Dot11DataFrame(d.get_body_as_string())
        d.contains(self.data)

    def test_01_HeaderSize(self):
        'Test Header and Tail Size field'
        self.assertEqual(self.data.get_header_size(), 22)
        self.assertEqual(self.data.get_tail_size(), 0)

    def test_02_Duration(self):
        'Test Duration field'
        self.assertEqual(self.data.get_duration(), 0x30)
        self.data.set_duration(0x1234)
        self.assertEqual(self.data.get_duration(), 0x1234)

    def test_03_Address_1(self):
        'Test Address 1 field'
        addr = self.data.get_address1()
        self.assertEqual(addr.tolist(), [0x00, 0x08, 0x54, 0xac, 0x2f, 0x85])
        addr[0] = 0x12
        addr[5] = 0x34
        self.data.set_address1(addr)
        self.assertEqual(self.data.get_address1().tolist(), [0x12, 0x08, 0x54, 0xac, 0x2f, 0x34])

    def test_04_Address_2(self):
        'Test Address 2 field'
        addr = self.data.get_address2()
        self.assertEqual(addr.tolist(), [0x00, 0x23, 0x4d, 0x09, 0x86, 0xfe])
        addr[0] = 0x12
        addr[5] = 0x34
        self.data.set_address2(addr)
        self.assertEqual(self.data.get_address2().tolist(), [0x12, 0x23, 0x4d, 0x09, 0x86, 0x34])

    def test_05_Address_3(self):
        'Test Address 3 field'
        addr = self.data.get_address3()
        self.assertEqual(addr.tolist(), [0x00, 0x08, 0x54, 0xac, 0x2f, 0x85])
        addr[0] = 0x12
        addr[5] = 0x34
        self.data.set_address3(addr)
        self.assertEqual(self.data.get_address3().tolist(), [0x12, 0x08, 0x54, 0xac, 0x2f, 0x34])

    def test_06_sequence_control(self):
        'Test Sequence control field'
        self.assertEqual(self.data.get_sequence_control(), 0x4440)
        self.data.set_sequence_control(0x1234)
        self.assertEqual(self.data.get_sequence_control(), 0x1234)

    def test_07_fragment_number(self):
        'Test Fragment number field'
        self.assertEqual(self.data.get_fragment_number(), 0x0000)
        self.data.set_fragment_number(0xF1)  # field is only 4 bits wide
        self.assertEqual(self.data.get_fragment_number(), 0x01)

    def test_08_sequence_number(self):
        'Test Sequence number field'
        self.assertEqual(self.data.get_sequence_number(), 0x0444)
        self.data.set_sequence_number(0xF234)  # field is only 12 bits wide
        self.assertEqual(self.data.get_sequence_number(), 0x0234)

    def test_09_frame_data(self):
        'Test Frame Data field'
        # Test with packet without addr4
        frame_body = b"\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x72\x37\x40\x00\x80\x06\x6c\x22\xc0\xa8\x01\x02\xc3\x7a\x97\x51\xd7\xa0\x00\x50\xa5\xa5\xb1\xe0\x12\x1c\xa9\xe1\x50\x10\x4e\x75\x59\x74\x00\x00"
        self.assertEqual(self.data.get_frame_body(), frame_body)


if __name__ == '__main__':
    unittest.main(verbosity=1)
299,101
test post template with non existent template
import pytest
from flask import json

from app.models import EMAIL_TYPE, SMS_TYPE, TEMPLATE_TYPES
from tests import create_authorization_header
from tests.app.db import create_template

valid_personalisation = {"personalisation": {"Name": "Jo"}}

# Each entry: (subject, content, post_data, expected_subject,
#              expected_content, expected_html)
valid_post = [
    (
        "Some subject",
        "Some content",
        None,
        "Some subject",
        "Some content",
        (
            '<p style="Margin: 0 0 20px 0; font-size: 19px; line-height: 25px; color: #0B0C0C;">'
            "Some content"
            "</p>"
        ),
    ),
    (
        "Some subject",
        "Dear ((Name)), Hello. Yours Truly, The Government.",
        valid_personalisation,
        "Some subject",
        "Dear Jo, Hello. Yours Truly, The Government.",
        (
            '<p style="Margin: 0 0 20px 0; font-size: 19px; line-height: 25px; color: #0B0C0C;">'
            "Dear Jo, Hello. Yours Truly, The Government."
            "</p>"
        ),
    ),
    (
        "Message for ((Name))",
        "Dear ((Name)), Hello. Yours Truly, The Government.",
        valid_personalisation,
        "Message for Jo",
        "Dear Jo, Hello. Yours Truly, The Government.",
        (
            '<p style="Margin: 0 0 20px 0; font-size: 19px; line-height: 25px; color: #0B0C0C;">'
            "Dear Jo, Hello. Yours Truly, The Government."
            "</p>"
        ),
    ),
    (
        "Message for ((Name))",
        "Some content",
        valid_personalisation,
        "Message for Jo",
        "Some content",
        (
            '<p style="Margin: 0 0 20px 0; font-size: 19px; line-height: 25px; color: #0B0C0C;">'
            "Some content"
            "</p>"
        ),
    ),
]


@pytest.mark.parametrize("tmp_type", TEMPLATE_TYPES)
@pytest.mark.parametrize(
    "subject,content,post_data,expected_subject,expected_content,expected_html",
    valid_post,
)
def test_valid_post_template_returns_200(
    client,
    sample_service,
    tmp_type,
    subject,
    content,
    post_data,
    expected_subject,
    expected_content,
    expected_html,
):
    """Previewing a template renders subject/body (and HTML for email)."""
    template = create_template(sample_service, template_type=tmp_type, subject=subject, content=content)
    auth_header = create_authorization_header(service_id=sample_service.id)
    response = client.post(
        path="/v2/template/{}/preview".format(template.id),
        data=json.dumps(post_data),
        headers=[("Content-Type", "application/json"), auth_header],
    )
    assert response.status_code == 200
    resp_json = json.loads(response.get_data(as_text=True))
    assert resp_json["id"] == str(template.id)
    # SMS templates have no subject; only email renders HTML.
    if tmp_type != SMS_TYPE:
        assert expected_subject in resp_json["subject"]
    if tmp_type == EMAIL_TYPE:
        assert resp_json["html"] == expected_html
    else:
        assert resp_json["html"] is None
    assert expected_content in resp_json["body"]


@pytest.mark.parametrize("tmp_type", TEMPLATE_TYPES)
def test_invalid_post_template_returns_400(client, sample_service, tmp_type):
    """Missing personalisation values yield a 400 BadRequestError."""
    template = create_template(
        sample_service,
        template_type=tmp_type,
        content="Dear ((Name)), Hello ((Missing)). Yours Truly, The Government.",
    )
    auth_header = create_authorization_header(service_id=sample_service.id)
    response = client.post(
        path="/v2/template/{}/preview".format(template.id),
        data=json.dumps(valid_personalisation),
        headers=[("Content-Type", "application/json"), auth_header],
    )
    assert response.status_code == 400
    resp_json = json.loads(response.get_data(as_text=True))
    assert resp_json["errors"][0]["error"] == "BadRequestError"
    assert "Missing personalisation: Missing" in resp_json["errors"][0]["message"]


def test_post_template_with_non_existent_template(client, fake_uuid, sample_service):
    """An unknown template id yields a 404 NoResultFound payload.

    NOTE(review): renamed from the METHOD_NAME placeholder — without the
    `test_` prefix pytest never collects this function.
    """
    auth_header = create_authorization_header(service_id=sample_service.id)
    response = client.post(
        path="/v2/template/{}/preview".format(fake_uuid),
        data=json.dumps(valid_personalisation),
        headers=[("Content-Type", "application/json"), auth_header],
    )
    assert response.status_code == 404
    assert response.headers["Content-type"] == "application/json"
    json_response = json.loads(response.get_data(as_text=True))
    assert json_response == {
        "errors": [{"error": "NoResultFound", "message": "No result found"}],
        "status_code": 404,
    }
299,102
run test
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *


# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    alert_filename = None  # Set by setup_network

    def setup_network(self):
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # NOTE(review): renamed from the METHOD_NAME placeholder —
        # BitcoinTestFramework.main() drives the test by calling run_test().
        assert_equal(self.nodes[0].getblockcount(), 200)

        # Mine three blocks. After this, nodes[0] blocks
        # 101, 102, and 103 are spend-able.
        new_blocks = self.nodes[1].generate(4)
        self.sync_all()
        print(self.nodes[0].getblockcount())

        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()

        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends
        # invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
        b = [self.nodes[0].getblockhash(n) for n in range(101, 105)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 34)
        spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 34.1)
        spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 34)

        # Create a block-height-locked transaction which will be invalid after reorg
        timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 34})
        # Set the time lock
        timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
        timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)

        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].generate(1)
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)

        # Create 102_1 and 103_1:
        spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 33.01)
        spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 33.1)

        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        last_block = self.nodes[0].generate(1)
        timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)

        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)

        self.sync_all()

        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})

        for node in self.nodes:
            node.invalidateblock(last_block[0])
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})

        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])

        self.sync_all()

        # mempool should be empty.
        assert_equal(set(self.nodes[0].getrawmempool()), set())


if __name__ == '__main__':
    MempoolCoinbaseTest().main()
299,103
get excludes tables
# Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Redshift E2E tests
"""
from typing import List

from metadata.generated.schema.entity.data.table import Histogram

from .common.test_cli_db import CliCommonDB
from .common_e2e_sqa_mixins import SQACommonMethods


class RedshiftCliTest(CliCommonDB.TestSuite, SQACommonMethods):
    """End-to-end CLI test suite for the Redshift connector."""

    create_table_query: str = """
        CREATE TABLE IF NOT EXISTS e2e_cli_tests.dbt_jaffle.persons (
            person_id int,
            full_name varchar(255),
            birthdate date
        )
    """

    create_view_query: str = """
        CREATE OR REPLACE VIEW e2e_cli_tests.dbt_jaffle.view_persons AS
            SELECT *
            FROM e2e_cli_tests.dbt_jaffle.persons;
    """

    insert_data_queries: List[str] = [
        """
        INSERT INTO e2e_cli_tests.dbt_jaffle.persons (person_id, full_name, birthdate)
        VALUES
            (1,'Peter Parker', '2004-08-10'),
            (2,'Bruce Banner', '1988-12-18'),
            (3,'Steve Rogers', '1988-07-04'),
            (4,'Natasha Romanoff', '1997-12-03'),
            (5,'Wanda Maximoff', '1998-02-10'),
            (6,'Diana Prince', '1976-03-17');
        """
    ]

    drop_table_query: str = """
        DROP TABLE IF EXISTS "e2e_cli_tests"."dbt_jaffle"."persons";
    """

    drop_view_query: str = """
        DROP VIEW IF EXISTS "e2e_cli_tests"."dbt_jaffle"."view_persons";
    """

    def setUp(self) -> None:
        self.create_table_and_view()

    def tearDown(self) -> None:
        self.delete_table_and_view()

    def create_table_and_view(self) -> None:
        SQACommonMethods.create_table_and_view(self)

    def delete_table_and_view(self) -> None:
        SQACommonMethods.delete_table_and_view(self)

    def delete_table_rows(self) -> None:
        SQACommonMethods.run_delete_queries(self)

    def update_table_row(self) -> None:
        SQACommonMethods.run_update_queries(self)

    @staticmethod
    def get_connector_name() -> str:
        return "redshift"

    @staticmethod
    def expected_tables() -> int:
        return 5

    def inserted_rows_count(self) -> int:
        return 100

    def view_column_lineage_count(self) -> int:
        """
        Gives us the lineage for the view_listing
        """
        return 9

    @staticmethod
    def fqn_created_table() -> str:
        return "e2e_redshift.e2e_cli_tests.dbt_jaffle.listing"

    @staticmethod
    def _fqn_deleted_table() -> str:
        return "e2e_redshift.e2e_cli_tests.dbt_jaffle.persons"

    @staticmethod
    def get_profiler_time_partition() -> dict:
        return {
            "fullyQualifiedName": "e2e_redshift.e2e_cli_tests.dbt_jaffle.listing",
            "partitionConfig": {
                "enablePartitioning": True,
                "partitionColumnName": "date",
                "partitionIntervalType": "TIME-UNIT",
                "partitionInterval": 5,
                "partitionIntervalUnit": "YEAR",
            },
        }

    @staticmethod
    def get_includes_schemas() -> List[str]:
        return ["dbt_jaffle"]

    @staticmethod
    def get_includes_tables() -> List[str]:
        return ["customer", "listing"]

    @staticmethod
    def get_excludes_tables() -> List[str]:
        # NOTE(review): renamed from the METHOD_NAME placeholder to match the
        # get_includes_schemas / get_includes_tables naming the suite expects.
        return ["foo"]

    @staticmethod
    def expected_filtered_schema_includes() -> int:
        return 3

    @staticmethod
    def expected_filtered_schema_excludes() -> int:
        return 1

    @staticmethod
    def expected_filtered_table_includes() -> int:
        return 45

    @staticmethod
    def expected_filtered_table_excludes() -> int:
        return 2

    @staticmethod
    def expected_filtered_mix() -> int:
        return 8

    @staticmethod
    def get_profiler_time_partition_results() -> dict:
        return {
            "table_profile": {
                "columnCount": 9.0,
                "rowCount": 101.0,
            },
            "column_profile": [
                {
                    "totalprice": {
                        "distinctCount": 22.0,
                        "distinctProportion": 1.0,
                        "duplicateCount": None,
                        "firstQuartile": -451.0775,
                        "histogram": Histogram(
                            boundaries=[
                                "-999.63 to -665.73",
                                "-665.73 to -331.83",
                                "-331.83 to 2.06",
                                "2.06 to 335.96",
                                "335.96 to 669.86",
                                "669.86 and up",
                            ],
                            frequencies=[3, 7, 6, 1, 2, 3],
                        ),
                        "interQuartileRange": 467.7975,
                        "max": 856.41,
                        "maxLength": None,
                        "mean": -160.16,
                        "median": -288.81,
                        "min": -999.63,
                        "minLength": None,
                        "missingCount": None,
                        "missingPercentage": None,
                        "nonParametricSkew": 0.24351799263849705,
                        "nullCount": 0.0,
                        "nullProportion": 0.0,
                        "stddev": 528.297718809555,
                        "sum": -3518.0,
                        "thirdQuartile": 16.72,
                        "uniqueCount": 22.0,
                        "uniqueProportion": 1.0,
                        "validCount": None,
                        "valuesCount": 22.0,
                        "valuesPercentage": None,
                        "variance": None,
                    }
                }
            ],
        }

    @staticmethod
    def delete_queries() -> List[str]:
        return [
            """
            DELETE FROM e2e_cli_tests.dbt_jaffle.persons
            WHERE person_id IN (1,2)
            """,
        ]

    @staticmethod
    def update_queries() -> List[str]:
        return [
            """
            UPDATE e2e_cli_tests.dbt_jaffle.persons
            SET full_name = 'Bruce Wayne'
            WHERE person_id = 3
            """,
        ]
299,104
test app contains window
from pathlib import Path
from unittest.mock import MagicMock

import toga
from toga.widgets.base import WidgetRegistry
from toga_dummy.utils import TestCase


class AppTests(TestCase):
    def setUp(self):
        super().setUp()

        self.name = "Test App"
        self.app_id = "org.beeware.test-app"
        self.id = "dom-id"

        self.content = MagicMock()
        self.content_id = "content-id"
        self.content.id = self.content_id

        self.started = False

        def test_startup_function(app):
            self.started = True
            return self.content

        self.app = toga.App(
            formal_name=self.name,
            app_id=self.app_id,
            startup=test_startup_function,
            id=self.id,
        )

    def test_app_name(self):
        self.assertEqual(self.app.name, self.name)

    def test_app_icon(self):
        # App icon will default to a name autodetected from the running module
        self.assertEqual(self.app.icon.path, Path("resources/toga"))
        # This icon will be bound
        self.assertIsNotNone(self.app.icon._impl)

        # Set the icon to a different resource
        self.app.icon = "other.icns"
        self.assertEqual(self.app.icon.path, Path("other.icns"))

        # This icon name will *not* exist. The Impl will be the DEFAULT_ICON's impl
        self.assertEqual(self.app.icon._impl, toga.Icon.DEFAULT_ICON._impl)

    def test_app_app_id(self):
        self.assertEqual(self.app.app_id, self.app_id)

    def test_app_id(self):
        self.assertEqual(self.app.id, self.id)

    def test_widgets_registry(self):
        self.assertTrue(isinstance(self.app.widgets, WidgetRegistry))
        self.assertEqual(len(self.app.widgets), 0)

    def test_app_main_loop_call_impl_main_loop(self):
        self.app.main_loop()
        self.assertActionPerformed(self.app, "main loop")

    def test_app_startup(self):
        self.app.startup()
        self.assertTrue(self.started)
        self.assertEqual(self.app.main_window.content, self.content)
        self.assertEqual(self.app.main_window.app, self.app)
        self.assertActionPerformed(self.app.main_window, "show")

    def test_is_full_screen(self):
        self.assertFalse(self.app.is_full_screen)
        self.app.set_full_screen(self.app.main_window)
        self.assertTrue(self.app.is_full_screen)
        self.app.set_full_screen(["window1", "window2", "window3"])
        self.assertTrue(self.app.is_full_screen)
        self.app.set_full_screen()
        self.assertFalse(self.app.is_full_screen)

    def test_app_exit(self):
        def exit_handler(widget):
            return True

        self.app.on_exit = exit_handler
        self.assertIs(self.app.on_exit._raw, exit_handler)
        self.app.exit()
        self.assertActionPerformed(self.app, "exit")

    def test_full_screen(self):
        # set full screen and exit full screen
        self.app.set_full_screen(self.app.main_window)
        self.assertTrue(self.app.is_full_screen)
        self.app.exit_full_screen()
        self.assertFalse(self.app.is_full_screen)
        # set full screen and set full with no args
        self.app.set_full_screen(self.app.main_window)
        self.assertTrue(self.app.is_full_screen)
        self.app.set_full_screen()
        self.assertFalse(self.app.is_full_screen)

    def test_add_window(self):
        test_window = toga.Window()
        self.assertEqual(len(self.app.windows), 0)
        self.app.windows += test_window
        self.assertEqual(len(self.app.windows), 1)
        # Adding the same window twice is a no-op.
        self.app.windows += test_window
        self.assertEqual(len(self.app.windows), 1)
        self.assertIs(test_window.app, self.app)

        not_a_window = "not_a_window"
        with self.assertRaises(TypeError):
            self.app.windows += not_a_window

    def test_remove_window(self):
        test_window = toga.Window()
        self.app.windows += test_window
        self.assertEqual(len(self.app.windows), 1)
        self.app.windows -= test_window
        self.assertEqual(len(self.app.windows), 0)

        not_a_window = "not_a_window"
        with self.assertRaises(TypeError):
            self.app.windows -= not_a_window

        test_window_not_in_app = toga.Window()
        with self.assertRaises(AttributeError):
            self.app.windows -= test_window_not_in_app

    def test_app_contains_window(self):
        # NOTE(review): renamed from the METHOD_NAME placeholder — without the
        # `test_` prefix, unittest discovery never runs this case.
        test_window = toga.Window()
        self.assertFalse(test_window in self.app.windows)
        self.app.windows += test_window
        self.assertTrue(test_window in self.app.windows)

    def test_window_iteration(self):
        test_windows = [
            toga.Window(id=1),
            toga.Window(id=2),
            toga.Window(id=3),
        ]
        for window in test_windows:
            self.app.windows += window
        self.assertEqual(len(self.app.windows), 3)
        for window in self.app.windows:
            self.assertIn(window, test_windows)

    def test_beep(self):
        self.app.beep()
        self.assertActionPerformed(self.app, "beep")

    def test_add_background_task(self):
        async def test_handler(sender):
            pass

        self.app.add_background_task(test_handler)
        self.assertActionPerformedWith(
            self.app,
            "loop:call_soon_threadsafe",
            handler=test_handler,
            args=(None,),
        )

    def test_override_startup(self):
        class BadApp(toga.App):
            "A startup method that doesn't assign main window raises an error (#760)"

            def startup(self):
                # Override startup but don't create a main window
                pass

        app = BadApp(app_name="bad_app", formal_name="Bad Aoo", app_id="org.beeware")
        with self.assertRaisesRegex(
            ValueError,
            r"Application does not have a main window.",
        ):
            app.main_loop()


class DocumentAppTests(TestCase):
    def setUp(self):
        super().setUp()

        self.name = "Test Document App"
        self.app_id = "beeware.org"
        self.id = "id"

        self.content = MagicMock()

        self.app = toga.DocumentApp(self.name, self.app_id, id=self.id)

    def test_app_documents(self):
        self.assertEqual(self.app.documents, [])

        doc = MagicMock()
        self.app._documents.append(doc)
        self.assertEqual(self.app.documents, [doc])

    def test_override_startup(self):
        mock = MagicMock()

        class DocApp(toga.DocumentApp):
            def startup(self):
                # A document app doesn't have to provide a Main Window.
                mock()

        app = DocApp(app_name="docapp", formal_name="Doc App", app_id="org.beeware")
        app.main_loop()
        mock.assert_called_once()
299,105
jq upload
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runs bigquery metrics and uploads the result to GCS."""

import argparse
import glob
import os
import pipes
import re
import subprocess
import sys
import time
import traceback

import requests
import ruamel.yaml as yaml

BACKFILL_DAYS = 30
DEFAULT_JQ_BIN = '/usr/bin/jq'


def check(cmd, **kwargs):
    """Logs and runs the command, raising on errors."""
    print('Run:', ' '.join(pipes.quote(c) for c in cmd), end=' ', file=sys.stderr)
    if hasattr(kwargs.get('stdout'), 'name'):
        print(' > %s' % kwargs['stdout'].name, file=sys.stderr)
    else:
        print()
    # If 'stdin' keyword arg is a string run command and communicate string to stdin
    if 'stdin' in kwargs and isinstance(kwargs['stdin'], str):
        in_string = kwargs['stdin']
        kwargs['stdin'] = subprocess.PIPE
        proc = subprocess.Popen(cmd, **kwargs)
        proc.communicate(input=in_string.encode('utf-8'))
        # NOTE(review): this path deliberately does not raise on a nonzero exit
        # (used for the best-effort `bq show` prompt-skipping hack below).
        return
    subprocess.check_call(cmd, **kwargs)


def validate_metric_name(name):
    """Raise ValueError if name is non-trivial."""
    # Regex '$' symbol matches an optional terminating new line
    # so we have to check that the name
    # doesn't have one if the regex matches.
    if not re.match(r'^[\w-]+$', name) or name[-1] == '\n':
        raise ValueError(name)


def do_jq(jq_filter, data_filename, out_filename, jq_bin=None):
    """Executes jq on a file and outputs the results to a file.

    The jq binary defaults to the module-level DEFAULT_JQ_BIN, resolved at
    call time (a `jq_bin=DEFAULT_JQ_BIN` default would be bound at import
    time and would silently ignore the --jq command-line override).
    """
    if jq_bin is None:
        jq_bin = DEFAULT_JQ_BIN
    with open(out_filename, 'w') as out_file:
        check([jq_bin, jq_filter, data_filename], stdout=out_file)


class BigQuerier:
    """Runs bigquery queries and uploads filtered results to GCS."""

    def __init__(self, project, bucket_path):
        if not project:
            raise ValueError('project', project)
        self.project = project
        if not bucket_path:
            print('Not uploading results, no bucket specified.', file=sys.stderr)
        self.prefix = bucket_path

    def do_query(self, query, out_filename):
        """Executes a bigquery query, outputting the results to a file."""
        cmd = [
            'bq', 'query', '--format=prettyjson',
            '--project_id=%s' % self.project,
            '--max_rows=1000000',  # Results may have more than 100 rows
            query,
        ]
        with open(out_filename, 'w') as out_file:
            check(cmd, stdout=out_file)
            out_file.write('\n')

    def jq_upload(self, config, data_filename):
        """Filters a data file with jq and uploads the results to GCS.

        NOTE(review): renamed from the METHOD_NAME placeholder; run_metric
        references it as a result consumer.
        """
        filtered = 'daily-%s.json' % time.strftime('%Y-%m-%d')
        latest = '%s-latest.json' % config['metric']
        do_jq(config['jqfilter'], data_filename, filtered)

        self.copy(filtered, os.path.join(config['metric'], filtered))
        self.copy(filtered, latest)

    def run_metric(self, config):
        """Runs query and filters results, uploading data to GCS."""
        raw = 'raw-%s.json' % time.strftime('%Y-%m-%d')

        self.update_query(config)
        self.do_query(config['query'], raw)
        self.copy(raw, os.path.join(config['metric'], raw))

        # Run every consumer even if an earlier one fails, then report once.
        consumer_error = False
        for consumer in [self.jq_upload]:
            try:
                consumer(config, raw)
            except (
                    ValueError,
                    KeyError,
                    IOError,
                    requests.exceptions.ConnectionError,
            ):
                print(traceback.format_exc(), file=sys.stderr)
                consumer_error = True
        if consumer_error:
            raise ValueError('Error(s) were thrown by query result consumers.')

    def copy(self, src, dest):
        """Use gsutil to copy src to <bucket_path>/dest with minimal caching."""
        if not self.prefix:
            return  # no destination
        dest = os.path.join(self.prefix, dest)
        check(['gsutil', '-h', 'Cache-Control:max-age=60', 'cp', src, dest])

    @staticmethod
    def update_query(config):
        """Modifies config['query'] based on the metric configuration."""
        last_time = int(time.time() - (60*60*24)*BACKFILL_DAYS)
        config['query'] = config['query'].replace('<LAST_DATA_TIME>', str(last_time))


def all_configs(search='**.yaml'):
    """Returns config files in the metrics dir."""
    return glob.glob(os.path.join(
        os.path.dirname(__file__), 'configs', search))


def ints_to_floats(point):
    # Recursively convert int values (except 'time') to floats in place.
    for key, val in point.items():
        if key == 'time':
            continue
        if isinstance(val, int):
            point[key] = float(val)
        elif isinstance(val, dict):
            point[key] = ints_to_floats(val)
    return point


def main(configs, project, bucket_path):
    """Loads metric config files and runs each metric."""
    queryer = BigQuerier(project, bucket_path)

    # authenticate as the given service account if our environment is providing one
    if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
        keyfile = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
        check(['gcloud', 'auth', 'activate-service-account', f'--key-file={keyfile}'])

    # the 'bq show' command is called as a hack to dodge the config prompts that bq presents
    # the first time it is run. A newline is passed to stdin to skip the prompt for default project
    # when the service account in use has access to multiple projects.
    check(['bq', 'show'], stdin='\n')

    errs = []
    for path in configs or all_configs():
        try:
            with open(path) as config_raw:
                config = yaml.safe_load(config_raw)
            if not config:
                raise ValueError('invalid yaml: %s.' % path)
            config['metric'] = config['metric'].strip()
            validate_metric_name(config['metric'])
            queryer.run_metric(config)
        except (
                ValueError,
                KeyError,
                IOError,
                subprocess.CalledProcessError,
        ):
            print(traceback.format_exc(), file=sys.stderr)
            errs.append(path)

    if errs:
        print('Failed %d configs: %s' % (len(errs), ', '.join(errs)))
        sys.exit(1)


if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument(
        '--config', action='append', help='YAML file describing a metric.')
    PARSER.add_argument(
        '--project',
        default='k8s-gubernator',
        help='Charge the specified account for bigquery usage.')
    PARSER.add_argument(
        '--bucket',
        help='Upload results to the specified gcs bucket.')
    PARSER.add_argument(
        '--jq',
        help='path to jq binary')
    ARGS = PARSER.parse_args()
    if ARGS.jq:
        # Takes effect because do_jq resolves DEFAULT_JQ_BIN at call time.
        DEFAULT_JQ_BIN = ARGS.jq
    main(ARGS.config, ARGS.project, ARGS.bucket)
299,106
full name
import sys import marshal import contextlib import dis from setuptools.extern.packaging import version from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE from . import _imp __all__ = [ 'Require', 'find_module', 'get_module_constant', 'extract_constant' ] class Require: """A prerequisite to building or installing a distribution""" def __init__( self, name, requested_version, module, homepage='', attribute=None, format=None): if format is None and requested_version is not None: format = version.Version if format is not None: requested_version = format(requested_version) if attribute is None: attribute = '__version__' self.__dict__.update(locals()) del self.self def METHOD_NAME(self): """Return full package/distribution name, w/version""" if self.requested_version is not None: return '%s-%s' % (self.name, self.requested_version) return self.name def version_ok(self, version): """Is 'version' sufficiently up-to-date?""" return self.attribute is None or self.format is None or \ str(version) != "unknown" and self.format(version) >= self.requested_version def get_version(self, paths=None, default="unknown"): """Get version number of installed module, 'None', or 'default' Search 'paths' for module. If not found, return 'None'. If found, return the extracted version attribute, or 'default' if no version attribute was specified, or the value cannot be determined without importing the module. The version is formatted according to the requirement's version format (if any), unless it is 'None' or the supplied 'default'. 
""" if self.attribute is None: try: f, p, i = find_module(self.module, paths) if f: f.close() return default except ImportError: return None v = get_module_constant(self.module, self.attribute, default, paths) if v is not None and v is not default and self.format is not None: return self.format(v) return v def is_present(self, paths=None): """Return true if dependency is present on 'paths'""" return self.get_version(paths) is not None def is_current(self, paths=None): """Return true if dependency is present and up-to-date on 'paths'""" version = self.get_version(paths) if version is None: return False return self.version_ok(str(version)) def maybe_close(f): @contextlib.contextmanager def empty(): yield return if not f: return empty() return contextlib.closing(f) def get_module_constant(module, symbol, default=-1, paths=None): """Find 'module' by searching 'paths', and extract 'symbol' Return 'None' if 'module' does not exist on 'paths', or it does not define 'symbol'. If the module defines 'symbol' as a constant, return the constant. Otherwise, return 'default'.""" try: f, path, (suffix, mode, kind) = info = find_module(module, paths) except ImportError: # Module doesn't exist return None with maybe_close(f): if kind == PY_COMPILED: f.read(8) # skip magic & date code = marshal.load(f) elif kind == PY_FROZEN: code = _imp.get_frozen_object(module, paths) elif kind == PY_SOURCE: code = compile(f.read(), path, 'exec') else: # Not something we can parse; we'll have to import it. :( imported = _imp.get_module(module, paths, info) return getattr(imported, symbol, None) return extract_constant(code, symbol, default) def extract_constant(code, symbol, default=-1): """Extract the constant value of 'symbol' from 'code' If the name 'symbol' is bound to a constant value by the Python code object 'code', return that value. If 'symbol' is bound to an expression, return 'default'. Otherwise, return 'None'. Return value is based on the first assignment to 'symbol'. 
'symbol' must be a global, or at least a non-"fast" local in the code block. That is, only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' must be present in 'code.co_names'. """ if symbol not in code.co_names: # name's not there, can't possibly be an assignment return None name_idx = list(code.co_names).index(symbol) STORE_NAME = 90 STORE_GLOBAL = 97 LOAD_CONST = 100 const = default for byte_code in dis.Bytecode(code): op = byte_code.opcode arg = byte_code.arg if op == LOAD_CONST: const = code.co_consts[arg] elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): return const else: const = default def _update_globals(): """ Patch the globals to remove the objects not available on some platforms. XXX it'd be better to test assertions about bytecode instead. """ if not sys.platform.startswith('java') and sys.platform != 'cli': return incompatible = 'extract_constant', 'get_module_constant' for name in incompatible: del globals()[name] __all__.remove(name) _update_globals()
299,107
num qubits
# pylint: disable=wrong-or-nonexistent-copyright-notice from typing import Any, Dict, FrozenSet, Iterable, Mapping, Tuple, TYPE_CHECKING, Union import numpy as np from cirq import linalg, protocols, value from cirq._compat import proper_repr from cirq.ops import raw_types if TYPE_CHECKING: import cirq # TODO(#3241): support qudits and non-square operators. class KrausChannel(raw_types.Gate): """A generic channel that can record the index of its selected operator. Args: kraus_ops: a list of Kraus operators, formatted as numpy array. Currently, only square-matrix operators on qubits (not qudits) are supported by this type. key: an optional measurement key string for this channel. Simulations which select a single Kraus operator to apply will store the index of that operator in the measurement result list with this key. validate: if True, validate that `kraus_ops` describe a valid channel. This validation can be slow; prefer pre-validating if possible. """ def __init__( self, kraus_ops: Iterable[np.ndarray], key: Union[str, 'cirq.MeasurementKey', None] = None, validate: bool = False, ): kraus_ops = list(kraus_ops) if not kraus_ops: raise ValueError('KrausChannel must have at least one operation.') METHOD_NAME = np.log2(kraus_ops[0].shape[0]) if not METHOD_NAME.is_integer() or kraus_ops[0].shape[1] != kraus_ops[0].shape[0]: raise ValueError( f'Input Kraus ops of shape {kraus_ops[0].shape} does not ' 'represent a square operator over qubits.' 
) self._num_qubits = int(METHOD_NAME) for i, op in enumerate(kraus_ops): if not op.shape == kraus_ops[0].shape: raise ValueError( 'Inconsistent Kraus operator shapes: ' f'op[0]: {kraus_ops[0].shape}, op[{i}]: {op.shape}' ) if validate and not linalg.is_cptp(kraus_ops=kraus_ops): raise ValueError('Kraus operators do not describe a CPTP map.') self._kraus_ops = kraus_ops if not isinstance(key, value.MeasurementKey) and key is not None: key = value.MeasurementKey(key) self._key = key @staticmethod def from_channel(channel: 'cirq.Gate', key: Union[str, 'cirq.MeasurementKey', None] = None): """Creates a copy of a channel with the given measurement key.""" return KrausChannel(kraus_ops=list(protocols.kraus(channel)), key=key) def __eq__(self, other) -> bool: # TODO(#3241): provide a protocol to test equivalence between channels, # ignoring measurement keys and channel/mixture distinction if not isinstance(other, KrausChannel): return NotImplemented if self._key != other._key: return False return np.allclose(np.asarray(self._kraus_ops), np.asarray(other._kraus_ops)) def METHOD_NAME(self) -> int: return self._num_qubits def _kraus_(self): return self._kraus_ops def _measurement_key_name_(self) -> str: if self._key is None: return NotImplemented return str(self._key) def _measurement_key_obj_(self) -> 'cirq.MeasurementKey': if self._key is None: return NotImplemented return self._key def _with_measurement_key_mapping_(self, key_map: Mapping[str, str]): if self._key is None: return NotImplemented if self._key not in key_map: return self return KrausChannel(kraus_ops=self._kraus_ops, key=key_map[str(self._key)]) def _with_key_path_(self, path: Tuple[str, ...]): return KrausChannel(kraus_ops=self._kraus_ops, key=protocols.with_key_path(self._key, path)) def _with_key_path_prefix_(self, prefix: Tuple[str, ...]): return KrausChannel( kraus_ops=self._kraus_ops, key=protocols.with_key_path_prefix(self._key, prefix) ) def _with_rescoped_keys_( self, path: Tuple[str, ...], 
bindable_keys: FrozenSet['cirq.MeasurementKey'] ): return KrausChannel( kraus_ops=self._kraus_ops, key=protocols.with_rescoped_keys(self._key, path, bindable_keys), ) def __str__(self): if self._key is not None: return f'KrausChannel({self._kraus_ops}, key={self._key})' return f'KrausChannel({self._kraus_ops})' def __repr__(self): args = ['kraus_ops=[' + ', '.join(proper_repr(op) for op in self._kraus_ops) + ']'] if self._key is not None: args.append(f'key=\'{self._key}\'') return f'cirq.KrausChannel({", ".join(args)})' def _json_dict_(self) -> Dict[str, Any]: return protocols.obj_to_dict_helper(self, ['_kraus_ops', '_key']) @classmethod def _from_json_dict_(cls, _kraus_ops, _key, **kwargs): ops = [np.asarray(op) for op in _kraus_ops] return cls(kraus_ops=ops, key=_key)
299,108
test renders proto struct without lists
#!/usr/bin/env python """Tests for API value renderers.""" import base64 from absl import app from absl.testing import absltest from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import flows as rdf_flows from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_proto import tests_pb2 from grr_response_server.gui import api_value_renderers from grr.test_lib import test_lib class ApiRDFProtoStructRendererSample(rdf_structs.RDFProtoStruct): protobuf = tests_pb2.ApiRDFProtoStructRendererSample class ApiAnyValueRendererTest(absltest.TestCase): def testRenderValueSimple(self): value = rdf_client.User() value.username = "foobar" renderer = api_value_renderers.ApiAnyValueRenderer() rendered = renderer.RenderValue(rdf_structs.AnyValue.Pack(value)) self.assertEqual( rendered, { "type": "User", "value": { "username": { "type": "unicode", "value": "foobar", }, }, }) def testRenderValueNotExisting(self): value = rdf_structs.AnyValue() value.type_url = "type.googleapis.com/foo.bar.Quux" value.value = b"foobarbaz" renderer = api_value_renderers.ApiAnyValueRenderer() rendered = renderer.RenderValue(value) self.assertEqual( rendered, { "type": "AnyValue", "value": { "type_url": { "type": "unicode", "value": value.type_url, }, "value": { "type": "bytes", "value": base64.b64encode(b"foobarbaz").decode("ascii"), }, }, }) class ApiRDFProtoStructRendererTest(test_lib.GRRBaseTest): """Test for ApiRDFProtoStructRenderer.""" def METHOD_NAME(self): sample = ApiRDFProtoStructRendererSample(index=0, values=["foo", "bar"]) renderer = api_value_renderers.ApiRDFProtoStructRenderer(limit_lists=0) data = renderer.RenderValue(sample) self.assertEqual( data, { "type": "ApiRDFProtoStructRendererSample", "value": { "index": { "type": "long", "value": 0 }, "values": "<lists are omitted>" } }) def testRendersProtoStructWithoutListsLimit(self): sample = ApiRDFProtoStructRendererSample(index=0, values=["foo", "bar"]) renderer = 
api_value_renderers.ApiRDFProtoStructRenderer(limit_lists=-1) data = renderer.RenderValue(sample) self.assertEqual( data, { "type": "ApiRDFProtoStructRendererSample", "value": { "index": { "type": "long", "value": 0 }, "values": [{ "type": "unicode", "value": "foo" }, { "type": "unicode", "value": "bar" }] } }) def testRendersProtoStructWithListsLimit(self): sample = ApiRDFProtoStructRendererSample(index=0, values=["foo", "bar"]) renderer = api_value_renderers.ApiRDFProtoStructRenderer(limit_lists=1) data = renderer.RenderValue(sample) self.assertEqual( data, { "type": "ApiRDFProtoStructRendererSample", "value": { "index": { "type": "long", "value": 0 }, "values": [{ "type": "unicode", "value": u"foo" }, { "url": "to/be/implemented", "type": "FetchMoreLink" }] } }) class ApiGrrMessageRendererTest(test_lib.GRRBaseTest): """Test for ApiGrrMessageRenderer.""" def testRendersGrrMessagePayloadAsStructuredData(self): sample = rdf_flows.GrrMessage( task_id=42, payload=ApiRDFProtoStructRendererSample( index=43, values=["foo", "bar"])) renderer = api_value_renderers.ApiGrrMessageRenderer() data = renderer.RenderValue(sample) model_data = { "type": "GrrMessage", "value": { "task_id": { "type": "long", "value": 42 }, "payload_type": { "type": "unicode", "value": "ApiRDFProtoStructRendererSample" }, "payload": { "type": "ApiRDFProtoStructRendererSample", "value": { "index": { "type": "long", "value": 43 }, "values": [{ "type": "unicode", "value": "foo" }, { "type": "unicode", "value": "bar" }] } } } } self.assertEqual(data, model_data) def main(argv): test_lib.main(argv) if __name__ == "__main__": app.run(main)
299,109
test only split on cr lf
import io import email import unittest from email.message import Message, EmailMessage from email.policy import default from test.test_email import TestEmailBase class TestCustomMessage(TestEmailBase): class MyMessage(Message): def __init__(self, policy): self.check_policy = policy super().__init__() MyPolicy = TestEmailBase.policy.clone(linesep='boo') def test_custom_message_gets_policy_if_possible_from_string(self): msg = email.message_from_string("Subject: bogus\n\nmsg\n", self.MyMessage, policy=self.MyPolicy) self.assertIsInstance(msg, self.MyMessage) self.assertIs(msg.check_policy, self.MyPolicy) def test_custom_message_gets_policy_if_possible_from_file(self): source_file = io.StringIO("Subject: bogus\n\nmsg\n") msg = email.message_from_file(source_file, self.MyMessage, policy=self.MyPolicy) self.assertIsInstance(msg, self.MyMessage) self.assertIs(msg.check_policy, self.MyPolicy) # XXX add tests for other functions that take Message arg. class TestParserBase: def METHOD_NAME(self): # The unicode line splitter splits on unicode linebreaks, which are # more numerous than allowed by the email RFCs; make sure we are only # splitting on those two. 
for parser in self.parsers: with self.subTest(parser=parser.__name__): msg = parser( "Next-Line: not\x85broken\r\n" "Null: not\x00broken\r\n" "Vertical-Tab: not\vbroken\r\n" "Form-Feed: not\fbroken\r\n" "File-Separator: not\x1Cbroken\r\n" "Group-Separator: not\x1Dbroken\r\n" "Record-Separator: not\x1Ebroken\r\n" "Line-Separator: not\u2028broken\r\n" "Paragraph-Separator: not\u2029broken\r\n" "\r\n", policy=default, ) self.assertEqual(msg.items(), [ ("Next-Line", "not\x85broken"), ("Null", "not\x00broken"), ("Vertical-Tab", "not\vbroken"), ("Form-Feed", "not\fbroken"), ("File-Separator", "not\x1Cbroken"), ("Group-Separator", "not\x1Dbroken"), ("Record-Separator", "not\x1Ebroken"), ("Line-Separator", "not\u2028broken"), ("Paragraph-Separator", "not\u2029broken"), ]) self.assertEqual(msg.get_payload(), "") class MyMessage(EmailMessage): pass def test_custom_message_factory_on_policy(self): for parser in self.parsers: with self.subTest(parser=parser.__name__): MyPolicy = default.clone(message_factory=self.MyMessage) msg = parser("To: foo\n\ntest", policy=MyPolicy) self.assertIsInstance(msg, self.MyMessage) def test_factory_arg_overrides_policy(self): for parser in self.parsers: with self.subTest(parser=parser.__name__): MyPolicy = default.clone(message_factory=self.MyMessage) msg = parser("To: foo\n\ntest", Message, policy=MyPolicy) self.assertNotIsInstance(msg, self.MyMessage) self.assertIsInstance(msg, Message) # Play some games to get nice output in subTest. This code could be clearer # if staticmethod supported __name__. 
def message_from_file(s, *args, **kw): f = io.StringIO(s) return email.message_from_file(f, *args, **kw) class TestParser(TestParserBase, TestEmailBase): parsers = (email.message_from_string, message_from_file) def message_from_bytes(s, *args, **kw): return email.message_from_bytes(s.encode(), *args, **kw) def message_from_binary_file(s, *args, **kw): f = io.BytesIO(s.encode()) return email.message_from_binary_file(f, *args, **kw) class TestBytesParser(TestParserBase, TestEmailBase): parsers = (message_from_bytes, message_from_binary_file) if __name__ == '__main__': unittest.main()
299,110
check input
# -*- coding: utf-8 -*- # Copyright 2020 United Kingdom Research and Innovation # Copyright 2020 The University of Manchester # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt from cil.framework import Processor, AcquisitionData import numpy as np import logging logger = logging.getLogger(__name__) class CofR_xcorrelation(Processor): r'''CofR_xcorrelation processor uses the cross-correlation algorithm on a single slice between two projections at 180 degrees inteval. For use on parallel-beam geometry it requires two projections 180 degree apart. :param slice_index: An integer defining the vertical slice to run the algorithm on. :type slice_index: int, str='centre', optional :param projection_index: An integer defining the first projection the algorithm will use. The second projection at 180 degrees will be located automatically. 
:type projection_index: int, optional :param ang_tol: The angular tolerance in degrees between the two input projections 180degree gap :type ang_tol: float, optional :return: returns an AcquisitionData object with an updated AcquisitionGeometry :rtype: AcquisitionData ''' def __init__(self, slice_index='centre', projection_index=0, ang_tol=0.1): kwargs = { 'slice_index': slice_index, 'ang_tol': ang_tol, 'projection_index': 0 } super(CofR_xcorrelation, self).__init__(**kwargs) def METHOD_NAME(self, data): if not isinstance(data, AcquisitionData): raise Exception('Processor supports only AcquisitionData') if data.geometry == None: raise Exception('Geometry is not defined.') if data.geometry.geom_type == 'cone': raise ValueError("Only parallel-beam data is supported with this algorithm") if data.geometry.channels > 1: raise ValueError("Only single channel data is supported with this algorithm") if self.slice_index != 'centre': try: int(self.slice_index) except: raise ValueError("slice_index expected to be a positive integer or the string 'centre'. Got {0}".format(self.slice_index)) if self.slice_index < 0 or self.slice_index >= data.get_dimension_size('vertical'): raise ValueError('slice_index is out of range. Must be in range 0-{0}. Got {1}'.format(data.get_dimension_size('vertical'), self.slice_index)) if self.projection_index >= data.geometry.config.angles.num_positions: raise ValueError('projection_index is out of range. Must be less than {0}. 
Got {1}'.format(data.geometry.config.angles.num_positions, self.projection_index)) return True def process(self, out=None): data_full = self.get_input() if data_full.geometry.dimension == '3D': data = data_full.get_slice(vertical=self.slice_index) else: data = data_full geometry = data.geometry angles_deg = geometry.config.angles.angle_data.copy() if geometry.config.angles.angle_unit == "radian": angles_deg *= 180/np.pi #keep angles in range -180 to 180 while angles_deg.min() <=-180: angles_deg[angles_deg<=-180] += 360 while angles_deg.max() > 180: angles_deg[angles_deg>180] -= 360 target = angles_deg[self.projection_index] + 180 if target <= -180: target += 360 elif target > 180: target -= 360 ind = np.abs(angles_deg - target).argmin() ang_diff = abs(angles_deg[ind] - angles_deg[0]) if abs(ang_diff-180) > self.ang_tol: raise ValueError('Method requires projections at 180 +/- {0} degrees interval, got {1}.\nPick a different initial projection or increase the angular tolerance `ang_tol`.'.format(self.ang_tol, ang_diff)) #cross correlate single slice with the 180deg one reversed data1 = data.get_slice(angle=0).as_array() data2 = np.flip(data.get_slice(angle=ind).as_array()) border = int(data1.size * 0.05) lag = np.correlate(data1[border:-border],data2[border:-border],"full") ind = lag.argmax() #fit quadratic to 3 centre points a = (lag[ind+1] + lag[ind-1] - 2*lag[ind]) * 0.5 b = a + lag[ind] - lag[ind-1] quad_max = -b / (2*a) + ind shift = (quad_max - (lag.size-1)/2)/2 shift = np.floor(shift *100 +0.5)/100 new_geometry = data_full.geometry.copy() #set up new geometry new_geometry.config.system.rotation_axis.position[0] = shift * geometry.config.panel.pixel_size[0] logger.info("Centre of rotation correction found using cross-correlation") logger.info("Calculated from slice: %s", str(self.slice_index)) logger.info("Centre of rotation shift = %f pixels", shift) logger.info("Centre of rotation shift = %f units at the object", shift * geometry.config.panel.pixel_size[0]) 
logger.info("Return new dataset with centred geometry") if out is None: return AcquisitionData(array = data_full, deep_copy = True, geometry = new_geometry, supress_warning=True) else: out.geometry = new_geometry
299,111
fill na n towards seafloor
import logging import numpy as np from scipy.ndimage import map_coordinates, grey_dilation import logging; logging.captureWarnings(True); logger = logging.getLogger(__name__) from scipy.interpolate import interp1d, LinearNDInterpolator logger = logging.getLogger('opendrift') # using common logger def expand_numpy_array(data): if isinstance(data, np.ma.MaskedArray): logger.warning('Converting masked array to numpy array before interpolating') data = np.ma.filled(data, fill_value=np.nan) if not np.isfinite(data).any(): logger.warning('Only NaNs, returning') return mask = ~np.isfinite(data) data[mask] = np.finfo(np.float64).min data[mask] = grey_dilation(data, size=3)[mask] data[data==np.finfo(np.float64).min] = np.nan ########################### # 2D interpolator classes ########################### class Nearest2DInterpolator(): def __init__(self, xgrid, ygrid, x, y): self.x = x self.y = y self.xi = (x - xgrid.min())/(xgrid.max()-xgrid.min())*len(xgrid) self.yi = (y - ygrid.min())/(ygrid.max()-ygrid.min())*len(ygrid) self.xi = np.round(self.xi).astype(np.uint32) self.yi = np.round(self.yi).astype(np.uint32) self.xi[self.xi >= len(xgrid)] = len(xgrid)-1 self.yi[self.yi >= len(ygrid)] = len(ygrid)-1 def __call__(self, array2d): return array2d[self.yi, self.xi] class NDImage2DInterpolator(): def __init__(self, xgrid, ygrid, x, y): self.x = x self.y = y self.xi = (x - xgrid.min())/(xgrid.max()-xgrid.min())*len(xgrid) self.yi = (y - ygrid.min())/(ygrid.max()-ygrid.min())*len(ygrid) def __call__(self, array2d): try: array2d = np.ma.array(array2d, mask=array2d.mask) array2d[array2d.mask] = np.nan # Gives holes except: pass return np.ma.masked_invalid( map_coordinates(array2d, [self.yi, self.xi], cval=np.nan, order=0)) class LinearND2DInterpolator(): logger = logging.getLogger('opendrift') def __init__(self, xgrid, ygrid, x, y): self.block_x, self.block_y = np.meshgrid(xgrid, ygrid) self.block_x = self.block_x.ravel() self.block_y = self.block_y.ravel() self.x = x self.y = y 
def __call__(self, array2d): array_ravel = array2d.ravel() valid = np.isfinite(array_ravel) #if isinstance(array2d.mask, np.ndarray): # valid = ~array2d.ravel().mask #elif array2d.mask == False: # valid = np.ones(array_ravel.shape, dtype=bool) #elif array2d.mask == True: # valid = np.zeros(array_ravel.shape, dtype=bool) if hasattr(self, 'interpolator'): if not np.array_equal(valid, self.interpolator.valid): logger.debug('Cannot reuse interpolator - validity of ' 'array is different from original.') if hasattr(self, 'interpolator') and (np.array_equal( valid, self.interpolator.valid)): # Reuse stored interpolator with new data self.interpolator.values[:, 0] = \ (array_ravel[valid]) else: # Make new interpolator for given x,y self.interpolator = LinearNDInterpolator( (self.block_y[valid], self.block_x[valid]), array_ravel[valid]) # Store valid array, to determine if can be used again self.interpolator.valid = valid # Call interpolator to avoid threading-problem: # https://github.com/scipy/scipy/issues/8856 self.interpolator((0,0)) return self.interpolator(self.y, self.x) class Linear2DInterpolator(): logger = logging.getLogger('opendrift') def __init__(self, xgrid, ygrid, x, y): self.x = x self.y = y self.xi = (x - xgrid[0])/(xgrid[-1]-xgrid[0])*(len(xgrid)-1) self.yi = (y - ygrid[0])/(ygrid[-1]-ygrid[0])*(len(ygrid)-1) def __call__(self, array2d): if isinstance(array2d,np.ma.MaskedArray): logger.debug('Converting masked array to numpy array for interpolation') array2d = np.ma.filled(array2d, fill_value=np.nan) if not np.isfinite(array2d).any(): logger.warning('Only NaNs input to linearNDFast - returning') return np.nan*np.ones(len(self.xi)) # Fill NaN-values with nearby real values interp = map_coordinates(array2d, [self.yi, self.xi], cval=np.nan, order=1) missing = np.where(~np.isfinite(interp))[0] i=0 while len(missing) > 0: i += 1 if i > 10: logger.warning('Still NaN-values after 10 iterations, exiting!') return interp logger.debug('Linear2DInterpolator 
informational: NaN values for %i elements, expanding data %i' % (len(missing), i)) expand_numpy_array(array2d) interp[missing] = map_coordinates( array2d, [self.yi[missing], self.xi[missing]], cval=np.nan, order=1, mode='nearest') missing = np.where(~np.isfinite(interp))[0] return interp horizontal_interpolation_methods = { 'nearest': Nearest2DInterpolator, 'ndimage': NDImage2DInterpolator, 'linearND': LinearND2DInterpolator, 'linearNDFast': Linear2DInterpolator} ########################### # 1D interpolator classes ########################### class Nearest1DInterpolator(): def __init__(self, zgrid, z): # Truncating above and below z[z < zgrid.min()] = zgrid.min() z[z > zgrid.max()] = zgrid.max() # Interpolator zgrid -> index if zgrid[1] > zgrid[0]: # increasing z_interpolator = interp1d(zgrid, range(len(zgrid))) else: # decreasing values, must flip for interpolator z_interpolator = interp1d(zgrid[::-1], range(len(zgrid))[::-1]) # Indices corresponding to nearest value in zgrid self.zi = np.round(z_interpolator(z)).astype(np.uint8) self.zi[self.zi < 0] = 0 self.zi[self.zi >= len(zgrid)] = len(zgrid) - 1 def __call__(self, array2d): return array2d[self.zi, range(len(self.zi))] class Linear1DInterpolator(): def __init__(self, zgrid, z): # Truncating above and below z[z < zgrid.min()] = zgrid.min() z[z > zgrid.max()] = zgrid.max() # Interpolator zgrid -> index if zgrid[1] > zgrid[0]: # increasing z_interpolator = interp1d(zgrid, range(len(zgrid))) else: # decreasing values, must flip for interpolator z_interpolator = interp1d(zgrid[::-1], range(len(zgrid))[::-1]) z_interpolator(z[0]) # to prevent threading issues # Indices corresponding to layers above and below interp_zi = z_interpolator(z) self.index_above = np.floor(interp_zi).astype(np.int8) self.index_above[self.index_above < 0] = 0 self.index_below = np.minimum(self.index_above + 1, len(zgrid) - 1) self.weight_above = 1 - (interp_zi - self.index_above) self.xi = range(len(z)) def __call__(self, array2d): return 
array2d[self.index_above, self.xi]*self.weight_above + \ array2d[self.index_below, self.xi]*(1 - self.weight_above) vertical_interpolation_methods = { 'nearest': Nearest1DInterpolator, 'linear': Linear1DInterpolator} def METHOD_NAME(array): """Extrapolate NaN-values (missing) towards seafloor""" filled = False for i in range(1, array.shape[0]): mask = np.isnan(array[i,:,:]) if np.sum(mask) > 0: array[i, mask] = array[i-1, mask] filled = True return filled
299,112
is gzip
"""Most of this code is from torchvision. I will remove all this once verbosity is reduced. More info: https://github.com/pytorch/vision/issues/2830 """ import gzip import hashlib import os import tarfile import urllib import zipfile from typing import Optional from torch.hub import tqdm from .typing import TypePath def calculate_md5(fpath, chunk_size=1024 * 1024): md5 = hashlib.md5() with open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(chunk_size), b''): md5.update(chunk) return md5.hexdigest() def check_md5(fpath, md5, **kwargs): return md5 == calculate_md5(fpath, **kwargs) def check_integrity(fpath, md5=None): if not os.path.isfile(fpath): return False if md5 is None: return True return check_md5(fpath, md5) def gen_bar_updater(): pbar = tqdm(total=None) def bar_update(count, block_size, total_size): if pbar.total is None and total_size: pbar.total = total_size progress_bytes = count * block_size pbar.update(progress_bytes - pbar.n) return bar_update # Adapted from torchvision, removing print statements def download_and_extract_archive( url: str, download_root: TypePath, extract_root: Optional[TypePath] = None, filename: Optional[TypePath] = None, md5: Optional[str] = None, remove_finished: bool = False, ) -> None: download_root = os.path.expanduser(download_root) if extract_root is None: extract_root = download_root if not filename: filename = os.path.basename(url) download_url(url, download_root, filename, md5) archive = os.path.join(download_root, filename) extract_archive(archive, extract_root, remove_finished) def _is_tarxz(filename): return filename.endswith('.tar.xz') def _is_tar(filename): return filename.endswith('.tar') def _is_targz(filename): return filename.endswith('.tar.gz') def _is_tgz(filename): return filename.endswith('.tgz') def METHOD_NAME(filename): return filename.endswith('.gz') and not filename.endswith('.tar.gz') def _is_zip(filename): return filename.endswith('.zip') def extract_archive(from_path, to_path=None, 
remove_finished=False): if to_path is None: to_path = os.path.dirname(from_path) if _is_tar(from_path): with tarfile.open(from_path, 'r') as tar: tar.extractall(path=to_path) elif _is_targz(from_path) or _is_tgz(from_path): with tarfile.open(from_path, 'r:gz') as tar: tar.extractall(path=to_path) elif _is_tarxz(from_path): with tarfile.open(from_path, 'r:xz') as tar: tar.extractall(path=to_path) elif METHOD_NAME(from_path): stem = os.path.splitext(os.path.basename(from_path))[0] to_path = os.path.join(to_path, stem) with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f: out_f.write(zip_f.read()) elif _is_zip(from_path): with zipfile.ZipFile(from_path, 'r') as z: z.extractall(to_path) else: raise ValueError(f'Extraction of {from_path} not supported') if remove_finished: os.remove(from_path) # Adapted from torchvision, removing print statements def download_url( url: str, root: TypePath, filename: Optional[TypePath] = None, md5: Optional[str] = None, ) -> None: """Download a file from a url and place it in root. Args: url: URL to download file from root: Directory to place downloaded file in filename: Name to save the file under. If ``None``, use the basename of the URL md5: MD5 checksum of the download. If None, do not check """ root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) os.makedirs(root, exist_ok=True) # check if file is already present locally if not check_integrity(fpath, md5): try: print('Downloading ' + url + ' to ' + fpath) # noqa: T201 urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(), ) except (urllib.error.URLError, OSError) as e: if url[:5] == 'https': url = url.replace('https:', 'http:') message = ( 'Failed download. Trying https -> http instead. 
Downloading ' + url + ' to ' + fpath ) print(message) # noqa: T201 urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater(), ) else: raise e # check integrity of downloaded file if not check_integrity(fpath, md5): raise RuntimeError('File not found or corrupted.')
299,113
test recv with closed connection
""" Copyright(C) 2023 Altom Consulting This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. """ import json import unittest.mock as mock import pytest from alttester._websocket import Store, WebsocketConnection, CommandHandler, NotificationHandler from alttester.exceptions import ConnectionError class TestStore: @pytest.fixture(autouse=True) def setup(self): self.store = Store() def test_has(self): assert not self.store.has("key") self.store.push("key", mock.sentinel.value) assert self.store.has("key") def test_push(self): assert not self.store.has("key") self.store.push("key", mock.sentinel.value) assert self.store.has("key") assert self.store.pop("key") == mock.sentinel.value assert not self.store.has("key") def test_pop(self): assert not self.store.has("key") assert self.store.pop("key") is None self.store.push("key", mock.sentinel.first) self.store.push("key", mock.sentinel.second) assert self.store.has("key") assert self.store.pop("key") == mock.sentinel.first assert self.store.has("key") assert self.store.pop("key") == mock.sentinel.second assert not self.store.has("key") assert self.store.pop("key") is None def test_multiple_keys(self): assert not self.store.has("a") assert self.store.pop("a") is None assert not self.store.has("b") assert self.store.pop("b") is None self.store.push("a", mock.sentinel.first) self.store.push("b", mock.sentinel.second) self.store.push("a", mock.sentinel.third) assert 
self.store.has("a") assert self.store.pop("a") == mock.sentinel.first assert self.store.has("b") assert self.store.pop("b") == mock.sentinel.second assert self.store.has("a") assert self.store.pop("a") == mock.sentinel.third assert not self.store.has("a") assert self.store.pop("a") is None assert not self.store.has("a") assert self.store.pop("a") is None class TestWebsocketConnection: @pytest.fixture(autouse=True) def setup(self): self.host = "127.0.0.1" self.port = 1300 self.timeout = 5 self.websocket_mock = mock.Mock() self.create_connection_mock = mock.Mock( return_value=self.websocket_mock ) self.connection = WebsocketConnection( host=self.host, port=self.port, timeout=self.timeout, command_handler=CommandHandler(), notification_handler=NotificationHandler() ) self.connection._create_connection = self.create_connection_mock self.connection._is_open = True def test_connect(self): self.connection.connect() self.create_connection_mock.assert_called_once() self.websocket_mock.close.assert_not_called() def test_on_open(self): self.connection._is_open = False self.connection._on_open(self.websocket_mock) assert self.connection._is_open def test_on_message(self): command = { "messageId": "0", "commandName": "TestCommand" } self.connection._command_handler.set_current_command(command) assert not self.connection._command_handler.has_response() self.connection._on_message(self.connection._websocket, json.dumps(command)) assert self.connection._command_handler.has_response() assert self.connection._command_handler.get_response() == command def test_on_error(self): assert not self.connection._errors error_message = "Error message." 
self.connection._on_error(self.connection._websocket, error_message) assert self.connection._errors.pop() == error_message def test_on_close(self): self.connection._is_open = True self.connection._on_close(self.websocket_mock, None, None) assert not self.connection._is_open assert self.connection._websocket is None def test_recv(self): command = { "messageId": "0", "commandName": "TestCommand" } self.connection.connect() self.connection._websocket = self.websocket_mock self.connection._command_handler.set_current_command(command) self.connection._command_handler.handle_command(command) response = self.connection.recv() assert response == command def METHOD_NAME(self): self.connection._is_open = False with pytest.raises(ConnectionError): self.connection.recv() def test_send(self): self.connection.connect() self.connection._websocket = self.websocket_mock command = { "messageId": "0", "commandName": "TestCommand" } self.connection.send(command) assert self.connection._command_handler.get_current_command() == (command["messageId"], command["commandName"]) def test_send_with_close_connection(self): self.connection._is_open = False command = { "messageId": "0", "commandName": "TestCommand" } with pytest.raises(ConnectionError): self.connection.send(command) assert self.connection._command_handler.get_current_command() != (command["messageId"], command["commandName"])
299,114
get indexed schemas for user
# -*- coding: utf-8 -*- # # This file is part of CERN Analysis Preservation Framework. # Copyright (C) 2016 CERN. # # CERN Analysis Preservation Framework is free software; you can redistribute # it and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # CERN Analysis Preservation Framework is distributed in the hope that it will # be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with CERN Analysis Preservation Framework; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. 
"""Methods for schemas module.""" from itertools import groupby from invenio_access.models import ActionRoles, ActionUsers from invenio_access.permissions import Permission from invenio_cache import current_cache from sqlalchemy.event import listen from .models import Schema from .permissions import ( AdminSchemaPermission, ReadSchemaPermission, deposit_schema_create_action, deposit_schema_read_action, record_schema_create_action, record_schema_read_action, ) def _filter_only_latest(schemas_list): """Return only latest version of schemas.""" return [next(g) for k, g in groupby(schemas_list, lambda s: s.name)] @current_cache.memoize() def get_mappings(): """Implementation for mappings getter for invenio_search module.""" mappings = {} schemas = Schema.query.filter_by(is_indexed=True).all() for schema in schemas: mappings[schema.deposit_index] = {} mappings[schema.record_index] = {} return mappings def _filter_by_deposit_read_access(schemas_list): """Return only schemas that user has read access to.""" return [ x for x in schemas_list if Permission(deposit_schema_read_action(x.id)).can() ] def _filter_by_deposit_create_access(schemas_list): """Return only schemas that user has create access to.""" return [ x for x in schemas_list if Permission(deposit_schema_create_action(x.id)).can() ] def _filter_by_admin_access(schemas_list): """Return only schemas that user has admin access to.""" return [x for x in schemas_list if AdminSchemaPermission(x).can()] def _filter_by_record_read_access(schemas_list): """Return only schemas that user has read access to.""" return [ x for x in schemas_list if Permission(record_schema_read_action(x.id)).can() ] def _filter_by_record_create_access(schemas_list): """Return only schemas that user has create access to.""" return [ x for x in schemas_list if Permission(record_schema_create_action(x.id)).can() ] @current_cache.memoize() def get_cached_indexed_schemas_for_user_create(latest=True, user_id=None): """Return all indexed schemas 
current user has read access to.""" schemas = get_indexed_schemas(latest=latest) schemas = _filter_by_deposit_create_access(schemas) return schemas @current_cache.memoize() def get_cached_indexed_schemas_for_user_admin(latest=True, user_id=None): """Return all indexed schemas current user has read access to.""" schemas = get_indexed_schemas(latest=latest) schemas = _filter_by_admin_access(schemas) return schemas @current_cache.memoize() def get_cached_indexed_schemas_for_user_read(latest=True, user_id=None): """Return all indexed schemas current user has read access to.""" schemas = get_indexed_schemas(latest=latest) schemas = _filter_by_deposit_read_access(schemas) return schemas @current_cache.memoize() def get_cached_indexed_record_schemas_for_user_create( latest=True, user_id=None ): """Return all indexed schemas current user has read access to.""" schemas = get_indexed_schemas(latest=latest) schemas = _filter_by_record_create_access(schemas) return schemas @current_cache.memoize() def get_cached_indexed_record_schemas_for_user_read(latest=True, user_id=None): """Return all indexed schemas current user has read access to.""" schemas = get_indexed_schemas(latest=latest) schemas = _filter_by_record_read_access(schemas) return schemas def get_indexed_schemas(latest=True): """Return all indexed schemas current user has read access to.""" schemas = ( Schema.query.filter_by(is_indexed=True) .order_by( Schema.name, Schema.major.desc(), Schema.minor.desc(), Schema.patch.desc(), ) .all() ) if latest: schemas = _filter_only_latest(schemas) return schemas def _filter_by_read_access(schemas_list): """Return only schemas that user has read access to.""" return [x for x in schemas_list if ReadSchemaPermission(x).can()] def get_schemas_for_user(latest=True): """Return all schemas current user has read access to.""" schemas = Schema.query.order_by( Schema.name, Schema.major.desc(), Schema.minor.desc(), Schema.patch.desc(), ).all() schemas = _filter_by_read_access(schemas) if 
latest: schemas = _filter_only_latest(schemas) return schemas def METHOD_NAME(latest=True): """Return all indexed schemas current user has read access to.""" schemas = ( Schema.query.filter_by(is_indexed=True) .order_by( Schema.name, Schema.major.desc(), Schema.minor.desc(), Schema.patch.desc(), ) .all() ) schemas = _filter_by_read_access(schemas) if latest: schemas = _filter_only_latest(schemas) return schemas def clear_schema_access_cache(mapper, connection, target): if target.action.startswith("deposit-schema-") or target.action.startswith( "record-schema-" ): get_cached_indexed_schemas_for_user_create.delete_memoized() get_cached_indexed_schemas_for_user_read.delete_memoized() get_cached_indexed_schemas_for_user_admin.delete_memoized() get_cached_indexed_record_schemas_for_user_create.delete_memoized() get_cached_indexed_record_schemas_for_user_read.delete_memoized() listen(ActionUsers, "after_insert", clear_schema_access_cache) listen(ActionUsers, "after_delete", clear_schema_access_cache) listen(ActionUsers, "after_update", clear_schema_access_cache) listen(ActionRoles, "after_insert", clear_schema_access_cache) listen(ActionRoles, "after_delete", clear_schema_access_cache) listen(ActionRoles, "after_update", clear_schema_access_cache)
299,115
binary response
import os
import json

try:
    from urllib.parse import parse_qs
except ImportError:
    from urlparse import parse_qs

import boto3.session
from chalice import Chalice, BadRequestError, NotFoundError, Response,\
    CORSConfig, UnauthorizedError, AuthResponse, AuthRoute

# This is a test app that is used by integration tests.
# This app exercises all the major features of chalice
# and helps prevent regressions.

app = Chalice(app_name=os.environ['APP_NAME'])
app.websocket_api.session = boto3.session.Session()
app.experimental_feature_flags.update([
    'WEBSOCKETS'
])
# Responses with this content type are base64-encoded by API Gateway.
app.api.binary_types.append('application/binary')


@app.authorizer(ttl_seconds=300)
def dummy_auth(auth_request):
    # Token-based authorizer: only the literal token 'yes' is accepted.
    if auth_request.token == 'yes':
        return AuthResponse(
            routes=['/builtin-auth',
                    AuthRoute('/fake-profile', methods=['POST'])],
            context={'foo': 'bar'},
            principal_id='foo'
        )
    else:
        raise UnauthorizedError('Authorization failed')


@app.route('/')
def index():
    return {'hello': 'world'}


@app.route('/a/b/c/d/e/f/g')
def nested_route():
    return {'nested': True}


@app.route('/path/{name}')
def supports_path_params(name):
    # Echoes the captured path parameter back to the caller.
    return {'path': name}


@app.route('/singledoc')
def single_doc():
    """Single line docstring."""
    return {'docstring': 'single'}


@app.route('/multidoc')
def multi_doc():
    """Multi-line docstring.

    And here is another line.
    """
    return {'docstring': 'multi'}


@app.route('/post', methods=['POST'])
def supports_only_post():
    return {'success': True}


@app.route('/put', methods=['PUT'])
def supports_only_put():
    return {'success': True}


@app.route('/jsonpost', methods=['POST'])
def supports_post_body_as_json():
    json_body = app.current_request.json_body
    return {'json_body': json_body}


@app.route('/multimethod', methods=['GET', 'POST'])
def multiple_methods():
    return {'method': app.current_request.method}


@app.route('/badrequest')
def bad_request_error():
    raise BadRequestError("Bad request.")


@app.route('/notfound')
def not_found_error():
    raise NotFoundError("Not found")


@app.route('/arbitrary-error')
def raise_arbitrary_error():
    # Deliberately uncaught, to exercise chalice's 500 handling.
    raise TypeError("Uncaught exception")


@app.route('/formencoded', methods=['POST'],
           content_types=['application/x-www-form-urlencoded'])
def form_encoded():
    parsed = parse_qs(app.current_request.raw_body.decode('utf-8'))
    return {
        'parsed': parsed
    }


@app.route('/json-only', content_types=['application/json'])
def json_only():
    return {'success': True}


@app.route('/cors', methods=['GET', 'POST', 'PUT'], cors=True)
def supports_cors():
    # It doesn't really matter what we return here because
    # we'll be checking the response headers to verify CORS support.
    return {'cors': True}


@app.route('/custom_cors', methods=['GET', 'POST', 'PUT'],
           cors=CORSConfig(
               allow_origin='https://foo.example.com',
               allow_headers=['X-Special-Header'],
               max_age=600,
               expose_headers=['X-Special-Header'],
               allow_credentials=True))
def supports_custom_cors():
    return {'cors': True}


@app.route('/todict', methods=['GET'])
def todict():
    # Serializes the entire current request for inspection by tests.
    return app.current_request.to_dict()


@app.route('/multifile')
def multifile():
    # Verifies that vendored chalicelib packages are deployed alongside app.
    from chalicelib import MESSAGE
    return {"message": MESSAGE}


@app.route('/custom-response', methods=['GET'])
def custom_response():
    return Response(
        status_code=204,
        body='',
        headers={
            'Content-Type': 'text/plain',
            'Set-Cookie': ['key=value', 'foo=bar'],
        },
    )


@app.route('/api-key-required', methods=['GET'], api_key_required=True)
def api_key_required():
    return {"success": True}


@app.route('/binary', methods=['POST'],
           content_types=['application/octet-stream'])
def binary_round_trip():
    # Echo the raw binary request body back unchanged.
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)


@app.route('/custom-binary', methods=['POST'],
           content_types=['application/binary'])
def custom_binary_round_trip():
    # Same as /binary but for the custom binary type registered above.
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/binary'
        },
        status_code=200)


@app.route('/get-binary', methods=['GET'])
def METHOD_NAME():
    # Returns a fixed binary payload to exercise binary GET responses.
    return Response(
        body=b'\xDE\xAD\xBE\xEF',
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)


@app.route('/shared', methods=['GET'])
def shared_get():
    return {'method': 'GET'}


@app.route('/shared', methods=['POST'])
def shared_post():
    return {'method': 'POST'}


@app.route('/builtin-auth', authorizer=dummy_auth)
def builtin_auth():
    return {'success': True, 'context': app.current_request.context}


# Testing a common use case where you can have read only GET access
# but you need to be auth'd to POST.
@app.route('/fake-profile', methods=['GET'])
def fake_profile_read_only():
    return {'success': True, 'context': app.current_request.context}


@app.route('/fake-profile', authorizer=dummy_auth,
           methods=['POST'])
def fake_profile_post():
    return {'success': True, 'context': app.current_request.context}


@app.route('/repr-raw-body', methods=['POST'])
def repr_raw_body():
    return {'repr-raw-body': app.current_request.raw_body.decode('utf-8')}


# Record of (connection_id, body) pairs received over the websocket API.
SOCKET_MESSAGES = []


@app.on_ws_connect()
def connect(event):
    pass


@app.on_ws_message()
def message(event):
    # Accumulate every message and echo the full history back to the sender.
    SOCKET_MESSAGES.append((event.connection_id, event.body))
    app.websocket_api.send(event.connection_id, json.dumps(SOCKET_MESSAGES))


@app.on_ws_disconnect()
def disconnect(event):
    pass
299,116
scale meshes
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

import bpy
from bpy.props import EnumProperty, FloatProperty, FloatVectorProperty, BoolProperty
import numpy as np

from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, list_match_func, numpy_list_match_modes, numpy_list_match_func
from sverchok.utils.sv_itertools import recurse_f_level_control


def METHOD_NAME(params, constant, matching_f):
    '''
    Scale vertices away from / towards per-object center points.

    params are verts, centers, scale, strength:
    - verts and centers should be list as [[[float, float, float],],] (Level 3)
    - scale should be list as [[[float, float, float],],] (Level 3)
    - strength should be list as [[float, float, ..], [float, ..], ..] (Level 2)
    desired_levels = [3, 3, 3, 2]  (see ``process`` below)
    constant are the function options (data that does not need to be matched)
    matching_f stands for list matching formula to use
    '''
    result = []
    match_mode, output_numpy = constant
    params = matching_f(params)
    local_match = numpy_list_match_func[match_mode]
    for props in zip(*params):
        verts, centers, scale, strength = local_match([np.array(p) for p in props])
        # Per-vertex affine scale around the matched center:
        # out = center + (vert - center) * scale * strength
        verts_out = centers + (verts - centers) * scale * strength[:, np.newaxis]
        result.append(verts_out if output_numpy else verts_out.tolist())
    return result


class SvScaleNodeMk3(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Scale vertices
    Tooltip: Scales vectors from a center point
    """
    bl_idname = 'SvScaleNodeMk3'
    bl_label = 'Scale'
    bl_icon = 'ORIENTATION_VIEW'
    sv_icon = 'SV_SCALE'

    centers: FloatVectorProperty(
        name='Centers', description='Center of the scaling transform',
        size=3, default=(0, 0, 0),
        update=updateNode)
    scale: FloatVectorProperty(
        name='Scale', description='Axis scaling',
        size=3, default=(1, 1, 1),
        update=updateNode)
    multiplier: FloatProperty(
        name='Multiplier',
        description='Multiplier factor of the Axis scaling',
        default=1.0, update=updateNode)
    list_match: EnumProperty(
        name="List Match",
        description="Behavior on different list lengths",
        items=numpy_list_match_modes, default="REPEAT",
        update=updateNode)
    output_numpy: BoolProperty(
        name='Output NumPy',
        description='Output NumPy arrays',
        default=False, update=updateNode)

    def sv_init(self, context):
        # Socket layout: three vector inputs (with property fallbacks),
        # one scalar strength input, one vector output.
        self.inputs.new('SvVerticesSocket', 'Vertices')
        self.inputs.new('SvVerticesSocket', 'Centers').prop_name = 'centers'
        self.inputs.new('SvVerticesSocket', 'Scale').prop_name = 'scale'
        self.inputs.new('SvStringsSocket', 'Strength').prop_name = 'multiplier'
        self.outputs.new('SvVerticesSocket', 'Vertices')

    def migrate_from(self, old_node):
        # Older node versions stored the multiplier as ``factor_``.
        self.multiplier = old_node.factor_

    def draw_buttons_ext(self, context, layout):
        '''draw buttons on the N-panel'''
        layout.prop(self, 'output_numpy')
        layout.prop(self, 'list_match', expand=False)

    def rclick_menu(self, context, layout):
        # Right-click menu mirrors the N-panel options.
        layout.prop(self, 'output_numpy')
        layout.prop_menu_enum(self, "list_match", text="List Match")

    def process(self):
        inputs, outputs = self.inputs, self.outputs
        # Skip all work when nothing is connected downstream.
        if not outputs[0].is_linked:
            return
        result = []
        params = [si.sv_get(default=[[]], deepcopy=False) for si in inputs]
        matching_f = list_match_func[self.list_match]
        desired_levels = [3, 3, 3, 2]
        ops = [self.list_match, self.output_numpy]
        result = recurse_f_level_control(params, ops, METHOD_NAME, matching_f, desired_levels)
        self.outputs[0].sv_set(result)


classes = [SvScaleNodeMk3]
register, unregister = bpy.utils.register_classes_factory(classes)
299,117
fit model
#!/usr/bin/python

##################
# _fithelpers.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##################
"""Thin convenience wrappers around ``scipy.optimize`` for least-squares and
maximum-likelihood model fitting.

All ``FitModel*`` functions share the same convention: the first returned
element is the fitted parameter vector, and a model function has signature
``fcn(p, *args) -> ndarray``.
"""

import scipy.optimize as optimize
import numpy as np

# Conversion factor between a Gaussian sigma and its full width at half
# maximum: FWHM = sigma * 2*sqrt(2*ln(2)).
FWHM_CONV_FACTOR = 2*np.sqrt(2*np.log(2))

# Default forward-difference step used by the leastsq-based fitters.
EPS_FCN = 1e-4


def missfit(p, fcn, data, *args):
    """Residuals ``data - fcn(p, *args)`` for least-squares fitting.

    Parameters
    ----------
    p : parameter vector passed through to ``fcn``.
    fcn : model function ``fcn(p, *args) -> ndarray``.
    data : measured data, already flattened by the caller.
    *args : extra arguments forwarded to ``fcn``.
    """
    return data - fcn(p, *args).ravel()


def missfit_fixed(p, fit, sp, fcn, data, weights, *args):
    """Weighted residuals with only a subset of parameters free.

    ``fit`` indexes the free entries of the full start-parameter vector
    ``sp``; the remaining entries are held at their start values.
    """
    p_ = sp.copy()
    p_[fit] = p
    return (data.ravel() - fcn(p_, *args).ravel())*weights.ravel()


def weightedMissfit(p, fcn, data, sigmas, *args):
    """Residuals scaled by the per-point errors ``sigmas`` (divides)."""
    mod = fcn(p, *args).ravel()
    return (data - mod)/sigmas


def weightedMissfitF(p, fcn, data, weights, *args):
    """Residuals scaled by precomputed ``weights`` (i.e. ``1/sigmas``)."""
    mod = fcn(p, *args)
    mod = mod.ravel()
    return (data - mod)*weights


def weightedJacF(p, fcn, data, weights, *args):
    """Jacobian of :func:`weightedMissfitF`.

    Uses the model's analytic derivative ``fcn.D`` and scales each row by
    the corresponding weight; negated because the residual is data - model.
    """
    r = weights[:, None]*fcn.D(p, *args)
    return -r


def METHOD_NAME(modelFcn, startParameters, data, *args):
    """Unweighted least-squares fit of ``modelFcn`` to ``data``.

    Returns the full ``scipy.optimize.leastsq`` output tuple; element 0 is
    the fitted parameter vector.
    """
    return optimize.leastsq(missfit, startParameters,
                            (modelFcn, data.ravel()) + args, full_output=1)


def FitModel_(modelFcn, startParameters, data, *args):
    """As :func:`METHOD_NAME`, but with the module-default ``epsfcn`` step."""
    return optimize.leastsq(missfit, startParameters,
                            (modelFcn, data.ravel()) + args, full_output=1,
                            epsfcn=EPS_FCN)


def FitModel_D(modelFcn, startParameters, data, diag, *args):
    """As :func:`FitModel_`, with explicit parameter scaling ``diag``."""
    return optimize.leastsq(missfit, startParameters,
                            (modelFcn, data.ravel()) + args, full_output=1,
                            epsfcn=EPS_FCN, diag=diag)


def FitModel_E(modelFcn, startParameters, data, eps, *args):
    """As :func:`METHOD_NAME`, with a caller-supplied ``epsfcn`` step."""
    return optimize.leastsq(missfit, startParameters,
                            (modelFcn, data.ravel()) + args, full_output=1,
                            epsfcn=eps)


def FitModelWeighted(modelFcn, startParameters, data, sigmas, *args):
    """Least-squares fit with residuals weighted by ``1/sigmas``."""
    return optimize.leastsq(weightedMissfitF, startParameters,
                            (modelFcn, data.ravel(),
                             (1.0/sigmas).astype('f').ravel()) + args,
                            full_output=1)


def FitModelWeighted_(modelFcn, startParameters, data, sigmas, *args):
    """As :func:`FitModelWeighted`, with the module-default ``epsfcn``."""
    return optimize.leastsq(weightedMissfitF, startParameters,
                            (modelFcn, data.ravel(),
                             (1.0/sigmas).astype('f').ravel()) + args,
                            full_output=1, epsfcn=EPS_FCN)


def FitModelWeighted_D(modelFcn, startParameters, data, sigmas, diag, *args):
    """As :func:`FitModelWeighted_`, with parameter scaling ``diag``."""
    return optimize.leastsq(weightedMissfitF, startParameters,
                            (modelFcn, data.ravel(),
                             (1.0/sigmas).astype('f').ravel()) + args,
                            full_output=1, epsfcn=EPS_FCN, diag=diag)


def FitModelWeightedJac(modelFcn, startParameters, data, sigmas, *args):
    """Weighted fit using the model's analytic Jacobian (``modelFcn.D``)."""
    return optimize.leastsq(weightedMissfitF, startParameters,
                            (modelFcn, data.ravel(),
                             (1.0/sigmas).astype('d').ravel()) + args,
                            Dfun=weightedJacF, full_output=1, col_deriv=0)


def FitModelWeightedJac_(modelFcn, startParameters, data, sigmas, *args):
    """As :func:`FitModelWeightedJac`, with the module-default ``epsfcn``."""
    return optimize.leastsq(weightedMissfitF, startParameters,
                            (modelFcn, data.ravel(),
                             (1.0/sigmas).astype('d').ravel()) + args,
                            Dfun=weightedJacF, full_output=1, col_deriv=0,
                            epsfcn=EPS_FCN)


def FitWeightedMisfitFcn(misfitFcn, startParameters, data, sigmas, *args):
    """Weighted fit where the caller supplies the misfit function itself."""
    return optimize.leastsq(misfitFcn, np.array(startParameters),
                            (np.array(data, order='F'),
                             np.array(1.0/sigmas, order='F')) + args,
                            full_output=1)


def poisson_lhood(p, fcn, data, bg, *args):
    """Negative Poisson log-likelihood of ``data`` under model + ``bg``.

    Constant terms (log factorials) are dropped, so only differences of the
    returned value are meaningful.
    """
    mu = (fcn(p, *args) + bg)
    return -(data*np.log(mu) - mu).sum()


def poisson_lhoodJ(p, fcn, data, bg, *args):
    """Forward-difference gradient of :func:`poisson_lhood` w.r.t. ``p``.

    The step for each parameter is ``0.1*p[i] + 1`` (relative step with an
    absolute floor so zero-valued parameters still get a finite step).
    """
    f0 = poisson_lhood(p, fcn, data, bg, *args)
    # Float accumulators: with the original ``0*p`` an integer parameter
    # vector would silently truncate every gradient entry to 0.
    df = np.zeros(len(p), dtype=float)
    pt_base = np.asarray(p, dtype=float)
    for i in range(len(p)):
        dpi = 0.1*p[i] + 1
        pt = pt_base.copy()
        pt[i] += dpi
        ft = poisson_lhood(pt, fcn, data, bg, *args)
        df[i] = (ft - f0)/dpi
    return df


def poisson_lhood2(p, fcn, data, bg, *args):
    """Per-point negative Poisson log-likelihood (un-summed variant)."""
    mu = (fcn(p, *args) + bg)
    return -(data*np.log(mu) - mu)


def FitModelPoisson(modelFcn, startParmeters, data, *args, **kwargs):
    """Maximum-likelihood Poisson fit via Powell's method.

    Keyword-only ``bg`` (default 0) is a constant background added to the
    model. Returns a one-element list for interface parity with the
    ``leastsq``-based fitters.
    """
    bg = kwargs.get('bg', 0)
    return [optimize.fmin_powell(poisson_lhood, startParmeters,
                                 ((modelFcn, data, bg) + args))]


def FitModelPoissonBFGS(modelFcn, startParmeters, data, *args, **kwargs):
    """Maximum-likelihood Poisson fit via BFGS (see :func:`FitModelPoisson`)."""
    bg = kwargs.get('bg', 0)
    return [optimize.fmin_bfgs(poisson_lhood, startParmeters,
                               args=((modelFcn, data, bg) + args),
                               epsilon=0.1)]


def FitModelFixed(modelFcn, startParameters, fitWhich, data, *args, **kwargs):
    """Least-squares fit with a boolean mask ``fitWhich`` of free parameters.

    Fixed parameters keep their start values; optional ``weights`` scales
    the residuals and optional ``eps`` overrides the finite-difference step.
    Returns a one-element list containing the full parameter vector.
    """
    eps = kwargs.get('eps', EPS_FCN)
    weights = kwargs.get('weights', np.array(1.0))
    startParameters = np.array(startParameters)
    fitWhich = np.where(fitWhich)
    p = startParameters[fitWhich]
    res = optimize.leastsq(missfit_fixed, p,
                           (fitWhich, startParameters, modelFcn, data,
                            weights) + args,
                           full_output=1, epsfcn=eps)[0]
    out = startParameters.copy()
    out[fitWhich] = res
    return [out]
299,118
get last modified
""" Test all things related to the ``jedi.cache`` module. """ import os import os.path import pytest import time from parso.cache import (_CACHED_FILE_MAXIMUM_SURVIVAL, _VERSION_TAG, _get_cache_clear_lock, _get_hashed_path, _load_from_file_system, _NodeCacheItem, _remove_cache_and_update_lock, _save_to_file_system, load_module, parser_cache, try_to_save_module) from parso._compatibility import is_pypy, PermissionError from parso import load_grammar from parso import cache from parso import file_io from parso import parse skip_pypy = pytest.mark.skipif( is_pypy, reason="pickling in pypy is slow, since we don't pickle," "we never go into path of auto-collecting garbage" ) @pytest.fixture() def isolated_parso_cache(monkeypatch, tmpdir): """Set `parso.cache._default_cache_path` to a temporary directory during the test. """ cache_path = str(os.path.join(str(tmpdir), "__parso_cache")) monkeypatch.setattr(cache, '_default_cache_path', cache_path) monkeypatch.setattr(cache, '_get_default_cache_path', lambda *args, **kwargs: cache_path) return cache_path @pytest.mark.skip("SUBBOTNIK-2721 Disable load cache from disk") def test_modulepickling_change_cache_dir(tmpdir): """ ParserPickling should not save old cache when cache_directory is changed. 
See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_ """ dir_1 = str(tmpdir.mkdir('first')) dir_2 = str(tmpdir.mkdir('second')) item_1 = _NodeCacheItem('bla', []) item_2 = _NodeCacheItem('bla', []) path_1 = 'fake path 1' path_2 = 'fake path 2' hashed_grammar = load_grammar()._hashed _save_to_file_system(hashed_grammar, path_1, item_1, cache_path=dir_1) parser_cache.clear() cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_1) assert cached == item_1.node _save_to_file_system(hashed_grammar, path_2, item_2, cache_path=dir_2) cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_2) assert cached is None def load_stored_item(hashed_grammar, path, item, cache_path): """Load `item` stored at `path` in `cache`.""" item = _load_from_file_system(hashed_grammar, path, item.change_time - 1, cache_path) return item @pytest.mark.usefixtures("isolated_parso_cache") def test_modulepickling_simulate_deleted_cache(tmpdir): """ Tests loading from a cache file after it is deleted. According to macOS `dev docs`__, Note that the system may delete the Caches/ directory to free up disk space, so your app must be able to re-create or download these files as needed. It is possible that other supported platforms treat cache files the same way. 
__ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html """ grammar = load_grammar() module = 'fake parser' # Create the file path = tmpdir.dirname + '/some_path' with open(path, 'w'): pass io = file_io.FileIO(path) try_to_save_module(grammar._hashed, io, module, lines=[]) assert load_module(grammar._hashed, io) == module os.unlink(_get_hashed_path(grammar._hashed, path)) parser_cache.clear() cached2 = load_module(grammar._hashed, io) assert cached2 is None @pytest.mark.skip def test_cache_limit(): def cache_size(): return sum(len(v) for v in parser_cache.values()) try: parser_cache.clear() future_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() + 10e6) old_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() - 10e4) parser_cache['some_hash_old'] = { '/path/%s' % i: old_node_cache_item for i in range(300) } parser_cache['some_hash_new'] = { '/path/%s' % i: future_node_cache_item for i in range(300) } assert cache_size() == 600 parse('somecode', cache=True, path='/path/somepath') assert cache_size() == 301 finally: parser_cache.clear() class _FixedTimeFileIO(file_io.KnownContentFileIO): def __init__(self, path, content, last_modified): super(_FixedTimeFileIO, self).__init__(path, content) self._last_modified = last_modified def METHOD_NAME(self): return self._last_modified @pytest.mark.skip @pytest.mark.parametrize('diff_cache', [False, True]) @pytest.mark.parametrize('use_file_io', [False, True]) def test_cache_last_used_update(diff_cache, use_file_io): p = '/path/last-used' parser_cache.clear() # Clear, because then it's easier to find stuff. 
parse('somecode', cache=True, path=p) node_cache_item = next(iter(parser_cache.values()))[p] now = time.time() assert node_cache_item.last_used < now if use_file_io: f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10) parse(file_io=f, cache=True, diff_cache=diff_cache) else: parse('somecode2', cache=True, path=p, diff_cache=diff_cache) node_cache_item = next(iter(parser_cache.values()))[p] assert now < node_cache_item.last_used < time.time() @skip_pypy def test_inactive_cache(tmpdir, isolated_parso_cache): parser_cache.clear() test_subjects = "abcdef" for path in test_subjects: parse('somecode', cache=True, path=os.path.join(str(tmpdir), path)) raw_cache_path = os.path.join(isolated_parso_cache, _VERSION_TAG) assert os.path.exists(raw_cache_path) paths = os.listdir(raw_cache_path) a_while_ago = time.time() - _CACHED_FILE_MAXIMUM_SURVIVAL old_paths = set() for path in paths[:len(test_subjects) // 2]: # make certain number of paths old os.utime(os.path.join(raw_cache_path, path), (a_while_ago, a_while_ago)) old_paths.add(path) # nothing should be cleared while the lock is on assert os.path.exists(_get_cache_clear_lock().path) _remove_cache_and_update_lock() # it shouldn't clear anything assert len(os.listdir(raw_cache_path)) == len(test_subjects) assert old_paths.issubset(os.listdir(raw_cache_path)) os.utime(_get_cache_clear_lock().path, (a_while_ago, a_while_ago)) _remove_cache_and_update_lock() assert len(os.listdir(raw_cache_path)) == len(test_subjects) // 2 assert not old_paths.intersection(os.listdir(raw_cache_path)) @pytest.mark.skip @skip_pypy def test_permission_error(monkeypatch): def save(*args, **kwargs): was_called[0] = True # Python 2... Use nonlocal instead raise PermissionError was_called = [False] monkeypatch.setattr(cache, '_save_to_file_system', save) with pytest.warns(Warning): parse(path=__file__, cache=True, diff_cache=True) assert was_called[0]
299,119
build generator
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq import utils
from fairseq.data import LanguagePairDataset

from . import register_task
from .translation import TranslationTask, load_langpair_dataset


@register_task("translation_from_pretrained_bart")
class TranslationFromPretrainedBARTTask(TranslationTask):
    """
    Translate from source language to target language with a model initialized
    with a multilingual pretrain.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        TranslationTask.add_args(parser)
        parser.add_argument('--langs', type=str, metavar='LANG',
                            help='comma-separated list of monolingual language, '
                                 'for example, "en,de,fr". These should match the '
                                 'langs from pretraining (and be in the same order). '
                                 'You should always add all pretraining language idx '
                                 'during finetuning.')
        parser.add_argument('--prepend-bos', action='store_true',
                            help='prepend bos token to each sentence, which matches '
                                 'mBART pretraining')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args, src_dict, tgt_dict)
        self.langs = args.langs.split(",")
        # Extend both dictionaries with the per-language tokens (e.g. "[en]")
        # and "<mask>" so indices line up with the multilingual pretrain.
        for d in [src_dict, tgt_dict]:
            for l in self.langs:
                d.add_symbol("[{}]".format(l))
            d.add_symbol("<mask>")

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        # Data can be sharded over several paths; pick one shard per epoch.
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path,
            split,
            src,
            self.src_dict,
            tgt,
            self.tgt_dict,
            combine=combine,
            dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=getattr(self.args, "max_source_positions", 1024),
            max_target_positions=getattr(self.args, "max_target_positions", 1024),
            load_alignments=self.args.load_alignments,
            prepend_bos=getattr(self.args, "prepend_bos", False),
            # Append the language-id token to each sentence (mBART format).
            append_source_id=True,
        )

    def METHOD_NAME(self, models, args, **unused):
        """Build a scorer (``--score-reference``) or a beam-search generator.

        Both use the target-language token (e.g. "[en]") as EOS, matching the
        sentence format produced by ``append_source_id=True`` above.
        """
        if getattr(args, "score_reference", False):
            from fairseq.sequence_scorer import SequenceScorer

            return SequenceScorer(
                self.target_dictionary,
                eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
            )
        else:
            from fairseq.sequence_generator import SequenceGenerator

            return SequenceGenerator(
                models,
                self.target_dictionary,
                beam_size=getattr(args, "beam", 5),
                max_len_a=getattr(args, "max_len_a", 0),
                max_len_b=getattr(args, "max_len_b", 200),
                min_len=getattr(args, "min_len", 1),
                normalize_scores=(not getattr(args, "unnormalized", False)),
                len_penalty=getattr(args, "lenpen", 1),
                unk_penalty=getattr(args, "unkpen", 0),
                temperature=getattr(args, "temperature", 1.0),
                match_source_len=getattr(args, "match_source_len", False),
                no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
                eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
            )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw token tensors for inference.

        Appends the source-language token to every sentence so inference
        inputs match the training-time format.
        """
        src_lang_id = self.source_dictionary.index("[{}]".format(self.args.source_lang))
        source_tokens = []
        for s_t in src_tokens:
            s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
            source_tokens.append(s_t)
        dataset = LanguagePairDataset(
            source_tokens,
            src_lengths,
            self.source_dictionary,
            tgt_dict=self.target_dictionary,
            constraints=constraints,
        )
        return dataset
299,120
main
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later

r"""
This script exports permutations of key-maps with different settings modified.

Useful for checking changes intended for one configuration don't impact others accidentally.

./blender.bin -b --factory-startup \
    --python tools/utils/blender_keyconfig_export_permutations.py -- \
    --preset=Blender \
    --output-dir=./output \
    --keymap-prefs=select_mouse:rmb_action

./blender.bin -b --factory-startup \
    --python tools/utils/blender_keyconfig_export_permutations.py -- \
    --preset=Blender_27x \
    --output-dir=output \
    --keymap-prefs="select_mouse"

The preferences setting: ``select_mouse:rmb_action`` expands into:

config = [
    ("select_mouse", ('LEFT', 'RIGHT')),
    ("rmb_action", ('TWEAK', 'FALLBACK_TOOL')),
]
"""

import os
import sys


def argparse_create():
    """Build the command line argument parser for this script."""
    import argparse

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--preset",
        dest="preset",
        default="Blender",
        metavar='PRESET',
        type=str,
        help="The name of the preset to export",
        required=False,
    )
    parser.add_argument(
        "--output-dir",
        dest="output_dir",
        default=".",
        metavar='OUTPUT_DIR',
        type=str,
        help="The directory to output to.",
        required=False,
    )
    parser.add_argument(
        "--keymap-prefs",
        dest="keymap_prefs",
        default="select_mouse:rmb_action",
        metavar='KEYMAP_PREFS',
        type=str,
        help=(
            "Colon separated list of attributes to generate key-map configuration permutations. "
            "WARNING: as all combinations are tested, their number increases exponentially!"
        ),
        required=False,
    )
    return parser


def permutations_from_attrs_impl(config, permutation, index):
    """Recursive worker for `permutations_from_attrs`.

    Fills `permutation[index:]` with every combination of the remaining
    attributes, yielding a complete tuple at each leaf.
    """
    index_next = index + 1
    attr, values = config[index]
    for val in values:
        permutation[index] = (attr, val)
        if index_next == len(config):
            yield tuple(permutation)
        else:
            # Keep walking down the list of permutations.
            yield from permutations_from_attrs_impl(config, permutation, index_next)
    # Not necessary, just ensure stale values aren't used.
    permutation[index] = None


def permutations_from_attrs(config):
    """
    Take a list of attributes and possible values:

    config = [
        ("select_mouse", ('LEFT', 'RIGHT')),
        ("rmb_action", ('TWEAK', 'FALLBACK_TOOL')),
    ]

    Yielding all permutations:

    [("select_mouse", 'LEFT'), ("rmb_action", 'TWEAK')],
    [("select_mouse", 'LEFT'), ("rmb_action", 'FALLBACK_TOOL')],
    ... etc ...
    """
    if not config:
        return ()
    permutation = [None] * len(config)
    result = list(permutations_from_attrs_impl(config, permutation, 0))
    # The worker resets each slot on the way out, so the scratch list must
    # be all-None again here.
    assert permutation == ([None] * len(config))
    return result


def permutation_as_filename(preset, values):
    """
    Takes a configuration, eg:

    [("select_mouse", 'LEFT'), ("rmb_action", 'TWEAK')]

    And returns a filename compatible path:
    """
    from urllib.parse import quote

    if not values:
        return quote(preset)
    return quote(
        preset + "_" + ".".join([
            "-".join((str(key), str(val)))
            for key, val in values
        ]),
        # Needed so forward slashes aren't included in the resulting name.
        safe="",
    )


def METHOD_NAME():
    """Entry point: export one key-config file per preference permutation."""
    # Arguments after "--" are for this script, not for Blender itself.
    args = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    try:
        import bpy
    except ImportError:
        # Run outside of Blender, just show "--help".
        bpy = None
        args.insert(0, "--help")

    args = argparse_create().parse_args(args)
    if bpy is None:
        return

    from bpy import context

    preset = args.preset
    output_dir = args.output_dir

    os.makedirs(output_dir, exist_ok=True)

    # Needed for background mode.
    preset_filepath = bpy.utils.preset_find(preset, preset_path="keyconfig")
    bpy.ops.preferences.keyconfig_activate(filepath=preset_filepath)

    # Key-map preferences..
    km_prefs = context.window_manager.keyconfigs.active.preferences

    config = []
    # Use RNA introspection:
    if args.keymap_prefs:
        for attr in args.keymap_prefs.split(":"):
            if not hasattr(km_prefs, attr):
                print(f"KeyMap preferences does not have attribute: {attr:s}")
                sys.exit(1)

            prop_def = km_prefs.rna_type.properties.get(attr)
            match prop_def.type:
                case 'ENUM':
                    value = tuple(val.identifier for val in prop_def.enum_items)
                case 'BOOLEAN':
                    value = (True, False)
                case _ as prop_def_type:
                    raise Exception(f"Unhandled attribute type {prop_def_type:s}")
            config.append((attr, value))
    config = tuple(config)

    for attr_permutation in (permutations_from_attrs(config) or ((),)):

        # Reload and set.
        if attr_permutation is not None:
            km_prefs = context.window_manager.keyconfigs.active.preferences
            for attr, value in attr_permutation:
                setattr(km_prefs, attr, value)

        # Re-activate after setting preferences, tsk, ideally this shouldn't be necessary.
        bpy.ops.preferences.keyconfig_activate(filepath=preset_filepath)

        filepath = os.path.join(output_dir, permutation_as_filename(preset, attr_permutation) + ".py")

        print("Writing:", filepath)
        bpy.ops.preferences.keyconfig_export(filepath=filepath, all=True)

    sys.exit()


if __name__ == "__main__":
    METHOD_NAME()
299,121
present
""" Management of OpenStack Neutron Subnets ========================================= .. versionadded:: 2018.3.0 :depends: shade :configuration: see :py:mod:`salt.modules.neutronng` for setup instructions Example States .. code-block:: yaml create subnet: neutron_subnet.present: - name: subnet1 - network_name_or_id: network1 - cidr: 192.168.199.0/24 delete subnet: neutron_subnet.absent: - name: subnet2 create subnet with optional params: neutron_subnet.present: - name: subnet1 - network_name_or_id: network1 - enable_dhcp: True - cidr: 192.168.199.0/24 - allocation_pools: - start: 192.168.199.5 end: 192.168.199.250 - host_routes: - destination: 192.168..0.0/24 nexthop: 192.168.0.1 - gateway_ip: 192.168.199.1 - dns_nameservers: - 8.8.8.8 - 8.8.8.7 create ipv6 subnet: neutron_subnet.present: - name: v6subnet1 - network_name_or_id: network1 - ip_version: 6 """ __virtualname__ = "neutron_subnet" def __virtual__(): if "neutronng.list_subnets" in __salt__: return __virtualname__ return ( False, "The neutronng execution module failed to load: shade python module is not available", ) def METHOD_NAME(name, auth=None, **kwargs): """ Ensure a subnet exists and is up-to-date name Name of the subnet network_name_or_id The unique name or ID of the attached network. If a non-unique name is supplied, an exception is raised. allocation_pools A list of dictionaries of the start and end addresses for the allocation pools gateway_ip The gateway IP address. dns_nameservers A list of DNS name servers for the subnet. host_routes A list of host route dictionaries for the subnet. ipv6_ra_mode IPv6 Router Advertisement mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’. ipv6_address_mode IPv6 address mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’. 
""" ret = {"name": name, "changes": {}, "result": True, "comment": ""} kwargs = __utils__["args.clean_kwargs"](**kwargs) __salt__["neutronng.setup_clouds"](auth) kwargs["subnet_name"] = name subnet = __salt__["neutronng.subnet_get"](name=name) if subnet is None: if __opts__["test"]: ret["result"] = None ret["changes"] = kwargs ret["comment"] = "Subnet will be created." return ret new_subnet = __salt__["neutronng.subnet_create"](**kwargs) ret["changes"] = new_subnet ret["comment"] = "Created subnet" return ret changes = __salt__["neutronng.compare_changes"](subnet, **kwargs) if changes: if __opts__["test"] is True: ret["result"] = None ret["changes"] = changes ret["comment"] = "Project will be updated." return ret # update_subnet does not support changing cidr, # so we have to delete and recreate the subnet in this case. if "cidr" in changes or "tenant_id" in changes: __salt__["neutronng.subnet_delete"](name=name) new_subnet = __salt__["neutronng.subnet_create"](**kwargs) ret["changes"] = new_subnet ret["comment"] = "Deleted and recreated subnet" return ret __salt__["neutronng.subnet_update"](**kwargs) ret["changes"].update(changes) ret["comment"] = "Updated subnet" return ret def absent(name, auth=None): """ Ensure a subnet does not exists name Name of the subnet """ ret = {"name": name, "changes": {}, "result": True, "comment": ""} __salt__["neutronng.setup_clouds"](auth) subnet = __salt__["neutronng.subnet_get"](name=name) if subnet: if __opts__["test"] is True: ret["result"] = None ret["changes"] = {"id": subnet.id} ret["comment"] = "Project will be deleted." return ret __salt__["neutronng.subnet_delete"](name=subnet) ret["changes"]["id"] = name ret["comment"] = "Deleted subnet" return ret
299,122
test budgets children
"""Fava's budget syntax.""" from __future__ import annotations from datetime import date from decimal import Decimal from typing import TYPE_CHECKING from fava.core.budgets import calculate_budget from fava.core.budgets import calculate_budget_children from fava.core.budgets import parse_budgets if TYPE_CHECKING: # pragma: no cover from fava.beans.abc import Custom from fava.core.budgets import BudgetDict def test_budgets(load_doc_custom_entries: list[Custom]) -> None: """ 2016-01-01 custom "budget" Expenses:Groceries "weekly" 100.00 CNY 2016-06-01 custom "budget" Expenses:Groceries "weekly" 10.00 EUR 2016-06-01 custom "budget" Expenses:Groceries "asdfasdf" 10.00 EUR 2016-06-01 custom "budget" Expenses:Groceries 10.00 EUR """ budgets, errors = parse_budgets(load_doc_custom_entries) assert len(errors) == 2 empty = calculate_budget( budgets, "Expenses", date(2016, 6, 1), date(2016, 6, 8), ) assert empty == {} budgets_ = calculate_budget( budgets, "Expenses:Groceries", date(2016, 6, 1), date(2016, 6, 8), ) assert budgets_["CNY"] == Decimal("100") assert budgets_["EUR"] == Decimal("10") def test_budgets_daily(budgets_doc: BudgetDict) -> None: """ 2016-05-01 custom "budget" Expenses:Books "daily" 2.5 EUR""" assert "EUR" not in calculate_budget( budgets_doc, "Expenses:Books", date(2010, 2, 1), date(2010, 2, 2), ) for start, end, num in [ (date(2016, 5, 1), date(2016, 5, 2), Decimal("2.5")), (date(2016, 5, 1), date(2016, 5, 3), Decimal("5.0")), (date(2016, 9, 2), date(2016, 9, 3), Decimal("2.5")), (date(2018, 12, 31), date(2019, 1, 1), Decimal("2.5")), ]: budget = calculate_budget(budgets_doc, "Expenses:Books", start, end) assert budget["EUR"] == num def test_budgets_weekly(budgets_doc: BudgetDict) -> None: """ 2016-05-01 custom "budget" Expenses:Books "weekly" 21 EUR""" for start, end, num in [ (date(2016, 5, 1), date(2016, 5, 2), Decimal("21") / 7), (date(2016, 9, 1), date(2016, 9, 2), Decimal("21") / 7), ]: budget = calculate_budget(budgets_doc, "Expenses:Books", 
start, end) assert budget["EUR"] == num def test_budgets_monthly(budgets_doc: BudgetDict) -> None: """ 2014-05-01 custom "budget" Expenses:Books "monthly" 100 EUR""" for start, end, num in [ (date(2016, 5, 1), date(2016, 5, 2), Decimal("100") / 31), (date(2016, 2, 1), date(2016, 2, 2), Decimal("100") / 29), (date(2018, 3, 31), date(2018, 4, 1), Decimal("100") / 31), ]: budget = calculate_budget(budgets_doc, "Expenses:Books", start, end) assert budget["EUR"] == num def test_budgets_doc_quarterly(budgets_doc: BudgetDict) -> None: """ 2014-05-01 custom "budget" Expenses:Books "quarterly" 123456.7 EUR""" for start, end, num in [ (date(2016, 5, 1), date(2016, 5, 2), Decimal("123456.7") / 91), (date(2016, 8, 15), date(2016, 8, 16), Decimal("123456.7") / 92), ]: budget = calculate_budget(budgets_doc, "Expenses:Books", start, end) assert budget["EUR"] == num def test_budgets_doc_yearly(budgets_doc: BudgetDict) -> None: """ 2010-01-01 custom "budget" Expenses:Books "yearly" 99999.87 EUR""" budget = calculate_budget( budgets_doc, "Expenses:Books", date(2011, 2, 1), date(2011, 2, 2), ) assert budget["EUR"] == Decimal("99999.87") / 365 def METHOD_NAME(budgets_doc: BudgetDict) -> None: """ 2017-01-01 custom "budget" Expenses:Books "daily" 10.00 USD 2017-01-01 custom "budget" Expenses:Books:Notebooks "daily" 2.00 USD""" budget = calculate_budget_children( budgets_doc, "Expenses", date(2017, 1, 1), date(2017, 1, 2), ) assert budget["USD"] == Decimal("12.00") budget = calculate_budget_children( budgets_doc, "Expenses:Books", date(2017, 1, 1), date(2017, 1, 2), ) assert budget["USD"] == Decimal("12.00") budget = calculate_budget_children( budgets_doc, "Expenses:Books:Notebooks", date(2017, 1, 1), date(2017, 1, 2), ) assert budget["USD"] == Decimal("2.00")
299,123
rbac
import importlib
import json
import os
from subprocess import Popen, PIPE

import daemon.handler
import daemon.METHOD_NAME
import daemon.shared as shared
import core.exceptions as ex
from env import Env
from utilities.naming import split_path
from utilities.render.command import format_command
from utilities.string import bdecode

# Actions readable with the "guest" role (read-only queries).
GUEST_ACTIONS = (
    "eval",
    "get",
    "keys",
    "print_config_mtime",
)

# Actions allowed with the "operator" role (day-to-day service operations).
OPERATOR_ACTIONS = (
    "clear",
    "disable",
    "enable",
    "freeze",
    "push_status",
    "push_resinfo",
    "push_config",
    "push_encap_config",
    "presync",
    "prstatus",
    "resource_monitor",
    "restart",
    "resync",
    "run",
    "scale",
    "snooze",
    "start",
    "startstandby",
    "status",
    "stop",
    "stopstandby",
    "thaw",
    "unsnooze",
)

# Actions requiring the "admin" role (lifecycle / destructive operations).
# Anything not listed in one of these tuples requires "root".
ADMIN_ACTIONS = (
    "add",
    "boot",
    "decode",
    "delete",
    "gen_cert",
    "install",
    "pg_kill",
    "pg_freeze",
    "pg_thaw",
    "provision",
    "run",
    "set_provisioned",
    "set_unprovisioned",
    "shutdown",
    "unprovision",
    "unset",
)


class Handler(daemon.handler.BaseHandler, daemon.METHOD_NAME.ObjectCreateMixin):
    """
    Execute an object instance action.
    """
    routes = (
        ("POST", "object_action"),
        ("POST", "service_action"),
        (None, "service_action"),
    )
    prototype = [
        {
            "name": "path",
            "desc": "The path of the object to execute the action on.",
            "required": True,
            "format": "object_path",
        },
        {
            "name": "sync",
            "desc": "Execute synchronously and return the outputs.",
            "required": False,
            "default": True,
            "format": "boolean",
        },
        {
            "name": "cmd",
            "desc": "The command vector.",
            "required": False,
            "format": "list",
            "deprecated": True,
        },
        {
            "name": "action",
            "desc": "The action to execute.",
            "required": False,
            "format": "string",
        },
        {
            "name": "options",
            "desc": "The action options.",
            "required": False,
            "format": "dict",
            "default": {},
        },
    ]
    access = "custom"

    def METHOD_NAME(self, nodename, thr=None, **kwargs):
        """Access-control check: map the requested action to the minimum
        role it needs, and raise 403 via rbac_requires() on denial.
        """
        options = self.parse_options(kwargs)
        name, namespace, kind = split_path(options.path)
        # Map the action to the least-privileged role that may run it.
        if options.action in GUEST_ACTIONS:
            role = "guest"
        elif options.action in OPERATOR_ACTIONS:
            role = "operator"
        elif options.action in ADMIN_ACTIONS:
            role = "admin"
        else:
            role = "root"
        if options.action == "set":
            # "set" is special-cased: simulate the resulting config and run
            # the object-create RBAC rules against it, so a "set" cannot be
            # used to smuggle in settings the requester could not create.
            # load current config
            try:
                cf = shared.SERVICES[options.path].print_config_data()
            except Exception:
                cf = {}
            # purge unwanted sections
            try:
                del cf["metadata"]
            except Exception:
                pass
            # merge changes in current config
            for buff in options.options.get("kw", []):
                k, v = buff.split("=", 1)
                # "+=" / "-=" operators: strip the operator char from the key.
                if k[-1] in ("+", "-"):
                    k = k[:-1]
                k = k.strip()
                try:
                    s, k = k.split(".", 1)
                except Exception:
                    s = "DEFAULT"
                if s not in cf:
                    cf[s] = {}
                cf[s][k] = v
            # apply object create rbac to the amended config
            payload = {options.path: cf}
            errors = self.rbac_create_data(payload=payload, thr=thr, **kwargs)
            if errors:
                raise ex.HTTP(403, errors)
        else:
            thr.rbac_requires(roles=[role], namespaces=[namespace], **kwargs)
        if options.cmd:
            # compat, requires root
            kwargs["roles"] = ["root"]
            thr.rbac_requires(**kwargs)

    def action(self, nodename, thr=None, **kwargs):
        """Run the requested object action as an "om svc -s <path> ..."
        subprocess, either synchronously (returning its parsed output) or
        in the background (returning the pid and session id).
        """
        options = self.parse_options(kwargs)
        name, namespace, kind = split_path(options.path)
        if thr.get_service(options.path) is None and options.action not in ("create", "deploy"):
            thr.log_request("service action (%s not installed)" % options.path, nodename, lvl="warning", **kwargs)
            raise ex.HTTP(404, "%s not found" % options.path)
        if not options.action and not options.cmd:
            thr.log_request("service action (no action set)", nodename, lvl="error", **kwargs)
            raise ex.HTTP(400, "action not set")
        # Strip options that must not be forwarded to the local command.
        for opt in ("node", "daemon", "svcs", "service", "s", "parm_svcs", "local", "id"):
            if opt in options.options:
                del options.options[opt]
        # Rename request options to their command-line equivalents.
        for opt, ropt in (("jsonpath_filter", "filter"),):
            if opt in options.options:
                options.options[ropt] = options.options[opt]
                del options.options[opt]
        options.options["local"] = True
        if options.cmd:
            # Deprecated raw-command path (root only, see METHOD_NAME above).
            cmd = [options.cmd]
        else:
            cmd = format_command(kind, options.action, options.options or {})
        fullcmd = Env.om + ["svc", "-s", options.path] + cmd
        thr.log_request("run 'om %s %s'" % (options.path, " ".join(cmd)), nodename, **kwargs)
        if options.sync:
            proc = Popen(fullcmd, stdout=PIPE, stderr=PIPE, stdin=None, close_fds=True)
            out, err = proc.communicate()
            try:
                # Prefer the command's own JSON output when it produces any.
                result = json.loads(out)
            except Exception:
                result = {
                    "status": 0,
                    "data": {
                        "out": bdecode(out),
                        "err": bdecode(err),
                        "ret": proc.returncode,
                    },
                }
        else:
            import uuid
            # Tag the detached process with a session id so it can be
            # tracked by the daemon.
            session_id = str(uuid.uuid4())
            env = {}
            env.update(os.environ)
            env["OSVC_PARENT_SESSION_UUID"] = session_id
            proc = Popen(fullcmd, stdin=None, close_fds=True, env=env)
            thr.parent.push_proc(proc, cmd=fullcmd, session_id=session_id)
            result = {
                "status": 0,
                "data": {
                    "pid": proc.pid,
                    "session_id": session_id,
                },
                "info": "started %s action %s" % (options.path, " ".join(cmd)),
            }
        return result
299,124
internal paging
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError

from .. import models


class LocationBasedPerformanceTierOperations(object):
    """LocationBasedPerformanceTierOperations operations.

    NOTE: this class is auto-generated by AutoRest — do not hand-edit logic;
    changes will be lost on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the request. Constant value: "2018-06-01-preview".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-06-01-preview"

        self.config = config

    def list(
            self, location_name, custom_headers=None, raw=False, **operation_config):
        """List all the performance tiers at specified location in a given
        subscription.

        :param location_name: The name of the location.
        :type location_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PerformanceTierProperties
        :rtype:
         ~azure.mgmt.rdbms.mariadb.models.PerformanceTierPropertiesPaged[~azure.mgmt.rdbms.mariadb.models.PerformanceTierProperties]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher passed to the Paged iterator: fetches either the first
        # page (building the URL from the operation metadata) or a follow-up
        # page from the service-provided next_link.
        def METHOD_NAME(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'locationName': self._serialize.url("location_name", location_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.PerformanceTierPropertiesPaged(METHOD_NAME, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.PerformanceTierPropertiesPaged(METHOD_NAME, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBforMariaDB/locations/{locationName}/performanceTiers'}
299,125
delete
# Copyright (C) 2016-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

import errno
import hashlib
import logging
import os
import shutil
import tempfile
from pathlib import PureWindowsPath

from lib.cuckoo.common.config import Config
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.misc import getuser
from lib.cuckoo.common.path_utils import path_exists, path_mkdir

cuckoo_conf = Config()
log = logging.getLogger()


def temppath():
    """Return the true temporary directory."""
    tmppath = cuckoo_conf.cuckoo.tmppath
    # Backwards compatibility with older configuration.
    if not tmppath or tmppath == "/tmp":
        return os.path.join(tempfile.gettempdir(), f"cuckoo-tmp-{getuser()}")
    return tmppath


def open_exclusive(path, mode="xb"):
    """Open a file with O_EXCL, failing if it already exists [In Python 3, use open with x]"""
    fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
    try:
        return os.fdopen(fd, mode)
    except OSError as e:
        # BUG FIX: previously `log.error(e, "...")` passed the exception as the
        # logging format string with the message as an unused %-argument,
        # which breaks log record formatting. Use lazy %-args correctly.
        log.error("You might need to add whitelist folder in resultserver.py: %s", e)
        os.close(fd)
        raise


def open_inclusive(path, mode="ab"):
    """Open a file for appending, creating it if it does not exist."""
    fd = os.open(path, os.O_CREAT | os.O_APPEND | os.O_WRONLY, 0o644)
    try:
        return os.fdopen(fd, mode)
    except OSError as e:
        # BUG FIX: same logging-argument misuse as in open_exclusive().
        log.error("You might need to add whitelist folder in resultserver.py: %s", e)
        os.close(fd)
        raise


class Storage:
    @staticmethod
    def get_filename_from_path(path):
        """Cross-platform filename extraction from path.
        @param path: file path.
        @return: filename.
        """
        # PureWindowsPath handles both "\\" and "/" separators, so this also
        # works for Windows-style paths seen in analyzed samples.
        return PureWindowsPath(path).name


class Folders(Storage):
    @staticmethod
    def create(root=".", folders=None):
        """Create a directory or multiple directories.
        @param root: root path.
        @param folders: folders list to be created.
        @raise CuckooOperationalError: if fails to create folder.
        If folders is None, we try to create the folder provided by `root`.
        """
        if isinstance(root, (tuple, list)):
            root = os.path.join(*root)
        if folders is None:
            folders = [""]
        elif isinstance(folders, str):
            folders = (folders,)

        for folder in folders:
            folder_path = os.path.join(root, folder)
            if not os.path.isdir(folder_path):
                try:
                    path_mkdir(folder_path)
                except OSError as e:
                    if e.errno == errno.EEXIST:
                        # Race condition, ignore
                        continue
                    raise CuckooOperationalError(f"Unable to create folder: {folder_path}") from e

    @staticmethod
    def copy(src, dest):
        """Recursively copy `src` to `dest`, replacing any existing tree."""
        if path_exists(dest):
            shutil.rmtree(dest)
        shutil.copytree(src, dest)

    @staticmethod
    def create_temp(path=None):
        """Create and return a fresh temporary directory."""
        return tempfile.mkdtemp(dir=path or temppath())

    @staticmethod
    def METHOD_NAME(*folder):
        """Delete a folder and all its subdirectories.
        @param folder: path or components to path to delete.
        @raise CuckooOperationalError: if fails to delete folder.
        """
        folder = os.path.join(*folder)
        if path_exists(folder):
            try:
                shutil.rmtree(folder)
            except OSError as e:
                raise CuckooOperationalError(f"Unable to delete folder: {folder}") from e


class Files(Storage):
    @staticmethod
    def temp_put(content, path=None):
        """Store a temporary file or files.
        @param content: the content of this file
        @param path: directory path to store the file
        @return: full path to the temporary file
        """
        fd, filepath = tempfile.mkstemp(prefix="upload_", dir=path or temppath())
        # `content` may be raw bytes or a file-like object; stream the latter.
        if hasattr(content, "read"):
            chunk = content.read(1024)
            while chunk:
                os.write(fd, chunk)
                chunk = content.read(1024)
        else:
            os.write(fd, content)
        os.close(fd)
        return filepath

    @staticmethod
    def temp_named_put(content, filename, path=None):
        """Store a named temporary file.
        @param content: the content of this file
        @param filename: filename that the file should have
        @param path: directory path to store the file
        @return: full path to the temporary file
        """
        # Sanitize the filename first so callers can't escape the directory.
        filename = Storage.get_filename_from_path(filename)
        dirpath = tempfile.mkdtemp(dir=path or temppath())
        Files.create(dirpath, filename, content)
        return os.path.join(dirpath, filename)

    @staticmethod
    def create(root, filename, content):
        """Write `content` (bytes or a file-like object) to root/filename.
        @return: full path to the created file
        """
        if isinstance(root, (tuple, list)):
            root = os.path.join(*root)
        filepath = os.path.join(root, filename)
        with open(filepath, "wb") as f:
            if hasattr(content, "read"):
                chunk = content.read(1024 * 1024)
                while chunk:
                    f.write(chunk)
                    chunk = content.read(1024 * 1024)
            else:
                f.write(content)
        return filepath

    @staticmethod
    def copy(path_target, path_dest):
        """Copy a file. The destination may be a directory.
        @param path_target: source file path
        @param path_dest: destination path (file or directory)
        @return: path to the file or directory
        """
        shutil.copy(src=path_target, dst=path_dest)
        return os.path.join(path_dest, os.path.basename(path_target))

    @staticmethod
    def hash_file(method, filepath):
        """Calculate a hash on a file by path.
        @param method: callable hashing method
        @param filepath: file path
        @return: computed hash string
        """
        with open(filepath, "rb") as f:
            h = method()
            # Hash in 1 MiB chunks to keep memory bounded for large samples.
            buf = f.read(1024 * 1024)
            while buf:
                h.update(buf)
                buf = f.read(1024 * 1024)
        return h.hexdigest()

    @staticmethod
    def md5_file(filepath):
        """Return the hex MD5 digest of the file at `filepath`."""
        return Files.hash_file(hashlib.md5, filepath)

    @staticmethod
    def sha1_file(filepath):
        """Return the hex SHA-1 digest of the file at `filepath`."""
        return Files.hash_file(hashlib.sha1, filepath)

    @staticmethod
    def sha256_file(filepath):
        """Return the hex SHA-256 digest of the file at `filepath`."""
        return Files.hash_file(hashlib.sha256, filepath)
299,126
get thumbnail
import os
from pathlib import Path

import httpx
from django.conf import settings
from django.utils import timezone
from tika_client import TikaClient

from documents.parsers import DocumentParser
from documents.parsers import ParseError
from documents.parsers import make_thumbnail_from_pdf


class TikaDocumentParser(DocumentParser):
    """
    This parser sends documents to a local tika server
    """

    logging_name = "paperless.parsing.tika"

    def METHOD_NAME(self, document_path, mime_type, file_name=None):
        """Return a thumbnail image for the document.

        The thumbnail is rendered from the PDF conversion of the document,
        converting first if no archive PDF exists yet.
        """
        if not self.archive_path:
            self.archive_path = self.convert_to_pdf(document_path, file_name)

        return make_thumbnail_from_pdf(
            self.archive_path,
            self.tempdir,
            self.logging_group,
        )

    def extract_metadata(self, document_path, mime_type):
        """Fetch document metadata from the Tika server.

        Returns a list of {namespace, prefix, key, value} dicts; metadata
        failures are logged and swallowed (empty list) so parsing proceeds.
        """
        try:
            with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
                parsed = client.metadata.from_file(document_path, mime_type)
            return [
                {
                    "namespace": "",
                    "prefix": "",
                    "key": key,
                    "value": parsed.data[key],
                }
                for key in parsed.data
            ]
        except Exception as e:
            # Metadata is non-essential; log a warning instead of failing.
            self.log.warning(
                f"Error while fetching document metadata for {document_path}: {e}",
            )
            return []

    def parse(self, document_path: Path, mime_type: str, file_name=None):
        """Extract text (and creation date) via Tika, then build the PDF.

        Sets self.text, self.date and self.archive_path as side effects.
        :raises ParseError: if the Tika server cannot parse the document.
        """
        self.log.info(f"Sending {document_path} to Tika server")

        try:
            with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
                parsed = client.tika.as_text.from_file(document_path, mime_type)
        except Exception as err:
            raise ParseError(
                f"Could not parse {document_path} with tika server at "
                f"{settings.TIKA_ENDPOINT}: {err}",
            ) from err

        self.text = parsed.content
        if self.text is not None:
            self.text = self.text.strip()

        self.date = parsed.created
        # Normalize a naive creation date to an aware datetime.
        if self.date is not None and timezone.is_naive(self.date):
            self.date = timezone.make_aware(self.date)

        self.archive_path = self.convert_to_pdf(document_path, file_name)

    def convert_to_pdf(self, document_path, file_name):
        """Convert the document to PDF via a Gotenberg/LibreOffice service.

        :return: path to the generated PDF inside self.tempdir
        :raises ParseError: if the conversion request fails.
        """
        pdf_path = os.path.join(self.tempdir, "convert.pdf")
        gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT
        url = gotenberg_server + "/forms/libreoffice/convert"

        self.log.info(f"Converting {document_path} to PDF as {pdf_path}")
        with open(document_path, "rb") as document_handle:
            files = {
                "files": (
                    # Gotenberg infers the source format from the extension.
                    "convert" + os.path.splitext(document_path)[-1],
                    document_handle,
                ),
            }
            headers = {}
            data = {}

            # Set the output format of the resulting PDF
            # Valid inputs: https://gotenberg.dev/docs/modules/pdf-engines#uno
            if settings.OCR_OUTPUT_TYPE in {"pdfa", "pdfa-2"}:
                data["pdfFormat"] = "PDF/A-2b"
            elif settings.OCR_OUTPUT_TYPE == "pdfa-1":
                data["pdfFormat"] = "PDF/A-1a"
            elif settings.OCR_OUTPUT_TYPE == "pdfa-3":
                data["pdfFormat"] = "PDF/A-3b"

            try:
                response = httpx.post(
                    url,
                    files=files,
                    headers=headers,
                    data=data,
                    timeout=settings.CELERY_TASK_TIME_LIMIT,
                )
                response.raise_for_status()  # ensure we notice bad responses
            except Exception as err:
                raise ParseError(
                    f"Error while converting document to PDF: {err}",
                ) from err

        with open(pdf_path, "wb") as file:
            file.write(response.content)
            # NOTE(review): close() is redundant here — the with-block already
            # closes the file.
            file.close()

        return pdf_path
299,127
get event columns
from __future__ import annotations from typing import Any, Dict, Sequence, Tuple from django import forms from sentry import tagstore from sentry.eventstore.models import GroupEvent from sentry.rules import MATCH_CHOICES, EventState, MatchType from sentry.rules.conditions.base import EventCondition from sentry.rules.history.preview_strategy import get_dataset_columns from sentry.snuba.dataset import Dataset from sentry.snuba.events import Columns from sentry.types.condition_activity import ConditionActivity class TaggedEventForm(forms.Form): key = forms.CharField(widget=forms.TextInput()) match = forms.ChoiceField(choices=list(MATCH_CHOICES.items()), widget=forms.Select()) value = forms.CharField(widget=forms.TextInput(), required=False) def clean(self) -> dict[str, Any] | None: cleaned_data: dict[str, Any] = super().clean() match = cleaned_data.get("match") value = cleaned_data.get("value") if match not in (MatchType.IS_SET, MatchType.NOT_SET) and not value: raise forms.ValidationError("This field is required.") return None class TaggedEventCondition(EventCondition): id = "sentry.rules.conditions.tagged_event.TaggedEventCondition" form_cls = TaggedEventForm label = "The event's tags match {key} {match} {value}" form_fields = { "key": {"type": "string", "placeholder": "key"}, "match": {"type": "choice", "choices": list(MATCH_CHOICES.items())}, "value": {"type": "string", "placeholder": "value"}, } def _passes(self, raw_tags: Sequence[Tuple[str, Any]]) -> bool: key = self.get_option("key") match = self.get_option("match") value = self.get_option("value") if not (key and match): return False key = key.lower() tags = ( k for gen in ( (k.lower() for k, v in raw_tags), (tagstore.get_standardized_key(k) for k, v in raw_tags), ) for k in gen ) if match == MatchType.IS_SET: return key in tags elif match == MatchType.NOT_SET: return key not in tags if not value: return False value = value.lower() values = ( v.lower() for k, v in raw_tags if k.lower() == key or 
tagstore.get_standardized_key(k) == key ) if match == MatchType.EQUAL: for t_value in values: if t_value == value: return True return False elif match == MatchType.NOT_EQUAL: for t_value in values: if t_value == value: return False return True elif match == MatchType.STARTS_WITH: for t_value in values: if t_value.startswith(value): return True return False elif match == MatchType.NOT_STARTS_WITH: for t_value in values: if t_value.startswith(value): return False return True elif match == MatchType.ENDS_WITH: for t_value in values: if t_value.endswith(value): return True return False elif match == MatchType.NOT_ENDS_WITH: for t_value in values: if t_value.endswith(value): return False return True elif match == MatchType.CONTAINS: for t_value in values: if value in t_value: return True return False elif match == MatchType.NOT_CONTAINS: for t_value in values: if value in t_value: return False return True raise RuntimeError("Invalid Match") def passes(self, event: GroupEvent, state: EventState, **kwargs: Any) -> bool: return self._passes(event.tags) def passes_activity( self, condition_activity: ConditionActivity, event_map: Dict[str, Any] ) -> bool: try: tags = event_map[condition_activity.data["event_id"]]["tags"] return self._passes(tags.items()) except (TypeError, KeyError): return False def render_label(self) -> str: data = { "key": self.data["key"], "value": self.data["value"], "match": MATCH_CHOICES[self.data["match"]], } return self.label.format(**data) def METHOD_NAME(self) -> Dict[Dataset, Sequence[str]]: columns: Dict[Dataset, Sequence[str]] = get_dataset_columns( [Columns.TAGS_KEY, Columns.TAGS_VALUE] ) return columns
299,128
get matched layout
# -*- encoding: utf-8 -*- from thefuck.utils import memoize, get_alias target_layout = '''qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?''' # any new keyboard layout must be appended greek = u''';ςερτυθιοπ[]ασδφγηξκλ΄ζχψωβνμ,./:΅ΕΡΤΥΘΙΟΠ{}ΑΣΔΦΓΗΞΚΛ¨"ΖΧΨΩΒΝΜ<>?''' korean = u'''ㅂㅈㄷㄱㅅㅛㅕㅑㅐㅔ[]ㅁㄴㅇㄹㅎㅗㅓㅏㅣ;'ㅋㅌㅊㅍㅠㅜㅡ,./ㅃㅉㄸㄲㅆㅛㅕㅑㅒㅖ{}ㅁㄴㅇㄹㅎㅗㅓㅏㅣ:"ㅋㅌㅊㅍㅠㅜㅡ<>?''' source_layouts = [u'''йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ,''', u'''йцукенгшщзхїфівапролджєячсмитьбю.ЙЦУКЕНГШЩЗХЇФІВАПРОЛДЖЄЯЧСМИТЬБЮ,''', u'''ضصثقفغعهخحجچشسیبلاتنمکگظطزرذدپو./ًٌٍَُِّْ][}{ؤئيإأآة»«:؛كٓژٰ‌ٔء><؟''', u'''/'קראטוןםפ][שדגכעיחלךף,זסבהנמצתץ.QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?''', greek, korean] source_to_target = { greek: {u';': "q", u'ς': "w", u'ε': "e", u'ρ': "r", u'τ': "t", u'υ': "y", u'θ': "u", u'ι': "i", u'ο': "o", u'π': "p", u'[': "[", u']': "]", u'α': "a", u'σ': "s", u'δ': "d", u'φ': "f", u'γ': "g", u'η': "h", u'ξ': "j", u'κ': "k", u'λ': "l", u'΄': "'", u'ζ': "z", u'χ': "x", u'ψ': "c", u'ω': "v", u'β': "b", u'ν': "n", u'μ': "m", u',': ",", u'.': ".", u'/': "/", u':': "Q", u'΅': "W", u'Ε': "E", u'Ρ': "R", u'Τ': "T", u'Υ': "Y", u'Θ': "U", u'Ι': "I", u'Ο': "O", u'Π': "P", u'{': "{", u'}': "}", u'Α': "A", u'Σ': "S", u'Δ': "D", u'Φ': "F", u'Γ': "G", u'Η': "H", u'Ξ': "J", u'Κ': "K", u'Λ': "L", u'¨': ":", u'"': '"', u'Ζ': "Z", u'Χ': "X", u'Ψ': "C", u'Ω': "V", u'Β': "B", u'Ν': "N", u'Μ': "M", u'<': "<", u'>': ">", u'?': "?", u'ά': "a", u'έ': "e", u'ύ': "y", u'ί': "i", u'ό': "o", u'ή': 'h', u'ώ': u"v", u'Ά': "A", u'Έ': "E", u'Ύ': "Y", u'Ί': "I", u'Ό': "O", u'Ή': "H", u'Ώ': "V"}, } '''Lists used for decomposing korean letters.''' HEAD_LIST = [u'ㄱ', u'ㄲ', u'ㄴ', u'ㄷ', u'ㄸ', u'ㄹ', u'ㅁ', u'ㅂ', u'ㅃ', u'ㅅ', u'ㅆ', u'ㅇ', u'ㅈ', u'ㅉ', u'ㅊ', u'ㅋ', u'ㅌ', u'ㅍ', u'ㅎ'] BODY_LIST = [u'ㅏ', u'ㅐ', u'ㅑ', u'ㅒ', u'ㅓ', u'ㅔ', u'ㅕ', u'ㅖ', u'ㅗ', u'ㅘ', u'ㅙ', u'ㅚ', u'ㅛ', u'ㅜ', u'ㅝ', u'ㅞ', u'ㅟ', u'ㅠ', u'ㅡ', u'ㅢ', u'ㅣ'] TAIL_LIST = [u' ', u'ㄱ', u'ㄲ', u'ㄳ', u'ㄴ', u'ㄵ', u'ㄶ', u'ㄷ', u'ㄹ', u'ㄺ', u'ㄻ', 
u'ㄼ', u'ㄽ', u'ㄾ', u'ㄿ', u'ㅀ', u'ㅁ', u'ㅂ', u'ㅄ', u'ㅅ', u'ㅆ', u'ㅇ', u'ㅈ', u'ㅊ', u'ㅋ', u'ㅌ', u'ㅍ', u'ㅎ'] DOUBLE_LIST = [u'ㅘ', u'ㅙ', u'ㅚ', u'ㅝ', u'ㅞ', u'ㅟ', u'ㅢ', u'ㄳ', u'ㄵ', u'ㄶ', u'ㄺ', u'ㄻ', u'ㄼ', u'ㄽ', u'ㄾ', u'ㅀ', u'ㅄ'] DOUBLE_MOD_LIST = [u'ㅗㅏ', u'ㅗㅐ', u'ㅗㅣ', u'ㅜㅓ', u'ㅜㅔ', u'ㅜㅣ', u'ㅡㅣ', u'ㄱㅅ', u'ㄴㅈ', u'ㄴㅎ', u'ㄹㄱ', u'ㄹㅁ', u'ㄹㅂ', u'ㄹㅅ', u'ㄹㅌ', u'ㄹㅎ', u'ㅂㅅ'] @memoize def METHOD_NAME(command): # don't use command.split_script here because a layout mismatch will likely # result in a non-splitable script as per shlex cmd = command.script.split(' ') for source_layout in source_layouts: is_all_match = True for cmd_part in cmd: if not all([ch in source_layout or ch in '-_' for ch in cmd_part]): is_all_match = False break if is_all_match: return source_layout def _switch(ch, layout): if ch in layout: return target_layout[layout.index(ch)] return ch def _switch_command(command, layout): # Layouts with different amount of characters than English if layout in source_to_target: return ''.join(source_to_target[layout].get(ch, ch) for ch in command.script) return ''.join(_switch(ch, layout) for ch in command.script) def _decompose_korean(command): def _change_double(ch): if ch in DOUBLE_LIST: return DOUBLE_MOD_LIST[DOUBLE_LIST.index(ch)] return ch hg_str = u'' for ch in command.script: if u'가' <= ch <= u'힣': ord_ch = ord(ch) - ord(u'가') hd = ord_ch // 588 bd = (ord_ch - 588 * hd) // 28 tl = ord_ch - 588 * hd - 28 * bd for ch in [HEAD_LIST[hd], BODY_LIST[bd], TAIL_LIST[tl]]: if ch != ' ': hg_str += _change_double(ch) else: hg_str += _change_double(ch) return hg_str def match(command): if 'not found' not in command.output: return False if any(u'ㄱ' <= ch <= u'ㅎ' or u'ㅏ' <= ch <= u'ㅣ' or u'가' <= ch <= u'힣' for ch in command.script): return True matched_layout = METHOD_NAME(command) return (matched_layout and _switch_command(command, matched_layout) != get_alias()) def get_new_command(command): if any(u'ㄱ' <= ch <= u'ㅎ' or u'ㅏ' <= ch <= u'ㅣ' or u'가' <= ch <= u'힣' for ch in 
command.script): command.script = _decompose_korean(command) matched_layout = METHOD_NAME(command) return _switch_command(command, matched_layout)
299,129
eval
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Any import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.distributed import ReduceOp from torch.optim import Optimizer from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn.optimizer import ColossalaiOptimizer from ._fp16_optimizer import FP16Optimizer class NaiveAMPOptimizer(ColossalaiOptimizer): """A wrapper class for optimizer to cast all parameters to fp16 Args: optim (torch.optim.Optimizer): A normal optimizer like Adam or SGD. grad_scaler (BaseGradScaler): grad scaler for gradient chose in ``constant_grad_scaler`` or ``dynamic_grad_scaler``. clip_grad_norm (float, optional): clip gradients with this global L2 norm. Default 0. verbose (bool, optional): if set to `True`, will print debug info. Default False. Note: clipping is ignored if ``clip_grad_norm`` equals 0. """ def __init__(self, optim: Optimizer, *args, **kwargs): optim = FP16Optimizer(optim, *args, **kwargs) super().__init__(optim) def backward(self, loss: Tensor): self.optim.backward(loss) def step(self): return self.optim.step() def clip_grad_norm(self, model: nn.Module, max_norm: float): if self.optim.max_norm == max_norm: return raise RuntimeError("NaiveAMP optimizer has clipped gradients during optimizer.step(). " "If you have supplied clip_grad_norm in the amp_config, " "executing the method clip_grad_norm is not allowed.") class NaiveAMPModel(nn.Module): r"""A wrapper class for model to cast the model into fp16 and automatically cast the input and output Args: model (torch.nn.Module): torch.nn.Module to be wrapped. output_to_fp32 (bool, optional): Whether cast output of this module into fp32. (Default: True) parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this module. 
(Default: ``ParallelMode.DATA``) sync_buffer (bool, optional): whether to synchronize buffer. (Default: True) Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ def __init__(self, model: nn.Module, output_to_fp32: bool = True, parallel_mode: ParallelMode = ParallelMode.DATA, sync_buffer: bool = True): super().__init__() self.model = model.half() self._output_to_fp32 = output_to_fp32 self._sync_buf = sync_buffer if gpc.is_initialized(parallel_mode) and gpc.get_world_size(parallel_mode) > 1: self._process_group = gpc.get_group(parallel_mode) self._world_size = gpc.get_world_size(parallel_mode) else: self._process_group = None self._world_size = 1 self._sync_buf = False self._first_eval_run = False @property def sync_buffer(self): return self._sync_buf @sync_buffer.setter def sync_buffer(self, state: bool): self._sync_buf = state def _convert_to_fp16(self, input_: Any): if isinstance(input_, Tensor) and input_.dtype == torch.float32: input_ = input_.half() return input_ def _convert_to_fp32(self, input_: Any): if isinstance(input_, Tensor) and input_.dtype == torch.float16: input_ = input_.float() return input_ def _reduce_module_buffer(self): """ All-reduce the buffers (e.g. 
running stats of batch normalization) across data parallel ranks so that all the ranks will produce consistent results when given the same input """ buf_list = [] # find valid buffers for buf in self.model.buffers(): if buf is not None: buf_list.append(buf) # reduce buffers across data parallel ranks if buf_list: coalesced_buf = _flatten_dense_tensors(buf_list) coalesced_buf.div_(self._world_size) dist.all_reduce(coalesced_buf, op=ReduceOp.SUM, group=self._process_group) unflattened_buf_list = _unflatten_dense_tensors(coalesced_buf, buf_list) for old, new in zip(buf_list, unflattened_buf_list): old.copy_(new) def METHOD_NAME(self): self.model.METHOD_NAME() # we only sync buffer in the first eval iteration # so that future eval iterations can be done without communication self._first_eval_run = True def forward(self, *args, **kwargs): # reduce buffers after forward will lead to error # as we cannot change the variables needed for gradient computation after forward # so we sync buffer before forward if (self.training or self._first_eval_run) and self._sync_buf: with torch.no_grad(): self._reduce_module_buffer() if self._first_eval_run: self._first_eval_run = False if args: args = [self._convert_to_fp16(arg) for arg in args] if kwargs: for k, v in kwargs.items(): kwargs[k] = self._convert_to_fp16(v) out = self.model(*args, **kwargs) if self._output_to_fp32: if isinstance(out, Tensor): out = self._convert_to_fp32(out) elif isinstance(out, (tuple, list)): out = [self._convert_to_fp32(val) for val in out] elif isinstance(out, dict): out = {key: self._convert_to_fp32(val) for key, val in out.items()} return out
299,130
decorator
import asyncio import functools import sys from asyncio import CancelledError from dataclasses import dataclass from datetime import datetime from enum import Enum from logging import getLogger from typing import Awaitable, Callable, AsyncIterable from aioredis import Redis, Channel, ChannelClosedError from virtool_core.models.basemodel import BaseModel from virtool_core.redis import resubscribe from virtool.api.custom_json import dump_string from virtool.utils import timestamp, get_model_by_name logger = getLogger("events") class Operation(str, Enum): """ The possible operations that can be performed on a resource. """ CREATE = "create" READ = "read" UPDATE = "update" DELETE = "delete" @dataclass class Event: data: BaseModel domain: str name: str operation: Operation timestamp: datetime class _InternalEventsTarget: """ A target for emitting events that are used internally by the application. Calls to ``emit()`` and functions decorated with ``@emits`` will add an event to the queue via this target. """ q = asyncio.Queue(maxsize=1000) def emit(self, event: Event): self.q.put_nowait(event) async def get(self) -> Event: """ Get an event from the target. """ return await self.q.get() def clear(self): self.q = asyncio.Queue() _events_target = _InternalEventsTarget() def dangerously_clear_events(): """ Clear all events from the internal queue. This should only be used in tests. """ _events_target.clear() async def dangerously_get_event() -> Event: """ Get an event directly from the target. This should only be used in tests. """ return await _events_target.get() def emit(data: BaseModel, domain: str, name: str, operation: Operation): """ Emit an event. """ _events_target.emit( Event( data=data, domain=domain, name=name, operation=operation, timestamp=timestamp(), ) ) def emits(operation: Operation, domain: str | None = None, name: str | None = None): """ Emits the return value of decorated method as an event. 
""" def METHOD_NAME(func: Callable[..., Awaitable[BaseModel]]): emitted_name = name or func.__name__ @functools.wraps(func) async def wrapper(*args, **kwargs): # This is the DataLayerPiece instance the method is bound to. obj = args[0] return_value = await func(*args, **kwargs) emit(return_value, domain or obj.name, emitted_name, operation) return return_value return wrapper return METHOD_NAME class EventPublisher: """ Publishes events emitted in the application. Events are published using Redis pub/sub. """ def __init__(self, redis: Redis): self._redis = redis async def run(self): """Start the event publisher.""" logger.info("Starting event publisher") try: while True: event = await _events_target.get() try: data = event.data.dict() except AttributeError: logger.exception( "Encountered exception while publishing event: name=%s.%s operation=%s", event.domain, event.name, event.operation, ) continue await self._redis.publish( "channel:events", dump_string( { "domain": event.domain, "name": event.name, "operation": event.operation, "payload": { "data": data, "model": event.data.__class__.__name__, }, "timestamp": event.timestamp, } ), ) logger.info( "Published event: name=%s.%s operation=%s", event.domain, event.name, event.operation, ) except CancelledError: pass class EventListener(AsyncIterable): """Pulls events as they are received and yields them as :class:`.Event` objects.""" def __init__(self, redis: Redis): self._redis = redis self._channel: Channel | None = None self._channel_name = "channel:events" def __aiter__(self): return self async def __anext__(self) -> Event: if not self._channel: (self._channel,) = await self._redis.subscribe(self._channel_name) while True: try: received = await self._channel.get_json() payload = received.pop("payload") cls = get_model_by_name(payload["model"]) return Event(**received, data=cls(**payload["data"])) except ChannelClosedError: try: self._channel = await asyncio.wait_for( resubscribe(self._redis, self._channel_name), 10 
) except asyncio.TimeoutError: logger.critical( "Could not resubscribe to Redis channel %s", self._channel_name ) sys.exit(1) except TypeError: pass
299,131
test filter out verified schedules
""" Tests for send_upgrade_reminder management command. """ import logging from unittest import skipUnless from unittest.mock import patch import ddt from django.conf import settings from edx_ace import Message from edx_ace.utils.date import serialize from opaque_keys.edx.locator import CourseLocator from common.djangoapps.course_modes.models import CourseMode from openedx.core.djangoapps.schedules import resolvers, tasks from openedx.core.djangoapps.schedules.management.commands import send_upgrade_reminder as reminder from openedx.core.djangoapps.schedules.management.commands.tests.send_email_base import ( ExperienceTest, ScheduleSendEmailTestMixin ) from openedx.core.djangoapps.schedules.models import ScheduleExperience from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms from common.djangoapps.student.tests.factories import UserFactory LOG = logging.getLogger(__name__) @ddt.ddt @skip_unless_lms @skipUnless('openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS, "Can't test schedules if the app isn't installed") class TestUpgradeReminder(ScheduleSendEmailTestMixin, CacheIsolationTestCase): # lint-amnesty, pylint: disable=missing-class-docstring __test__ = True resolver = resolvers.UpgradeReminderResolver task = tasks.ScheduleUpgradeReminder deliver_task = tasks._upgrade_reminder_schedule_send # lint-amnesty, pylint: disable=protected-access command = reminder.Command deliver_config = 'deliver_upgrade_reminder' enqueue_config = 'enqueue_upgrade_reminder' expected_offsets = (2,) queries_deadline_for_each_course = True consolidates_emails_for_learner = True @ddt.data(True, False) @patch.object(tasks, 'ace') def test_verified_learner(self, is_verified, mock_ace): current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable schedule = self._schedule_factory( enrollment__mode=CourseMode.VERIFIED if is_verified else CourseMode.AUDIT, ) 
self.task().apply(kwargs=dict( site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=self._calculate_bin_for_user(schedule.enrollment.user), )) assert mock_ace.send.called == (not is_verified) def METHOD_NAME(self): current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable user = UserFactory.create() schedules = [ self._schedule_factory( enrollment__user=user, enrollment__course__id=CourseLocator('edX', 'toy', f'Course{i}'), enrollment__mode=CourseMode.VERIFIED if i in (0, 3) else CourseMode.AUDIT, ) for i in range(5) ] sent_messages = [] with patch.object(self.task, 'async_send_task') as mock_schedule_send: mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args[1]) self.task().apply(kwargs=dict( site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=self._calculate_bin_for_user(user), )) messages = [Message.from_string(m) for m in sent_messages] assert len(messages) == 1 message = messages[0] self.assertCountEqual( message.context['course_ids'], [str(schedules[i].enrollment.course.id) for i in (1, 2, 4)] ) @patch.object(tasks, 'ace') def test_course_without_verified_mode(self, mock_ace): current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable schedule = self._schedule_factory() schedule.enrollment.course.modes.filter(mode_slug=CourseMode.VERIFIED).delete() self.task().apply(kwargs=dict( site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=self._calculate_bin_for_user(schedule.enrollment.user), )) assert mock_ace.send.called is False @ddt.data( ExperienceTest(experience=ScheduleExperience.EXPERIENCES.default, offset=expected_offsets[0], email_sent=True), ExperienceTest(experience=ScheduleExperience.EXPERIENCES.course_updates, offset=expected_offsets[0], email_sent=False), # 
lint-amnesty, pylint: disable=line-too-long ExperienceTest(experience=None, offset=expected_offsets[0], email_sent=True), ) def test_upgrade_reminder_experience(self, test_config): self._check_if_email_sent_for_experience(test_config)
299,132
get list
# pylint: disable=line-too-long """ Vector.py This module will store the Vector class """ import sys import math import common sys.path.append("..") class Vector: """ To store the position of layers val1 represents the x-axis value val2 represents the y-axis value For other parameters val1 represents the value of the parameter val2 represents the time parameter type represents what this vector is representing """ def __init__(self, val1=0, val2=0, _type=None): """ Args: val1 (float) : First value of the vector val2 (float) : Second value of the vector _type (:obj: `str`, optional) : Type of vector Returns: (None) """ #print(type(val2), isinstance(val2, common.Angle.Angle)) if isinstance(val2, common.Angle.Angle): self.val1 = val1 * common.Angle.CosAngle(val2).get() self.val2 = val1 * common.Angle.SinAngle(val2).get() self.type = _type else: self.val1 = val1 self.val2 = val2 self.type = _type def __str__(self): return "({0},{1}, {2})".format(self.val1, self.val2, self.type) def __add__(self, other): val1 = self.val1 + other.val1 val2 = self.val2 + other.val2 return Vector(val1, val2, self.type) def __sub__(self, other): val1 = self.val1 - other.val1 val2 = self.val2 - other.val2 return Vector(val1, val2, self.type) def __neg__(self): return -1 * self def __getitem__(self, key): if key: return round(self.val2, 3) return round(self.val1, 3) def __setitem__(self, key, value): if key: self.val2 = value else: self.val1 = value def isnan(self): """ Returns true is any value of this vector is nan """ return math.isnan(self.val1) or math.isnan(self.val2) def mag(self): """ Returns the magnitude of the vector Args: (None) Returns: (float) : The magnitude of the vector """ return math.sqrt(self.mag_squared()) def inv_mag(self): """ Returns the inverse of the magnitude of the vector Args: (None) Returns: (float) : Magnitude inversed """ if self.mag() == 0: return float('nan') return 1.0 / self.mag() def perp(self): """ Returns a perpendicular version of the vector Args: (None) 
Returns: (common.Vector.Vector) : Perpendicular vector with same magnitude """ return Vector(self.val2, -self.val1) def is_equal_to(self, other): """ Tells if the current vector is equal to `other` vector Args: other (common.Vector.Vector) : The vector to be compared with Returns: (bool) : True if the vectors are equal : False otherwise """ return common.misc.approximate_equal((self - other).mag_squared(), 0) def mag_squared(self): """ Returns the squared magnitude of the vector Args: (None) Returns: (float) : squared magnitude """ ret = self.val1 * self.val1 + self.val2 * self.val2 return ret def angle(self): """ Returns the angle of this Vector in Radians Args: (None) Returns: (common.Angle.RadAngle): The angle of Vector """ return common.Angle.RadAngle(math.atan2(self.val2, self.val1)) def norm(self): """ Returns a normalised version of the vector Args: (None) Returns: (common.Vector.Vector) : itselves whose magnitude is 1 """ obj = self * self.inv_mag() self.__dict__.update(obj.__dict__) return self # other can only be of type real def __mul__(self, other): if not isinstance(other, self.__class__): val1 = self.val1 * other val2 = self.val2 * other return Vector(val1, val2, self.type) elif isinstance(other, self.__class__): return self.val1*other.val1 + self.val2*other.val2 raise Exception('Multiplication with {} not defined'.format(type(other))) def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): if not isinstance(other, self.__class__): val1 = self.val1 / other val2 = self.val2 / other return Vector(val1, val2, self.type) raise Exception('Division with {} not defined'.format(type(other))) def METHOD_NAME(self): """ Get val1 and val2 values in the format required by lottie Args: (None) Returns: (list) : Contains the Vector in list format """ return [self.val1, self.val2] def get_val(self): """ Get value in the format required by lottie Args: (None) Returns: (list) : Depending upon _type a list is returned """ if self.type == 
"origin": ret = [self.val1, self.val2] elif self.type == "circle_radius": ret = [self.val1, self.val1] elif self.type in {"rectangle_size", "image_scale", "scale_layer_zoom", "group_layer_scale", "stretch_layer_scale"}: ret = [self.val1, self.val3] else: ret = [self.val1] return ret def add_new_val(self, val3): """ This function store an additional value in the vector. This is currently required by the rectangle layer Args: val3 (float) : Some Vectors need additional value to be used later Returns: (None) """ self.val3 = val3 def set_type(self, _type): """ This set's the type of the Vector Args: _type (str) : Type of Vector to be set Returns: (None) """ self.type = _type
299,133
test faiss beir
# # Pyserini: Reproducible IR research with sparse and dense representations # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import requests import unittest from pyserini.prebuilt_index_info import TF_INDEX_INFO, IMPACT_INDEX_INFO, FAISS_INDEX_INFO class TestPrebuiltIndexes(unittest.TestCase): def test_tf_beir(self): urls = [] cnt = 0 for key in TF_INDEX_INFO: if 'beir' in key: cnt += 1 for url in TF_INDEX_INFO[key]['urls']: urls.append(url) # 29 each for flat and multifield self.assertEqual(cnt, 58) self._test_urls(urls) def test_tf_mrtydi(self): urls = [] cnt = 0 for key in TF_INDEX_INFO: if 'mrtydi' in key: cnt += 1 for url in TF_INDEX_INFO[key]['urls']: urls.append(url) # 11 languages self.assertEqual(cnt, 11) self._test_urls(urls) def test_tf_miracl(self): urls = [] cnt = 0 for key in TF_INDEX_INFO: if 'miracl' in key: cnt += 1 for url in TF_INDEX_INFO[key]['urls']: urls.append(url) # 18 languages including surprise self.assertEqual(cnt, 18) self._test_urls(urls) def test_impact_beir(self): urls = [] cnt = 0 for key in IMPACT_INDEX_INFO: if 'beir' in key: cnt += 1 for url in IMPACT_INDEX_INFO[key]['urls']: urls.append(url) # 29 from SPLADE-distill CoCodenser-medium self.assertEqual(cnt, 29) self._test_urls(urls) def test_impact_mrtydi(self): urls = [] cnt = 0 for key in IMPACT_INDEX_INFO: if 'miracl' in key: cnt += 1 for url in IMPACT_INDEX_INFO[key]['urls']: urls.append(url) # currently, none self.assertEqual(cnt, 0) def test_impact_miracl(self): urls = [] cnt = 0 for key 
in IMPACT_INDEX_INFO: if 'miracl' in key: cnt += 1 for url in IMPACT_INDEX_INFO[key]['urls']: urls.append(url) # currently, none self.assertEqual(cnt, 0) def METHOD_NAME(self): urls = [] cnt = 0 for key in FAISS_INDEX_INFO: if 'beir' in key: cnt += 1 for url in FAISS_INDEX_INFO[key]['urls']: urls.append(url) # each 29: contriever, contriever-msmarco self.assertEqual(cnt, 58) self._test_urls(urls) def test_faiss_mrtydi(self): urls = [] cnt = 0 for key in FAISS_INDEX_INFO: if 'mrtydi' in key: cnt += 1 for url in FAISS_INDEX_INFO[key]['urls']: urls.append(url) # each 11: mdpr-nq, mdpr-tied-pft-msmarco, mdpr-tied-pft-nq, mdpr-tied-pft-msmarco-ft-all self.assertEqual(cnt, 44) self._test_urls(urls) def test_faiss_miracl(self): urls = [] cnt = 0 for key in FAISS_INDEX_INFO: if 'miracl' in key: cnt += 1 for url in FAISS_INDEX_INFO[key]['urls']: urls.append(url) # 18 pFT MS MARCO, 18 pFT MS MARCO all, 16 pFT MS MARCO + per lang (no de, yo), 18 mContriever pFT MS MARCO self.assertEqual(cnt, 70) self._test_urls(urls) def test_faiss_msmarco(self): urls = [] cnt = 0 for key in FAISS_INDEX_INFO: if 'msmarco-v' in key: cnt += 1 for url in FAISS_INDEX_INFO[key]['urls']: urls.append(url) self.assertEqual(cnt, 15) self._test_urls(urls) def test_faiss_wikipedia(self): urls = [] cnt = 0 for key in FAISS_INDEX_INFO: if 'wiki' in key: cnt += 1 for url in FAISS_INDEX_INFO[key]['urls']: urls.append(url) self.assertEqual(cnt, 7) self._test_urls(urls) def _test_urls(self, urls): cnt = 0 for url in urls: cnt += 1 response = requests.head(url) self.assertEqual(response.status_code, 200) self.assertEqual(cnt, len(urls))
299,134
setcreated
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ The I{wsse} module provides WS-Security. """ from suds.sudsobject import Object from suds.sax.element import Element from suds.sax.date import DateTime, UtcTimezone from datetime import datetime, timedelta try: from hashlib import sha256 except ImportError: # Python 2.4 compatibility from md5 import md5 as sha256 dsns = ('ds', 'http://www.w3.org/2000/09/xmldsig#') wssens = ( 'wsse', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd' ) wsuns = ( 'wsu', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd' ) wsencns = ( 'wsenc', 'http://www.w3.org/2001/04/xmlenc#' ) class Security(Object): """ WS-Security object. @ivar tokens: A list of security tokens @type tokens: [L{Token},...] @ivar signatures: A list of signatures. @type signatures: TBD @ivar references: A list of references. @type references: TBD @ivar keys: A list of encryption keys. @type keys: TBD """ def __init__(self): """ """ Object.__init__(self) self.mustUnderstand = True self.tokens = [] self.signatures = [] self.references = [] self.keys = [] def xml(self): """ Get xml representation of the object. 
@return: The root node. @rtype: L{Element} """ root = Element('Security', ns=wssens) root.set('mustUnderstand', str(self.mustUnderstand).lower()) for t in self.tokens: root.append(t.xml()) return root class Token(Object): """ I{Abstract} security token. """ @classmethod def now(cls): return datetime.now() @classmethod def utc(cls): return datetime.utcnow().replace(tzinfo=UtcTimezone()) @classmethod def sysdate(cls): utc = DateTime(cls.utc()) return str(utc) def __init__(self): Object.__init__(self) class UsernameToken(Token): """ Represents a basic I{UsernameToken} WS-Secuirty token. @ivar username: A username. @type username: str @ivar password: A password. @type password: str @ivar nonce: A set of bytes to prevent reply attacks. @type nonce: str @ivar created: The token created. @type created: L{datetime} """ def __init__(self, username=None, password=None): """ @param username: A username. @type username: str @param password: A password. @type password: str """ Token.__init__(self) self.username = username self.password = password self.nonce = None self.created = None def setnonce(self, text=None): """ Set I{nonce} which is arbitraty set of bytes to prevent reply attacks. @param text: The nonce text value. Generated when I{None}. @type text: str """ if text is None: s = [] s.append(self.username) s.append(self.password) s.append(Token.sysdate()) m = sha256() m.update(':'.join(s).encode("utf-8")) self.nonce = m.hexdigest() else: self.nonce = text def METHOD_NAME(self, dt=None): """ Set I{created}. @param dt: The created date & time. Set as datetime.utc() when I{None}. @type dt: L{datetime} """ if dt is None: self.created = Token.utc() else: self.created = dt def xml(self): """ Get xml representation of the object. @return: The root node. 
@rtype: L{Element} """ root = Element('UsernameToken', ns=wssens) u = Element('Username', ns=wssens) u.setText(self.username) root.append(u) p = Element('Password', ns=wssens) p.setText(self.password) root.append(p) if self.nonce is not None: n = Element('Nonce', ns=wssens) n.setText(self.nonce) root.append(n) if self.created is not None: n = Element('Created', ns=wsuns) n.setText(str(DateTime(self.created))) root.append(n) return root class Timestamp(Token): """ Represents the I{Timestamp} WS-Secuirty token. @ivar created: The token created. @type created: L{datetime} @ivar expires: The token expires. @type expires: L{datetime} """ def __init__(self, validity=90): """ @param validity: The time in seconds. @type validity: int """ Token.__init__(self) self.created = Token.utc() self.expires = self.created + timedelta(seconds=validity) def xml(self): root = Element("Timestamp", ns=wsuns) created = Element('Created', ns=wsuns) created.setText(str(DateTime(self.created))) expires = Element('Expires', ns=wsuns) expires.setText(str(DateTime(self.expires))) root.append(created) root.append(expires) return root
299,135
upgrade
"""Adds Job visibility column Revision ID: 1c87fd8da02e Revises: 735063d71b57 Create Date: 2017-12-13 12:18:59.551609 """ # revision identifiers, used by Alembic. revision = '1c87fd8da02e' down_revision = '735063d71b57' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table import residue try: is_sqlite = op.get_context().dialect.name == 'sqlite' except: is_sqlite = False if is_sqlite: op.get_context().connection.execute('PRAGMA foreign_keys=ON;') utcnow_server_default = "(datetime('now', 'utc'))" else: utcnow_server_default = "timezone('utc', current_timestamp)" def sqlite_column_reflect_listener(inspector, table, column_info): """Adds parenthesis around SQLite datetime defaults for utcnow.""" if column_info['default'] == "datetime('now', 'utc')": column_info['default'] = utcnow_server_default sqlite_reflect_kwargs = { 'listeners': [('column_reflect', sqlite_column_reflect_listener)] } # =========================================================================== # HOWTO: Handle alter statements in SQLite # # def upgrade(): # if is_sqlite: # with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: # batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False) # else: # op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False) # # =========================================================================== dept_membership_request_table = table( 'dept_membership_request', sa.Column('id', residue.UUID()), sa.Column('attendee_id', residue.UUID()), sa.Column('department_id', residue.UUID()), ) def METHOD_NAME(): if is_sqlite: with op.batch_alter_table('job', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.add_column(sa.Column('visibility', sa.Integer(), server_default='0', nullable=False)) batch_op.create_index('ix_job_department_id', ['department_id'], unique=False) with 
op.batch_alter_table('dept_membership', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_dept_membership_attendee_id', ['attendee_id'], unique=False) batch_op.create_index('ix_dept_membership_department_id', ['department_id'], unique=False) with op.batch_alter_table('dept_membership_dept_role', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_dept_membership_dept_role_dept_membership_id', ['dept_membership_id'], unique=False) batch_op.create_index('ix_dept_membership_dept_role_dept_role_id', ['dept_role_id'], unique=False) with op.batch_alter_table('dept_membership_request', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_dept_membership_request_attendee_id', ['attendee_id'], unique=False) batch_op.create_index('ix_dept_membership_request_department_id', ['department_id'], unique=False) with op.batch_alter_table('dept_role', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_dept_role_department_id', ['department_id'], unique=False) with op.batch_alter_table('job_required_role', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_job_required_role_dept_role_id', ['dept_role_id'], unique=False) batch_op.create_index('ix_job_required_role_job_id', ['job_id'], unique=False) with op.batch_alter_table('shift', reflect_kwargs=sqlite_reflect_kwargs) as batch_op: batch_op.create_index('ix_shift_attendee_id', ['attendee_id'], unique=False) batch_op.create_index('ix_shift_job_id', ['job_id'], unique=False) else: op.add_column('job', sa.Column('visibility', sa.Integer(), server_default='0', nullable=False)) op.create_index('ix_job_department_id', 'job', ['department_id'], unique=False) op.create_index('ix_dept_membership_attendee_id', 'dept_membership', ['attendee_id'], unique=False) op.create_index('ix_dept_membership_department_id', 'dept_membership', ['department_id'], unique=False) 
op.create_index('ix_dept_membership_dept_role_dept_membership_id', 'dept_membership_dept_role', ['dept_membership_id'], unique=False) op.create_index('ix_dept_membership_dept_role_dept_role_id', 'dept_membership_dept_role', ['dept_role_id'], unique=False) op.create_index('ix_dept_membership_request_attendee_id', 'dept_membership_request', ['attendee_id'], unique=False) op.create_index('ix_dept_membership_request_department_id', 'dept_membership_request', ['department_id'], unique=False) op.create_index('ix_dept_role_department_id', 'dept_role', ['department_id'], unique=False) op.create_index('ix_job_required_role_dept_role_id', 'job_required_role', ['dept_role_id'], unique=False) op.create_index('ix_job_required_role_job_id', 'job_required_role', ['job_id'], unique=False) op.create_index('ix_shift_attendee_id', 'shift', ['attendee_id'], unique=False) op.create_index('ix_shift_job_id', 'shift', ['job_id'], unique=False) # Removes duplicate membership requests caused by a bug that would save a # new "Anywhere" membership request everytime an attendee record was saved. 
connection = op.get_bind() membership_requests = connection.execute( dept_membership_request_table.select().where( dept_membership_request_table.c.department_id == None ) ) attende_ids = set() for membership_request in membership_requests: if membership_request.attendee_id in attende_ids: op.execute( dept_membership_request_table.delete().where( dept_membership_request_table.c.id == membership_request.id ) ) else: attende_ids.add(membership_request.attendee_id) def downgrade(): op.drop_column('job', 'visibility') op.drop_index('ix_shift_job_id', table_name='shift') op.drop_index('ix_shift_attendee_id', table_name='shift') op.drop_index('ix_job_required_role_job_id', table_name='job_required_role') op.drop_index('ix_job_required_role_dept_role_id', table_name='job_required_role') op.drop_index('ix_job_department_id', table_name='job') op.drop_index('ix_dept_role_department_id', table_name='dept_role') op.drop_index('ix_dept_membership_request_department_id', table_name='dept_membership_request') op.drop_index('ix_dept_membership_request_attendee_id', table_name='dept_membership_request') op.drop_index('ix_dept_membership_dept_role_dept_role_id', table_name='dept_membership_dept_role') op.drop_index('ix_dept_membership_dept_role_dept_membership_id', table_name='dept_membership_dept_role') op.drop_index('ix_dept_membership_department_id', table_name='dept_membership') op.drop_index('ix_dept_membership_attendee_id', table_name='dept_membership')
299,136
static file url
# -*- coding: utf-8 -*- import json from functools import wraps from urllib.parse import unquote, urljoin, urlparse import flask import flask_themes2 from pyload.core.api import Perms, Role, has_permission class JSONEncoder(json.JSONEncoder): def default(self, obj): try: return dict(obj) except TypeError: pass return super().default(obj) try: JSONProviderBase = flask.json.provider.JSONProvider except AttributeError: pass else: class JSONProvider(JSONProviderBase): def dumps(self, obj, **kwargs): return json.dumps(obj, **kwargs, cls=JSONEncoder) def loads(self, s, **kwargs): return json.loads(s, **kwargs) #: Checks if location belongs to same host address def is_safe_url(location): ref_url = urlparse(flask.request.host_url) test_url = urlparse(urljoin(flask.request.host_url, location)) return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc def get_redirect_url(fallback=None): login_url = urljoin(flask.request.url_root, flask.url_for('app.login')) request_url = unquote(flask.request.url) for location in flask.request.values.get("next"), flask.request.referrer: if not location: continue if location in (request_url, login_url): # don't redirect to same location continue if is_safe_url(location): return location return fallback def render_base(messages): return render_template("base.html", messages=messages) def clear_session(session=flask.session, permanent=True): session.permanent = bool(permanent) session.clear() # session.modified = True def current_theme_id(): api = flask.current_app.config["PYLOAD_API"] return api.get_config_value("webui", "theme").lower() #: tries to serve the file from the static directory of the current theme otherwise fallback to builtin one def METHOD_NAME(filename): themeid = current_theme_id() try: url = flask_themes2.METHOD_NAME(themeid, filename) except KeyError: url = flask.url_for("static", filename=filename) return url def theme_template(filename): return flask.url_for("app.render", filename=filename) #: 
tries to render the template of the current theme otherwise fallback to builtin template def render_template(template, **context): themeid = current_theme_id() return flask_themes2.render_theme_template(themeid, template, **context) def parse_permissions(session=flask.session): perms = {x.name: False for x in Perms} perms["ADMIN"] = False perms["is_admin"] = False if not session.get("authenticated", False): return perms perms["ANY"] = True if session.get("role") == Role.ADMIN: for key in perms.keys(): perms[key] = True elif session.get("perms"): p = session.get("perms") perms.update(get_permission(p)) return perms def permlist(): return [x.name for x in Perms if x.name != "ANY"] def get_permission(userperms): """ Returns a dict with permission key. :param userperms: permission bits """ return { name: has_permission(userperms, getattr(Perms, name).value) for name in permlist() } def set_permission(perms): """ generates permission bits from dictionary. :param perms: dict """ permission = 0 for name in permlist(): if name.startswith("_"): continue if name in perms and perms[name]: permission |= getattr(Perms, name) return permission def set_session(user_info, session=flask.session, permanent=True): session.permanent = bool(permanent) session.update( { "authenticated": True, "id": user_info["id"], "name": user_info["name"], "role": user_info["role"], "perms": user_info["permission"], "template": user_info["template"], } ) # session.modified = True return session # TODO: Recheck... 
def parse_userdata(session=flask.session): return { "name": session.get("name", "Anonymous"), "is_admin": session.get("role", 1) == 0, "is_authenticated": session.get("authenticated", False), } def apiver_check(func): # if no apiver is provided assumes latest @wraps(func) def wrapper(*args, **kwargs): api = flask.current_app.config["PYLOAD_API"] core_apiver = api.__version__ if int(kwargs.get("apiver", core_apiver).strip("v")) < core_apiver: return "Obsolete API", 404 return func(*args, **kwargs) return wrapper def is_authenticated(session=flask.session): api = flask.current_app.config["PYLOAD_API"] user = session.get("name") authenticated = session.get("authenticated") return authenticated and api.user_exists(user) def login_required(perm): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): s = flask.session #: already authenticated? if is_authenticated(s): perms = parse_permissions(s) if perm not in perms or not perms[perm]: response = "Forbidden", 403 else: response = func(*args, **kwargs) else: clear_session(s) if flask.request.headers.get("X-Requested-With") == "XMLHttpRequest": response = "Forbidden", 403 else: location = flask.url_for( "app.login", next=flask.request.url ) response = flask.redirect(location) return response return wrapper return decorator
299,137
test update vm image build
# Copyright Contributors to the Packit project. # SPDX-License-Identifier: MIT import pytest import packit_service from requests import HTTPError from flexmock import flexmock from flexmock import Mock from packit.config.job_config import JobConfigTriggerType, JobType from packit_service.config import ServiceConfig from packit_service.models import ( VMImageBuildTargetModel, VMImageBuildStatus, ProjectEventModelType, ) from packit_service.worker.helpers.build.babysit import ( check_pending_vm_image_builds, update_vm_image_build, UpdateImageBuildHelper, ) from packit_service.worker.events import VMImageBuildResultEvent from packit_service.worker.handlers import VMImageBuildResultHandler from packit_service.worker.monitoring import Pushgateway def test_check_pending_vm_image_builds(): flexmock(VMImageBuildTargetModel).should_receive("get_all_by_status").with_args( VMImageBuildStatus.pending ).and_return([flexmock(build_id=1)]) flexmock(packit_service.worker.helpers.build.babysit).should_receive( "update_vm_image_build" ).with_args(1, Mock) check_pending_vm_image_builds() def test_check_no_pending_vm_image_builds(): flexmock(VMImageBuildTargetModel).should_receive("get_all_by_status").with_args( VMImageBuildStatus.pending ).and_return([]) flexmock(packit_service.worker.helpers.build.babysit).should_receive( "update_vm_image_build" ).never() check_pending_vm_image_builds() @pytest.mark.parametrize( "stop_babysitting, build_status, vm_image_builder_result", ( pytest.param( True, "error", None, id="No result from vm image builder server. 
An exception was raised.", ), pytest.param( True, "failure", {"image_status": {"status": "failure", "error": "no dnf package found"}}, id="Failed build", ), pytest.param( True, "success", { "image_status": { "status": "success", "error": "", "upload_status": { "type": "aws", "options": { "ami": "ami-0c830793775595d4b", "region": "eu-west-1", }, }, } }, id="Successfull build", ), pytest.param( False, "building", {"image_status": {"status": "building", "error": ""}}, id="Still in progress build", ), ), ) def METHOD_NAME(stop_babysitting, build_status, vm_image_builder_result): db_project_object = flexmock( id=1, job_config_trigger_type=JobConfigTriggerType.pull_request, project_event_model_type=ProjectEventModelType.pull_request, ) if not vm_image_builder_result: flexmock(UpdateImageBuildHelper).should_receive("vm_image_builder").and_return( flexmock() .should_receive("image_builder_request") .and_raise(HTTPError("unknown ex")) .mock() ) else: flexmock(UpdateImageBuildHelper).should_receive("vm_image_builder").and_return( flexmock() .should_receive("image_builder_request") .and_return( flexmock() .should_receive("json") .and_return(vm_image_builder_result) .mock() ) .mock() ) flexmock(VMImageBuildResultEvent).should_receive("get_packages_config").and_return( flexmock( get_package_config_for=lambda job_config: flexmock(), get_job_views=lambda: [ flexmock( trigger=JobConfigTriggerType.pull_request, type=JobType.vm_image_build, manual_trigger=False, ) ], ) ) flexmock(VMImageBuildResultEvent).should_receive( "job_config_trigger_type" ).and_return(JobConfigTriggerType.pull_request) flexmock(VMImageBuildTargetModel).should_receive("get_all_by_build_id").with_args( 1 ).and_return( [ flexmock( status=None, runs=[ flexmock() .should_receive("get_project_event_object") .and_return(db_project_object) .mock() ], ) .should_receive("set_status") .with_args(build_status) .mock() ] ) flexmock(VMImageBuildResultHandler).should_receive("report_status") 
flexmock(ServiceConfig).should_receive("get_project").and_return() if stop_babysitting: flexmock(Pushgateway).should_receive("push").once().and_return() assert ( update_vm_image_build( 1, flexmock( build_id=1, project_url="an url", target="a target", get_pr_id=lambda: 21, owner="owner", commit_sha="123456", manual_trigger=False, ), ) == stop_babysitting )
299,138
tst 02
from __future__ import division, print_function import sys, os, time from libtbx import group_args import networkx as nx from packaging import version import libtbx.load_env data_dir = libtbx.env.under_dist( module_name="mmtbx", path="regression", test=os.path.isdir) from mmtbx.domains_from_pae import get_domain_selections_from_pae_matrix pae_file=os.path.join(data_dir,'pae.json') model_file=os.path.join(data_dir, 'pdbs','pae_model.pdb') pae_file_v3=os.path.join(data_dir,'AF_json_v3.json') model_file_v3=os.path.join(data_dir, 'pdbs','AF_json_v3.pdb') from iotbx.data_manager import DataManager dm = DataManager() distance_model = dm.get_model(model_file) distance_model.add_crystal_symmetry_if_necessary() distance_model_v3 = dm.get_model(model_file_v3) distance_model_v3.add_crystal_symmetry_if_necessary() def tst_01(log = sys.stdout): if version.parse(nx.__version__) < version.parse('2.6.2'): pae_power = 2.0 pae_cutoff = 5.0 resolution = 1.0 else: pae_power = 1.0 pae_cutoff = 5.0 resolution = 0.5 args = group_args( group_args_type = 'parameters', pae_file = pae_file, library = 'networkx', pae_power = pae_power, pae_cutoff = pae_cutoff, resolution = resolution, select_range = False) selections = get_domain_selections_from_pae_matrix(pae_file = args.pae_file, library = args.library, pae_power = args.pae_power, pae_cutoff = args.pae_cutoff, graph_resolution = args.resolution,) if version.parse(nx.__version__) < version.parse('2.6.2'): assert selections == [ "(resseq 0:113) or (resseq 184:187)", "(resseq 114:182) or (resseq 188:291)", "(resseq 183:183)", "(resseq 292:308)" ], selections else: assert selections == ['(resseq 0:117) or (resseq 181:181) or (resseq 183:187)', '(resseq 118:180) or (resseq 182:182) or (resseq 188:308)'], selections def METHOD_NAME(log = sys.stdout): if version.parse(nx.__version__) < version.parse('2.6.2'): pae_power = 2.0 pae_cutoff = 5.0 resolution = 1.0 else: pae_power = 1.0 pae_cutoff = 5.0 resolution = 0.5 args = group_args( group_args_type 
= 'parameters', pae_file = pae_file, library = 'networkx', pae_power = pae_power, pae_cutoff = pae_cutoff, resolution = resolution, weight_by_ca_ca_distance = 1.0, distance_power = 1.0, distance_model = distance_model, select_range = False) selections = get_domain_selections_from_pae_matrix(pae_file = args.pae_file, library=args.library, pae_power = args.pae_power, pae_cutoff = args.pae_cutoff, graph_resolution = args.resolution, weight_by_ca_ca_distance = args.weight_by_ca_ca_distance, distance_power = args.distance_power, distance_model = args.distance_model) if version.parse(nx.__version__) < version.parse('2.6.2'): assert selections == [ "(resseq 0:1) or (resseq 22:113) or (resseq 184:187)", "(resseq 2:21)", "(resseq 114:183) or (resseq 188:291)", "(resseq 292:308)" ] else: assert selections == ['(resseq 0:24)', '(resseq 25:62) or (resseq 66:96) or (resseq 100:111)', '(resseq 63:65)', '(resseq 97:99)', '(resseq 112:117)', '(resseq 118:135) or (resseq 142:181) or (resseq 190:199)', '(resseq 136:141)', '(resseq 182:189)', '(resseq 200:269) or (resseq 274:288)', '(resseq 270:273)', '(resseq 289:308)'], selections def tst_03(log = sys.stdout): if version.parse(nx.__version__) < version.parse('2.6.2'): pae_power = 2.0 pae_cutoff = 5.0 resolution = 1.0 else: pae_power = 1.0 pae_cutoff = 5.0 resolution = 0.5 args = group_args( group_args_type = 'parameters', pae_file = pae_file_v3, library = 'networkx', pae_power = pae_power, pae_cutoff = pae_cutoff, resolution = resolution, weight_by_ca_ca_distance = 1.0, distance_power = 1.0, distance_model = distance_model_v3, select_range = False) selections = get_domain_selections_from_pae_matrix(pae_file = args.pae_file, library=args.library, pae_power = args.pae_power, pae_cutoff = args.pae_cutoff, graph_resolution = args.resolution, weight_by_ca_ca_distance = args.weight_by_ca_ca_distance, distance_power = args.distance_power, distance_model = args.distance_model) if version.parse(nx.__version__) < version.parse('2.6.2'): 
assert selections == ['(resseq 0:8)', '(resseq 9:16)', '(resseq 17:85) or (resseq 95:226) or (resseq 229:253) or (resseq 258:330)', '(resseq 86:93)', '(resseq 94:94)', '(resseq 227:228)', '(resseq 254:257)', '(resseq 331:334)'], selections else: assert selections == ['(resseq 0:5)', '(resseq 6:14)', '(resseq 15:35) or (resseq 44:59) or (resseq 67:83) or (resseq 97:111) or (resseq 121:135) or (resseq 145:156) or (resseq 172:177)', '(resseq 36:43) or (resseq 60:66)', '(resseq 84:86)', '(resseq 87:92)', '(resseq 93:96)', '(resseq 112:120) or (resseq 136:144) or (resseq 157:171) or (resseq 179:180)', '(resseq 178:178) or (resseq 181:181) or (resseq 199:208)', '(resseq 182:192) or (resseq 209:214)', '(resseq 193:198) or (resseq 215:223) or (resseq 235:249) or (resseq 266:285) or (resseq 293:309) or (resseq 318:332)', '(resseq 224:226)', '(resseq 227:234)', '(resseq 250:254)', '(resseq 255:259)', '(resseq 260:265)', '(resseq 286:292) or (resseq 310:317)', '(resseq 333:334)'], selections if __name__ == "__main__": t0 = time.time() tst_01() print ("Time 01: ", time.time()-t0) t1 = time.time() METHOD_NAME() print ("Time 02: ", time.time()-t1) tst_03() print ("Time 03: ", time.time()-t1) print ("OK")
299,139
is auto deployment
from typing import List from cloudharness.utils.config import CloudharnessConfig, ConfigObject from cloudharness.models import ApplicationConfig class ConfigurationCallException(Exception): pass class ApplicationConfiguration(ApplicationConfig): def __new__(cls, *args, **kwargs): if len(args) == 1 and type(args[0]) == dict: return ApplicationConfiguration.from_dict(args[0]) return super().__new__(cls, *args, **kwargs) def __init__(self, *args, **kargs): if len(args) == 1 and type(args[0]) == dict: return ApplicationConfig.__init__(self, *args, **kargs) self.__conf = None def is_auto_service(self) -> bool: return self.harness.service.auto def METHOD_NAME(self) -> bool: return self.harness.deployment.auto def is_auto_db(self) -> bool: return self.harness.database.auto def is_sentry_enabled(self) -> bool: return self.harness["sentry"] def get_db_connection_string(self, **kwargs) -> str: if not self.is_auto_db(): raise ConfigurationCallException( f"Cannot get configuration string: application {self.name} has no database enabled.") if self.db_type == 'mongo': return f"mongodb://{self.harness.database.user}:{self.harness.database['pass']}@{self.db_name}:{self.harness.database.mongo.ports[0]['port']}/" elif self.db_type == 'postgres': database_name = kwargs.get('database_name', self.harness.database.postgres['initialdb']) return f"postgres://{self.db_name}:{self.harness.database.postgres.ports[0]['port']}/" \ f"{database_name}?user={self.harness.database.user}&password={self.harness.database['pass']}" elif self.db_type == 'neo4j': return f"{self.harness.database.neo4j.get('ports')[1]['name']}://{self.db_name}:" \ f"{self.harness.database.neo4j.get('ports')[1]['port']}/" else: raise NotImplementedError( f'Database connection string discovery not yet supported for database type {self.db_type}') @property def db_name(self) -> str: return self.harness.database.name @property def conf(self): """ Legacy object """ if self.__conf is None: self.__conf = 
ConfigObject(self.to_dict()) return self.__conf @property def image_name(self) -> str: return self.harness.deployment.image @property def db_type(self) -> str: return self.harness.database.type @property def service_name(self) -> str: name = self.harness.service.name if not name: raise ConfigurationCallException( f"Cannot get service address for {self.name}: auto service is not enabled") return name @property def service_port(self) -> int: port = self.harness.service.port if not port: raise ConfigurationCallException( f"Cannot get service port for {self.name}: auto service is not enabled") return port def get_service_address(self) -> str: return f"http://{self.service_name}.{CloudharnessConfig.get_namespace()}:{self.service_port}" def get_public_address(self) -> str: if not self.harness.subdomain: raise ConfigurationCallException( f"Cannot get public address for {self.name}: no subdomain is specified for this appplication.") return f"http{'s' if CloudharnessConfig.is_secured() else ''}://{self.harness.subdomain}.{CloudharnessConfig.get_domain()}" def get_configurations(**kwargs) -> List[ApplicationConfiguration]: return [ApplicationConfiguration(conf) for conf in CloudharnessConfig.get_application_by_filter(**kwargs)] def get_configuration(app_name) -> ApplicationConfiguration: conf = CloudharnessConfig.get_application_by_filter(harness__name=app_name) if len(conf) > 1: raise ConfigurationCallException( f'Application {app_name} is not unique inside the current deployment.') if not conf: raise ConfigurationCallException( f'Application {app_name} is not part of the current deployment.') return ApplicationConfiguration.from_dict(conf[0]) def get_current_configuration() -> ApplicationConfiguration: """ Get the configuration for the "current" application Returns: ApplicationConfiguration """ try: return get_configuration(CloudharnessConfig.get_current_app_name()) except Exception as e: raise ConfigurationCallException( f'Configuration error: cannot find current app - 
check env variable CH_CURRENT_APP_NAME') from e
299,140
get swp interfaces
#!/usr/bin/python import argparse import sys import subprocess import os """ This script prints to stdout /etc/network/interfaces entries for requested interfaces. Currently it supports generation of interfaces(5) section for all swp interfaces on the system. And also an interface section for a bridge with all swp ports. Example use of this script: generate the swp_defaults file: (bkup existing /etc/network/interfaces.d/swp_defaults file if one exists) #generate_interfaces.py -s > /etc/network/interfaces.d/swp_defaults User -m option if you want the new swp_defaults to be auto merged with the contents from the old file, use -m option #generate_interfaces.py -s -m /etc/network/interfaces.d/swp_defaults > /etc/network/interfaces.d/swp_defaults.new Include the swp_defaults file in /etc/network/interfaces file (if not already there) using the source command as shown below: source /etc/network/interfaces.d/swp_defaults """ def get_pci_interfaces(): ports = [] FNULL = open(os.devnull, 'w') try: cmd = '(ip -o link show | grep -v "@" | cut -d" " -f2 | sed \'s/:$//\')' output = subprocess.check_output(cmd, shell=True).split() for interface in output: cmd = 'udevadm info -a -p /sys/class/net/%s | grep \'SUBSYSTEMS=="pci"\'' % interface try: subprocess.check_call(cmd, shell=True, stdout=FNULL) ports.append(interface) except Exception: pass except Exception: pass finally: FNULL.close() return ports def METHOD_NAME(): porttab_path = '/var/lib/cumulus/porttab' ports = [] try: with open(porttab_path, 'r') as f: for line in f.readlines(): line = line.strip() if '#' in line: continue try: ports.append(line.split()[0]) except ValueError: continue except Exception: try: ports = get_pci_interfaces() except Exception as e: print('Error: Unsupported script: %s' % str(e)) exit(1) if not ports: print('Error: No ports found in %s' % porttab_path) exit(1) return ports def print_swp_defaults_header(): print(''' # ** This file is autogenerated by 
/usr/share/doc/ifupdown2/generate_interfaces.py ** # # This is /etc/network/interfaces section for all available swp # ports on the system. # # To include this file in the main /etc/network/interfaces file, # copy this file under /etc/network/interfaces.d/ and use the # source line in the /etc/network/interfaces file. # # example entry in /etc/network/interfaces: # source /etc/network/interfaces.d/<filename> # # See manpage interfaces(5) for details. ''') def print_bridge_untagged_defaults_header(): print(''' # ** This file is autogenerated by /usr/share/doc/ifupdown2/generate_interfaces.py ** # # This is /etc/network/interfaces section for a bridge device with all swp # ports in the system. # # To include this file in the main /etc/network/interfaces file, # copy this file under /etc/network/interfaces.d/ and use the # source line in the /etc/network/interfaces file as shown below. # details. # # example entry in /etc/network/interfaces: # source /etc/network/interfaces.d/filename # # See manpage interfaces(5) for details ''') def interfaces_print_swp_default(swp_intf): outbuf = None if args.mergefile: try: cmd = ['/sbin/ifquery', '%s' %swp_intf, '-i', '%s' %args.mergefile] outbuf = subprocess.check_output(cmd, stderr=subprocess.STDOUT) except Exception as e: # no interface found gen latest pass if not outbuf: outbuf = 'auto %s\niface %s\n\n' %(swp_intf, swp_intf) return outbuf def interfaces_print_swp_defaults(swp_intfs): print_swp_defaults_header() outbuf = '' for i in swp_intfs: outbuf += interfaces_print_swp_default(i) print(outbuf) def interfaces_print_bridge_default(swp_intfs): print_bridge_untagged_defaults_header() outbuf = 'auto bridge-untagged\n' outbuf += 'iface bridge-untagged\n' outbuf += ' bridge-ports \\\n' linen = 5 ports = '' for i in range(0, len(swp_intfs), linen): if ports: ports += ' \\\n' ports += ' %s' %(' '.join(swp_intfs[i:i+linen])) outbuf += ports print(outbuf) def populate_argparser(argparser): group = 
argparser.add_mutually_exclusive_group(required=False) group.add_argument('-s', '--swp-defaults', action='store_true', dest='swpdefaults', help='generate swp defaults file') group.add_argument('-b', '--bridge-default', action='store_true', dest='bridgedefault', help='generate default untagged bridge') argparser.add_argument('-m', '--merge', dest='mergefile', help='merge ' + 'new generated iface content with the old one') argparser = argparse.ArgumentParser(description='ifupdown interfaces file gen helper') populate_argparser(argparser) args = argparser.parse_args(sys.argv[1:]) if not args.swpdefaults and not args.bridgedefault: argparser.print_help() exit(1) if args.bridgedefault and args.mergefile: print('error: mergefile option currently only supported with -s') argparser.print_help() exit(1) swp_intfs = METHOD_NAME() if args.swpdefaults: interfaces_print_swp_defaults(swp_intfs) elif args.bridgedefault: interfaces_print_bridge_default(swp_intfs) else: argparser.print_help()
299,141
should forward
# # This file is part of the PyRDP project. # Copyright (C) 2018, 2019 GoSecure Inc. # Licensed under the GPLv3 or later. # from binascii import hexlify from typing import Union from pyrdp.core import ObservedBy from pyrdp.enum import EncryptionMethod, SecurityFlags from pyrdp.layer.layer import IntermediateLayer, LayerObserver from pyrdp.logging import log from pyrdp.parser import BasicSecurityParser, ClientInfoParser, FIPSSecurityParser, SignedSecurityParser from pyrdp.pdu import ClientInfoPDU, PDU, SecurityExchangePDU, SecurityPDU from pyrdp.security import RC4Crypter, RC4CrypterProxy class SecurityObserver(LayerObserver): def onSecurityExchangeReceived(self, pdu: SecurityExchangePDU): """ Called when a Security Exchange PDU is received. """ pass def onClientInfoReceived(self, data: bytes): """ Called when client info data is received. """ pass def onLicensingDataReceived(self, data: bytes): """ Called when licensing data is received. """ pass @ObservedBy(SecurityObserver) class SecurityLayer(IntermediateLayer): """ Layer for security related traffic. """ def __init__(self, parser: BasicSecurityParser): """ :param parser: the parser to use for security traffic. """ super().__init__(parser) self.clientInfoParser = ClientInfoParser() @staticmethod def create(encryptionMethod: EncryptionMethod, crypter: Union[RC4Crypter, RC4CrypterProxy]) -> 'SecurityLayer': """ Create a security layer using the chosen encryption method and crypter. 
""" if encryptionMethod in [EncryptionMethod.ENCRYPTION_40BIT, EncryptionMethod.ENCRYPTION_56BIT, EncryptionMethod.ENCRYPTION_128BIT]: parser = SignedSecurityParser(crypter) return SecurityLayer(parser) elif encryptionMethod == EncryptionMethod.ENCRYPTION_FIPS: parser = FIPSSecurityParser(crypter) return SecurityLayer(parser) def recv(self, data: bytes): pdu: SecurityPDU = self.mainParser.parse(data) try: self.dispatchPDU(pdu) except KeyboardInterrupt: raise except Exception: if isinstance(pdu, SecurityExchangePDU): log.error("Exception occurred when receiving Security Exchange. Data: %(securityExchangeData)s", {"securityExchangeData": hexlify(data)}) raise def dispatchPDU(self, pdu: SecurityPDU): """ Send the PDU to the proper object depending on its type. :param pdu: the pdu. """ if pdu.header & SecurityFlags.SEC_EXCHANGE_PKT != 0: if self.observer: self.observer.onSecurityExchangeReceived(pdu) elif pdu.header & SecurityFlags.SEC_INFO_PKT != 0: if self.observer: self.observer.onClientInfoReceived(pdu.payload) elif pdu.header & SecurityFlags.SEC_LICENSE_PKT != 0: if self.observer: self.observer.onLicensingDataReceived(pdu.payload) else: self.pduReceived(pdu) def sendBytes(self, data: bytes, header = 0): pdu = SecurityPDU(header, data) self.sendPDU(pdu) def sendSecurityExchange(self, clientRandom: bytes): """ Send a security exchange PDU through the layer. :param clientRandom: the client random data. """ pdu = SecurityExchangePDU(SecurityFlags.SEC_EXCHANGE_PKT, clientRandom + b"\x00" * 8) data = self.mainParser.writeSecurityExchange(pdu) self.previous.sendBytes(data) def sendClientInfo(self, pdu: ClientInfoPDU): """ Send a client info PDU. """ data = self.clientInfoParser.write(pdu) pdu = SecurityPDU(SecurityFlags.SEC_INFO_PKT, data) self.sendPDU(pdu) def sendLicensing(self, data: bytes): """ Send raw licensing data. 
""" pdu = SecurityPDU(SecurityFlags.SEC_LICENSE_PKT, data) self.sendPDU(pdu) def METHOD_NAME(self, pdu: PDU) -> bool: return True class TLSSecurityLayer(SecurityLayer): """ Security layer used when the connection uses TLS. If securityHeadExpected is True, then the layer expects to receive a basic security header. Otherwise, the layer just forwards all the data it receives to the next layer. """ def __init__(self, parser = BasicSecurityParser()): super().__init__(parser) self.securityHeaderExpected = False def recv(self, data: bytes): # Licensing happens in the security layer licensingBytes = b"\x80\x00" if not self.securityHeaderExpected and data[0:2] != licensingBytes: if self.next is not None: self.next.recv(data) else: SecurityLayer.recv(self, data) def sendBytes(self, data: bytes, header = 0): if not self.securityHeaderExpected: self.previous.sendBytes(data) else: SecurityLayer.sendBytes(self, data, header)
299,142
log table
# Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 """Base class for logger callback.""" from __future__ import annotations import pathlib from abc import ABC from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union import numpy as np import torch from composer.core.callback import Callback if TYPE_CHECKING: from composer.core import State __all__ = ['LoggerDestination'] class LoggerDestination(Callback, ABC): """Base class for logger destination. As this class extends :class:`~.callback.Callback`, logger destinations can run on any training loop :class:`.Event`. For example, it may be helpful to run on :attr:`.Event.EPOCH_END` to perform any flushing at the end of every epoch. Example: .. doctest:: >>> from composer.loggers import LoggerDestination >>> from composer.trainer import Trainer >>> class MyLogger(LoggerDestination): ... def log_hyperparameters(self, data): ... print(f'Batch {int(state.timestamp.batch)}: {data}') >>> logger = MyLogger() >>> trainer = Trainer( ... ..., ... loggers=[logger] ... ) Batch 0: {'num_nodes': ...} Batch 0: {'rank_zero_seed': ...} """ def log_hyperparameters(self, hyperparameters: Dict[str, Any]): """Log hyperparameters, configurations, and settings. Logs any parameter/configuration/setting that doesn't vary during the run. Args: hyperparameters (Dict[str, Any]): A dictionary mapping hyperparameter names (strings) to their values (Any). """ del hyperparameters # unused pass def METHOD_NAME(self, columns: List[str], rows: List[List[Any]], name: str = 'Table') -> None: """Log a table. Args: columns (List[str]): Names of the columns in the table. rows (List[List[Any]]): 2D row-oriented array of values. name (str): Name of table. (Default: ``'Table'``) """ del columns, rows, name pass def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: """Log metrics or parameters that vary during training. 
Args: metrics (Dict[str, float]): Dictionary mapping metric name (str) to metric scalar value (float) step (Optional[int], optional): The current step or batch of training at the time of logging. Defaults to None. If not specified the specific LoggerDestination implementation will choose a step (usually a running counter). """ del metrics, step # unused pass def log_traces(self, traces: Dict[str, Any]): """Log traces. Logs any debug-related data like algorithm traces. Args: traces (Dict[str, float]): Dictionary mapping trace names (str) to trace (Any). """ del traces pass def log_images( self, images: Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]], name: str = 'Images', channels_last: bool = False, step: Optional[int] = None, masks: Optional[Dict[str, Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]]]] = None, mask_class_labels: Optional[Dict[int, str]] = None, use_table: bool = True, ): """Log images. Logs any tensors or arrays as images. Args: images (np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]): Dictionary mapping image(s)' names (str) to an image of array of images. name (str): The name of the image(s). (Default: ``'Images'``) channels_last (bool): Whether the channel dimension is first or last. (Default: ``False``) step (Optional[int], optional): The current step or batch of training at the time of logging. Defaults to None. If not specified the specific LoggerDestination implementation will choose a step (usually a running counter). masks (Dict[str, np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]], optional): A dictionary mapping the mask name (e.g. predictions or ground truth) to a sequence of masks. mask_class_labels (Dict[int, str], optional): Dictionary mapping label id to its name. Used for labelling each color in the mask. use_table (bool): Whether to make a table of the images or not. (default: ``True``). Only for use with WandB. 
""" del images, name, channels_last, step, masks, mask_class_labels, use_table pass def upload_file( self, state: State, remote_file_name: str, file_path: pathlib.Path, *, overwrite: bool, ): """Handle uploading a file stored at ``file_path`` to a file named ``remote_file_name``. Subclasses should implement this method to store logged files (e.g. copy it to another folder or upload it to an object store). However, not all loggers need to implement this method. For example, the :class:`.TQDMLogger` does not implement this method, as it cannot handle file uploads. .. note:: * This method will block the training loop. For optimal performance, it is recommended that this method copy the file to a temporary directory, enqueue the copied file for processing, and return. Then, use a background thread(s) or process(s) to read from this queue to perform any I/O. * After this method returns, training can resume, and the contents of ``file_path`` may change (or be may deleted). Thus, if processing the file in the background (as is recommended), it is necessary to first copy the file to a temporary directory. Otherwise, the original file may no longer exist, or the logged file can be corrupted (e.g., if the logger destination is reading from file while the training loop is writing to it). .. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading. Args: state (State): The training state. remote_file_name (str): The name of the file. file_path (pathlib.Path): The file path. overwrite (bool, optional): Whether to overwrite an existing file with the same ``remote_file_name``. (default: ``False``) """ del state, remote_file_name, file_path, overwrite # unused pass def download_file( self, remote_file_name: str, destination: str, overwrite: bool = False, progress_bar: bool = True, ): """Handle downloading a file named ``remote_file_name`` to ``destination``. Args: remote_file_name (str): The name of the file. 
destination (str): The destination filepath. overwrite (bool): Whether to overwrite an existing file at ``destination``. Defaults to ``False``. progress_bar (bool, optional): Whether to show a progress bar. Ignored if ``path`` is a local file. (default: ``True``) """ del remote_file_name, destination, overwrite, progress_bar # unused raise NotImplementedError def can_upload_files(self) -> bool: """Indicates whether LoggerDestination can upload files. Defaults to false, should return True for derived logger classes that implement upload_file(). Returns: bool: Whether the class supports uploading files. """ return False
299,143
create user with roles
import uuid from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from django.test import TestCase, modify_settings, override_settings from model_bakery import baker from rest_framework.authtoken.models import Token from rest_framework.test import APIClient from accounts.models import User from agents.models import Agent from automation.models import Policy from core.models import CoreSettings from tacticalrmm.constants import CustomFieldModel, CustomFieldType from tacticalrmm.helpers import make_random_password if TYPE_CHECKING: from checks.models import Check from clients.models import Client, Site from core.models import CustomField from scripts.models import Script TEST_CACHE = { "default": { "BACKEND": "tacticalrmm.cache.TacticalDummyCache", } } @override_settings( CACHES=TEST_CACHE, DEBUG=False, ADMIN_ENABLED=False, ) @modify_settings( INSTALLED_APPS={ "remove": [ "django.contrib.admin", "django.contrib.messages", "django_extensions", "silk", ] }, MIDDLEWARE={ "remove": [ "silk.middleware.SilkyMiddleware", "django.contrib.messages.middleware.MessageMiddleware", ] }, ) class TacticalTestCase(TestCase): client: APIClient def authenticate(self) -> None: self.john = User(username="john") self.john.is_superuser = True self.john.set_password("hunter2") self.john.save() self.alice = User(username="alice") self.alice.is_superuser = True self.alice.set_password("hunter2") self.alice.save() self.setup_client() self.client.force_authenticate(user=self.john) User.objects.create_user( # type: ignore username=uuid.uuid4().hex, is_installer_user=True, password=make_random_password(len=60), # type: ignore ) def setup_client(self) -> None: self.client = APIClient() def setup_agent_auth(self, agent: "Agent") -> None: agent_user = User.objects.create_user( # type: ignore username=agent.agent_id, password=make_random_password(len=60), # type: ignore ) Token.objects.create(user=agent_user) # fixes tests waiting 2 minutes for mesh token to appear @override_settings( 
MESH_TOKEN_KEY="41410834b8bb4481446027f87d88ec6f119eb9aa97860366440b778540c7399613f7cabfef4f1aa5c0bd9beae03757e17b2e990e5876b0d9924da59bdf24d3437b3ed1a8593b78d65a72a76c794160d9", ) def setup_coresettings(self) -> None: self.coresettings = CoreSettings.objects.create() def check_not_authenticated(self, method: str, url: str) -> None: self.client.logout() r = getattr(self.client, method)(url) self.assertEqual(r.status_code, 401) def create_checks( self, parent: "Union[Policy, Agent]", script: "Optional[Script]" = None ) -> "List[Check]": # will create 1 of every check and associate it with the policy object passed check_recipes = [ "checks.diskspace_check", "checks.ping_check", "checks.cpuload_check", "checks.memory_check", "checks.winsvc_check", "checks.script_check", "checks.eventlog_check", ] parent_obj = {} if isinstance(parent, Policy): parent_obj["policy"] = parent else: parent_obj["agent"] = parent checks = [] for recipe in check_recipes: if not script: checks.append(baker.make_recipe(recipe, **parent_obj)) else: checks.append(baker.make_recipe(recipe, **parent_obj, script=script)) return checks def check_not_authorized( self, method: str, url: str, data: Optional[Dict[Any, Any]] = {} ) -> None: try: r = getattr(self.client, method)(url, data, format="json") self.assertEqual(r.status_code, 403) except KeyError: pass def check_authorized( self, method: str, url: str, data: Optional[Dict[Any, Any]] = {} ) -> Any: try: r = getattr(self.client, method)(url, data, format="json") self.assertNotEqual(r.status_code, 403) return r except KeyError: pass def check_authorized_superuser( self, method: str, url: str, data: Optional[Dict[Any, Any]] = {} ) -> Any: try: # create django superuser and test authorized user = baker.make("accounts.User", is_active=True, is_superuser=True) self.client.force_authenticate(user=user) r = getattr(self.client, method)(url, data, format="json") self.assertNotEqual(r.status_code, 403) # test role superuser user = 
self.METHOD_NAME(["is_superuser"]) self.client.force_authenticate(user=user) r = getattr(self.client, method)(url, data, format="json") self.assertNotEqual(r.status_code, 403) self.client.logout() return r # bypasses any data issues in the view since we just want to see if user is authorized except KeyError: pass def METHOD_NAME(self, roles: List[str]) -> User: new_role = baker.make("accounts.Role") for role in roles: setattr(new_role, role, True) new_role.save() return baker.make("accounts.User", role=new_role, is_active=True) def setup_base_instance(self): self.company1: "Client" = baker.make("clients.Client") self.company2: "Client" = baker.make("clients.Client") self.site1: "Site" = baker.make("clients.Site", client=self.company1) self.site2: "Site" = baker.make("clients.Site", client=self.company1) self.site3: "Site" = baker.make("clients.Site", client=self.company2) self.client_customfield: "CustomField" = baker.make( "core.CustomField", model=CustomFieldModel.CLIENT, type=CustomFieldType.TEXT, name="clientCustomField", ) self.site_customfield: "CustomField" = baker.make( "core.CustomField", model=CustomFieldModel.SITE, type=CustomFieldType.TEXT, name="siteCustomField", ) self.agent_customfield: "CustomField" = baker.make( "core.CustomField", model=CustomFieldModel.AGENT, type=CustomFieldType.TEXT, name="agentCustomField", )
299,144
get model params
import torch from torch import nn from ....core.alg_frame.client_trainer import ClientTrainer import logging class MyModelTrainer(ClientTrainer): def METHOD_NAME(self): return self.model.cpu().state_dict() def set_model_params(self, model_parameters): self.model.load_state_dict(model_parameters) def train(self, train_data, device, args): model = self.model model.to(device) model.train() # train and update criterion = nn.CrossEntropyLoss().to(device) if args.client_optimizer == "sgd": optimizer = torch.optim.SGD(self.model.parameters(), lr=args.learning_rate) else: optimizer = torch.optim.Adam( filter(lambda p: p.requires_grad, self.model.parameters()), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=True, ) epoch_loss = [] for epoch in range(args.epochs): batch_loss = [] for batch_idx, (x, labels) in enumerate(train_data): x, labels = x.to(device), labels.to(device) # logging.info("x.size = " + str(x.size())) # logging.info("labels.size = " + str(labels.size())) model.zero_grad() log_probs = model(x) loss = criterion(log_probs, labels) # pylint: disable=E1102 loss.backward() # to avoid nan loss # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5) optimizer.step() logging.info( "Update Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch, (batch_idx + 1) * args.batch_size, len(train_data) * args.batch_size, 100.0 * (batch_idx + 1) / len(train_data), loss.item(), ) ) batch_loss.append(loss.item()) epoch_loss.append(sum(batch_loss) / len(batch_loss)) # logging.info('Client Index = {}\tEpoch: {}\tLoss: {:.6f}'.format( # self.client_idx, epoch, sum(epoch_loss) / len(epoch_loss))) def test(self, test_data, device, args): model = self.model model.to(device) model.eval() metrics = { "test_correct": 0, "test_loss": 0, "test_precision": 0, "test_recall": 0, "test_total": 0, } """ stackoverflow_lr is the task of multi-label classification please refer to following links for detailed explainations on cross-entropy and corresponding implementation 
of tff research: https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451 https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131 """ if args.dataset == "stackoverflow_lr": criterion = nn.BCELoss(reduction="sum").to(device) else: criterion = nn.CrossEntropyLoss().to(device) with torch.no_grad(): for batch_idx, (x, target) in enumerate(test_data): x = x.to(device) target = target.to(device) pred = model(x) loss = criterion(pred, target) # pylint: disable=E1102 if args.dataset == "stackoverflow_lr": predicted = (pred > 0.5).int() correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum() true_positive = ((target * predicted) > 0.1).int().sum(axis=-1) precision = true_positive / (predicted.sum(axis=-1) + 1e-13) recall = true_positive / (target.sum(axis=-1) + 1e-13) metrics["test_precision"] += precision.sum().item() metrics["test_recall"] += recall.sum().item() else: _, predicted = torch.max(pred, 1) correct = predicted.eq(target).sum() metrics["test_correct"] += correct.item() metrics["test_loss"] += loss.item() * target.size(0) if len(target.size()) == 1: # metrics["test_total"] += target.size(0) elif len(target.size()) == 2: # for tasks of next word prediction metrics["test_total"] += target.size(0) * target.size(1) return metrics def test_on_the_server( self, train_data_local_dict, test_data_local_dict, device, args=None ) -> bool: return False
299,145
proc
import datetime import os from env import Env from utilities.converters import convert_datetime class BaseStatsProvider(object): one_minute = datetime.timedelta(minutes=1) one_day = datetime.timedelta(days=1) def __init__(self, interval=2880, stats_dir=None, stats_start=None, stats_end=None): self.interval = interval self.stats_dir = stats_dir self.stats_start = None self.stats_end = None self.init_period(stats_start, stats_end, interval) def init_period(self, stats_start, stats_end, interval): if stats_end is None: self.stats_end = datetime.datetime.now() else: self.stats_end = convert_datetime(stats_end) if stats_start is None: self.stats_start = self.stats_end - datetime.timedelta(minutes=interval) else: self.stats_start = convert_datetime(stats_start) delta = self.stats_end - self.stats_start interval = delta.days * 1440 + delta.seconds // 60 # discard seconds self.stats_start -= datetime.timedelta(seconds=self.stats_start.second) self.stats_end += datetime.timedelta(seconds=60 - self.stats_end.second) def get(self, stat_name): stat_name_provider = getattr(self, stat_name) if not stat_name_provider: print(stat_name, 'is not implemented') return [], [] return self._stat_transformer(stat_name_provider) def _stat_transformer(self, stat_provider): return [], [] class BaseStatsProviderUx(BaseStatsProvider): def __init__(self, interval=2880, stats_dir=None, stats_start=None, stats_end=None): super(BaseStatsProviderUx, self).__init__(interval, stats_dir, stats_start, stats_end) self.nodename = Env.nodename self.minutes_first_day = 60 * self.stats_end.hour + self.stats_end.minute + 1 self.ranges = [] i = 0 end = self.stats_end while end > self.stats_start: start = end - self.one_day if start < self.stats_start: start = self.stats_start if start.day != end.day: start = end - datetime.timedelta(hours=end.hour, minutes=end.minute) if start != end: self.ranges.append((start, end)) end = start - self.one_minute # print(self.stats_end, # interval, # [x.strftime("%Y-%m-%d 
%H:%M:%S")+" - "+y.strftime("%Y-%m-%d %H:%M:%S") for x, y in self.ranges]) def _stat_transformer(self, stat_provider): lines = [] cols = [] for start, end in self.ranges: date = start.strftime("%Y-%m-%d") day = start.strftime("%d") start = start.strftime("%H:%M:%S") end = end.strftime("%H:%M:%S") _cols, _lines = stat_provider(date, day, start, end) if len(_cols) == 0 or len(_lines) == 0: continue cols = _cols lines += _lines return cols, lines def sarfile(self, day): if self.stats_dir is None: stats_dir = os.path.join(os.sep, 'var', 'log', 'sysstat') if not os.path.exists(stats_dir): stats_dir = os.path.join(os.sep, 'var', 'log', 'sa') else: stats_dir = self.stats_dir f = os.path.join(stats_dir, 'sa' + day) if os.path.exists(f): return f return None def cpu(self, d, day, start, end): return [], [] def mem_u(self, d, day, start, end): return [], [] def METHOD_NAME(self, d, day, start, end): return [], [] def swap(self, d, day, start, end): return [], [] def block(self, d, day, start, end): return [], [] def blockdev(self, d, day, start, end): return [], [] def netdev(self, d, day, start, end): return [], [] def netdev_err(self, d, day, start, end): return [], [] if __name__ == "__main__": sp = BaseStatsProviderUx(interval=20) print(sp.get('cpu')) print(sp.get('swap'))
299,146
test opensearch convert unsupported
#!/usr/bin/env python3 # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # SPDX-License-Identifier: GPL-3.0-or-later import pathlib import pytest from scripts import importer _samples = pathlib.Path('tests/unit/scripts/importer_sample') def qm_expected(input_format): """Read expected quickmark-formatted output.""" return (_samples / input_format / 'quickmarks').read_text(encoding='utf-8') def bm_expected(input_format): """Read expected bookmark-formatted output.""" return (_samples / input_format / 'bookmarks').read_text(encoding='utf-8') def search_expected(input_format): """Read expected search-formatted (config.py) output.""" return (_samples / input_format / 'config_py').read_text(encoding='utf-8') def sample_input(input_format): """Get the sample input path.""" return str(_samples / input_format / 'input') def test_opensearch_convert(): urls = [ # simple search query ('http://foo.bar/s?q={searchTerms}', 'http://foo.bar/s?q={}'), # simple search query with supported additional parameter ('http://foo.bar/s?q={searchTerms}&enc={inputEncoding}', 'http://foo.bar/s?q={}&enc=UTF-8'), # same as above but with supported optional parameter ('http://foo.bar/s?q={searchTerms}&enc={inputEncoding?}', 'http://foo.bar/s?q={}&enc='), # unsupported-but-optional parameter ('http://foo.bar/s?q={searchTerms}&opt={unsupported?}', 'http://foo.bar/s?q={}&opt='), # unsupported-but-optional subset parameter ('http://foo.bar/s?q={searchTerms}&opt={unsupported:unsupported?}', 'http://foo.bar/s?q={}&opt=') ] for os_url, qb_url in urls: assert importer.opensearch_convert(os_url) == qb_url def METHOD_NAME(): """pass an unsupported, required parameter.""" with pytest.raises(KeyError): os_url = 'http://foo.bar/s?q={searchTerms}&req={unsupported}' importer.opensearch_convert(os_url) def test_chrome_bookmarks(capsys): """Read sample bookmarks from chrome profile.""" importer.import_chrome(sample_input('chrome'), ['bookmark'], 'bookmark') imported = 
capsys.readouterr()[0] assert imported == bm_expected('chrome') def test_chrome_quickmarks(capsys): """Read sample bookmarks from chrome profile.""" importer.import_chrome(sample_input('chrome'), ['bookmark'], 'quickmark') imported = capsys.readouterr()[0] assert imported == qm_expected('chrome') def test_chrome_searches(capsys): """Read sample searches from chrome profile.""" importer.import_chrome(sample_input('chrome'), ['search'], 'search') imported = capsys.readouterr()[0] assert imported == search_expected('chrome') def test_html_bookmarks(capsys): importer.import_html_bookmarks( sample_input('html'), ['bookmark', 'keyword'], 'bookmark') imported = capsys.readouterr()[0] assert imported == bm_expected('html') def test_html_quickmarks(capsys): importer.import_html_bookmarks( sample_input('html'), ['bookmark', 'keyword'], 'quickmark') imported = capsys.readouterr()[0] assert imported == qm_expected('html') def test_html_searches(capsys): importer.import_html_bookmarks( sample_input('html'), ['search'], 'search') imported = capsys.readouterr()[0] assert imported == search_expected('html') def test_mozilla_bookmarks(capsys): importer.import_moz_places( sample_input('mozilla'), ['bookmark', 'keyword'], 'bookmark') imported = capsys.readouterr()[0] assert imported == bm_expected('mozilla') def test_mozilla_quickmarks(capsys): importer.import_moz_places( sample_input('mozilla'), ['bookmark', 'keyword'], 'quickmark') imported = capsys.readouterr()[0] assert imported == qm_expected('mozilla') def test_mozilla_searches(capsys): importer.import_moz_places(sample_input('mozilla'), ['search'], 'search') imported = capsys.readouterr()[0] assert imported == search_expected('mozilla')
299,147
antenna id
import NuRadioReco.framework.event import NuRadioReco.framework.station from NuRadioReco.framework.parameters import showerParameters as shp from NuRadioReco.modules.io.coreas import coreas from NuRadioReco.utilities import units from radiotools import coordinatesystems import h5py import numpy as np import time import re import os import logging logger = logging.getLogger('readCoREASShower') class readCoREASShower: def __init__(self): self.__t = 0 self.__t_event_structure = 0 self.__t_per_event = 0 self.__input_files = None self.__current_input_file = None self.__det = None self.__ascending_run_and_event_number = None def begin(self, input_files, det=None, logger_level=logging.NOTSET, set_ascending_run_and_event_number=False): """ begin method initialize readCoREASShower module Parameters ---------- input_files: input files list of coreas hdf5 files det: genericDetector object If a genericDetector is passed, the stations from the CoREAS file will be added to it and the run method returns both the event and the detector logger_level: string or logging variable Set verbosity level for logger (default: logging.NOTSET) set_ascending_run_and_event_number: bool If set to True the run number and event id is set to self.__ascending_run_and_event_number instead of beeing taken from the simulation file. The value is increases monoton. 
This can be used to avoid ambiguities values (default: False) """ self.__input_files = input_files self.__current_input_file = 0 self.__det = det logger.setLevel(logger_level) self.__ascending_run_and_event_number = 1 if set_ascending_run_and_event_number else 0 def run(self): """ Reads in a CoREAS file and returns an event containing all simulated stations """ while (self.__current_input_file < len(self.__input_files)): t = time.time() t_per_event = time.time() filesize = os.path.getsize(self.__input_files[self.__current_input_file]) if(filesize < 18456 * 2): # based on the observation that a file with such a small filesize is corrupt logger.warning( "file {} seems to be corrupt, skipping to next file".format( self.__input_files[self.__current_input_file] ) ) self.__current_input_file += 1 continue logger.info('Reading %s ...' % self.__input_files[self.__current_input_file]) corsika = h5py.File(self.__input_files[self.__current_input_file], "r") logger.info("using coreas simulation {} with E={:2g} theta = {:.0f}".format( self.__input_files[self.__current_input_file], corsika['inputs'].attrs["ERANGE"][0] * units.GeV, corsika['inputs'].attrs["THETAP"][0])) f_coreas = corsika["CoREAS"] if self.__ascending_run_and_event_number: evt = NuRadioReco.framework.event.Event(self.__ascending_run_and_event_number, self.__ascending_run_and_event_number) self.__ascending_run_and_event_number += 1 else: evt = NuRadioReco.framework.event.Event(corsika['inputs'].attrs['RUNNR'], corsika['inputs'].attrs['EVTNR']) evt.__event_time = f_coreas.attrs["GPSSecs"] # create sim shower, no core is set since no external detector description is given sim_shower = coreas.make_sim_shower(corsika) sim_shower.set_parameter(shp.core, np.array([0, 0, f_coreas.attrs["CoreCoordinateVertical"] / 100])) # set core evt.add_sim_shower(sim_shower) # initialize coordinate transformation cs = coordinatesystems.cstrafo(sim_shower.get_parameter(shp.zenith), sim_shower.get_parameter(shp.azimuth), 
magnetic_field_vector=sim_shower.get_parameter(shp.magnetic_field_vector)) # add simulated pulses as sim station for idx, (name, observer) in enumerate(f_coreas['observers'].items()): station_id = METHOD_NAME(name, idx) # returns proper station id if possible station = NuRadioReco.framework.station.Station(station_id) if self.__det is None: sim_station = coreas.make_sim_station(station_id, corsika, observer, channel_ids=[0, 1, 2]) else: sim_station = coreas.make_sim_station(station_id, corsika, observer, channel_ids=self.__det.get_channel_ids(self.__det.get_default_station_id())) station.set_sim_station(sim_station) evt.set_station(station) if self.__det is not None: position = observer.attrs['position'] antenna_position = np.zeros(3) antenna_position[0], antenna_position[1], antenna_position[2] = -position[1] * units.cm, position[0] * units.cm, position[2] * units.cm antenna_position = cs.transform_from_magnetic_to_geographic(antenna_position) if not self.__det.has_station(station_id): self.__det.add_generic_station({ 'station_id': station_id, 'pos_easting': antenna_position[0], 'pos_northing': antenna_position[1], 'pos_altitude': antenna_position[2], 'reference_station': self.__det.get_reference_station_ids()[0] }) else: self.__det.add_station_properties_for_event({ 'pos_easting': antenna_position[0], 'pos_northing': antenna_position[1], 'pos_altitude': antenna_position[2] }, station_id, evt.get_run_number(), evt.get_id()) self.__t_per_event += time.time() - t_per_event self.__t += time.time() - t self.__current_input_file += 1 if self.__det is None: yield evt else: self.__det.set_event(evt.get_run_number(), evt.get_id()) yield evt, self.__det def end(self): from datetime import timedelta logger.setLevel(logging.INFO) dt = timedelta(seconds=self.__t) logger.info("total time used by this module is {}".format(dt)) logger.info("\tcreate event structure {}".format(timedelta(seconds=self.__t_event_structure))) logger.info("per event 
{}".format(timedelta(seconds=self.__t_per_event))) return dt def METHOD_NAME(antenna_name, default_id): """ This function parses the antenna names given in a CoREAS simulation and tries to find an ID It can be extended to other name patterns """ if re.match("AERA_", antenna_name): new_id = int(antenna_name.strip("AERA_")) return new_id else: return default_id
299,148
name
import logging import typing from django.db import models from django.db.models import JSONField from apps.api.permissions import RBACPermission from apps.slack.client import SlackClient from apps.slack.constants import SLACK_INVALID_AUTH_RESPONSE, SLACK_WRONG_TEAM_NAMES from apps.slack.errors import ( SlackAPIChannelNotFoundError, SlackAPIFetchMembersFailedError, SlackAPIInvalidAuthError, SlackAPITokenError, ) from apps.user_management.models.user import User from common.insight_log.chatops_insight_logs import ChatOpsEvent, ChatOpsTypePlug, write_chatops_insight_log if typing.TYPE_CHECKING: from django.db.models.manager import RelatedManager from apps.user_management.models import Organization logger = logging.getLogger(__name__) class SlackTeamIdentity(models.Model): organizations: "RelatedManager['Organization']" id = models.AutoField(primary_key=True) slack_id = models.CharField(max_length=100) cached_name = models.CharField(max_length=100, null=True, default=None) cached_app_id = models.CharField(max_length=100, null=True, default=None) access_token = models.CharField(max_length=100, null=True, default=None) bot_user_id = models.CharField(max_length=100, null=True, default=None) bot_access_token = models.CharField(max_length=100, null=True, default=None) oauth_scope = models.TextField(max_length=30000, null=True, default=None) detected_token_revoked = models.DateTimeField(null=True, default=None, verbose_name="Deleted At") is_profile_populated = models.BooleanField(default=False) datetime = models.DateTimeField(auto_now_add=True) installed_via_granular_permissions = models.BooleanField(default=True) installed_by = models.ForeignKey("SlackUserIdentity", on_delete=models.PROTECT, null=True, default=None) last_populated = models.DateTimeField(null=True, default=None) cached_bot_id = models.CharField(max_length=100, null=True, default=None) # response after oauth.access. 
This field is used to reinstall app to another OnCall workspace cached_reinstall_data = JSONField(null=True, default=None) class Meta: ordering = ("datetime",) def __str__(self): return f"{self.pk}: {self.METHOD_NAME}" def update_oauth_fields(self, user, organization, reinstall_data): logger.info(f"updated oauth_fields for sti {self.pk}") from apps.slack.models import SlackUserIdentity organization.slack_team_identity = self organization.save(update_fields=["slack_team_identity"]) slack_user_identity, _ = SlackUserIdentity.objects.get_or_create( slack_id=reinstall_data["authed_user"]["id"], slack_team_identity=self, ) user.slack_user_identity = slack_user_identity user.save(update_fields=["slack_user_identity"]) self.bot_access_token = reinstall_data["access_token"] self.bot_user_id = reinstall_data["bot_user_id"] self.oauth_scope = reinstall_data["scope"] self.cached_name = reinstall_data["team"]["name"] self.access_token = reinstall_data["authed_user"]["access_token"] self.installed_by = slack_user_identity self.cached_reinstall_data = None self.installed_via_granular_permissions = True self.save() write_chatops_insight_log( author=user, event_name=ChatOpsEvent.WORKSPACE_CONNECTED, chatops_type=ChatOpsTypePlug.SLACK.value ) def get_cached_channels(self, search_term=None, slack_id=None): queryset = self.cached_channels if search_term is not None: queryset = queryset.filter(name__startswith=search_term) if slack_id is not None: queryset = queryset.filter(slack_id=slack_id) return queryset.all() @property def bot_id(self): if self.cached_bot_id is None: sc = SlackClient(self) auth = sc.auth_test() self.cached_bot_id = auth.get("bot_id") self.save(update_fields=["cached_bot_id"]) return self.cached_bot_id @property def members(self): sc = SlackClient(self) next_cursor = None members = [] while next_cursor != "" or next_cursor is None: result = sc.users_list(cursor=next_cursor, team=self) next_cursor = result["response_metadata"]["next_cursor"] members += 
result["members"] return members @property def METHOD_NAME(self): if self.cached_name is None or self.cached_name in SLACK_WRONG_TEAM_NAMES: try: sc = SlackClient(self) result = sc.team_info() self.cached_name = result["team"]["name"] self.save() except SlackAPIInvalidAuthError: self.cached_name = SLACK_INVALID_AUTH_RESPONSE self.save() return self.cached_name @property def app_id(self): if not self.cached_app_id: sc = SlackClient(self) result = sc.bots_info(bot=self.bot_id) app_id = result["bot"]["app_id"] self.cached_app_id = app_id self.save(update_fields=["cached_app_id"]) return self.cached_app_id def get_users_from_slack_conversation_for_organization(self, channel_id, organization): sc = SlackClient(self) members = self.get_conversation_members(sc, channel_id) return organization.users.filter( slack_user_identity__slack_id__in=members, **User.build_permissions_query(RBACPermission.Permissions.CHATOPS_WRITE, organization), ) def get_conversation_members(self, slack_client: SlackClient, channel_id: str): try: return slack_client.paginated_api_call( "conversations_members", paginated_key="members", channel=channel_id )["members"] except (SlackAPITokenError, SlackAPIFetchMembersFailedError, SlackAPIChannelNotFoundError): return []
299,149
simple if
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. import dace from dace.codegen import control_flow as cf from dace.transformation.helpers import nest_state_subgraph, nest_sdfg_subgraph, nest_sdfg_control_flow from dace.sdfg import utils from dace.sdfg.graph import SubgraphView from dace.sdfg.state import StateSubgraphView import numpy as np @dace.program def nest_subgraph(A: dace.float64[1], B: dace.float64[1]): for i in dace.map[0:1]: with dace.tasklet: a << A[i] b >> B[i] b = a def test_nest_oneelementmap(): A, B = np.random.rand(1), np.random.rand(1) sdfg: dace.SDFG = nest_subgraph.to_sdfg() state: dace.SDFGState # Nest outer region for node, state in sdfg.all_nodes_recursive(): if isinstance(node, dace.nodes.MapEntry): subgraph = state.scope_subgraph(node) nest_state_subgraph(sdfg, state, subgraph) # Nest inner scope for node, state in sdfg.all_nodes_recursive(): if isinstance(node, dace.nodes.MapEntry): subgraph = state.scope_subgraph(node, include_entry=False, include_exit=False) nest_state_subgraph(state.parent, state, subgraph) sdfg(A=A, B=B) assert np.allclose(A, B) def test_internal_outarray(): sdfg = dace.SDFG('internal_outarr') sdfg.add_array('A', [20], dace.float64) state = sdfg.add_state() me, mx = state.add_map('_', dict(i='0:1')) t = state.add_tasklet('doit', {}, {'a'}, 'a = 0') w = state.add_write('A') state.add_nedge(me, t, dace.Memlet()) state.add_edge(t, 'a', w, None, dace.Memlet('A[1]')) state.add_nedge(w, mx, dace.Memlet()) subgraph = StateSubgraphView(state, [t, w]) nest_state_subgraph(sdfg, state, subgraph) a = np.random.rand(20) sdfg(A=a) assert a[1] == 0 def test_symbolic_return(): @dace.program def symbolic_return(): a = 6 for i in range(10): a = 5 a -= 1 return i, a sdfg = symbolic_return.to_sdfg() cft = cf.structured_control_flow_tree(sdfg, None) for_scope = None for i, child in enumerate(cft.children): if isinstance(child, (cf.ForScope, cf.WhileScope)): for_scope = child break assert for_scope assert i < 
len(cft.children) - 1 exit_scope = cft.children[i+1] assert isinstance(exit_scope, cf.SingleState) guard = for_scope.guard fexit = exit_scope.first_state states = list(utils.dfs_conditional(sdfg, [guard], lambda p, _: p is not fexit)) nest_sdfg_subgraph(sdfg, SubgraphView(sdfg, states), start=guard) result = sdfg() val = result[1][0] _, ref = symbolic_return.f() assert val == ref def test_nest_cf_simple_for_loop(): @dace.program def simple_for_loop(): A = np.ndarray((10,), dtype=np.int32) for i in range(10): A[i] = i return A sdfg = simple_for_loop.to_sdfg() nest_sdfg_control_flow(sdfg) assert np.array_equal(sdfg(), np.arange(10, dtype=np.int32)) def test_nest_cf_simple_while_loop(): def force_callback(f): return f @force_callback def update(x): return x + 1 @dace.program def simple_while_loop(): i = 0 A = np.ndarray((10,), dtype=np.int32) while i < 10: A[i] = i i = update(A[i]) return A sdfg = simple_while_loop.to_sdfg() nest_sdfg_control_flow(sdfg) assert np.array_equal(sdfg(update=update), np.arange(10, dtype=np.int32)) def test_nest_cf_simple_if(): @dace.program def METHOD_NAME(i: dace.int64): if i < 5: return 0 else: return 1 sdfg = METHOD_NAME.to_sdfg() nest_sdfg_control_flow(sdfg) assert sdfg(2)[0] == 0 assert sdfg(5)[0] == 1 def test_nest_cf_simple_if_elif(): @dace.program def simple_if_elif(i: dace.int64): if i < 2: return 0 elif i < 4: return 1 elif i < 6: return 2 elif i < 8: return 3 else: return 4 sdfg = simple_if_elif.to_sdfg() nest_sdfg_control_flow(sdfg) assert sdfg(0)[0] == 0 assert sdfg(2)[0] == 1 assert sdfg(4)[0] == 2 assert sdfg(7)[0] == 3 assert sdfg(15)[0] == 4 def test_nest_cf_simple_if_chain(): @dace.program def simple_if_chain(i: dace.int64): if i < 2: return 0 if i < 4: return 1 if i < 6: return 2 if i < 8: return 3 return 4 sdfg = simple_if_chain.to_sdfg() nest_sdfg_control_flow(sdfg) assert sdfg(0)[0] == 0 assert sdfg(2)[0] == 1 assert sdfg(4)[0] == 2 assert sdfg(7)[0] == 3 assert sdfg(15)[0] == 4 if __name__ == '__main__': 
test_nest_oneelementmap() test_internal_outarray() test_symbolic_return() test_nest_cf_simple_for_loop() test_nest_cf_simple_while_loop() test_nest_cf_simple_if() test_nest_cf_simple_if_elif() test_nest_cf_simple_if_chain()
299,150
get private link services for o365 management
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetPrivateLinkServicesForO365ManagementActivityAPIResult', 'AwaitableGetPrivateLinkServicesForO365ManagementActivityAPIResult', 'get_private_link_services_for_o365_management_activity_api', 'get_private_link_services_for_o365_management_activity_api_output', ] @pulumi.output_type class GetPrivateLinkServicesForO365ManagementActivityAPIResult: """ The description of the service. """ def __init__(__self__, etag=None, id=None, identity=None, kind=None, location=None, name=None, properties=None, system_data=None, tags=None, type=None): if etag and not isinstance(etag, str): raise TypeError("Expected argument 'etag' to be a str") pulumi.set(__self__, "etag", etag) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if identity and not isinstance(identity, dict): raise TypeError("Expected argument 'identity' to be a dict") pulumi.set(__self__, "identity", identity) if kind and not isinstance(kind, str): raise TypeError("Expected argument 'kind' to be a str") pulumi.set(__self__, "kind", kind) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, 
"system_data", system_data) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def etag(self) -> Optional[str]: """ An etag associated with the resource, used for optimistic concurrency when editing it. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> str: """ The resource identifier. """ return pulumi.get(self, "id") @property @pulumi.getter def identity(self) -> Optional['outputs.ServicesResourceResponseIdentity']: """ Setting indicating whether the service has a managed identity associated with it. """ return pulumi.get(self, "identity") @property @pulumi.getter def kind(self) -> str: """ The kind of the service. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> str: """ The resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ The resource name. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> 'outputs.ServicesPropertiesResponse': """ The common properties of a service. """ return pulumi.get(self, "properties") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Required property for system data """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ The resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ The resource type. 
""" return pulumi.get(self, "type") class AwaitableGetPrivateLinkServicesForO365ManagementActivityAPIResult(GetPrivateLinkServicesForO365ManagementActivityAPIResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetPrivateLinkServicesForO365ManagementActivityAPIResult( etag=self.etag, id=self.id, identity=self.identity, kind=self.kind, location=self.location, name=self.name, properties=self.properties, system_data=self.system_data, tags=self.tags, type=self.type) def METHOD_NAME(resource_group_name: Optional[str] = None, resource_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkServicesForO365ManagementActivityAPIResult: """ Get the metadata of a privateLinkServicesForO365ManagementActivityAPI resource. :param str resource_group_name: The name of the resource group that contains the service instance. :param str resource_name: The name of the service instance. """ __args__ = dict() __args__['resourceGroupName'] = resource_group_name __args__['resourceName'] = resource_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:securityandcompliance/v20210308:getPrivateLinkServicesForO365ManagementActivityAPI', __args__, opts=opts, typ=GetPrivateLinkServicesForO365ManagementActivityAPIResult).value return AwaitableGetPrivateLinkServicesForO365ManagementActivityAPIResult( etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), identity=pulumi.get(__ret__, 'identity'), kind=pulumi.get(__ret__, 'kind'), location=pulumi.get(__ret__, 'location'), name=pulumi.get(__ret__, 'name'), properties=pulumi.get(__ret__, 'properties'), system_data=pulumi.get(__ret__, 'system_data'), tags=pulumi.get(__ret__, 'tags'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(METHOD_NAME) def get_private_link_services_for_o365_management_activity_api_output(resource_group_name: Optional[pulumi.Input[str]] = None, 
resource_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkServicesForO365ManagementActivityAPIResult]: """ Get the metadata of a privateLinkServicesForO365ManagementActivityAPI resource. :param str resource_group_name: The name of the resource group that contains the service instance. :param str resource_name: The name of the service instance. """ ...
299,151
get system spec
import numpy as np from collections import namedtuple from pycalphad.core.minimizer import SystemSpecification SolverResult = namedtuple('SolverResult', ['converged', 'x', 'chemical_potentials']) class SolverBase(object): """"Base class for solvers.""" ignore_convergence = False def solve(self, composition_sets, conditions): """ *Implement this method.* Minimize the energy under the specified conditions using the given candidate composition sets. Parameters ---------- composition_sets : List[pycalphad.core.composition_set.CompositionSet] List of CompositionSet objects in the starting point. Modified in place. conditions : OrderedDict[str, float] Conditions to satisfy. Returns ------- pycalphad.core.solver.SolverResult """ raise NotImplementedError("A subclass of Solver must be implemented.") class Solver(SolverBase): def __init__(self, verbose=False, remove_metastable=True, **options): self.verbose = verbose self.remove_metastable = remove_metastable def METHOD_NAME(self, composition_sets, conditions): """ Create a SystemSpecification object for the specified conditions. Parameters ---------- composition_sets : List[pycalphad.core.composition_set.CompositionSet] List of CompositionSet objects in the starting point. Modified in place. conditions : OrderedDict[str, float] Conditions to satisfy. 
Returns ------- SystemSpecification """ compsets = composition_sets state_variables = compsets[0].phase_record.state_variables nonvacant_elements = compsets[0].phase_record.nonvacant_elements num_statevars = len(state_variables) num_components = len(nonvacant_elements) chemical_potentials = np.zeros(num_components) prescribed_elemental_amounts = [] prescribed_element_indices = [] for cond, value in conditions.items(): if str(cond).startswith('X_'): el = str(cond)[2:] el_idx = list(nonvacant_elements).index(el) prescribed_elemental_amounts.append(float(value)) prescribed_element_indices.append(el_idx) prescribed_element_indices = np.array(prescribed_element_indices, dtype=np.int32) prescribed_elemental_amounts = np.array(prescribed_elemental_amounts) prescribed_system_amount = conditions.get('N', 1.0) fixed_chemical_potential_indices = np.array([nonvacant_elements.index(key[3:]) for key in conditions.keys() if key.startswith('MU_')], dtype=np.int32) free_chemical_potential_indices = np.array(sorted(set(range(num_components)) - set(fixed_chemical_potential_indices)), dtype=np.int32) for fixed_chempot_index in fixed_chemical_potential_indices: el = nonvacant_elements[fixed_chempot_index] chemical_potentials[fixed_chempot_index] = conditions.get('MU_' + str(el)) fixed_statevar_indices = [] for statevar_idx, statevar in enumerate(state_variables): if str(statevar) in [str(k) for k in conditions.keys()]: fixed_statevar_indices.append(statevar_idx) free_statevar_indices = np.array(sorted(set(range(num_statevars)) - set(fixed_statevar_indices)), dtype=np.int32) fixed_statevar_indices = np.array(fixed_statevar_indices, dtype=np.int32) fixed_stable_compset_indices = np.array([i for i, compset in enumerate(compsets) if compset.fixed], dtype=np.int32) spec = SystemSpecification(num_statevars, num_components, prescribed_system_amount, chemical_potentials, prescribed_elemental_amounts, prescribed_element_indices, free_chemical_potential_indices, free_statevar_indices, 
fixed_chemical_potential_indices, fixed_statevar_indices, fixed_stable_compset_indices) return spec def solve(self, composition_sets, conditions): """ Minimize the energy under the specified conditions using the given candidate composition sets. Parameters ---------- composition_sets : List[pycalphad.core.composition_set.CompositionSet] List of CompositionSet objects in the starting point. Modified in place. conditions : OrderedDict[str, float] Conditions to satisfy. Returns ------- SolverResult """ spec = self.METHOD_NAME(composition_sets, conditions) state = spec.get_new_state(composition_sets) converged = spec.run_loop(state, 1000) if self.remove_metastable: phase_idx = 0 compsets_to_remove = [] for compset in composition_sets: # Mark unstable phases for removal if compset.NP <= 0.0 and not compset.fixed: compsets_to_remove.append(int(phase_idx)) phase_idx += 1 # Watch removal order here, as the indices of composition_sets are changing! for idx in reversed(compsets_to_remove): del composition_sets[idx] phase_amt = [compset.NP for compset in composition_sets] x = composition_sets[0].dof state_variables = composition_sets[0].phase_record.state_variables num_statevars = len(state_variables) for compset in composition_sets[1:]: x = np.r_[x, compset.dof[num_statevars:]] x = np.r_[x, phase_amt] chemical_potentials = np.array(state.chemical_potentials) if self.verbose: print('Chemical Potentials', chemical_potentials) print(np.asarray(x)) return SolverResult(converged=converged, x=x, chemical_potentials=chemical_potentials)
299,152
test zinb 0 gate
# Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 import math import pytest import torch from pyro.distributions import ( Delta, NegativeBinomial, Normal, Poisson, ZeroInflatedDistribution, ZeroInflatedNegativeBinomial, ZeroInflatedPoisson, ) from pyro.distributions.util import broadcast_shape from tests.common import assert_close @pytest.mark.parametrize("gate_shape", [(), (2,), (3, 1), (3, 2)]) @pytest.mark.parametrize("base_shape", [(), (2,), (3, 1), (3, 2)]) def test_zid_shape(gate_shape, base_shape): gate = torch.rand(gate_shape) base_dist = Normal(torch.randn(base_shape), torch.randn(base_shape).exp()) d = ZeroInflatedDistribution(base_dist, gate=gate) assert d.batch_shape == broadcast_shape(gate_shape, base_shape) assert d.support == base_dist.support d2 = d.expand([4, 3, 2]) assert d2.batch_shape == (4, 3, 2) @pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) def test_zip_0_gate(rate): # if gate is 0 ZIP is Poisson zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.zeros(1)) zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(-99.9)) pois = Poisson(torch.tensor(rate)) s = pois.sample((20,)) zip1_prob = zip1.log_prob(s) zip2_prob = zip2.log_prob(s) pois_prob = pois.log_prob(s) assert_close(zip1_prob, pois_prob) assert_close(zip2_prob, pois_prob) @pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) def test_zip_1_gate(rate): # if gate is 1 ZIP is Delta(0) zip1 = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.ones(1)) zip2 = ZeroInflatedPoisson(torch.tensor(rate), gate_logits=torch.tensor(math.inf)) delta = Delta(torch.zeros(1)) s = torch.tensor([0.0, 1.0]) zip1_prob = zip1.log_prob(s) zip2_prob = zip2.log_prob(s) delta_prob = delta.log_prob(s) assert_close(zip1_prob, delta_prob) assert_close(zip2_prob, delta_prob) @pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0]) @pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) def 
test_zip_mean_variance(gate, rate): num_samples = 1000000 zip_ = ZeroInflatedPoisson(torch.tensor(rate), gate=torch.tensor(gate)) s = zip_.sample((num_samples,)) expected_mean = zip_.mean estimated_mean = s.mean() expected_std = zip_.stddev estimated_std = s.std() assert_close(expected_mean, estimated_mean, atol=1e-02) assert_close(expected_std, estimated_std, atol=1e-02) @pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) @pytest.mark.parametrize("probs", [0.1, 0.5, 0.9]) def METHOD_NAME(total_count, probs): # if gate is 0 ZINB is NegativeBinomial zinb1 = ZeroInflatedNegativeBinomial( total_count=torch.tensor(total_count), gate=torch.zeros(1), probs=torch.tensor(probs), ) zinb2 = ZeroInflatedNegativeBinomial( total_count=torch.tensor(total_count), gate_logits=torch.tensor(-99.9), probs=torch.tensor(probs), ) neg_bin = NegativeBinomial(torch.tensor(total_count), probs=torch.tensor(probs)) s = neg_bin.sample((20,)) zinb1_prob = zinb1.log_prob(s) zinb2_prob = zinb2.log_prob(s) neg_bin_prob = neg_bin.log_prob(s) assert_close(zinb1_prob, neg_bin_prob) assert_close(zinb2_prob, neg_bin_prob) @pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) @pytest.mark.parametrize("probs", [0.1, 0.5, 0.9]) def test_zinb_1_gate(total_count, probs): # if gate is 1 ZINB is Delta(0) zinb1 = ZeroInflatedNegativeBinomial( total_count=torch.tensor(total_count), gate=torch.ones(1), probs=torch.tensor(probs), ) zinb2 = ZeroInflatedNegativeBinomial( total_count=torch.tensor(total_count), gate_logits=torch.tensor(math.inf), probs=torch.tensor(probs), ) delta = Delta(torch.zeros(1)) s = torch.tensor([0.0, 1.0]) zinb1_prob = zinb1.log_prob(s) zinb2_prob = zinb2.log_prob(s) delta_prob = delta.log_prob(s) assert_close(zinb1_prob, delta_prob) assert_close(zinb2_prob, delta_prob) @pytest.mark.parametrize("gate", [0.0, 0.25, 0.5, 0.75, 1.0]) @pytest.mark.parametrize("total_count", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0]) 
@pytest.mark.parametrize("logits", [-0.5, 0.5, -0.9, 1.9]) def test_zinb_mean_variance(gate, total_count, logits): num_samples = 1000000 zinb_ = ZeroInflatedNegativeBinomial( total_count=torch.tensor(total_count), gate=torch.tensor(gate), logits=torch.tensor(logits), ) s = zinb_.sample((num_samples,)) expected_mean = zinb_.mean estimated_mean = s.mean() expected_std = zinb_.stddev estimated_std = s.std() assert_close(expected_mean, estimated_mean, atol=1e-01) assert_close(expected_std, estimated_std, atol=1e-1)
299,153
main
#!/usr/bin/env python3 # SPDX-License-Identifier: LGPL-2.1-only # # Advanced cgxget/cgxset functionality test - '-b' '-g' <controller> (cgroup v2) # # Copyright (c) 2023 Oracle and/or its affiliates. # Author: Kamalesh Babulal <kamalesh.babulal@oracle.com> # from cgroup import Cgroup, CgroupVersion from systemd import Systemd from run import RunError import consts import ftests import sys import os CONTROLLER = 'cpu' SYSTEMD_CGNAME = '070_cg_in_scope' OTHER_CGNAME = '070_cg_not_in_scope' SLICE = 'libcgtests.slice' SCOPE = 'test070.scope' CONFIG_FILE_NAME = os.path.join(os.getcwd(), '070cgconfig.conf') CGRP_VER_V1 = CgroupVersion.CGROUP_V1 CGRP_VER_V2 = CgroupVersion.CGROUP_V2 TABLE = [ # writesetting, writeval, writever, readsetting, readval, readver ['cpu.shares', '512', CGRP_VER_V1, 'cpu.shares', '512', CGRP_VER_V1], ['cpu.shares', '512', CGRP_VER_V1, 'cpu.weight', '50', CGRP_VER_V2], ['cpu.weight', '200', CGRP_VER_V2, 'cpu.shares', '2048', CGRP_VER_V1], ['cpu.weight', '200', CGRP_VER_V2, 'cpu.weight', '200', CGRP_VER_V2], ['cpu.cfs_quota_us', '10000', CGRP_VER_V1, 'cpu.cfs_quota_us', '10000', CGRP_VER_V1], ['cpu.cfs_period_us', '100000', CGRP_VER_V1, 'cpu.cfs_period_us', '100000', CGRP_VER_V1], ['cpu.cfs_period_us', '50000', CGRP_VER_V1, 'cpu.max', '10000 50000', CGRP_VER_V2], ['cpu.cfs_quota_us', '-1', CGRP_VER_V1, 'cpu.cfs_quota_us', '-1', CGRP_VER_V1], ['cpu.cfs_period_us', '100000', CGRP_VER_V1, 'cpu.max', 'max 100000', CGRP_VER_V2], ['cpu.max', '5000 25000', CGRP_VER_V2, 'cpu.max', '5000 25000', CGRP_VER_V2], ['cpu.max', '6000 26000', CGRP_VER_V2, 'cpu.cfs_quota_us', '6000', CGRP_VER_V1], ['cpu.max', '7000 27000', CGRP_VER_V2, 'cpu.cfs_period_us', '27000', CGRP_VER_V1], ['cpu.max', 'max 40000', CGRP_VER_V2, 'cpu.max', 'max 40000', CGRP_VER_V2], ['cpu.max', 'max 41000', CGRP_VER_V2, 'cpu.cfs_quota_us', '-1', CGRP_VER_V1], ] def prereqs(config): result = consts.TEST_PASSED cause = None if CgroupVersion.get_version('cpu') != CgroupVersion.CGROUP_V2: result = 
consts.TEST_SKIPPED cause = 'This test requires the cgroup v2 cpu controller' return result, cause if config.args.container: result = consts.TEST_SKIPPED cause = 'This test cannot be run within a container' return result, cause def setup(config): result = consts.TEST_PASSED cause = None pid = Systemd.write_config_with_pid(config, CONFIG_FILE_NAME, SLICE, SCOPE) Cgroup.configparser(config, load_file=CONFIG_FILE_NAME) # create and check if the cgroup was created under the systemd default path if not Cgroup.create_and_validate(config, None, SYSTEMD_CGNAME): result = consts.TEST_FAILED cause = ( 'Failed to create systemd delegated cgroup {} under ' '/sys/fs/cgroup/{}/{}/'.format(SYSTEMD_CGNAME, SLICE, SCOPE) ) return result, cause # With cgroup v2, we can't enable controller for the child cgroup, while # a task is attached to test070.scope. Attach the task from test070.scope # to child cgroup SYSTEMD_CGNAME and then enable cpu controller in the parent, # so that the cgroup.get() works Cgroup.set(config, cgname=SYSTEMD_CGNAME, setting='cgroup.procs', value=pid) Cgroup.set( config, cgname=(os.path.join(SLICE, SCOPE)), setting='cgroup.subtree_control', value='+cpu', ignore_systemd=True ) # create and check if the cgroup was created under the controller root if not Cgroup.create_and_validate(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True): result = consts.TEST_FAILED cause = ( 'Failed to create cgroup {} under ' '/sys/fs/cgroup/{}/'.format(OTHER_CGNAME, CONTROLLER) ) return result, cause def test(config): result = consts.TEST_PASSED cause = None cgrps = {SYSTEMD_CGNAME: False, OTHER_CGNAME: True} for i in cgrps: for entry in TABLE: Cgroup.xset(config, cgname=i, setting=entry[0], value=entry[1], version=entry[2], ignore_systemd=cgrps[i]) out = Cgroup.xget(config, cgname=i, setting=entry[3], version=entry[5], values_only=True, print_headers=False, ignore_systemd=cgrps[i]) if out != entry[4]: result = consts.TEST_FAILED tmp_cause = ( 'After setting {}={}, expected 
{}={}, but received ' '{}={}'.format(entry[0], entry[1], entry[3], entry[4], entry[3], out) ) cause = '\n'.join(filter(None, [cause, tmp_cause])) return result, cause def teardown(config): Systemd.remove_scope_slice_conf(config, SLICE, SCOPE, CONTROLLER, CONFIG_FILE_NAME) # Incase the error occurs before the creation of OTHER_CGNAME, # let's ignore the exception try: Cgroup.delete(config, CONTROLLER, OTHER_CGNAME, ignore_systemd=True) except RunError as re: if 'No such file or directory' not in re.stderr: raise re def METHOD_NAME(config): [result, cause] = prereqs(config) if result != consts.TEST_PASSED: return [result, cause] [result, cause] = setup(config) if result != consts.TEST_PASSED: return [result, cause] try: [result, cause] = test(config) finally: teardown(config) return [result, cause] if __name__ == '__main__': config = ftests.parse_args() # this test was invoked directly. run only it config.args.num = int(os.path.basename(__file__).split('-')[0]) sys.exit(ftests.METHOD_NAME(config)) # vim: set et ts=4 sw=4:
299,154
test import statement relative local 1
# pyflyby/test_importstmt.py # License for THIS FILE ONLY: CC0 Public Domain Dedication # http://creativecommons.org/publicdomain/zero/1.0/ from pyflyby._flags import CompilerFlags from pyflyby._importstmt import Import, ImportSplit, ImportStatement def test_Import_from_parts_1(): imp = Import.from_parts(".foo.bar", "bar") assert imp.fullname == ".foo.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit(".foo", "bar", None) assert str(imp) == "from .foo import bar" def test_Import_from_split_1(): imp = Import(ImportSplit(".foo", "bar", None)) assert imp.fullname == ".foo.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit(".foo", "bar", None) assert str(imp) == "from .foo import bar" def test_Import_from_Statement_1(): imp = Import(ImportStatement("from foo import bar")) assert imp.fullname == "foo.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit("foo", "bar", None) assert str(imp) == "from foo import bar" assert imp == Import("from foo import bar") def test_Import_basic_1(): imp = Import("from foo.foof import bar") assert imp.fullname == "foo.foof.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit("foo.foof", "bar", None) assert str(imp) == "from foo.foof import bar" def test_Import_relative_1(): imp = Import("from .foo import bar") assert imp.fullname == ".foo.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit(".foo", "bar", None) assert str(imp) == "from .foo import bar" def test_Import_relative_local_1(): imp = Import("from . import foo") assert imp.fullname == ".foo" assert imp.import_as == "foo" assert imp.split == ImportSplit(".", "foo", None) assert str(imp) == "from . import foo" def test_Import_module_1(): imp = Import("import foo . bar") assert imp.fullname == "foo.bar" assert imp.import_as == "foo.bar" assert imp.split == ImportSplit(None, "foo.bar", None) assert str(imp) == "import foo.bar" def test_Import_import_as_1(): imp = Import("import foo . 
bar as baz") assert imp.fullname == "foo.bar" assert imp.import_as == "baz" assert imp.split == ImportSplit("foo", "bar", "baz") assert str(imp) == "from foo import bar as baz" def test_Import_import_as_same_1(): imp = Import("import foo . bar as bar") assert imp.fullname == "foo.bar" assert imp.import_as == "bar" assert imp.split == ImportSplit("foo", "bar", None) assert str(imp) == "from foo import bar" assert imp == Import("from foo import bar") def test_Import_eqne_1(): imp1a = Import("from foo import bar") imp1b = Import("from foo import bar") imp2 = Import("from .foo import bar") assert (imp1a == imp1b) assert not (imp1a != imp1b) assert (imp1a != imp2 ) assert not (imp1a == imp2 ) def test_Import_eqne_2(): imp1a = Import("from foo import bar") imp1b = Import("from foo import bar") imp2 = Import("from foo import bar as Bar") assert (imp1a == imp1b) assert not (imp1a != imp1b) assert (imp1a != imp2 ) assert not (imp1a == imp2 ) def test_Import_prefix_match_1(): result = Import("import ab.cd.ef").prefix_match(Import("import ab.cd.xy")) assert result == ('ab', 'cd') def test_Import_replace_1(): result = Import("from aa.bb import cc").replace("aa.bb", "xx.yy") assert result == Import('from xx.yy import cc') def test_Import_replace_2(): result = Import("from aa import bb").replace("aa.bb", "xx.yy") assert result == Import('from xx import yy as bb') def test_ImportStatement_1(): stmt = ImportStatement("import foo . 
bar") assert stmt.fromname == None assert stmt.aliases == (("foo.bar", None),) assert stmt.imports == (Import(ImportSplit(None, "foo.bar", None)),) assert str(stmt) == "import foo.bar" def test_ImportStatement_member_1(): stmt = ImportStatement("from foo import bar ") assert stmt.fromname == "foo" assert stmt.aliases == (("bar", None),) assert stmt.imports == (Import(ImportSplit("foo", "bar", None)),) assert str(stmt) == "from foo import bar" def test_ImportStatement_multi_1(): stmt = ImportStatement("from foo import bar, bar2, bar") assert stmt.fromname == "foo" assert stmt.aliases == (("bar", None), ("bar2", None), ("bar", None)) assert stmt.imports == (Import(ImportSplit("foo", "bar", None)), Import(ImportSplit("foo", "bar2", None)), Import(ImportSplit("foo", "bar", None))) assert str(stmt) == "from foo import bar, bar2, bar" def test_ImportStatement_alias_1(): stmt = ImportStatement("from foo import bar as bar, bar as baz") assert stmt.fromname == "foo" assert stmt.aliases == (("bar", "bar"), ("bar", "baz")) assert stmt.imports == (Import(ImportSplit("foo", "bar", "bar")), Import(ImportSplit("foo", "bar", "baz"))) assert str(stmt) == "from foo import bar as bar, bar as baz" def test_ImportStatement_deep_member_1(): stmt = ImportStatement("from foo.bar import baz") assert stmt.fromname == "foo.bar" assert stmt.aliases == (("baz", None),) assert stmt.imports == (Import(ImportSplit("foo.bar", "baz", None)),) assert str(stmt) == "from foo.bar import baz" def test_ImportStatement_relative_1(): stmt = ImportStatement("from .foo import bar") assert stmt.fromname == ".foo" assert stmt.aliases == (("bar", None),) assert stmt.imports == (Import(ImportSplit(".foo", "bar", None)),) assert str(stmt) == "from .foo import bar" def METHOD_NAME(): stmt = ImportStatement("from . import bar , bar2 as baz2") assert stmt.fromname == "." 
assert stmt.aliases == (("bar", None), ("bar2", "baz2")) assert stmt.imports == (Import(ImportSplit(".", "bar", None)), Import(ImportSplit(".", "bar2", "baz2"))) assert str(stmt) == "from . import bar, bar2 as baz2" def test_ImportStatement_flags_1(): stmt = ImportStatement("from __future__ import division, print_function") assert stmt.flags == CompilerFlags('division', 'print_function') def test_ImportStatement_flags_2(): stmt = ImportStatement("from _future__ import division, print_function") assert stmt.flags == CompilerFlags.from_int(0) def test_ImportStatement_eqne_1(): stmt1a = ImportStatement("from a import b" ) stmt1b = ImportStatement("from a import b" ) stmt2 = ImportStatement("from a import b, b") assert (stmt1a == stmt1b) assert not (stmt1a != stmt1b) assert (stmt1a != stmt2 ) assert not (stmt1a == stmt2 ) def test_ImportStatement_eqne_2(): stmt1a = ImportStatement("from a import b" ) stmt1b = ImportStatement("from a import b" ) stmt2 = ImportStatement("from a import b as b") assert (stmt1a == stmt1b) assert not (stmt1a != stmt1b) assert (stmt1a != stmt2 ) assert not (stmt1a == stmt2 )
299,155
create argument parser
#!/usr/bin/env python3 import argparse import sys from PerformanceHarness import performance_test_basic, performance_test from TestHarness import Utils from pathlib import Path, PurePath sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) class ScenarioArgumentsHandler(object): @staticmethod def METHOD_NAME(): scenarioParser = argparse.ArgumentParser(add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter) ptbParser=performance_test_basic.PtbArgumentsHandler.METHOD_NAME() ptParser=performance_test.PerfTestArgumentsHandler.METHOD_NAME() #Let top level performance harness parser know there will be sub-commands, and that a scenario type sub-command is required scenarioTypeDesc=("Each Scenario Type sets up either a Performance Test Basic or a Performance Test scenario and allows further configuration of the scenario.") scenarioParserSubparsers = scenarioParser.add_subparsers(title="Scenario Types", description=scenarioTypeDesc, dest="scenario_type_sub_cmd", required=True, help="Currently supported scenario type sub-commands.") #Create the Single Test Scenario Type Sub-Command and Parsers scenarioParserSubparsers.add_parser(name="singleTest", parents=[ptbParser], add_help=False, help="Run a single Performance Test Basic test scenario.") #Create the Find Max Test Scenario Type Sub-Command and Parsers scenarioParserSubparsers.add_parser(name="findMax", parents=[ptParser], add_help=False, help="Runs a Performance Test scenario that iteratively runs performance test basic test scenarios to determine a max tps.") return scenarioParser @staticmethod def parseArgs(): scenarioParser=ScenarioArgumentsHandler.METHOD_NAME() args=scenarioParser.parse_args() return args def main(): args = ScenarioArgumentsHandler.parseArgs() Utils.Debug = args.v testHelperConfig = performance_test_basic.PerformanceTestBasic.setupTestHelperConfig(args) testClusterConfig = performance_test_basic.PerformanceTestBasic.setupClusterConfig(args) if 
args.contracts_console and testClusterConfig.loggingLevel != "debug" and testClusterConfig.loggingLevel != "all": print("Enabling contracts-console will not print anything unless debug level is 'debug' or higher." f" Current debug level is: {testClusterConfig.loggingLevel}") if args.scenario_type_sub_cmd == "singleTest": ptbConfig = performance_test_basic.PerformanceTestBasic.PtbConfig(targetTps=args.target_tps, testTrxGenDurationSec=args.test_duration_sec, tpsLimitPerGenerator=args.tps_limit_per_generator, numAddlBlocksToPrune=args.num_blocks_to_prune, logDirRoot=".", delReport=args.del_report, quiet=args.quiet, delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, endpointMode=args.endpoint_mode, trxGenerator=args.trx_generator) Utils.Print(f"testNamePath: {PurePath(PurePath(__file__).name).stem}") myTest = performance_test_basic.PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig, testNamePath=f"{PurePath(PurePath(__file__).name).stem}") elif args.scenario_type_sub_cmd == "findMax": ptConfig = performance_test.PerformanceTest.PtConfig(testDurationSec=args.test_iteration_duration_sec, finalDurationSec=args.final_iterations_duration_sec, delPerfLogs=args.del_perf_logs, maxTpsToTest=args.max_tps_to_test, minTpsToTest=args.min_tps_to_test, testIterationMinStep=args.test_iteration_min_step, tpsLimitPerGenerator=args.tps_limit_per_generator, delReport=args.del_report, delTestReport=args.del_test_report, numAddlBlocksToPrune=args.num_blocks_to_prune, quiet=args.quiet, logDirRoot=Path("."), skipTpsTests=args.skip_tps_test, calcProducerThreads=args.calc_producer_threads, calcChainThreads=args.calc_chain_threads, calcNetThreads=args.calc_net_threads, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, endpointMode=args.endpoint_mode, 
opModeCmd=args.op_mode_sub_cmd, trxGenerator=args.trx_generator) myTest = performance_test.PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) else: Utils.Print(f"Unknown Scenario Type: {args.scenario_type_sub_cmd}") exit(-1) testSuccessful = myTest.runTest() exitCode = 0 if testSuccessful else 1 exit(exitCode) if __name__ == '__main__': main(
299,156
process
import os.path as osp from typing import Callable, List, Optional import numpy as np import torch from torch_geometric.data import InMemoryDataset, download_url from torch_geometric.io import read_planetoid_data class Planetoid(InMemoryDataset): r"""The citation network datasets :obj:`"Cora"`, :obj:`"CiteSeer"` and :obj:`"PubMed"` from the `"Revisiting Semi-Supervised Learning with Graph Embeddings" <https://arxiv.org/abs/1603.08861>`_ paper. Nodes represent documents and edges represent citation links. Training, validation and test splits are given by binary masks. Args: root (str): Root directory where the dataset should be saved. name (str): The name of the dataset (:obj:`"Cora"`, :obj:`"CiteSeer"`, :obj:`"PubMed"`). split (str, optional): The type of dataset split (:obj:`"public"`, :obj:`"full"`, :obj:`"geom-gcn"`, :obj:`"random"`). If set to :obj:`"public"`, the split will be the public fixed split from the `"Revisiting Semi-Supervised Learning with Graph Embeddings" <https://arxiv.org/abs/1603.08861>`_ paper. If set to :obj:`"full"`, all nodes except those in the validation and test sets will be used for training (as in the `"FastGCN: Fast Learning with Graph Convolutional Networks via Importance Sampling" <https://arxiv.org/abs/1801.10247>`_ paper). If set to :obj:`"geom-gcn"`, the 10 public fixed splits from the `"Geom-GCN: Geometric Graph Convolutional Networks" <https://openreview.net/forum?id=S1e2agrFvS>`_ paper are given. If set to :obj:`"random"`, train, validation, and test sets will be randomly generated, according to :obj:`num_train_per_class`, :obj:`num_val` and :obj:`num_test`. (default: :obj:`"public"`) num_train_per_class (int, optional): The number of training samples per class in case of :obj:`"random"` split. (default: :obj:`20`) num_val (int, optional): The number of validation samples in case of :obj:`"random"` split. (default: :obj:`500`) num_test (int, optional): The number of test samples in case of :obj:`"random"` split. 
(default: :obj:`1000`) transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. (default: :obj:`None`) pre_transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before being saved to disk. (default: :obj:`None`) **STATS:** .. list-table:: :widths: 10 10 10 10 10 :header-rows: 1 * - Name - #nodes - #edges - #features - #classes * - Cora - 2,708 - 10,556 - 1,433 - 7 * - CiteSeer - 3,327 - 9,104 - 3,703 - 6 * - PubMed - 19,717 - 88,648 - 500 - 3 """ url = 'https://github.com/kimiyoung/planetoid/raw/master/data' geom_gcn_url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/' 'geom-gcn/master') def __init__(self, root: str, name: str, split: str = "public", num_train_per_class: int = 20, num_val: int = 500, num_test: int = 1000, transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None): self.name = name self.split = split.lower() assert self.split in ['public', 'full', 'geom-gcn', 'random'] super().__init__(root, transform, pre_transform) self.load(self.processed_paths[0]) if split == 'full': data = self.get(0) data.train_mask.fill_(True) data.train_mask[data.val_mask | data.test_mask] = False self.data, self.slices = self.collate([data]) elif split == 'random': data = self.get(0) data.train_mask.fill_(False) for c in range(self.num_classes): idx = (data.y == c).nonzero(as_tuple=False).view(-1) idx = idx[torch.randperm(idx.size(0))[:num_train_per_class]] data.train_mask[idx] = True remaining = (~data.train_mask).nonzero(as_tuple=False).view(-1) remaining = remaining[torch.randperm(remaining.size(0))] data.val_mask.fill_(False) data.val_mask[remaining[:num_val]] = True data.test_mask.fill_(False) data.test_mask[remaining[num_val:num_val + num_test]] = True self.data, 
self.slices = self.collate([data]) @property def raw_dir(self) -> str: if self.split == 'geom-gcn': return osp.join(self.root, self.name, 'geom-gcn', 'raw') return osp.join(self.root, self.name, 'raw') @property def processed_dir(self) -> str: if self.split == 'geom-gcn': return osp.join(self.root, self.name, 'geom-gcn', 'processed') return osp.join(self.root, self.name, 'processed') @property def raw_file_names(self) -> List[str]: names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index'] return [f'ind.{self.name.lower()}.{name}' for name in names] @property def processed_file_names(self) -> str: return 'data.pt' def download(self): for name in self.raw_file_names: download_url(f'{self.url}/{name}', self.raw_dir) if self.split == 'geom-gcn': for i in range(10): url = f'{self.geom_gcn_url}/splits/{self.name.lower()}' download_url(f'{url}_split_0.6_0.2_{i}.npz', self.raw_dir) def METHOD_NAME(self): data = read_planetoid_data(self.raw_dir, self.name) if self.split == 'geom-gcn': train_masks, val_masks, test_masks = [], [], [] for i in range(10): name = f'{self.name.lower()}_split_0.6_0.2_{i}.npz' splits = np.load(osp.join(self.raw_dir, name)) train_masks.append(torch.from_numpy(splits['train_mask'])) val_masks.append(torch.from_numpy(splits['val_mask'])) test_masks.append(torch.from_numpy(splits['test_mask'])) data.train_mask = torch.stack(train_masks, dim=1) data.val_mask = torch.stack(val_masks, dim=1) data.test_mask = torch.stack(test_masks, dim=1) data = data if self.pre_transform is None else self.pre_transform(data) self.save([data], self.processed_paths[0]) def __repr__(self) -> str: return f'{self.name}()'
299,157
camo url
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import binascii import collections import datetime import enum import hmac import json import re import urllib.parse from email.utils import getaddresses import html5lib import html5lib.serializer import html5lib.treewalkers import jinja2 import packaging_legacy.version import pytz from natsort import natsorted from pyramid.threadlocal import get_current_request from urllib3.util import parse_url from warehouse.utils.http import is_valid_uri class PackageType(enum.Enum): bdist_dmg = "OSX Disk Image" bdist_dumb = "Dumb Binary" bdist_egg = "Egg" bdist_msi = "Windows MSI Installer" bdist_rpm = "RPM" bdist_wheel = "Wheel" bdist_wininst = "Windows Installer" sdist = "Source" def format_package_type(value): try: return PackageType[value].value except KeyError: return value def METHOD_NAME(request, url): camo_url = request.registry.settings["camo.url"].format(request=request) camo_key = request.registry.settings["camo.key"].encode("utf8") url = url.encode("utf8") path = "/".join( [ hmac.new(camo_key, url, digestmod="sha1").hexdigest(), binascii.hexlify(url).decode("utf8"), ] ) return urllib.parse.urljoin(camo_url, path) @jinja2.pass_context def camoify(ctx, value): request = ctx.get("request") or get_current_request() # Parse the rendered output and replace any inline images that don't point # to HTTPS with camouflaged images. 
tree_builder = html5lib.treebuilders.getTreeBuilder("dom") parser = html5lib.html5parser.HTMLParser(tree=tree_builder) dom = parser.parse(value) for element in dom.getElementsByTagName("img"): src = element.getAttribute("src") if src: element.setAttribute("src", request.camo_url(src)) tree_walker = html5lib.treewalkers.getTreeWalker("dom") html_serializer = html5lib.serializer.HTMLSerializer() camoed = "".join(html_serializer.serialize(tree_walker(dom))) return camoed _SI_SYMBOLS = ["k", "M", "G", "T", "P", "E", "Z", "Y"] def shorten_number(value): for i, symbol in enumerate(_SI_SYMBOLS): magnitude = value / (1000 ** (i + 1)) if magnitude >= 1 and magnitude < 1000: return f"{magnitude:.3g}{symbol}" return str(value) def tojson(value): return json.dumps(value, sort_keys=True, separators=(",", ":")) def urlparse(value): return parse_url(value) def format_tags(tags): # split tags if re.search(r",", tags): split_tags = re.split(r"\s*,\s*", tags) elif re.search(r";", tags): split_tags = re.split(r"\s*;\s*", tags) else: split_tags = re.split(r"\s+", tags) # strip whitespace, quotes, double quotes stripped_tags = [re.sub(r'^["\'\s]+|["\'\s]+$', "", t) for t in split_tags] # remove any empty tags formatted_tags = [t for t in stripped_tags if t] return formatted_tags def format_classifiers(classifiers): structured = collections.OrderedDict() # Split up our classifiers into our data structure for classifier in classifiers: key, *value = classifier.split(" :: ", 1) if value: if key not in structured: structured[key] = [] structured[key].append(value[0]) # Sort all the values in our data structure for key, value in structured.items(): structured[key] = natsorted(value) return structured def classifier_id(classifier): return classifier.replace(" ", "_").replace("::", ".") def contains_valid_uris(items): """Returns boolean representing whether the input list contains any valid URIs """ return any(is_valid_uri(i) for i in items) def parse_version(version_str): return 
packaging_legacy.version.parse(version_str) def localize_datetime(timestamp): return pytz.utc.localize(timestamp) def ctime(timestamp): return datetime.datetime.fromtimestamp(timestamp) def is_recent(timestamp): if timestamp: return timestamp + datetime.timedelta(days=30) > datetime.datetime.now() return False def format_email(metadata_email: str) -> tuple[str, str]: """ Return the name and email address from a metadata RFC-822 string. Use Jinja's `first` and `last` to access each part in a template. TODO: Support more than one email address, per RFC-822. """ emails = [] for name, email in getaddresses([metadata_email]): if "@" not in email: return name, "" emails.append((name, email)) return emails[0][0], emails[0][1] def remove_invalid_xml_unicode(value: str | None) -> str | None: """ Remove invalid unicode characters from a string. Useful for XML Templates. Ref: https://www.w3.org/TR/REC-xml/#NT-Char """ return "".join(c for c in value if ord(c) >= 32) if value else value def includeme(config): config.add_request_method(METHOD_NAME, name="camo_url")
299,158
test to api repr
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock class TestEncryptionConfiguration(unittest.TestCase): KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1" @staticmethod def _get_target_class(): from google.cloud.bigquery.encryption_configuration import ( EncryptionConfiguration, ) return EncryptionConfiguration def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor_defaults(self): encryption_config = self._make_one() self.assertIsNone(encryption_config.kms_key_name) def test_ctor_with_key(self): encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME) self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME) def test_kms_key_name_setter(self): encryption_config = self._make_one() self.assertIsNone(encryption_config.kms_key_name) encryption_config.kms_key_name = self.KMS_KEY_NAME self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME) encryption_config.kms_key_name = None self.assertIsNone(encryption_config.kms_key_name) def test_from_api_repr(self): RESOURCE = {"kmsKeyName": self.KMS_KEY_NAME} klass = self._get_target_class() encryption_config = klass.from_api_repr(RESOURCE) self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME) def METHOD_NAME(self): encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME) resource = encryption_config.to_api_repr() self.assertEqual(resource, {"kmsKeyName": self.KMS_KEY_NAME}) def 
test___eq___wrong_type(self): encryption_config = self._make_one() other = object() self.assertNotEqual(encryption_config, other) self.assertEqual(encryption_config, mock.ANY) def test___eq___kms_key_name_mismatch(self): encryption_config = self._make_one() other = self._make_one(self.KMS_KEY_NAME) self.assertNotEqual(encryption_config, other) def test___eq___hit(self): encryption_config = self._make_one(self.KMS_KEY_NAME) other = self._make_one(self.KMS_KEY_NAME) self.assertEqual(encryption_config, other) def test___ne___wrong_type(self): encryption_config = self._make_one() other = object() self.assertNotEqual(encryption_config, other) self.assertEqual(encryption_config, mock.ANY) def test___ne___same_value(self): encryption_config1 = self._make_one(self.KMS_KEY_NAME) encryption_config2 = self._make_one(self.KMS_KEY_NAME) # unittest ``assertEqual`` uses ``==`` not ``!=``. comparison_val = encryption_config1 != encryption_config2 self.assertFalse(comparison_val) def test___ne___different_values(self): encryption_config1 = self._make_one() encryption_config2 = self._make_one(self.KMS_KEY_NAME) self.assertNotEqual(encryption_config1, encryption_config2) def test___hash__set_equality(self): encryption_config1 = self._make_one(self.KMS_KEY_NAME) encryption_config2 = self._make_one(self.KMS_KEY_NAME) set_one = {encryption_config1, encryption_config2} set_two = {encryption_config1, encryption_config2} self.assertEqual(set_one, set_two) def test___hash__not_equals(self): encryption_config1 = self._make_one() encryption_config2 = self._make_one(self.KMS_KEY_NAME) set_one = {encryption_config1} set_two = {encryption_config2} self.assertNotEqual(set_one, set_two) def test___repr__(self): encryption_config = self._make_one(self.KMS_KEY_NAME) expected = "EncryptionConfiguration({})".format(self.KMS_KEY_NAME) self.assertEqual(repr(encryption_config), expected)
299,159
test augassign wcr2
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. import dace import numpy as np @dace.program def augassign_wcr(A: dace.int32[10, 10, 10], B: dace.int32[10], W: dace.bool_[10]): count = 0 for i in dace.map[0:10]: B[i] = 0 if W[i] is not False: count += 1 for j in range(10): for k in range(10): B[i] += A[i, j, k] return count @dace.program def augassign_wcr2(A: dace.int32[10, 10, 10], B: dace.int32[10], W: dace.bool_[10, 10, 10]): count = 0 B[:] = 0 for i, j, k in dace.map[0:10, 0:10, 0:10]: if W[i, j, k]: count += 1 B[i] += A[i, j, k] return count @dace.program def augassign_wcr3(A: dace.int32[10, 10, 10], B: dace.int32[10], W: dace.bool_[10, 10, 10], ind: dace.int32[10]): count = 0 B[:] = 0 for i, j, k in dace.map[0:10, 0:10, 0:10]: if W[i, j, k]: count += 1 B[ind[i]] += A[i, j, k] return count @dace.program def augassign_wcr4(): a = np.zeros((10,)) for i in dace.map[1:9]: a[i-1] += 1 a[i] += 2 a[i+1] += 3 return a def test_augassign_wcr(): A = np.random.randint(1, 10, size=(10, 10, 10), dtype=np.int32) B = np.empty((10, ), dtype=np.int32) W = np.random.randint(2, size=(10, ), dtype=np.bool_) with dace.config.set_temporary('frontend', 'avoid_wcr', value=True): test_sdfg = augassign_wcr.to_sdfg(simplify=False) wcr_count = 0 for sdfg in test_sdfg.sdfg_list: for state in sdfg.nodes(): for edge in state.edges(): if edge.data.wcr: wcr_count += 1 assert (wcr_count == 1) count = test_sdfg(A=A, B=B, W=W) assert (count[0] == np.count_nonzero(W)) assert (np.array_equal(np.add.reduce(A, axis=(1, 2))[W], B[W])) def METHOD_NAME(): A = np.random.randint(1, 10, size=(10, 10, 10), dtype=np.int32) B = np.empty((10, ), dtype=np.int32) C = np.zeros((10, ), dtype=np.int32) W = np.random.randint(2, size=(10, 10, 10), dtype=np.bool_) with dace.config.set_temporary('frontend', 'avoid_wcr', value=True): test_sdfg = augassign_wcr2.to_sdfg(simplify=False) wcr_count = 0 for sdfg in test_sdfg.sdfg_list: for state in sdfg.nodes(): for edge in state.edges(): if 
edge.data.wcr: wcr_count += 1 assert (wcr_count == 2) count = test_sdfg(A=A, B=B, W=W) C = np.add.reduce(A, axis=(1, 2), where=W) assert (count[0] == np.count_nonzero(W)) assert (np.array_equal(B, C)) def test_augassign_wcr3(): A = np.random.randint(1, 10, size=(10, 10, 10), dtype=np.int32) B = np.empty((10, ), dtype=np.int32) C = np.zeros((10, ), dtype=np.int32) D = np.zeros((10, ), dtype=np.int32) ind = np.random.randint(0, 10, size=(10, ), dtype=np.int32) W = np.random.randint(2, size=(10, 10, 10), dtype=np.bool_) with dace.config.set_temporary('frontend', 'avoid_wcr', value=True): test_sdfg = augassign_wcr3.to_sdfg(simplify=False) wcr_count = 0 for sdfg in test_sdfg.sdfg_list: for state in sdfg.nodes(): for edge in state.edges(): if edge.data.wcr: wcr_count += 1 assert (wcr_count == 2) count = test_sdfg(A=A, B=B, W=W, ind=ind) C = np.add.reduce(A, axis=(1, 2), where=W) for i in range(10): D[ind[i]] += C[i] assert (count[0] == np.count_nonzero(W)) assert (np.array_equal(B, D)) def test_augassign_no_wcr(): @dace.program def no_wcr(A: dace.int32[5, 5, 5]): A[2, 3, :] += A[3, 2, :] with dace.config.set_temporary('frontend', 'avoid_wcr', value=True): sdfg = no_wcr.to_sdfg(simplify=False) for e, _ in sdfg.all_edges_recursive(): if hasattr(e.data, 'wcr'): assert (not e.data.wcr) ref = np.reshape(np.arange(125, dtype=np.int32), (5, 5, 5)) A = ref.copy() sdfg(A) no_wcr.f(ref) assert (np.allclose(A, ref)) def test_augassign_no_wcr2(): @dace.program def no_wcr(A: dace.int32[5, 5, 5]): A[2, 3, 1:4] += A[2:5, 1, 4] with dace.config.set_temporary('frontend', 'avoid_wcr', value=True): sdfg = no_wcr.to_sdfg(simplify=False) for e, _ in sdfg.all_edges_recursive(): if hasattr(e.data, 'wcr'): assert (not e.data.wcr) ref = np.reshape(np.arange(125, dtype=np.int32), (5, 5, 5)) A = ref.copy() sdfg(A) no_wcr.f(ref) assert (np.allclose(A, ref)) def test_augassign_wcr4(): with dace.config.set_temporary('frontend', 'avoid_wcr', value=False): val = augassign_wcr4() ref = 
augassign_wcr4.f() assert np.allclose(val, ref) def test_augassign_scalar_in_map(): @dace.program def tester(a: dace.float64[20], b: dace.float64[20, 2], c: dace.float64[20, 2]): for i in dace.map[0:20]: tmp: dace.float64 = 0 if i % 2 == 0: tmp += b[i, 0] * c[i, 0] else: tmp += b[i, 1] * c[i, 1] a[i] = tmp a = np.random.rand(20) b = np.random.rand(20, 2) c = np.random.rand(20, 2) ref = np.zeros(20) ref[::2] = (b * c)[::2, 0] ref[1::2] = (b * c)[1::2, 1] tester(a, b, c) assert np.allclose(a, ref) if __name__ == "__main__": test_augassign_wcr() METHOD_NAME() test_augassign_wcr3() test_augassign_wcr4() test_augassign_no_wcr() test_augassign_no_wcr2() test_augassign_scalar_in_map()
299,160
num outputs per input
#!/usr/bin/env python3 from typing import Optional import torch from .. import settings from ..constraints import Interval, Positive from ..priors import Prior from .kernel import Kernel class CylindricalKernel(Kernel): r""" Computes a covariance matrix based on the Cylindrical Kernel between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`. It was proposed in `BOCK: Bayesian Optimization with Cylindrical Kernels`. See http://proceedings.mlr.press/v80/oh18a.html for more details .. note:: The data must lie completely within the unit ball. Args: num_angular_weights (int): The number of components in the angular kernel radial_base_kernel (gpytorch.kernel): The base kernel for computing the radial kernel batch_size (int, optional): Set this if the data is batch of input data. It should be `b` if x1 is a `b x n x d` tensor. Default: `1` eps (float): Small floating point number used to improve numerical stability in kernel computations. Default: `1e-6` param_transform (function, optional): Set this if you want to use something other than softplus to ensure positiveness of parameters. inv_param_transform (function, optional): Set this to allow setting parameters directly in transformed space and sampling from priors. Automatically inferred for common transformations such as torch.exp or torch.nn.functional.softplus. 
""" def __init__( self, num_angular_weights: int, radial_base_kernel: Kernel, eps: Optional[float] = 1e-6, angular_weights_prior: Optional[Prior] = None, angular_weights_constraint: Optional[Interval] = None, alpha_prior: Optional[Prior] = None, alpha_constraint: Optional[Interval] = None, beta_prior: Optional[Prior] = None, beta_constraint: Optional[Interval] = None, **kwargs, ): if angular_weights_constraint is None: angular_weights_constraint = Positive() if alpha_constraint is None: alpha_constraint = Positive() if beta_constraint is None: beta_constraint = Positive() super().__init__(**kwargs) self.num_angular_weights = num_angular_weights self.radial_base_kernel = radial_base_kernel self.eps = eps self.register_parameter( name="raw_angular_weights", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, num_angular_weights)), ) self.register_constraint("raw_angular_weights", angular_weights_constraint) self.register_parameter(name="raw_alpha", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1))) self.register_constraint("raw_alpha", alpha_constraint) self.register_parameter(name="raw_beta", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1))) self.register_constraint("raw_beta", beta_constraint) if angular_weights_prior is not None: if not isinstance(angular_weights_prior, Prior): raise TypeError("Expected gpytorch.priors.Prior but got " + type(angular_weights_prior).__name__) self.register_prior( "angular_weights_prior", angular_weights_prior, lambda m: m.angular_weights, lambda m, v: m._set_angular_weights(v), ) if alpha_prior is not None: if not isinstance(alpha_prior, Prior): raise TypeError("Expected gpytorch.priors.Prior but got " + type(alpha_prior).__name__) self.register_prior("alpha_prior", alpha_prior, lambda m: m.alpha, lambda m, v: m._set_alpha(v)) if beta_prior is not None: if not isinstance(beta_prior, Prior): raise TypeError("Expected gpytorch.priors.Prior but got " + type(beta_prior).__name__) 
self.register_prior("beta_prior", beta_prior, lambda m: m.beta, lambda m, v: m._set_beta(v)) @property def angular_weights(self) -> torch.Tensor: return self.raw_angular_weights_constraint.transform(self.raw_angular_weights) @angular_weights.setter def angular_weights(self, value: torch.Tensor) -> None: if not torch.is_tensor(value): value = torch.tensor(value) self.initialize(raw_angular_weights=self.raw_angular_weights_constraint.inverse_transform(value)) @property def alpha(self) -> torch.Tensor: return self.raw_alpha_constraint.transform(self.raw_alpha) @alpha.setter def alpha(self, value: torch.Tensor) -> None: if not torch.is_tensor(value): value = torch.tensor(value) self.initialize(raw_alpha=self.raw_alpha_constraint.inverse_transform(value)) @property def beta(self) -> torch.Tensor: return self.raw_beta_constraint.transform(self.raw_beta) @beta.setter def beta(self, value: torch.Tensor) -> None: if not torch.is_tensor(value): value = torch.tensor(value) self.initialize(raw_beta=self.raw_beta_constraint.inverse_transform(value)) def forward(self, x1: torch.Tensor, x2: torch.Tensor, diag: Optional[bool] = False, **params) -> torch.Tensor: x1_, x2_ = x1.clone(), x2.clone() # Jitter datapoints that are exactly 0 x1_[x1_ == 0], x2_[x2_ == 0] = x1_[x1_ == 0] + self.eps, x2_[x2_ == 0] + self.eps r1, r2 = x1_.norm(dim=-1, keepdim=True), x2_.norm(dim=-1, keepdim=True) if torch.any(r1 > 1.0) or torch.any(r2 > 1.0): raise RuntimeError("Cylindrical kernel not defined for data points with radius > 1. 
Scale your data!") a1, a2 = x1.div(r1), x2.div(r2) if not diag: gram_mat = a1.matmul(a2.transpose(-2, -1)) for p in range(self.num_angular_weights): if p == 0: angular_kernel = self.angular_weights[..., 0, None, None] else: angular_kernel = angular_kernel + self.angular_weights[..., p, None, None].mul(gram_mat.pow(p)) else: gram_mat = a1.mul(a2).sum(-1) for p in range(self.num_angular_weights): if p == 0: angular_kernel = self.angular_weights[..., 0, None] else: angular_kernel = angular_kernel + self.angular_weights[..., p, None].mul(gram_mat.pow(p)) with settings.lazily_evaluate_kernels(False): radial_kernel = self.radial_base_kernel(self.kuma(r1), self.kuma(r2), diag=diag, **params) return radial_kernel.mul(angular_kernel) def kuma(self, x: torch.Tensor) -> torch.Tensor: alpha = self.alpha.view(*self.batch_shape, 1, 1) beta = self.beta.view(*self.batch_shape, 1, 1) res = 1 - (1 - x.pow(alpha) + self.eps).pow(beta) return res def METHOD_NAME(self, x1: torch.Tensor, x2: torch.Tensor) -> int: return self.radial_base_kernel.METHOD_NAME(x1, x2)
299,161
has organization
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license. """ import datetime from core.utils.common import load_func from django.conf import settings from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager from django.contrib.auth.models import PermissionsMixin from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver from django.utils import timezone from django.utils.translation import gettext_lazy as _ from organizations.models import Organization from rest_framework.authtoken.models import Token from users.functions import hash_upload YEAR_START = 1980 YEAR_CHOICES = [] for r in range(YEAR_START, (datetime.datetime.now().year+1)): YEAR_CHOICES.append((r, r)) year = models.IntegerField(_('year'), choices=YEAR_CHOICES, default=datetime.datetime.now().year) class UserManager(BaseUserManager): use_in_migrations = True def _create_user(self, email, password, **extra_fields): """ Create and save a user with the given email and password. 
""" if not email: raise ValueError('Must specify an email address') email = self.normalize_email(email) user = self.model(email=email, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_user(self, email, password=None, **extra_fields): extra_fields.setdefault('is_staff', False) extra_fields.setdefault('is_superuser', False) return self._create_user(email, password, **extra_fields) def create_superuser(self, email, password, **extra_fields): extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(email, password, **extra_fields) class UserLastActivityMixin(models.Model): last_activity = models.DateTimeField( _('last activity'), default=timezone.now, editable=False) def update_last_activity(self): self.last_activity = timezone.now() self.save(update_fields=["last_activity"]) class Meta: abstract = True UserMixin = load_func(settings.USER_MIXIN) class User(UserMixin, AbstractBaseUser, PermissionsMixin, UserLastActivityMixin): """ An abstract base class implementing a fully featured User model with admin-compliant permissions. Username and password are required. Other fields are optional. 
""" username = models.CharField(_('username'), max_length=256) email = models.EmailField(_('email address'), unique=True, blank=True) first_name = models.CharField(_('first name'), max_length=256, blank=True) last_name = models.CharField(_('last name'), max_length=256, blank=True) phone = models.CharField(_('phone'), max_length=256, blank=True) avatar = models.ImageField(upload_to=hash_upload, blank=True) is_staff = models.BooleanField(_('staff status'), default=False, help_text=_('Designates whether the user can log into this admin site.')) is_active = models.BooleanField(_('active'), default=True, help_text=_('Designates whether to treat this user as active. ' 'Unselect this instead of deleting accounts.')) date_joined = models.DateTimeField(_('date joined'), default=timezone.now) activity_at = models.DateTimeField(_('last annotation activity'), auto_now=True) active_organization = models.ForeignKey( 'organizations.Organization', null=True, on_delete=models.SET_NULL, related_name='active_users' ) allow_newsletters = models.BooleanField( _('allow newsletters'), null=True, default=None, help_text=_('Allow sending newsletters to user') ) objects = UserManager() EMAIL_FIELD = 'email' USERNAME_FIELD = 'email' REQUIRED_FIELDS = () class Meta: db_table = 'htx_user' verbose_name = _('user') verbose_name_plural = _('users') indexes = [ models.Index(fields=['username']), models.Index(fields=['email']), models.Index(fields=['first_name']), models.Index(fields=['last_name']), models.Index(fields=['date_joined']), ] @property def avatar_url(self): if self.avatar: if settings.CLOUD_FILE_STORAGE_ENABLED: return self.avatar.url else: return settings.HOSTNAME + self.avatar.url def is_organization_admin(self, org_pk): return True def active_organization_annotations(self): return self.annotations.filter(project__organization=self.active_organization) def active_organization_contributed_project_number(self): annotations = self.active_organization_annotations() return 
annotations.values_list('project').distinct().count() @property def own_organization(self): return Organization.objects.get(created_by=self) @property def METHOD_NAME(self): return Organization.objects.filter(created_by=self).exists() def clean(self): super().clean() self.email = self.__class__.objects.normalize_email(self.email) def name_or_email(self): name = self.get_full_name() if len(name) == 0: name = self.email return name def get_full_name(self): """ Return the first_name and the last_name for a given user with a space in between. """ full_name = '%s %s' % (self.first_name, self.last_name) return full_name.strip() def get_short_name(self): """Return the short name for the user.""" return self.first_name def reset_token(self): token = Token.objects.filter(user=self) if token.exists(): token.delete() return Token.objects.create(user=self) def get_initials(self): initials = '?' if not self.first_name and not self.last_name: initials = self.email[0:2] elif self.first_name and not self.last_name: initials = self.first_name[0:1] elif self.last_name and not self.first_name: initials = self.last_name[0:1] elif self.first_name and self.last_name: initials = self.first_name[0:1] + self.last_name[0:1] return initials @receiver(post_save, sender=User) def init_user(sender, instance=None, created=False, **kwargs): if created: # create token for user Token.objects.create(user=instance)
299,162
test cache deletion migration
# SPDX-License-Identifier: Apache-2.0 # Copyright 2023 The HuggingFace Authors. from libcommon.constants import ( CACHE_COLLECTION_RESPONSES, CACHE_METRICS_COLLECTION, CACHE_MONGOENGINE_ALIAS, METRICS_MONGOENGINE_ALIAS, QUEUE_COLLECTION_JOBS, QUEUE_METRICS_COLLECTION, QUEUE_MONGOENGINE_ALIAS, ) from libcommon.queue import JobDocument from libcommon.resources import MongoResource from libcommon.utils import get_datetime from mongoengine.connection import get_db from mongodb_migration.deletion_migrations import ( CacheDeletionMigration, MetricsDeletionMigration, MigrationQueueDeleteTTLIndex, QueueDeletionMigration, get_index_names, ) def METHOD_NAME(mongo_host: str) -> None: kind = "cache_kind" with MongoResource( database="test_cache_delete_migration", host=mongo_host, mongoengine_alias=CACHE_MONGOENGINE_ALIAS, ): db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}]) assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to delete migration = CacheDeletionMigration( cache_kind=kind, version="20230505180100", description=f"remove cache for kind {kind}", ) migration.up() assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind db[CACHE_COLLECTION_RESPONSES].drop() def test_queue_deletion_migration(mongo_host: str) -> None: job_type = "job_type" with MongoResource( database="test_queue_delete_migration", host=mongo_host, mongoengine_alias=QUEUE_MONGOENGINE_ALIAS, ): db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].insert_many( [ { "type": job_type, "unicity_id": f"{job_type},dataset,config,split", "dataset": "dataset", "revision": "revision", "http_status": 200, } ] ) assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete migration = QueueDeletionMigration( job_type=job_type, version="20230505180200", description=f"remove jobs of 
type '{job_type}'", ) migration.up() assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type db[QUEUE_COLLECTION_JOBS].drop() def test_metrics_deletion_migration(mongo_host: str) -> None: step_name = job_type = cache_kind = "step_name" with MongoResource( database="test_metrics_delete_migration", host=mongo_host, mongoengine_alias=METRICS_MONGOENGINE_ALIAS, ): db = get_db(METRICS_MONGOENGINE_ALIAS) db[QUEUE_METRICS_COLLECTION].insert_many([{"queue": job_type, "status": "waiting", "total": 0}]) db[CACHE_METRICS_COLLECTION].insert_many([{"kind": cache_kind, "http_status": 400, "total": 0}]) assert db[QUEUE_METRICS_COLLECTION].find_one( {"queue": job_type} ) # Ensure there is at least one record to delete assert db[CACHE_METRICS_COLLECTION].find_one( {"kind": cache_kind} ) # Ensure there is at least one record to delete migration = MetricsDeletionMigration( job_type=job_type, cache_kind=cache_kind, version="20230505180300", description=f"delete the queue and cache metrics for step '{step_name}'", ) migration.up() assert not db[QUEUE_METRICS_COLLECTION].find_one({"queue": job_type}) # Ensure 0 records after deletion assert not db[CACHE_METRICS_COLLECTION].find_one({"kind": cache_kind}) # Ensure 0 records after deletion db[QUEUE_METRICS_COLLECTION].drop() db[CACHE_METRICS_COLLECTION].drop() def test_queue_delete_ttl_index(mongo_host: str) -> None: with MongoResource(database="test_queue_delete_ttl_index", host=mongo_host, mongoengine_alias="queue"): JobDocument( type="test", dataset="test", revision="test", unicity_id="test", namespace="test", created_at=get_datetime(), difficulty=50, ).save() db = get_db(QUEUE_MONGOENGINE_ALIAS) assert ( len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 1 ) # Ensure the TTL index exists migration = MigrationQueueDeleteTTLIndex( version="20230428145000", description="remove ttl index on field 'finished_at'", field_name="finished_at", ) migration.up() assert ( 
len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 0 ) # Ensure the TTL index does not exist anymore db[QUEUE_COLLECTION_JOBS].drop()
299,163
get scope
# Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 import csv import logging import os import shutil from csv import DictWriter from typing import ( Any, Dict, FrozenSet, ) from pyhocon import ConfigFactory, ConfigTree from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from databuilder.models.atlas_serializable import AtlasSerializable from databuilder.serializers import atlas_serializer from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class FsAtlasCSVLoader(Loader): """ Write entity and relationship CSV file(s) that can be consumed by AtlasCsvPublisher. It assumes that the record it consumes is instance of AtlasCsvSerializable """ # Config keys ENTITY_DIR_PATH = 'entity_dir_path' RELATIONSHIP_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False, }) def __init__(self) -> None: self._entity_file_mapping: Dict[Any, DictWriter] = {} self._relation_file_mapping: Dict[Any, DictWriter] = {} self._keys: Dict[FrozenSet[str], int] = {} self._closer = Closer() def init(self, conf: ConfigTree) -> None: """ Initializing FsAtlasCSVLoader by creating directory for entity files and relationship files. Note that the directory defined in configuration should not exist. 
:param conf: :return: """ conf = conf.with_fallback(FsAtlasCSVLoader._DEFAULT_CONFIG) self._entity_dir = conf.get_string(FsAtlasCSVLoader.ENTITY_DIR_PATH) self._relation_dir = \ conf.get_string(FsAtlasCSVLoader.RELATIONSHIP_DIR_PATH) self._delete_created_dir = \ conf.get_bool(FsAtlasCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsAtlasCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._entity_dir) self._create_directory(self._relation_dir) def _create_directory(self, path: str) -> None: """ Validate directory does not exist, creates it, register deletion of created directory function to Job.closer. :param path: :return: """ if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory %s', path) shutil.rmtree(path) else: raise RuntimeError(f'Directory should not exist: {path}') os.makedirs(path) def _delete_dir() -> None: if not self._delete_created_dir: LOGGER.warning('Skip Deleting directory %s', path) return LOGGER.info('Deleting directory %s', path) shutil.rmtree(path) # Directory should be deleted after publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable: AtlasSerializable) -> None: """ Writes AtlasSerializable into CSV files. There are multiple CSV files that this method writes. This is because there're not only node and relationship, but also it can also have different entities, and relationships. Common pattern for both entities and relations: 1. retrieve csv row (a dict where keys represent a header, values represent a row) 2. using this dict to get a appropriate csv writer and write to it. 3. 
repeat 1 and 2 :param csv_serializable: :return: """ entity = csv_serializable.next_atlas_entity() while entity: entity_dict = atlas_serializer.serialize_entity(entity) key = (self._make_key(entity_dict), entity.typeName) file_suffix = '{}_{}'.format(*key) entity_writer = self._get_writer( entity_dict, self._entity_file_mapping, key, self._entity_dir, file_suffix, ) entity_writer.writerow(entity_dict) entity = csv_serializable.next_atlas_entity() relation = csv_serializable.next_atlas_relation() while relation: relation_dict = atlas_serializer.serialize_relationship(relation) keys = ( self._make_key(relation_dict), relation.entityType1, relation.entityType2, ) file_suffix = '{}_{}_{}'.format(*keys) relation_writer = self._get_writer( relation_dict, self._relation_file_mapping, keys, self._relation_dir, file_suffix, ) relation_writer.writerow(relation_dict) relation = csv_serializable.next_atlas_relation() def _get_writer( self, csv_record_dict: Dict[str, Any], file_mapping: Dict[Any, DictWriter], key: Any, dir_path: str, file_suffix: str, ) -> DictWriter: """ Finds a writer based on csv record, key. If writer does not exist, it's creates a csv writer and update the mapping. :param csv_record_dict: :param file_mapping: :param key: :param file_suffix: :return: """ writer = file_mapping.get(key) if writer: return writer LOGGER.info('Creating file for %s', key) file_out = open(f'{dir_path}/{file_suffix}.csv', 'w', encoding='utf8') writer = csv.DictWriter( # type: ignore file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC, ) def file_out_close() -> None: LOGGER.info('Closing file IO %s', file_out) file_out.close() self._closer.register(file_out_close) writer.writeheader() file_mapping[key] = writer return writer def close(self) -> None: """ Any closeable callable registered in _closer, it will close. 
:return: """ self._closer.close() def METHOD_NAME(self) -> str: return "loader.filesystem_csv_atlas" def _make_key(self, record_dict: Dict[str, Any]) -> str: """ Each unique set of record keys is assigned an increasing numeric key """ return str(self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))).rjust(3, '0')
299,164
mnasnet0 75
# *************************************************************** # Copyright (c) 2023 Jittor. All Rights Reserved. # Maintainers: # Wenyang Zhou <576825820@qq.com> # Dun Liang <randonlang@gmail.com>. # # This file is subject to the terms and conditions defined in # file 'LICENSE.txt', which is part of this source code package. # *************************************************************** # This model is generated by pytorch converter. import jittor as jt from jittor import nn __all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3'] _BN_MOMENTUM = (1 - 0.9997) class _InvertedResidual(nn.Module): def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, bn_momentum=0.1): super(_InvertedResidual, self).__init__() assert (stride in [1, 2]) assert (kernel_size in [3, 5]) mid_ch = (in_ch * expansion_factor) self.apply_residual = ((in_ch == out_ch) and (stride == 1)) self.layers = nn.Sequential(nn.Conv(in_ch, mid_ch, 1, bias=False), nn.BatchNorm(mid_ch, momentum=bn_momentum), nn.Relu(), nn.Conv(mid_ch, mid_ch, kernel_size, padding=(kernel_size // 2), stride=stride, groups=mid_ch, bias=False), nn.BatchNorm(mid_ch, momentum=bn_momentum), nn.Relu(), nn.Conv(mid_ch, out_ch, 1, bias=False), nn.BatchNorm(out_ch, momentum=bn_momentum)) def execute(self, input): if self.apply_residual: return (self.layers(input) + input) else: return self.layers(input) def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats, bn_momentum): assert (repeats >= 1) first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum) remaining = [] for _ in range(1, repeats): remaining.append(_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum)) return nn.Sequential(first, *remaining) def _round_to_multiple_of(val, divisor, round_up_bias=0.9): assert (0.0 < round_up_bias < 1.0) new_val = max(divisor, ((int((val + (divisor / 2))) // divisor) * divisor)) return (new_val if (new_val >= 
(round_up_bias * val)) else (new_val + divisor)) def _get_depths(alpha): depths = [24, 40, 80, 96, 192, 320] return [_round_to_multiple_of((depth * alpha), 8) for depth in depths] class MNASNet(nn.Module): """ MNASNet model architecture. version=2. Args: * alpha: Depth multiplier. * num_classes: Number of classes. Default: 1000. * dropout: Dropout probability of dropout layer. """ _version = 2 def __init__(self, alpha, num_classes=1000, dropout=0.2): super(MNASNet, self).__init__() assert (alpha > 0.0) self.alpha = alpha self.num_classes = num_classes depths = _get_depths(alpha) layers = [ nn.Conv(3, 32, 3, padding=1, stride=2, bias=False), nn.BatchNorm(32, momentum=_BN_MOMENTUM), nn.Relu(), nn.Conv(32, 32, 3, padding=1, stride=1, groups=32, bias=False), nn.BatchNorm(32, momentum=_BN_MOMENTUM), nn.Relu(), nn.Conv(32, 16, 1, padding=0, stride=1, bias=False), nn.BatchNorm(16, momentum=_BN_MOMENTUM), _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM), _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM), _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM), _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM), _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM), _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM), nn.Conv(depths[5], 1280, 1, padding=0, stride=1, bias=False), nn.BatchNorm(1280, momentum=_BN_MOMENTUM), nn.Relu() ] self.layers = nn.Sequential(*layers) self.classifier = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(1280, num_classes)) def execute(self, x): x = self.layers(x) x = x.mean([2, 3]) return self.classifier(x) def mnasnet0_5(pretrained=False, **kwargs): model = MNASNet(0.5, **kwargs) if pretrained: model.load("jittorhub://mnasnet0_5.pkl") return model def METHOD_NAME(pretrained=False, **kwargs): model = MNASNet(0.75, **kwargs) if pretrained: model.load("jittorhub://mnasnet0_75.pkl") return model def mnasnet1_0(pretrained=False, **kwargs): model = MNASNet(1.0, **kwargs) if pretrained: model.load("jittorhub://mnasnet1_0.pkl") return model 
def mnasnet1_3(pretrained=False, **kwargs): model = MNASNet(1.3, **kwargs) if pretrained: model.load("jittorhub://mnasnet1_3.pkl") return model
299,165
test poll recap servers single project
from unittest.mock import call, patch import pytest import responses from sentry import eventstore from sentry.tasks.recap_servers import ( RECAP_SERVER_LATEST_ID, RECAP_SERVER_TOKEN_OPTION, RECAP_SERVER_URL_OPTION, poll_project_recap_server, poll_recap_servers, ) from sentry.testutils.cases import TestCase from sentry.testutils.helpers import Feature from sentry.utils import json crash_payload = { "_links": { "self": {"href": "ApiBaseUrl/burp/137?field=stopReason"}, "files": {"href": "ApiBaseUrl/burp/137/files", "custom": True}, }, "id": 1, "uploadDate": "2018-11-06T21:19:55.271Z", "stopReason": "SEGFAULT", "detailedStackTrace": [ { "sourceFile": "/usr/build/src/foo.c", "sourceLine": 42, "moduleName": "boot.bin", "moduleFingerprint": "iddqd", "moduleOffset": "0x1", "resolvedSymbol": "Foo::Run()+0x4", "absoluteAddress": "0xaa00bb4", "displayValue": "boot.bin!Foo::Update()+0x4", }, { "sourceFile": "/usr/build/src/bar.c", "sourceLine": 1337, "moduleName": "boot.bin", "moduleFingerprint": "idkfa", "moduleOffset": "0x10", "resolvedSymbol": "Executor::Run()+0x30", "absoluteAddress": "0xbb11aa4", "displayValue": "boot.bin!Bar::Trigger()+0x30", }, ], "userData": { "password": "should_be_redacted", }, } @pytest.mark.django_db @patch("sentry.tasks.recap_servers.poll_project_recap_server.delay") class PollRecapServersTest(TestCase): def setUp(self): self.org = self.create_organization(owner=self.user) def test_poll_recap_servers_no_matches( self, poll_project_recap_server, ): poll_recap_servers() assert poll_project_recap_server.call_count == 0 def METHOD_NAME( self, poll_project_recap_server, ): project = self.create_project(organization=self.org, name="foo") project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") poll_recap_servers() assert poll_project_recap_server.call_count == 1 poll_project_recap_server.assert_has_calls([call(project.id)], any_order=True) def test_poll_recap_servers_multiple_projects(self, poll_project_recap_server): project = 
self.create_project(organization=self.org, name="foo") project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") project_dos = self.create_project(organization=self.org, name="bar") project_dos.update_option(RECAP_SERVER_URL_OPTION, "http://example-dos.com") project_tres = self.create_project(organization=self.org, name="baz") project_tres.update_option(RECAP_SERVER_URL_OPTION, "http://example-tres.com") poll_recap_servers() assert poll_project_recap_server.call_count == 3 poll_project_recap_server.assert_has_calls( [call(project.id), call(project_dos.id), call(project_tres.id)], any_order=True ) @pytest.mark.django_db class PollProjectRecapServerTest(TestCase): @pytest.fixture(autouse=True) def initialize(self): with Feature({"organizations:recap-server": True}): yield # Run test case def setUp(self): self.org = self.create_organization(owner=self.user) self.project = self.create_project(organization=self.org, name="foo") def get_crash_payload(self, id): crash = dict(crash_payload) crash["id"] = id return crash def test_poll_project_recap_server_incorrect_project(self): poll_project_recap_server(1337) # should not error def test_poll_project_recap_server_missing_recap_url(self): poll_project_recap_server(self.project.id) # should not error def test_poll_project_recap_server_disabled_feature(self): with Feature({"organizations:recap-server": False}): self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") poll_project_recap_server(self.project.id) # should not error @patch("sentry.tasks.recap_servers.store_crash") @responses.activate def test_poll_project_recap_server_initial_request(self, store_crash): payload = { "results": 3, "_embedded": { "crash": [ {"id": 1}, {"id": 1337}, {"id": 42}, ] }, } outgoing_recap_request = responses.get( url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000", body=json.dumps(payload), content_type="application/json", ) self.project.update_option(RECAP_SERVER_URL_OPTION, 
"http://example.com") assert self.project.get_option(RECAP_SERVER_LATEST_ID) is None poll_project_recap_server(self.project.id) assert outgoing_recap_request.call_count == 1 assert store_crash.call_count == 3 assert self.project.get_option(RECAP_SERVER_LATEST_ID) == 1337 @patch("sentry.tasks.recap_servers.store_crash") @responses.activate def test_poll_project_recap_server_following_request(self, store_crash): payload = { "results": 2, "_embedded": { "crash": [ {"id": 1337}, {"id": 42}, ] }, } # Encoded query: {8 TO *} outgoing_recap_request = responses.get( url="http://example.com/rest/v1/crashes;sort=id:ascending;q=id:%7B8%20TO%20%2A%7D", body=json.dumps(payload), content_type="application/json", ) self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") self.project.update_option(RECAP_SERVER_LATEST_ID, 8) poll_project_recap_server(self.project.id) assert outgoing_recap_request.call_count == 1 assert store_crash.call_count == 2 assert self.project.get_option(RECAP_SERVER_LATEST_ID) == 1337 @patch("sentry.tasks.recap_servers.store_crash") @responses.activate def test_poll_project_recap_server_auth_token_header(self, store_crash): outgoing_recap_request = responses.get( url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000", body=json.dumps({"results": 0}), content_type="application/json", match=[responses.matchers.header_matcher({"Authorization": "Bearer mkey"})], ) self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") self.project.update_option(RECAP_SERVER_TOKEN_OPTION, "mkey") poll_project_recap_server(self.project.id) assert outgoing_recap_request.call_count == 1 # TODO(recap): Add more assetions on `event.data` when the time comes @responses.activate def test_poll_recap_servers_store_crash(self): payload = { "results": 2, "_embedded": {"crash": [self.get_crash_payload(1337), self.get_crash_payload(42)]}, } responses.get( url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000", 
body=json.dumps(payload), content_type="application/json", ) self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com") poll_project_recap_server(self.project.id) events = eventstore.backend.get_events( eventstore.Filter(project_ids=[self.project.id]), tenant_ids={"referrer": "relay-test", "organization_id": 123}, ) # Make sure that event went though the normalization and pii scrubbing process assert events[0].data["contexts"]["userData"]["password"] == "[Filtered]" assert events[1].data["contexts"]["userData"]["password"] == "[Filtered]"
299,166
datetime utc to local
""" datetime.datetime helper functions for converting to/from UTC and other datetime manipulations""" # source: https://github.com/RhetTbull/datetime-utils __version__ = "2022.04.30" import datetime # TODO: probably shouldn't use replace here, see this: # https://stackoverflow.com/questions/13994594/how-to-add-timezone-into-a-naive-datetime-instance-in-python/13994611#13994611 __all__ = [ "datetime_has_tz", "datetime_naive_to_local", "datetime_naive_to_utc", "datetime_remove_tz", "datetime_to_new_tz", "datetime_tz_to_utc", "datetime_utc_to_local", "get_local_tz", "utc_offset_seconds", ] # TODO: look at https://github.com/regebro/tzlocal for more robust implementation def get_local_tz(dt: datetime.datetime) -> datetime.tzinfo: """Return local timezone as datetime.timezone tzinfo for dt Args: dt: datetime.datetime Returns: local timezone for dt as datetime.timezone Raises: ValueError if dt is not timezone naive """ if not datetime_has_tz(dt): return dt.astimezone().tzinfo else: raise ValueError("dt must be naive datetime.datetime object") def datetime_has_tz(dt: datetime.datetime) -> bool: """Return True if datetime dt has tzinfo else False Args: dt: datetime.datetime Returns: True if dt is timezone aware, else False Raises: TypeError if dt is not a datetime.datetime object """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None def datetime_tz_to_utc(dt: datetime.datetime) -> datetime.datetime: """Convert datetime.datetime object with timezone to UTC timezone Args: dt: datetime.datetime object Returns: datetime.datetime in UTC timezone Raises: TypeError if dt is not datetime.datetime object ValueError if dt does not have timeone information """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None: return 
dt.replace(tzinfo=dt.tzinfo).astimezone(tz=datetime.timezone.utc) else: raise ValueError("dt does not have timezone info") def datetime_remove_tz(dt: datetime.datetime) -> datetime.datetime: """Remove timezone from a datetime.datetime object Args: dt: datetime.datetime object with tzinfo Returns: dt without any timezone info (naive datetime object) Raises: TypeError if dt is not a datetime.datetime object """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") return dt.replace(tzinfo=None) def datetime_naive_to_utc(dt: datetime.datetime) -> datetime.datetime: """Convert naive (timezone unaware) datetime.datetime to aware timezone in UTC timezone Args: dt: datetime.datetime without timezone Returns: datetime.datetime with UTC timezone Raises: TypeError if dt is not a datetime.datetime object ValueError if dt is not a naive/timezone unaware object """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None: # has timezone info raise ValueError( "dt must be naive/timezone unaware: " f"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}" ) return dt.replace(tzinfo=datetime.timezone.utc) def datetime_naive_to_local(dt: datetime.datetime) -> datetime.datetime: """Convert naive (timezone unaware) datetime.datetime to aware timezone in local timezone Args: dt: datetime.datetime without timezone Returns: datetime.datetime with local timezone Raises: TypeError if dt is not a datetime.datetime object ValueError if dt is not a naive/timezone unaware object """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None: # has timezone info raise ValueError( "dt must be naive/timezone unaware: " f"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}" ) return 
dt.replace(tzinfo=get_local_tz(dt)) def METHOD_NAME(dt: datetime.datetime) -> datetime.datetime: """Convert datetime.datetime object in UTC timezone to local timezone Args: dt: datetime.datetime object Returns: datetime.datetime in local timezone Raises: TypeError if dt is not a datetime.datetime object ValueError if dt is not in UTC timezone """ if type(dt) != datetime.datetime: raise TypeError(f"dt must be type datetime.datetime, not {type(dt)}") if dt.tzinfo is not datetime.timezone.utc: raise ValueError(f"{dt} must be in UTC timezone: timezone = {dt.tzinfo}") return dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None) def datetime_to_new_tz(dt: datetime.datetime, offset) -> datetime.datetime: """Convert datetime.datetime object from current timezone to new timezone with offset of seconds from UTC""" if not datetime_has_tz(dt): raise ValueError("dt must be timezone aware") time_delta = datetime.timedelta(seconds=offset) tz = datetime.timezone(time_delta) return dt.astimezone(tz=tz) def utc_offset_seconds(dt: datetime.datetime) -> int: """Return offset in seconds from UTC for timezone aware datetime.datetime object Args: dt: datetime.datetime object Returns: offset in seconds from UTC Raises: ValueError if dt does not have timezone information """ if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None: return dt.tzinfo.utcoffset(dt).total_seconds() else: raise ValueError("dt does not have timezone info")
299,167
process document
"""Reader for CoNLL-U files with the old CorefUD 0.1 style of coreference annotation.""" import re import logging import udapi.block.read.conllu from udapi.core.coref import CorefEntity, CorefMention, BridgingLinks class OldCorefUD(udapi.block.read.conllu.Conllu): def __init__(self, replace_hyphen_in_id_with='', **kwargs): """Create the read.OldCorefUD reader object. Args: substitute_hyphen_in_id_for: string to use as a replacement for hyphens in ClusterId The new format does not allow hyphens in eid (IDs of entity entities), so we need to replace them. """ super().__init__(**kwargs) self.replace_hyphen_in_id_with = replace_hyphen_in_id_with self.orig2new = {} self.new2orig = {} def _fix_id(self, cid): if not cid or '-' not in cid: return cid new_cid = self.orig2new.get(cid) if new_cid is None: new_cid = cid.replace('-', self.replace_hyphen_in_id_with) base, counter = new_cid, 1 while new_cid in self.new2orig: counter += 1 new_cid = f"{base}{counter}" self.new2orig[new_cid] = cid self.orig2new[cid] = new_cid return new_cid def METHOD_NAME(self, doc, strict=True): super().METHOD_NAME(doc) eid_to_entity = {} for node in doc.nodes_and_empty: index, index_str = 0, "" eid = node.misc["ClusterId"] if not eid: index, index_str = 1, "[1]" eid = node.misc["ClusterId[1]"] eid = self._fix_id(eid) while eid: entity = eid_to_entity.get(eid) if entity is None: entity = CorefEntity(eid) eid_to_entity[eid] = entity mention = CorefMention(words=[node], entity=entity) if node.misc["MentionSpan" + index_str]: mention.span = node.misc["MentionSpan" + index_str] etype = node.misc["ClusterType" + index_str] if etype: if entity.etype is not None and etype != entity.etype: logging.warning(f"etype mismatch in {node}: {entity.etype} != {etype}") entity.etype = etype bridging_str = node.misc["Bridging" + index_str] if bridging_str: mention._bridging = BridgingLinks(mention) for link_str in bridging_str.split(','): target, relation = link_str.split(':') target = self._fix_id(target) if target 
== eid: _error("Bridging cannot self-reference the same entity: " + target, strict) if target not in eid_to_entity: eid_to_entity[target] = CorefEntity(target) mention._bridging.append((eid_to_entity[target], relation)) split_ante_str = node.misc["SplitAnte" + index_str] if split_ante_str: split_antes = [] # TODO in CorefUD draft "+" was used as the separator, but it was changed to comma. # We can delete `.replace('+', ',')` once there are no more data with the legacy plus separator. for ante_str in split_ante_str.replace('+', ',').split(','): ante_str = self._fix_id(ante_str) if ante_str in eid_to_entity: if ante_str == eid: _error("SplitAnte cannot self-reference the same entity: " + eid, strict) split_antes.append(eid_to_entity[ante_str]) else: # split cataphora, e.g. "We, that is you and me..." ante_cl = CorefEntity(ante_str) eid_to_entity[ante_str] = ante_cl split_antes.append(ante_cl) entity.split_ante = sorted(split_antes) # Some CorefUD 0.2 datasets (e.g. ARRAU) separate key-value pairs with spaces instead of commas. # We also need to escape forbidden characters. mmisc = node.misc["MentionMisc" + index_str].replace(' ', ',') mention.other = mmisc.replace('-', '%2D').replace('(', '%28').replace(')', '%29') index += 1 index_str = f"[{index}]" eid = self._fix_id(node.misc["ClusterId" + index_str]) # c=doc.coref_entities should be sorted, so that c[0] < c[1] etc. # In other words, the dict should be sorted by the values (according to CorefEntity.__lt__), # not by the keys (eid). # In Python 3.7+ (3.6+ in CPython), dicts are guaranteed to be insertion order. for entity in eid_to_entity.values(): if not entity._mentions: _error(f"Entity {entity.eid} referenced in SplitAnte or Bridging, but not defined with ClusterId", strict) entity._mentions.sort() doc._eid_to_entity = {c._eid: c for c in sorted(eid_to_entity.values())} # Delete all old-style attributes from MISC (so when converting old to new style, the old attributes are deleted). 
attrs = "ClusterId MentionSpan ClusterType Bridging SplitAnte MentionMisc".split() for node in doc.nodes_and_empty: for key in list(node.misc): if any(re.match(attr + r'(\[\d+\])?$', key) for attr in attrs): del node.misc[key] def _error(msg, strict): if strict: raise ValueError(msg) logging.error(msg)
299,168
example count
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example count metric.""" from typing import Optional, Dict, Iterable, List import apache_beam as beam import numpy as np from tensorflow_model_analysis import constants from tensorflow_model_analysis.metrics import metric_types from tensorflow_model_analysis.utils import util EXAMPLE_COUNT_NAME = 'example_count' class ExampleCount(metric_types.Metric): """Example count. Note that although the example_count is independent of the model, this metric will be associated with a model for consistency with other metrics. """ def __init__(self, name: str = EXAMPLE_COUNT_NAME): """Initializes example count. Args: name: Metric name. """ super().__init__(METHOD_NAME, name=name) @property def compute_confidence_interval(self) -> bool: """Always disable confidence intervals for ExampleCount. Confidence intervals capture uncertainty in a metric if it were computed on more examples. For ExampleCount, this sort of uncertainty is not meaningful, so confidence intervals are disabled. Returns: Whether to compute confidence intervals. 
""" return False metric_types.register_metric(ExampleCount) def METHOD_NAME( name: str = EXAMPLE_COUNT_NAME, model_names: Optional[List[str]] = None, output_names: Optional[List[str]] = None, sub_keys: Optional[List[metric_types.SubKey]] = None, example_weighted: bool = False) -> metric_types.MetricComputations: """Returns metric computations for example count.""" computations = [] for model_name in model_names or ['']: for output_name in output_names or ['']: keys = [] for sub_key in sub_keys or [None]: key = metric_types.MetricKey( name=name, model_name=model_name, output_name=output_name, sub_key=sub_key, example_weighted=example_weighted) keys.append(key) # Note: This cannot be implemented based on the weight stored in # calibration because weighted example count is used with multi-class, etc # models that do not use calibration metrics. # The combiner only needs example weights in case users do not have # predictions or labels. computations.append( metric_types.MetricComputation( keys=keys, preprocessors=[ metric_types.StandardMetricInputsPreprocessor( include_filter={constants.EXAMPLE_WEIGHTS_KEY: {}}, include_default_inputs=False, ) ], combiner=_ExampleCountCombiner( model_name, output_name, keys, example_weighted ), ) ) return computations class _ExampleCountCombiner(beam.CombineFn): """Computes example count.""" def __init__( self, model_name: str, output_name: str, keys: List[metric_types.MetricKey], example_weighted, ): self._model_name = model_name self._output_name = output_name self._keys = keys self._example_weighted = example_weighted def create_accumulator(self) -> float: return 0.0 def add_input(self, accumulator: float, element: metric_types.StandardMetricInputs) -> float: if not self._example_weighted or element.example_weight is None: example_weight = np.array(1.0) else: example_weight = element.example_weight if isinstance(example_weight, dict) and self._model_name: value = util.get_by_keys( example_weight, [self._model_name], optional=True) 
if value is not None: example_weight = value if isinstance(example_weight, dict) and self._output_name: example_weight = util.get_by_keys(example_weight, [self._output_name], np.array(1.0)) if isinstance(example_weight, dict): raise ValueError( f'example_count cannot be calculated on a dict {example_weight}: ' f'model_name={self._model_name}, output_name={self._output_name}.\n\n' 'This is most likely a configuration error (for multi-output models' 'a separate metric is needed for each output).') return accumulator + np.sum(example_weight) def merge_accumulators(self, accumulators: Iterable[float]) -> float: result = 0.0 for accumulator in accumulators: result += accumulator return result def extract_output(self, accumulator: float) -> Dict[metric_types.MetricKey, float]: return {k: accumulator for k in self._keys}
299,169
head sha
############################ Copyrights and license ############################ # # # Copyright 2021 Jeppe Fihl-Pearson <jeppe@tenzer.dk> # # # # This file is part of PyGithub. # # http://pygithub.readthedocs.io/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # ################################################################################ from __future__ import annotations from datetime import datetime from typing import Any import github.GithubObject import github.WorkflowStep from github.GithubObject import Attribute, CompletableGithubObject, NotSet class WorkflowJob(CompletableGithubObject): """ This class represents Workflow Jobs. 
The reference can be found here https://docs.github.com/en/rest/reference/actions#workflow-jobs """ def _initAttributes(self) -> None: self._check_run_url: Attribute[str] = NotSet self._completed_at: Attribute[datetime] = NotSet self._conclusion: Attribute[str] = NotSet self._head_sha: Attribute[str] = NotSet self._html_url: Attribute[str] = NotSet self._id: Attribute[int] = NotSet self._name: Attribute[str] = NotSet self._node_id: Attribute[str] = NotSet self._run_id: Attribute[int] = NotSet self._run_url: Attribute[str] = NotSet self._started_at: Attribute[datetime] = NotSet self._status: Attribute[str] = NotSet self._steps: Attribute[list[github.WorkflowStep.WorkflowStep]] = NotSet self._url: Attribute[str] = NotSet def __repr__(self) -> str: return self.get__repr__({"id": self._id.value, "url": self._url.value}) @property def check_run_url(self) -> str: self._completeIfNotSet(self._check_run_url) return self._check_run_url.value @property def completed_at(self) -> datetime: self._completeIfNotSet(self._completed_at) return self._completed_at.value @property def conclusion(self) -> str: self._completeIfNotSet(self._conclusion) return self._conclusion.value @property def METHOD_NAME(self) -> str: self._completeIfNotSet(self._head_sha) return self._head_sha.value @property def html_url(self) -> str: self._completeIfNotSet(self._html_url) return self._html_url.value @property def id(self) -> int: self._completeIfNotSet(self._id) return self._id.value @property def name(self) -> str: self._completeIfNotSet(self._name) return self._name.value @property def node_id(self) -> str: self._completeIfNotSet(self._node_id) return self._node_id.value @property def run_id(self) -> int: self._completeIfNotSet(self._run_id) return self._run_id.value @property def run_url(self) -> str: self._completeIfNotSet(self._run_url) return self._run_url.value @property def started_at(self) -> datetime: self._completeIfNotSet(self._started_at) return self._started_at.value @property def 
status(self) -> str: self._completeIfNotSet(self._status) return self._status.value @property def steps(self) -> list[github.WorkflowStep.WorkflowStep]: self._completeIfNotSet(self._steps) return self._steps.value @property def url(self) -> str: self._completeIfNotSet(self._url) return self._url.value def logs_url(self) -> str: headers, _ = self._requester.requestBlobAndCheck("GET", f"{self.url}/logs") return headers["location"] def _useAttributes(self, attributes: dict[str, Any]) -> None: if "check_run_url" in attributes: # pragma no branch self._check_run_url = self._makeStringAttribute(attributes["check_run_url"]) if "completed_at" in attributes: # pragma no branch self._completed_at = self._makeDatetimeAttribute(attributes["completed_at"]) if "conclusion" in attributes: # pragma no branch self._conclusion = self._makeStringAttribute(attributes["conclusion"]) if "head_sha" in attributes: # pragma no branch self._head_sha = self._makeStringAttribute(attributes["head_sha"]) if "html_url" in attributes: # pragma no branch self._html_url = self._makeStringAttribute(attributes["html_url"]) if "id" in attributes: # pragma no branch self._id = self._makeIntAttribute(attributes["id"]) if "name" in attributes: # pragma no branch self._name = self._makeStringAttribute(attributes["name"]) if "node_id" in attributes: # pragma no branch self._node_id = self._makeStringAttribute(attributes["node_id"]) if "run_id" in attributes: # pragma no branch self._run_id = self._makeIntAttribute(attributes["run_id"]) if "run_url" in attributes: # pragma no branch self._run_url = self._makeStringAttribute(attributes["run_url"]) if "started_at" in attributes: # pragma no branch self._started_at = self._makeDatetimeAttribute(attributes["started_at"]) if "status" in attributes: # pragma no branch self._status = self._makeStringAttribute(attributes["status"]) if "steps" in attributes: # pragma no branch self._steps = self._makeListOfClassesAttribute(github.WorkflowStep.WorkflowStep, 
attributes["steps"]) if "url" in attributes: # pragma no branch self._url = self._makeStringAttribute(attributes["url"])
299,170
forward dynamics
# # Copyright (c) 2018-2020 CNRS INRIA # ## In this file, are reported some deprecated functions that are still maintained until the next important future releases ## from __future__ import print_function import warnings as _warnings from . import pinocchio_pywrap as pin from .deprecation import deprecated, DeprecatedWarning # This function is only deprecated when using a specific signature. Therefore, it needs special care # Marked as deprecated on 16 Sept 2019 def impulseDynamics(model, data, *args): if len(args)==5 and type(args[4]) is bool: message = ("This function signature has been deprecated and will be removed in future releases of Pinocchio. " "Please change for the new signature of impulseDynamics(model,data[,q],v_before,J[,r_coeff[,inv_damping]]).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) q = args[0] v_before = args[1] J = args[2] r_coeff = args[3] updateKinematics = args[4] inv_damping = 0. if updateKinematics: return pin.impulseDynamics(model,data,q,v_before,J,r_coeff,inv_damping) else: return pin.impulseDynamics(model,data,v_before,J,r_coeff,inv_damping) return pin.impulseDynamics(model, data, *args) impulseDynamics.__doc__ = ( pin.impulseDynamics.__doc__ + '\n\nimpulseDynamics( (Model)Model, (Data)Data, (object)q, (object)v_before, (object)J, (float)r_coeff, (bool)updateKinematics) -> object :' + '\n This function signature has been deprecated and will be removed in future releases of Pinocchio.' ) # This function is only deprecated when using a specific signature. Therefore, it needs special care # Marked as deprecated on 2 Oct 2019 def METHOD_NAME(model, data, *args): if len(args)==7 and type(args[6]) is bool: message = ("This function signature has been deprecated and will be removed in future releases of Pinocchio. 
" "Please change for the new signature of forwardDynamics(model,data[,q],v,tau,J,gamma[,inv_damping]).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) q = args[0] v = args[1] tau = args[2] J = args[3] gamma = args[4] inv_damping = args[5] updateKinematics = args[6] if updateKinematics: return pin.METHOD_NAME(model,data,q,v,tau,J,gamma,inv_damping) else: return pin.METHOD_NAME(model,data,tau,J,gamma,inv_damping) return pin.METHOD_NAME(model, data, *args) METHOD_NAME.__doc__ = ( pin.METHOD_NAME.__doc__ + '\n\nforwardDynamics( (Model)Model, (Data)Data, (object)q, (object)v, (object)tau, (object)J, (object)gamma, (float)damping, (bool)update_kinematics) -> object :' + '\n This function signature has been deprecated and will be removed in future releases of Pinocchio.' ) @deprecated("This function has been renamed computeJointJacobian and will be removed in future releases of Pinocchio. Please change for new computeJointJacobian.") def jointJacobian(model, data, q, jointId): return pin.computeJointJacobian(model,data,q,jointId) @deprecated("This function has been renamed computeFrameJacobian and will be removed in future releases of Pinocchio. Please change for new computeFrameJacobian.") def frameJacobian(model, data, q, frameId): return pin.computeFrameJacobian(model,data,q,frameId) def computeCentroidalDynamics(model, data, q, v, a = None): if a is None: message = ("This function signature has been renamed and will be removed in future releases of Pinocchio. " "Please change for the new signature of computeCentroidalMomentum(model,data,q,v).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) return pin.computeCentroidalMomentum(model,data,q,v) else: message = ("This function signature has been renamed and will be removed in future releases of Pinocchio. 
" "Please change for the new signature of computeCentroidalMomentumTimeVariation(model,data,q,v,a).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) return pin.computeCentroidalMomentum(model,data,q,v,a) computeCentroidalDynamics.__doc__ = ( "This function has been renamed computeCentroidalMomentum or computeCentroidalMomentumTimeVariation to either only compute the centroidal momentum quantity or also its time derivative respectively." ) class GeometryObject(pin.GeometryObject): @property @deprecated("The fcl property has been renamed geometry. Please use GeometryObject.geometry instead") def fcl(self): return self.geometry @deprecated("This function is now called SE3ToXYZQUATtuple. Please change for this new signature to delete this warning.") def se3ToXYZQUATtuple(M): return pin.SE3ToXYZQUATtuple(M) @deprecated("This function is now called SE3ToXYZQUAT. Please change for this new signature to delete this warning.") def se3ToXYZQUAT(M): return pin.SE3ToXYZQUAT(M) @deprecated("This function is now called XYZQUATToSE3. Please change for this new signature to delete this warning.") def XYZQUATToSe3(x): return pin.XYZQUATToSE3(x) def buildGeomFromUrdf(model, filename, *args, **kwargs): arg3 = args[0] if isinstance(arg3,(str,list,pin.StdVec_StdString)): package_dir = arg3 geom_type = args[1] if len(args) >= 3: mesh_loader = args[2] message = ("This function signature is now deprecated and will be removed in future releases of Pinocchio. " "Please change for the new signature buildGeomFromUrdf(model,filename,type,package_dirs,mesh_loader).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) return pin.buildGeomFromUrdf(model,filename,geom_type,package_dir,mesh_loader, **kwargs) else: message = ("This function signature is now deprecated and will be removed in future releases of Pinocchio. 
" "Please change for the new signature buildGeomFromUrdf(model,filename,type,package_dirs).") _warnings.warn(message, category=DeprecatedWarning, stacklevel=2) return pin.buildGeomFromUrdf(model,filename,geom_type,package_dir, **kwargs) else: return pin.buildGeomFromUrdf(model, filename, *args, **kwargs) buildGeomFromUrdf.__doc__ = ( pin.buildGeomFromUrdf.__doc__ ) @deprecated("This function is now deprecated and will be removed in future releases of Pinocchio. " "Please change for the new function computePotentialEnergy.") def potentialEnergy(model,data,q,update_kinematics=True): if update_kinematics: return pin.computePotentialEnergy(model,data,q) else: return pin.computePotentialEnergy(model,data) potentialEnergy.__doc__ += '\n' + pin.computePotentialEnergy.__doc__ @deprecated("This function is now deprecated and will be removed in future releases of Pinocchio. " "Please change for the new function computeKineticEnergy.") def kineticEnergy(model,data,q,v,update_kinematics=True): if update_kinematics: return pin.computeKineticEnergy(model,data,q,v) else: return pin.computeKineticEnergy(model,data) kineticEnergy.__doc__ += '\n' + pin.computeKineticEnergy.__doc__ from .utils import npToTTuple, npToTuple pin.rpy.npToTTuple = deprecated("This function was moved to the utils submodule.")(npToTTuple) pin.rpy.npToTuple = deprecated("This function was moved to the utils submodule.")(npToTuple) # Marked as deprecated on 26 Mar 2020 @deprecated("This function is now deprecated without replacement.") def setGeometryMeshScales(geom_model, mesh_scale): import numpy as np if not isinstance(mesh_scale, np.ndarray): mesh_scale = np.array([mesh_scale]*3) for geom in geom_model.geometryObjects: geom.meshScale = mesh_scale
299,171
icon world scale getter
from ursina import Entity, Text, camera, color, mouse, BoxCollider, Sequence, Func, Vec2, Vec3, scene from ursina.models.procedural.quad import Quad import textwrap from ursina.scripts.property_generator import generate_properties_for_class @generate_properties_for_class() class Button(Entity): default_color = color.black90 default_model = None # will default to rounded Quad def __init__(self, text='', radius=.1, text_origin=(0,0), **kwargs): super().__init__() self.parent = camera.ui self.disabled = False for key, value in kwargs.items(): # set the scale before model for correct corners if key in ('scale', 'scale_x', 'scale_y', 'scale_z', 'world_scale', 'world_scale_x', 'world_scale_y', 'world_scale_z'): setattr(self, key, value) if Button.default_model is None: if not 'model' in kwargs and self.scale[0] != 0 and self.scale[1] != 0: self.model = Quad(aspect=self.scale[0] / self.scale[1], radius=radius) else: self.model = Button.default_model if 'color' in kwargs: self.color = kwargs['color'] else: self.color = Button.default_color self.highlight_color = self.color.tint(.2) self.pressed_color = self.color.tint(-.2) self.highlight_scale = 1 # multiplier self.pressed_scale = 1 # multiplier self.collider = 'box' for key, value in kwargs.items(): setattr(self, key, value) self.text_entity = None if text: self.text = text self.text_origin = text_origin self.original_scale = self.scale def text_getter(self): if self.text_entity: return self.text_entity.text def text_setter(self, value): if isinstance(value, str) and not self.text_entity: self.text_entity = Text(parent=self, size=Text.size*20, position=(-self.origin[0],-self.origin[1],-.1), origin=(0,0), add_to_scene_entities=False) self.text_entity.text = value self.text_entity.world_scale = (1,1,1) def text_origin_getter(self): if not self.text_entity: return (0,0) return self.text_entity.origin def text_origin_setter(self, value): if not self.text_entity: return self.text_entity.world_parent = self.model 
self.text_entity.position = value # self.text_entity.x += self.model.radius * self.scale_y/self.scale_x * (-value[0]*2) # self.text_entity.y += self.model.radius * self.scale_y/self.scale_x * (-value[1]*2) self.text_entity.origin = value self.text_entity.world_parent = self def text_color_getter(self): return self.text_entity.color def text_color_setter(self, value): self.text_entity.color = value def icon_getter(self): return self.getattr('icon_entity', None) def icon_setter(self, value): if value and not hasattr(self, 'icon_entity'): self.icon_entity = Entity(parent=self.model, name=f'button_icon_entity_{value}', model='quad', z=-.1, add_to_scene_entities=False) self.icon_entity.texture = value def METHOD_NAME(self): if not self.icon: return None return self.icon_entity.world_scale def icon_world_scale_setter(self, value): if self.icon: self.icon_entity.world_scale = value def origin_getter(self): return getattr(self, '_origin', Vec3.zero) def origin_setter(self, value): if hasattr(self, 'text_entity') and self.text_entity: self.text_entity.world_parent = self.model super().origin_setter(value) self.text_entity.world_parent = self else: super().origin_setter(value) if isinstance(self.collider, BoxCollider): # update collider position by making a new one self.collider = 'box' def eternal_setter(self, value): super().eternal_setter(value) if self.text_entity: self.text_entity.eternal = value def input(self, key): if self.disabled or not self.model: return if key == 'left mouse down': if self.hovered: self.model.setColorScale(self.pressed_color) self.model.setScale(Vec3(self.pressed_scale, self.pressed_scale, 1)) if key == 'left mouse up': if self.hovered: self.model.setColorScale(self.highlight_color) self.model.setScale(Vec3(self.highlight_scale, self.highlight_scale, 1)) else: self.model.setColorScale(self.color) self.model.setScale(Vec3(1,1,1)) def on_mouse_enter(self): if not self.disabled and self.model: self.model.setColorScale(self.highlight_color) if 
self.highlight_scale != 1: self.model.setScale(Vec3(self.highlight_scale, self.highlight_scale, 1)) if hasattr(self, 'tooltip') and self.tooltip: self.tooltip.enabled = True def on_mouse_exit(self): if not self.disabled and self.model: self.model.setColorScale(self.color) if not mouse.left and self.highlight_scale != 1: self.model.setScale(Vec3(1,1,1)) if hasattr(self, 'tooltip') and self.tooltip: self.tooltip.enabled = False def fit_to_text(self, radius=.1, padding=Vec2(Text.size*1.5, Text.size)): if not self.text_entity.text or self.text_entity.text == '': return self.text_entity.world_parent = scene self.original_parent = self.parent self.parent = self.text_entity self.scale = Vec2(self.text_entity.width*self.text_entity.world_scale_x, self.text_entity.height*self.text_entity.world_scale_y) * Text.size * 2 # self.scale = Vec2(self.text_entity.width, self.text_entity.height) * Text.size * 2 self.scale += Vec2(*padding) self.position += self.text_origin * self.scale.xy * .5 self.model = Quad(aspect=self.scale_x/self.scale_y, radius=radius) self.parent = self.original_parent self.text_entity.world_parent = self if __name__ == '__main__': from ursina import Ursina, application, Tooltip app = Ursina() b = Button(text='hello world!', color=color.azure, icon='sword', scale=.25, text_origin=(-.5,0)) # b.fit_to_text() b.on_click = application.quit # assign a function to the button. b.tooltip = Tooltip('exit') app.run()
299,172
test combinatorial import
# MIT License # # Copyright The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
import collections import os import unittest import TestCmd import SCons.Scanner.D test = TestCmd.TestCmd(workdir = '') class DummyEnvironment(collections.UserDict): def __init__(self, **kwargs) -> None: super().__init__() self.data.update(kwargs) self.fs = SCons.Node.FS.FS(test.workpath('')) def Dictionary(self, *args): return self.data def subst(self, strSubst, target=None, source=None, conv=None): if strSubst[0] == '$': return self.data[strSubst[1:]] return strSubst def subst_list(self, strSubst, target=None, source=None, conv=None): if strSubst[0] == '$': return [self.data[strSubst[1:]]] return [[strSubst]] def subst_path(self, path, target=None, source=None, conv=None): if not isinstance(path, list): path = [path] return list(map(self.subst, path)) def get_calculator(self): return None def get_factory(self, factory): return factory or self.fs.File def Dir(self, filename): return self.fs.Dir(filename) def File(self, filename): return self.fs.File(filename) if os.path.normcase('foo') == os.path.normcase('FOO'): my_normpath = os.path.normcase else: my_normpath = os.path.normpath def deps_match(self, deps, headers) -> None: global my_normpath scanned = list(map(my_normpath, list(map(str, deps)))) expect = list(map(my_normpath, headers)) self.assertTrue(scanned == expect, "expect %s != scanned %s" % (expect, scanned)) """ Examples from https://dlang.org/spec/module.html D Language: 2.071.1 Accessed: 11 August 2016 """ # Regular import test.write('basic.d',""" import A; void main() {} """) # Static import test.write('static.d',""" static import A; void main() { std.stdio.writeln("hello!"); // ok, writeln is fully qualified } """) # Public import test.write('public.d',""" public import A; void main() {} """) # Renamed import test.write('rename.d',""" import B = A; void main() { io.writeln("hello!"); // ok, calls std.stdio.writeln } """) # Selective import test.write('selective.d',""" import A : B, C; void main() { writeln("hello!"); // ok, writeln bound into current 
namespace foo("world"); // ok, calls std.stdio.write() } """) # Renamed and Selective import test.write('renameAndSelective.d',""" import B = A : C = D; void main() { } """) # Scoped import test.write('scoped.d',""" void main() { import A; } """) # Combinatorial import test.write('combinatorial.d',""" import A, B, CCC = C, DDD = D : EEE = FFF; void main() { } """) # Subdirs import test.write('subdirs.d',""" import X.Y, X.Z, X.X.X; void main() {} """) # Multiple import test.write('multiple.d',""" public import B; static import C; import X = X.Y : Q, R, S, T = U; void main() { import A; } """) # Multiline import test.write('multiline.d',""" import A; void main() {} """) test.write('A.d',""" module A; void main() {} """) test.write('B.d',""" module B; void main() {} """) test.write('C.d',""" module C; void main() {} """) test.write('D.d',""" module D; void main() {} """) test.subdir('X', os.path.join('X','X')) test.write(os.path.join('X','Y.d'),""" module Y; void main() {} """) test.write(os.path.join('X','Z.d'),""" module Z; void main() {} """) test.write(os.path.join('X','X','X.d'),""" module X; void main() {} """) class DScannerTestCase(unittest.TestCase): def helper(self, filename, headers) -> None: env = DummyEnvironment() s = SCons.Scanner.D.DScanner() path = s.path(env) deps = s(env.File(filename), env, path) deps_match(self, deps, headers) def test_BasicImport(self) -> None: self.helper('basic.d', ['A.d']) def test_StaticImport(self) -> None: self.helper('static.d', ['A.d']) def test_publicImport(self) -> None: self.helper('public.d', ['A.d']) def test_RenameImport(self) -> None: self.helper('rename.d', ['A.d']) def test_SelectiveImport(self) -> None: self.helper('selective.d', ['A.d']) def test_RenameAndSelectiveImport(self) -> None: self.helper('renameAndSelective.d', ['A.d']) def test_ScopedImport(self) -> None: self.helper('scoped.d', ['A.d']) def METHOD_NAME(self) -> None: self.helper('combinatorial.d', ['A.d', 'B.d', 'C.d', 'D.d']) def 
test_SubdirsImport(self) -> None: self.helper('subdirs.d', [os.path.join('X','X','X.d'), os.path.join('X','Y.d'), os.path.join('X','Z.d')]) def test_MultipleImport(self) -> None: self.helper('multiple.d', ['A.d', 'B.d', 'C.d', os.path.join('X','Y.d')]) def test_MultilineImport(self) -> None: self.helper('multiline.d', ['A.d']) if __name__ == "__main__": unittest.main() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
299,173
multi
import pytest from mitmproxy.coretypes import multidict class _TMulti: @staticmethod def _kconv(key): return key.lower() class TMultiDict(_TMulti, multidict.MultiDict): pass class TestMultiDict: @staticmethod def METHOD_NAME(): return TMultiDict((("foo", "bar"), ("bar", "baz"), ("Bar", "bam"))) def test_init(self): md = TMultiDict() assert len(md) == 0 md = TMultiDict([("foo", "bar")]) assert len(md) == 1 assert md.fields == (("foo", "bar"),) def test_repr(self): assert repr(self.METHOD_NAME()) == ( "TMultiDict[('foo', 'bar'), ('bar', 'baz'), ('Bar', 'bam')]" ) def test_getitem(self): md = TMultiDict([("foo", "bar")]) assert "foo" in md assert "Foo" in md assert md["foo"] == "bar" with pytest.raises(KeyError): assert md["bar"] md_multi = TMultiDict([("foo", "a"), ("foo", "b")]) assert md_multi["foo"] == "a" def test_setitem(self): md = TMultiDict() md["foo"] = "bar" assert md.fields == (("foo", "bar"),) md["foo"] = "baz" assert md.fields == (("foo", "baz"),) md["bar"] = "bam" assert md.fields == (("foo", "baz"), ("bar", "bam")) def test_delitem(self): md = self.METHOD_NAME() del md["foo"] assert "foo" not in md assert "bar" in md with pytest.raises(KeyError): del md["foo"] del md["bar"] assert md.fields == () def test_iter(self): md = self.METHOD_NAME() assert list(md.__iter__()) == ["foo", "bar"] def test_len(self): md = TMultiDict() assert len(md) == 0 md = self.METHOD_NAME() assert len(md) == 2 def test_eq(self): assert TMultiDict() == TMultiDict() assert not (TMultiDict() == 42) md1 = self.METHOD_NAME() md2 = self.METHOD_NAME() assert md1 == md2 md1.fields = md1.fields[1:] + md1.fields[:1] assert not (md1 == md2) def test_hash(self): """ If a class defines mutable objects and implements an __eq__() method, it should not implement __hash__(), since the implementation of hashable collections requires that a key's hash value is immutable. 
""" with pytest.raises(TypeError): assert hash(TMultiDict()) def test_get_all(self): md = self.METHOD_NAME() assert md.get_all("foo") == ["bar"] assert md.get_all("bar") == ["baz", "bam"] assert md.get_all("baz") == [] def test_set_all(self): md = TMultiDict() md.set_all("foo", ["bar", "baz"]) assert md.fields == (("foo", "bar"), ("foo", "baz")) md = TMultiDict( ( ("a", "b"), ("x", "x"), ("c", "d"), ("X", "X"), ("e", "f"), ) ) md.set_all("x", ["1", "2", "3"]) assert md.fields == ( ("a", "b"), ("x", "1"), ("c", "d"), ("X", "2"), ("e", "f"), ("x", "3"), ) md.set_all("x", ["4"]) assert md.fields == ( ("a", "b"), ("x", "4"), ("c", "d"), ("e", "f"), ) def test_add(self): md = self.METHOD_NAME() md.add("foo", "foo") assert md.fields == ( ("foo", "bar"), ("bar", "baz"), ("Bar", "bam"), ("foo", "foo"), ) def test_insert(self): md = TMultiDict([("b", "b")]) md.insert(0, "a", "a") md.insert(2, "c", "c") assert md.fields == (("a", "a"), ("b", "b"), ("c", "c")) def test_keys(self): md = self.METHOD_NAME() assert list(md.keys()) == ["foo", "bar"] assert list(md.keys(multi=True)) == ["foo", "bar", "Bar"] def test_values(self): md = self.METHOD_NAME() assert list(md.values()) == ["bar", "baz"] assert list(md.values(multi=True)) == ["bar", "baz", "bam"] def test_items(self): md = self.METHOD_NAME() assert list(md.items()) == [("foo", "bar"), ("bar", "baz")] assert list(md.items(multi=True)) == [ ("foo", "bar"), ("bar", "baz"), ("Bar", "bam"), ] def test_state(self): md = self.METHOD_NAME() assert len(md.get_state()) == 3 assert md == TMultiDict.from_state(md.get_state()) md2 = TMultiDict() assert md != md2 md2.set_state(md.get_state()) assert md == md2 class TParent: def __init__(self): self.vals = tuple() def setter(self, vals): self.vals = vals def getter(self): return self.vals class TestMultiDictView: def test_modify(self): p = TParent() tv = multidict.MultiDictView(p.getter, p.setter) assert len(tv) == 0 tv["a"] = "b" assert p.vals == (("a", "b"),) tv["c"] = "b" assert p.vals 
== (("a", "b"), ("c", "b")) assert tv["a"] == "b" def test_copy(self): p = TParent() tv = multidict.MultiDictView(p.getter, p.setter) c = tv.copy() assert isinstance(c, multidict.MultiDict) assert tv.items() == c.items() c["foo"] = "bar" assert tv.items() != c.items()
299,174
present
""" Management of Docker volumes .. versionadded:: 2017.7.0 :depends: docker_ Python module .. note:: Older releases of the Python bindings for Docker were called docker-py_ in PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are supported. These python bindings can easily be installed using :py:func:`pip.install <salt.modules.pip.install>`: .. code-block:: bash salt myminion pip.install docker To upgrade from docker-py_ to docker_, you must first uninstall docker-py_, and then install docker_: .. code-block:: bash salt myminion pip.uninstall docker-py salt myminion pip.install docker .. _docker: https://pypi.python.org/pypi/docker .. _docker-py: https://pypi.python.org/pypi/docker-py These states were moved from the :mod:`docker <salt.states.docker>` state module (formerly called **dockerng**) in the 2017.7.0 release. """ import logging import salt.utils.data # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name # Define the module's virtual name __virtualname__ = "docker_volume" __virtual_aliases__ = ("moby_volume",) def __virtual__(): """ Only load if the docker execution module is available """ if "docker.version" in __salt__: return __virtualname__ return (False, __salt__.missing_fun_string("docker.version")) def _find_volume(name): """ Find volume by name on minion """ docker_volumes = __salt__["docker.volumes"]()["Volumes"] if docker_volumes: volumes = [v for v in docker_volumes if v["Name"] == name] if volumes: return volumes[0] return None def METHOD_NAME(name, driver=None, driver_opts=None, force=False): """ Ensure that a volume is present. .. versionadded:: 2015.8.4 .. versionchanged:: 2015.8.6 This state no longer deletes and re-creates a volume if the existing volume's driver does not match the ``driver`` parameter (unless the ``force`` parameter is set to ``True``). .. 
versionchanged:: 2017.7.0 This state was renamed from **docker.volume_present** to **docker_volume.present** name Name of the volume driver Type of driver for that volume. If ``None`` and the volume does not yet exist, the volume will be created using Docker's default driver. If ``None`` and the volume does exist, this function does nothing, even if the existing volume's driver is not the Docker default driver. (To ensure that an existing volume's driver matches the Docker default, you must explicitly name Docker's default driver here.) driver_opts Options for the volume driver force : False If the volume already exists but the existing volume's driver does not match the driver specified by the ``driver`` parameter, this parameter controls whether the function errors out (if ``False``) or deletes and re-creates the volume (if ``True``). .. versionadded:: 2015.8.6 Usage Examples: .. code-block:: yaml volume_foo: docker_volume.present .. code-block:: yaml volume_bar: docker_volume.present - name: bar - driver: local - driver_opts: foo: bar .. code-block:: yaml volume_bar: docker_volume.present - name: bar - driver: local - driver_opts: - foo: bar - option: value """ ret = {"name": name, "changes": {}, "result": False, "comment": ""} if salt.utils.data.is_dictlist(driver_opts): driver_opts = salt.utils.data.repack_dictlist(driver_opts) volume = _find_volume(name) if not volume: if __opts__["test"]: ret["result"] = None ret["comment"] = "The volume '{}' will be created".format(name) return ret try: ret["changes"]["created"] = __salt__["docker.create_volume"]( name, driver=driver, driver_opts=driver_opts ) except Exception as exc: # pylint: disable=broad-except ret["comment"] = "Failed to create volume '{}': {}".format(name, exc) return ret else: result = True ret["result"] = result return ret # volume exists, check if driver is the same. 
if driver is not None and volume["Driver"] != driver: if not force: ret["comment"] = ( "Driver for existing volume '{}' ('{}')" " does not match specified driver ('{}')" " and force is False".format(name, volume["Driver"], driver) ) ret["result"] = None if __opts__["test"] else False return ret if __opts__["test"]: ret["result"] = None ret["comment"] = ( "The volume '{}' will be replaced with a" " new one using the driver '{}'".format(name, volume) ) return ret try: ret["changes"]["removed"] = __salt__["docker.remove_volume"](name) except Exception as exc: # pylint: disable=broad-except ret["comment"] = "Failed to remove volume '{}': {}".format(name, exc) return ret else: try: ret["changes"]["created"] = __salt__["docker.create_volume"]( name, driver=driver, driver_opts=driver_opts ) except Exception as exc: # pylint: disable=broad-except ret["comment"] = "Failed to create volume '{}': {}".format(name, exc) return ret else: result = True ret["result"] = result return ret ret["result"] = True ret["comment"] = "Volume '{}' already exists.".format(name) return ret def absent(name, driver=None): """ Ensure that a volume is absent. .. versionadded:: 2015.8.4 .. versionchanged:: 2017.7.0 This state was renamed from **docker.volume_absent** to **docker_volume.absent** name Name of the volume Usage Examples: .. code-block:: yaml volume_foo: docker_volume.absent """ ret = {"name": name, "changes": {}, "result": False, "comment": ""} volume = _find_volume(name) if not volume: ret["result"] = True ret["comment"] = "Volume '{}' already absent".format(name) return ret try: ret["changes"]["removed"] = __salt__["docker.remove_volume"](name) ret["result"] = True except Exception as exc: # pylint: disable=broad-except ret["comment"] = "Failed to remove volume '{}': {}".format(name, exc) return ret
299,175
r
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 
32 (2011), 2319--2327, doi:10.1002/jcc.21787 # import MDAnalysis as mda import numpy as np import pytest from numpy.testing import (assert_equal, assert_allclose) from MDAnalysisTests.datafiles import (DLP_CONFIG, DLP_CONFIG_minimal, DLP_CONFIG_order, DLP_HISTORY, DLP_HISTORY_minimal, DLP_HISTORY_order, DLP_HISTORY_minimal_cell) class _DLPConfig(object): @pytest.fixture() def METHOD_NAME(self): return mda.coordinates.DLPoly.ConfigReader @pytest.fixture() def rd(self, METHOD_NAME): return METHOD_NAME(self.f) @pytest.fixture() def ts(self, rd): return rd.ts def test_read_unitcell(self, ts): ref = np.array([[18.6960000000, 0.0000000000, 0.0000000000], [0.0000000000, 18.6960000000, 0.0000000000], [0.0000000000, 0.0000000000, 18.6960000000]]) assert_allclose(ts.dimensions, mda.coordinates.core.triclinic_box(*ref)) def test_positions(self, ts): ref = np.array([-7.608595309, -7.897790000, -7.892053559]) assert_allclose(ts._pos[0], ref) def test_velocities(self, ts): ref = np.array([1.056610291, -1.218664448, 3.345828610]) assert_allclose(ts._velocities[0], ref) def test_forces(self, ts): ref = np.array([-1979.558687, 739.7961625, 1027.996603]) assert_allclose(ts._forces[0], ref) class TestConfigReader(_DLPConfig): f = DLP_CONFIG def test_read(self, rd): assert_equal(rd.title, "DL_POLY: Potassium Chloride Test Case") class TestConfigOrder(_DLPConfig): f = DLP_CONFIG_order class TestConfigMinimal(_DLPConfig): f = DLP_CONFIG_minimal def test_read_unitcell(self): pass # cythonised class can no longer raise AttributeError # so changed to test of has_... 
properties def test_velocities(self, ts): assert(ts.has_velocities == False) def test_forces(self, ts): assert(ts.has_forces == False) class _DLPConfig2(object): @pytest.fixture() def u(self): return mda.Universe(self.f, format='CONFIG') def test_names(self, u): ref = ['C', 'B', 'A'] assert_equal([a.name for a in u.atoms], ref) def test_pos(self, u): ref = np.array([-7.821414265, -4.635443539, -4.732164540]) assert_allclose(u.atoms[2].position, ref) def test_vel(self, u): ref = np.array([2.637614561, 0.5778767520E-01, -1.704765568]) assert_allclose(u.atoms[2].velocity, ref) def test_for(self, u): ref = np.array([150.3309776, -812.6932914, 1429.413120]) assert_allclose(u.atoms[2].force, ref) def test_number(self, u): ref = [0, 1, 2] assert_equal([a.index for a in u.atoms], ref) class TestConfigReader2(_DLPConfig2): f = DLP_CONFIG_order class TestConfigReaderMinimal2(_DLPConfig2): f = DLP_CONFIG_minimal def test_vel(self): pass def test_for(self): pass class _DLHistory(object): @pytest.fixture() def u(self): return mda.Universe(self.f, format='HISTORY') def test_len(self, u): assert_equal(len(u.trajectory), 3) assert_equal([ts.frame for ts in u.trajectory], [0, 1, 2]) def test_getting(self, u): ts = u.trajectory[1] assert_equal(ts.frame, 1) def test_slicing(self, u): nums = [ts.frame for ts in u.trajectory[::2]] assert_equal(nums, [0, 2]) def test_slicing_2(self, u): nums = [ts.frame for ts in u.trajectory[1::-2]] assert_equal(nums, [1]) def test_position(self, u): ref = np.array([[-7.595541651, -7.898808509, -7.861763110 ], [-7.019565641, -7.264933320, -7.045213551], [-6.787470785, -6.912685099, -6.922156843]]) for ts, METHOD_NAME in zip(u.trajectory, ref): assert_allclose(u.atoms[0].position, METHOD_NAME) def test_velocity(self, u): ref = np.array([[1.109901682, -1.500264697, 4.752251711 ], [-1.398479696, 2.091141311, 1.957430003], [0.2570827995, -0.7146878577, -3.547444215]]) for ts, METHOD_NAME in zip(u.trajectory, ref): assert_allclose(u.atoms[0].velocity, 
METHOD_NAME) def test_force(self, u): ref = np.array([[-2621.386432, 1579.334443, 1041.103241 ], [-1472.262341, 2450.379615, -8149.916193], [2471.802059, -3828.467296, 3596.679326]]) for ts, METHOD_NAME in zip(u.trajectory, ref): assert_allclose(u.atoms[0].force, METHOD_NAME) def test_unitcell(self, u): ref1 = np.array([[18.6796195135, 0.0000058913, -0.0000139999 ], [0.0000058913, 18.6794658887, -0.0000016255], [-0.0000139999, -0.0000016255, 18.6797229304]]) ref2 = np.array([[17.2277221163, -0.0044216126, -0.0003229237 ], [-0.0044205826, 17.2124253987, 0.0019439244], [-0.0003226531, 0.0019445826, 17.2416976104]]) ref3 = np.array([[16.5435673205, -0.0108424742, 0.0014935464 ], [-0.0108333201, 16.5270298891, 0.0011094612], [0.0014948739, 0.0011058349, 16.5725517831]]) for ts, METHOD_NAME in zip(u.trajectory, [ref1, ref2, ref3]): assert_allclose(ts.dimensions, mda.coordinates.core.triclinic_box(*METHOD_NAME)) class TestDLPolyHistory(_DLHistory): f = DLP_HISTORY class TestDLPolyHistoryOrder(_DLHistory): f = DLP_HISTORY_order class TestDLPolyHistoryMinimal(_DLHistory): f = DLP_HISTORY_minimal def test_velocity(self, u): with pytest.raises(mda.NoDataError): getattr(u.atoms[0], 'velocity') def test_force(self, u): with pytest.raises(mda.NoDataError): getattr(u.atoms[0], 'force') def test_unitcell(self): pass class TestDLPolyHistoryMinimalCell(_DLHistory): f = DLP_HISTORY_minimal_cell def test_velocity(self, u): with pytest.raises(mda.NoDataError): getattr(u.atoms[0], 'velocity') def test_force(self, u): with pytest.raises(mda.NoDataError): getattr(u.atoms[0], 'force')
299,176
create grid
import asyncio import enum import random import time import reactpy class GameState(enum.Enum): init = 0 lost = 1 won = 2 play = 3 @reactpy.component def GameView(): game_state, set_game_state = reactpy.hooks.use_state(GameState.init) if game_state == GameState.play: return GameLoop(grid_size=6, block_scale=50, set_game_state=set_game_state) start_button = reactpy.html.button( {"on_click": lambda event: set_game_state(GameState.play)}, "Start" ) if game_state == GameState.won: menu = reactpy.html.div(reactpy.html.h3("You won!"), start_button) elif game_state == GameState.lost: menu = reactpy.html.div(reactpy.html.h3("You lost"), start_button) else: menu = reactpy.html.div(reactpy.html.h3("Click to play"), start_button) menu_style = reactpy.html.style( """ .snake-game-menu h3 { margin-top: 0px !important; } """ ) return reactpy.html.div({"class_name": "snake-game-menu"}, menu_style, menu) class Direction(enum.Enum): ArrowUp = (0, -1) ArrowLeft = (-1, 0) ArrowDown = (0, 1) ArrowRight = (1, 0) @reactpy.component def GameLoop(grid_size, block_scale, set_game_state): # we `use_ref` here to capture the latest direction press without any delay direction = reactpy.hooks.use_ref(Direction.ArrowRight.value) # capture the last direction of travel that was rendered last_direction = direction.current snake, set_snake = reactpy.hooks.use_state( [(grid_size // 2 - 1, grid_size // 2 - 1)] ) food, set_food = use_snake_food(grid_size, snake) grid = METHOD_NAME(grid_size, block_scale) @reactpy.event(prevent_default=True) def on_direction_change(event): if hasattr(Direction, event["key"]): maybe_new_direction = Direction[event["key"]].value direction_vector_sum = tuple( map(sum, zip(last_direction, maybe_new_direction)) ) if direction_vector_sum != (0, 0): direction.current = maybe_new_direction grid_wrapper = reactpy.html.div({"on_key_down": on_direction_change}, grid) assign_grid_block_color(grid, food, "blue") for location in snake: assign_grid_block_color(grid, location, "white") 
new_game_state = None if snake[-1] in snake[:-1]: assign_grid_block_color(grid, snake[-1], "red") new_game_state = GameState.lost elif len(snake) == grid_size**2: assign_grid_block_color(grid, snake[-1], "yellow") new_game_state = GameState.won interval = use_interval(0.5) @reactpy.hooks.use_effect async def animate(): if new_game_state is not None: await asyncio.sleep(1) set_game_state(new_game_state) return await interval new_snake_head = ( # grid wraps due to mod op here (snake[-1][0] + direction.current[0]) % grid_size, (snake[-1][1] + direction.current[1]) % grid_size, ) if snake[-1] == food: set_food() new_snake = [*snake, new_snake_head] else: new_snake = snake[1:] + [new_snake_head] set_snake(new_snake) return grid_wrapper def use_snake_food(grid_size, current_snake): grid_points = {(x, y) for x in range(grid_size) for y in range(grid_size)} points_not_in_snake = grid_points.difference(current_snake) food, _set_food = reactpy.hooks.use_state(current_snake[-1]) def set_food(): _set_food(random.choice(list(points_not_in_snake))) return food, set_food def use_interval(rate): usage_time = reactpy.hooks.use_ref(time.time()) async def interval() -> None: await asyncio.sleep(rate - (time.time() - usage_time.current)) usage_time.current = time.time() return asyncio.ensure_future(interval()) def METHOD_NAME(grid_size, block_scale): return reactpy.html.div( { "style": { "height": f"{block_scale * grid_size}px", "width": f"{block_scale * grid_size}px", "cursor": "pointer", "display": "grid", "grid-gap": 0, "grid-template-columns": f"repeat({grid_size}, {block_scale}px)", "grid-template-rows": f"repeat({grid_size}, {block_scale}px)", }, "tab_index": -1, }, [ reactpy.html.div( {"style": {"height": f"{block_scale}px"}, "key": i}, [ create_grid_block("black", block_scale, key=i) for i in range(grid_size) ], ) for i in range(grid_size) ], ) def create_grid_block(color, block_scale, key): return reactpy.html.div( { "style": { "height": f"{block_scale}px", "width": 
f"{block_scale}px", "background_color": color, "outline": "1px solid grey", }, "key": key, } ) def assign_grid_block_color(grid, point, color): x, y = point block = grid["children"][x]["children"][y] block["attributes"]["style"]["backgroundColor"] = color reactpy.run(GameView)
299,177
test completions throws if prompt contains non
from unittest import mock import pytest from fastapi import HTTPException from fastapi.encoders import jsonable_encoder from pydantic import ValidationError from mlflow.gateway.config import RouteConfig from mlflow.gateway.providers.cohere import CohereProvider from mlflow.gateway.schemas import completions, embeddings from tests.gateway.tools import MockAsyncResponse def completions_config(): return { "name": "completions", "route_type": "llm/v1/completions", "model": { "provider": "cohere", "name": "command", "config": { "cohere_api_key": "key", }, }, } def completions_response(): return { "id": "string", "generations": [ { "id": "string", "text": "This is a test", } ], "prompt": "string", "headers": {"Content-Type": "application/json"}, } @pytest.mark.asyncio async def test_completions(): resp = completions_response() config = completions_config() with mock.patch( "aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp) ) as mock_post: provider = CohereProvider(RouteConfig(**config)) payload = { "prompt": "This is a test", } response = await provider.completions(completions.RequestPayload(**payload)) assert jsonable_encoder(response) == { "candidates": [ { "text": "This is a test", "metadata": {}, } ], "metadata": { "input_tokens": None, "output_tokens": None, "total_tokens": None, "model": "command", "route_type": "llm/v1/completions", }, } mock_post.assert_called_once() @pytest.mark.asyncio async def test_completions_temperature_is_multiplied_by_5(): resp = completions_response() config = completions_config() with mock.patch( "aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp) ) as mock_post: provider = CohereProvider(RouteConfig(**config)) payload = { "prompt": "This is a test", "temperature": 0.5, } await provider.completions(completions.RequestPayload(**payload)) assert mock_post.call_args[1]["json"]["temperature"] == 0.5 * 5 def embeddings_config(): return { "name": "embeddings", "route_type": "llm/v1/embeddings", "model": { 
"provider": "cohere", "name": "embed-english-light-v2.0", "config": { "cohere_api_key": "key", }, }, } def embeddings_response(): return { "id": "bc57846a-3e56-4327-8acc-588ca1a37b8a", "texts": ["hello world"], "embeddings": [ [ 3.25, 0.7685547, 2.65625, -0.30126953, -2.3554688, 1.2597656, ] ], "meta": [ { "api_version": [ { "version": "1", } ] }, ], "headers": {"Content-Type": "application/json"}, } @pytest.mark.asyncio async def test_embeddings(): resp = embeddings_response() config = embeddings_config() with mock.patch( "aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp) ) as mock_post: provider = CohereProvider(RouteConfig(**config)) payload = {"text": "This is a test"} response = await provider.embeddings(embeddings.RequestPayload(**payload)) assert jsonable_encoder(response) == { "embeddings": [ [ 3.25, 0.7685547, 2.65625, -0.30126953, -2.3554688, 1.2597656, ] ], "metadata": { "input_tokens": None, "output_tokens": None, "total_tokens": None, "model": "embed-english-light-v2.0", "route_type": "llm/v1/embeddings", }, } mock_post.assert_called_once() @pytest.mark.asyncio async def test_param_model_is_not_permitted(): config = embeddings_config() provider = CohereProvider(RouteConfig(**config)) payload = { "prompt": "This should fail", "max_tokens": 5000, "model": "something-else", } with pytest.raises(HTTPException, match=r".*") as e: await provider.completions(completions.RequestPayload(**payload)) assert "The parameter 'model' is not permitted" in e.value.detail assert e.value.status_code == 422 @pytest.mark.parametrize("prompt", [{"set1", "set2"}, ["list1"], [1], ["list1", "list2"], [1, 2]]) @pytest.mark.asyncio async def METHOD_NAME(prompt): config = completions_config() provider = CohereProvider(RouteConfig(**config)) payload = {"prompt": prompt} with pytest.raises(ValidationError, match=r"prompt"): await provider.completions(completions.RequestPayload(**payload))
299,178
enable scp
from typing import Any, Optional, Callable import re import os from netmiko.base_connection import BaseConnection from netmiko.cisco_base_connection import CiscoSSHConnection from netmiko.cisco_base_connection import CiscoFileTransfer class CiscoNxosSSH(CiscoSSHConnection): def session_preparation(self) -> None: """Prepare the session after the connection has been established.""" self.ansi_escape_codes = True # NX-OS has an issue where it echoes the command even though it hasn't returned the prompt self._test_channel_read(pattern=r"[>#]") self.set_terminal_width( command="terminal width 511", pattern=r"terminal width 511" ) self.disable_paging() self.set_base_prompt() def normalize_linefeeds(self, a_string: str) -> str: """Convert '\r\n' or '\r\r\n' to '\n, and remove extra '\r's in the text.""" newline = re.compile(r"(\r\r\n\r|\r\r\n|\r\n)") # NX-OS fix for incorrect MD5 on 9K (due to strange <enter> patterns on NX-OS) return newline.sub(self.RESPONSE_RETURN, a_string).replace("\r", "\n") def check_config_mode( self, check_string: str = ")#", pattern: str = r"[>#]", force_regex: bool = False, ) -> bool: """Checks if the device is in configuration mode or not.""" return super().check_config_mode(check_string=check_string, pattern=pattern) def save_config( self, cmd: str = "copy running-config startup-config", confirm: bool = False, confirm_response: str = "", ) -> str: self.enable() output = "" if confirm: output += self._send_command_timing_str( command_string=cmd, strip_prompt=False, strip_command=False ) if confirm_response: output += self._send_command_timing_str( confirm_response, strip_prompt=False, strip_command=False ) else: # Send enter by default output += self._send_command_timing_str( self.RETURN, strip_prompt=False, strip_command=False ) else: # NX-OS is very slow on save_config ensure it waits long enough. 
output += self._send_command_str( command_string=cmd, strip_prompt=False, strip_command=False, read_timeout=100, ) return output class CiscoNxosFileTransfer(CiscoFileTransfer): """Cisco NXOS SCP File Transfer driver.""" def __init__( self, ssh_conn: BaseConnection, source_file: str, dest_file: str, file_system: str = "bootflash:", direction: str = "put", socket_timeout: float = 10.0, progress: Optional[Callable[..., Any]] = None, progress4: Optional[Callable[..., Any]] = None, hash_supported: bool = True, ) -> None: self.ssh_ctl_chan = ssh_conn self.source_file = source_file self.dest_file = dest_file self.direction = direction if hash_supported is False: raise ValueError("hash_supported=False is not supported for NX-OS") if file_system: self.file_system = file_system else: raise ValueError("Destination file system must be specified for NX-OS") if direction == "put": self.source_md5 = self.file_md5(source_file) self.file_size = os.stat(source_file).st_size elif direction == "get": self.source_md5 = self.remote_md5(remote_file=source_file) self.file_size = self.remote_file_size(remote_file=source_file) else: raise ValueError("Invalid direction specified") self.socket_timeout = socket_timeout self.progress = progress self.progress4 = progress4 def check_file_exists(self, remote_cmd: str = "") -> bool: """Check if the dest_file already exists on the file system (return boolean).""" if self.direction == "put": if not remote_cmd: remote_cmd = f"dir {self.file_system}{self.dest_file}" remote_out = self.ssh_ctl_chan._send_command_str(remote_cmd) search_string = r"{}.*Usage for".format(self.dest_file) if "No such file or directory" in remote_out: return False elif re.search(search_string, remote_out, flags=re.DOTALL): return True else: raise ValueError("Unexpected output from check_file_exists") elif self.direction == "get": return os.path.exists(self.dest_file) else: raise ValueError("Invalid value for file transfer direction.") def remote_file_size( self, remote_cmd: str 
= "", remote_file: Optional[str] = None ) -> int: """Get the file size of the remote file.""" if remote_file is None: if self.direction == "put": remote_file = self.dest_file elif self.direction == "get": remote_file = self.source_file else: raise ValueError("Invalid value for file transfer direction.") if not remote_cmd: remote_cmd = f"dir {self.file_system}/{remote_file}" remote_out = self.ssh_ctl_chan._send_command_str(remote_cmd) if re.search("no such file or directory", remote_out, flags=re.I): raise IOError("Unable to find file on remote system") # Match line containing file name escape_file_name = re.escape(remote_file) pattern = r".*({}).*".format(escape_file_name) match = re.search(pattern, remote_out) if match: file_size = match.group(0) file_size = file_size.split()[0] return int(file_size) raise IOError("Unable to find file on remote system") @staticmethod def process_md5(md5_output: str, pattern: str = r"= (.*)") -> str: """Not needed on NX-OS.""" raise NotImplementedError def remote_md5( self, base_cmd: str = "show file", remote_file: Optional[str] = None ) -> str: if remote_file is None: if self.direction == "put": remote_file = self.dest_file elif self.direction == "get": remote_file = self.source_file remote_md5_cmd = f"{base_cmd} {self.file_system}{remote_file} md5sum" output = self.ssh_ctl_chan._send_command_str(remote_md5_cmd, read_timeout=300) output = output.strip() return output def METHOD_NAME(self, cmd: str = "") -> None: raise NotImplementedError def disable_scp(self, cmd: str = "") -> None: raise NotImplementedError
299,179
identity
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetSqlResourceSqlContainerResult', 'AwaitableGetSqlResourceSqlContainerResult', 'get_sql_resource_sql_container', 'get_sql_resource_sql_container_output', ] @pulumi.output_type class GetSqlResourceSqlContainerResult: """ An Azure Cosmos DB container. """ def __init__(__self__, id=None, METHOD_NAME=None, location=None, name=None, options=None, resource=None, tags=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if METHOD_NAME and not isinstance(METHOD_NAME, dict): raise TypeError("Expected argument 'identity' to be a dict") pulumi.set(__self__, "identity", METHOD_NAME) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if options and not isinstance(options, dict): raise TypeError("Expected argument 'options' to be a dict") pulumi.set(__self__, "options", options) if resource and not isinstance(resource, dict): raise TypeError("Expected argument 'resource' to be a dict") pulumi.set(__self__, "resource", resource) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ The unique resource identifier of the ARM resource. 
""" return pulumi.get(self, "id") @property @pulumi.getter def METHOD_NAME(self) -> Optional['outputs.ManagedServiceIdentityResponse']: """ Identity for the resource. """ return pulumi.get(self, "identity") @property @pulumi.getter def location(self) -> Optional[str]: """ The location of the resource group to which the resource belongs. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ The name of the ARM resource. """ return pulumi.get(self, "name") @property @pulumi.getter def options(self) -> Optional['outputs.SqlContainerGetPropertiesResponseOptions']: return pulumi.get(self, "options") @property @pulumi.getter def resource(self) -> Optional['outputs.SqlContainerGetPropertiesResponseResource']: return pulumi.get(self, "resource") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ The type of Azure resource. 
""" return pulumi.get(self, "type") class AwaitableGetSqlResourceSqlContainerResult(GetSqlResourceSqlContainerResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSqlResourceSqlContainerResult( id=self.id, METHOD_NAME=self.METHOD_NAME, location=self.location, name=self.name, options=self.options, resource=self.resource, tags=self.tags, type=self.type) def get_sql_resource_sql_container(account_name: Optional[str] = None, container_name: Optional[str] = None, database_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlContainerResult: """ Gets the SQL container under an existing Azure Cosmos DB database account. :param str account_name: Cosmos DB database account name. :param str container_name: Cosmos DB container name. :param str database_name: Cosmos DB database name. :param str resource_group_name: The name of the resource group. The name is case insensitive. 
""" __args__ = dict() __args__['accountName'] = account_name __args__['containerName'] = container_name __args__['databaseName'] = database_name __args__['resourceGroupName'] = resource_group_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20230315preview:getSqlResourceSqlContainer', __args__, opts=opts, typ=GetSqlResourceSqlContainerResult).value return AwaitableGetSqlResourceSqlContainerResult( id=pulumi.get(__ret__, 'id'), METHOD_NAME=pulumi.get(__ret__, 'identity'), location=pulumi.get(__ret__, 'location'), name=pulumi.get(__ret__, 'name'), options=pulumi.get(__ret__, 'options'), resource=pulumi.get(__ret__, 'resource'), tags=pulumi.get(__ret__, 'tags'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_sql_resource_sql_container) def get_sql_resource_sql_container_output(account_name: Optional[pulumi.Input[str]] = None, container_name: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlResourceSqlContainerResult]: """ Gets the SQL container under an existing Azure Cosmos DB database account. :param str account_name: Cosmos DB database account name. :param str container_name: Cosmos DB container name. :param str database_name: Cosmos DB database name. :param str resource_group_name: The name of the resource group. The name is case insensitive. """ ...
299,180
compare rec subrows
import sys

import numpy as np

from .. import util


def check_header(header, rh):
    """Assert every key of ``header`` has an equal value in ``rh``."""
    for key in header:
        expected = header[key]
        found = rh[key]
        if isinstance(found, str):
            # Compare strings ignoring FITS-style trailing padding.
            expected = expected.strip()
            found = found.strip()
        assert expected == found, "testing equal key '%s'" % key


def compare_headerlist_header(header_list, header):
    """
    The first is a list of dicts, second a FITSHDR
    """
    for card in header_list:
        key = card['name'].upper()
        want = card['value']
        got = header[key]
        if isinstance(got, str):
            got = got.strip()

        assert want == got, "testing header key '%s'" % key

        if 'comment' in card:
            assert card['comment'].strip() == header.get_comment(key).strip(), \
                "testing comment for header key '%s'" % key


def cast_shape(shape):
    """Canonicalize a shape: (n, 1) -> (n,) and (1,) -> ()."""
    if len(shape) == 2 and shape[1] == 1:
        return (shape[0],)
    if shape == (1,):
        return tuple()
    return shape


def compare_array(arr1, arr2, name):
    """Assert the two arrays agree in canonical shape and in every element."""
    shape1 = cast_shape(arr1.shape)
    shape2 = cast_shape(arr2.shape)
    assert shape1 == shape2, (
        "testing arrays '%s' shapes are equal: "
        "input %s, read: %s" % (name, shape1, shape2)
    )

    # On python 3 byte-string columns read back as unicode, so compare in unicode.
    if sys.version_info >= (3, 0, 0) and arr1.dtype.char == 'S':
        left = arr1.astype('U')
    else:
        left = arr1

    mismatch = np.where(left != arr2)
    for dim, idx in enumerate(mismatch):
        assert idx.size == 0, "testing array '%s' dim %d are equal" % (name, dim)


def compare_array_tol(arr1, arr2, tol, name):
    """Assert the arrays agree element-wise within relative tolerance ``tol``."""
    assert arr1.shape == arr2.shape, (
        "testing arrays '%s' shapes are equal: "
        "input %s, read: %s" % (name, arr1.shape, arr2.shape)
    )

    adiff = np.abs((arr1 - arr2) / arr1)
    maxdiff = adiff.max()
    bad = np.where(adiff > tol)
    for dim, idx in enumerate(bad):
        assert idx.size == 0, (
            "testing array '%s' dim %d are "
            "equal within tolerance %e, found "
            "max diff %e" % (name, dim, tol, maxdiff)
        )


def compare_array_abstol(arr1, arr2, tol, name):
    """Assert the arrays agree element-wise within absolute tolerance ``tol``."""
    assert arr1.shape == arr2.shape, (
        "testing arrays '%s' shapes are equal: "
        "input %s, read: %s" % (name, arr1.shape, arr2.shape)
    )

    adiff = np.abs(arr1 - arr2)
    maxdiff = adiff.max()
    bad = np.where(adiff > tol)
    for dim, idx in enumerate(bad):
        assert idx.size == 0, (
            "testing array '%s' dim %d are "
            "equal within tolerance %e, found "
            "max diff %e" % (name, dim, tol, maxdiff)
        )


def compare_object_array(arr1, arr2, name, rows=None):
    """
    The first must be object
    """
    if rows is None:
        rows = np.arange(arr1.size)

    for i, row in enumerate(rows):
        read_el = arr2[i]
        stringlike = (
            (sys.version_info >= (3, 0, 0) and isinstance(read_el, bytes))
            or isinstance(read_el, str)
        )
        if stringlike:
            orig = arr1[row]
            if sys.version_info >= (3, 0, 0) and isinstance(orig, bytes):
                orig = orig.decode('ascii')
            assert orig == read_el, "%s str el %d equal" % (name, i)
        else:
            # Variable-length numeric column: compare only the stored prefix.
            orig = arr1[row]
            n = len(orig)
            compare_array(orig, read_el[0:n], "%s num el %d equal" % (name, i))


def compare_rec(rec1, rec2, name):
    """Assert the two rec arrays agree field by field."""
    for f in rec1.dtype.names:
        shape1 = cast_shape(rec1[f].shape)
        shape2 = cast_shape(rec2[f].shape)
        assert shape1 == shape2, (
            "testing '%s' field '%s' shapes are equal: "
            "input %s, read: %s" % (name, f, shape1, shape2)
        )

        field1 = rec1[f]
        if sys.version_info >= (3, 0, 0) and field1.dtype.char == 'S':
            # for python 3, we get back unicode always
            field1 = field1.astype('U')

        assert np.all(field1 == rec2[f])


def METHOD_NAME(rec1, rec2, rows, name):
    """Assert ``rec1[rows]`` agrees with ``rec2`` field by field."""
    for f in rec1.dtype.names:
        shape1 = cast_shape(rec1[f][rows].shape)
        shape2 = cast_shape(rec2[f].shape)
        assert shape1 == shape2, (
            "testing '%s' field '%s' shapes are equal: "
            "input %s, read: %s" % (name, f, shape1, shape2)
        )

        subset = rec1[f][rows]
        if sys.version_info >= (3, 0, 0) and rec1[f].dtype.char == 'S':
            # for python 3, we get back unicode always
            subset = subset.astype('U')

        bad = np.where(subset != rec2[f])
        for idx in bad:
            assert idx.size == 0, "testing column %s" % f


def compare_rec_with_var(rec1, rec2, name, rows=None):
    """
    First one *must* be the one with object arrays

    Second can have fixed length

    both should be same number of rows
    """
    if rows is None:
        rows = np.arange(rec2.size)
        assert rec1.size == rec2.size, (
            "testing '%s' same number of rows" % name
        )

    # rec2 may have fewer fields
    for f in rec2.dtype.names:
        # f1 will have the objects
        if util.is_object(rec1[f]):
            compare_object_array(
                rec1[f], rec2[f],
                "testing '%s' field '%s'" % (name, f),
                rows=rows,
            )
        else:
            compare_array(
                rec1[f][rows], rec2[f],
                "testing '%s' num field '%s' equal" % (name, f),
            )


def compare_names(read_names, true_names, lower=False, upper=False):
    """Assert names read back equal the expected names after case folding."""
    for nread, ntrue in zip(read_names, true_names):
        if lower:
            tname, label = ntrue.lower(), "lower"
        else:
            tname, label = ntrue.upper(), "upper"
        assert nread == tname, "%s: '%s' vs '%s'" % (label, nread, tname)
299,181
resource event
#!/usr/bin/env python
# coding=utf-8
"""SQL-backed run observer for Sacred experiments."""
import json
from threading import Lock
import warnings

from sacred.commandline_options import cli_option
from sacred.observers.base import RunObserver
from sacred.serializer import flatten

# Default ordering priority for this observer among a run's observers.
DEFAULT_SQL_PRIORITY = 40


# ############################# Observer #################################### #


class SqlObserver(RunObserver):
    """Observer that persists experiment runs to a SQL database via SQLAlchemy."""

    @classmethod
    def create(cls, url, echo=False, priority=DEFAULT_SQL_PRIORITY):
        """Deprecated factory kept for backward compatibility; use ``SqlObserver(...)``."""
        warnings.warn(
            "SqlObserver.create(...) is deprecated. Please use"
            " SqlObserver(...) instead.",
            DeprecationWarning,
        )
        return cls(url, echo, priority)

    def __init__(self, url, echo=False, priority=DEFAULT_SQL_PRIORITY):
        """Connect to the database at ``url``.

        :param url: SQLAlchemy database URL (dialect://user:pass@host/db).
        :param echo: if True, SQLAlchemy logs every emitted SQL statement.
        :param priority: ordering priority among the run's observers.
        """
        # Imported lazily so sqlalchemy is only required when this observer is used.
        from sqlalchemy.orm import sessionmaker, scoped_session
        import sqlalchemy as sa

        engine = sa.create_engine(url, echo=echo)
        session_factory = sessionmaker(bind=engine)
        # make session thread-local to avoid problems with sqlite (see #275)
        session = scoped_session(session_factory)
        self.engine = engine
        self.session = session
        self.priority = priority
        # The Run row for the currently-observed run; set by _add_event().
        self.run = None
        # Serializes commits across threads; see save().
        self.lock = Lock()

    @classmethod
    def create_from(cls, engine, session, priority=DEFAULT_SQL_PRIORITY):
        """Instantiate a SqlObserver with an existing engine and session."""
        self = cls.__new__(cls)  # skip __init__ call
        self.engine = engine
        self.session = session
        self.priority = priority
        self.run = None
        self.lock = Lock()
        return self

    def started_event(
        self, ex_info, command, host_info, start_time, config, meta_info, _id
    ):
        """Record a run that starts immediately (status RUNNING)."""
        return self._add_event(
            ex_info,
            command,
            host_info,
            config,
            meta_info,
            _id,
            "RUNNING",
            start_time=start_time,
        )

    def queued_event(
        self, ex_info, command, host_info, queue_time, config, meta_info, _id
    ):
        """Record a queued run (status QUEUED; no start time stored yet)."""
        return self._add_event(
            ex_info, command, host_info, config, meta_info, _id, "QUEUED"
        )

    def _add_event(
        self, ex_info, command, host_info, config, meta_info, _id, status, **kwargs
    ):
        """Create and commit the Run row shared by started/queued events.

        Returns the run id (freshly derived when ``_id`` is None).
        """
        from .sql_bases import Base, Experiment, Host, Run

        # Create the schema on first use; a no-op when the tables already exist.
        Base.metadata.create_all(self.engine)
        sql_exp = Experiment.get_or_create(ex_info, self.session)
        sql_host = Host.get_or_create(host_info, self.session)
        if _id is None:
            # Derive the next id from the highest stored run id.
            i = self.session.query(Run).order_by(Run.id.desc()).first()
            _id = 0 if i is None else i.id + 1

        self.run = Run(
            run_id=str(_id),
            config=json.dumps(flatten(config)),
            command=command,
            priority=meta_info.get("priority", 0),
            comment=meta_info.get("comment", ""),
            experiment=sql_exp,
            host=sql_host,
            status=status,
            **kwargs,
        )
        self.session.add(self.run)
        self.save()
        # NOTE: when _id is 0 this falls through to the stored run_id string.
        return _id or self.run.run_id

    def heartbeat_event(self, info, captured_out, beat_time, result):
        """Persist periodic progress: info dict, captured stdout, interim result."""
        self.run.info = json.dumps(flatten(info))
        self.run.captured_out = captured_out
        self.run.heartbeat = beat_time
        self.run.result = result
        self.save()

    def completed_event(self, stop_time, result):
        """Mark the current run COMPLETED and store its final result."""
        self.run.stop_time = stop_time
        self.run.result = result
        self.run.status = "COMPLETED"
        self.save()

    def interrupted_event(self, interrupt_time, status):
        """Record an interruption, storing the caller-supplied status."""
        self.run.stop_time = interrupt_time
        self.run.status = status
        self.save()

    def failed_event(self, fail_time, fail_trace):
        """Mark the run FAILED and store the traceback lines as one string."""
        self.run.stop_time = fail_time
        self.run.fail_trace = "\n".join(fail_trace)
        self.run.status = "FAILED"
        self.save()

    def METHOD_NAME(self, filename):
        """Attach a resource file to the current run."""
        from .sql_bases import Resource

        res = Resource.get_or_create(filename, self.session)
        self.run.resources.append(res)
        self.save()

    def artifact_event(self, name, filename, metadata=None, content_type=None):
        """Attach an artifact to the current run.

        NOTE: ``metadata`` and ``content_type`` are accepted for interface
        compatibility with other observers but are not stored here.
        """
        from .sql_bases import Artifact

        a = Artifact.create(name, filename)
        self.run.artifacts.append(a)
        self.save()

    def save(self):
        """Commit the session; the lock keeps commits single-threaded."""
        with self.lock:
            self.session.commit()

    def query(self, _id):
        """Return the JSON representation of the run with primary key ``_id``."""
        from .sql_bases import Run

        run = self.session.query(Run).filter_by(id=_id).first()
        return run.to_json()

    def __eq__(self, other):
        if isinstance(other, SqlObserver):
            # fixme: this will probably fail to detect two equivalent engines
            return self.engine == other.engine and self.session == other.session
        return False


# ######################## Commandline Option ############################### #


@cli_option("-s", "--sql")
def sql_option(args, run):
    """Add a SQL Observer to the experiment.

    The typical form is:
    dialect://username:password@host:port/database
    """
    run.observers.append(SqlObserver(args))
299,182
test event without state key
from unittest import TestCase as StdlibTestCase

import yaml

from synapse.config import ConfigError
from synapse.config.api import ApiConfig
from synapse.types.state import StateFilter

# The (event_type, state_key) pairs sent as pre-join state when nothing is
# configured; mirrors the defaults baked into ApiConfig.
DEFAULT_PREJOIN_STATE_PAIRS = {
    ("m.room.join_rules", ""),
    ("m.room.canonical_alias", ""),
    ("m.room.avatar", ""),
    ("m.room.encryption", ""),
    ("m.room.name", ""),
    ("m.room.create", ""),
    ("m.room.topic", ""),
}


class TestRoomPrejoinState(StdlibTestCase):
    """Tests for parsing the ``room_prejoin_state`` config section."""

    def read_config(self, source: str) -> ApiConfig:
        """Parse a YAML snippet and load it into a fresh ApiConfig."""
        config = ApiConfig()
        config.read_config(yaml.safe_load(source))
        return config

    def test_no_prejoin_state(self) -> None:
        # With no relevant config the defaults apply: no wildcards,
        # exactly the default pairs.
        config = self.read_config("foo: bar")
        self.assertFalse(config.room_prejoin_state.has_wildcards())
        self.assertEqual(
            set(config.room_prejoin_state.concrete_types()), DEFAULT_PREJOIN_STATE_PAIRS
        )

    def test_disable_default_event_types(self) -> None:
        # Disabling the defaults with nothing extra yields an empty filter.
        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
            """
        )
        self.assertEqual(config.room_prejoin_state, StateFilter.none())

    def METHOD_NAME(self) -> None:
        # An event type listed without a state key becomes a wildcard
        # (all state keys of that type), not a concrete pair.
        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
                additional_event_types:
                    - foo
            """
        )
        self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
        self.assertEqual(config.room_prejoin_state.concrete_types(), [])

    def test_event_with_specific_state_key(self) -> None:
        # A [type, state_key] pair is stored as a concrete pair.
        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
                additional_event_types:
                    - [foo, bar]
            """
        )
        self.assertFalse(config.room_prejoin_state.has_wildcards())
        self.assertEqual(
            set(config.room_prejoin_state.concrete_types()),
            {("foo", "bar")},
        )

    def test_repeated_event_with_specific_state_key(self) -> None:
        # Multiple pairs for the same type accumulate.
        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
                additional_event_types:
                    - [foo, bar]
                    - [foo, baz]
            """
        )
        self.assertFalse(config.room_prejoin_state.has_wildcards())
        self.assertEqual(
            set(config.room_prejoin_state.concrete_types()),
            {("foo", "bar"), ("foo", "baz")},
        )

    def test_no_specific_state_key_overrides_specific_state_key(self) -> None:
        # A wildcard entry subsumes concrete pairs for the same type,
        # regardless of listing order.
        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
                additional_event_types:
                    - [foo, bar]
                    - foo
            """
        )
        self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
        self.assertEqual(config.room_prejoin_state.concrete_types(), [])

        config = self.read_config(
            """
            room_prejoin_state:
                disable_default_event_types: true
                additional_event_types:
                    - foo
                    - [foo, bar]
            """
        )
        self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
        self.assertEqual(config.room_prejoin_state.concrete_types(), [])

    def test_bad_event_type_entry_raises(self) -> None:
        # Entries must be a string or a 2-element [type, state_key] list of
        # strings; anything else is a ConfigError.
        with self.assertRaises(ConfigError):
            self.read_config(
                """
                room_prejoin_state:
                    additional_event_types:
                        - []
                """
            )

        with self.assertRaises(ConfigError):
            self.read_config(
                """
                room_prejoin_state:
                    additional_event_types:
                        - [a]
                """
            )

        with self.assertRaises(ConfigError):
            self.read_config(
                """
                room_prejoin_state:
                    additional_event_types:
                        - [a, b, c]
                """
            )

        with self.assertRaises(ConfigError):
            self.read_config(
                """
                room_prejoin_state:
                    additional_event_types:
                        - [true, 1.23]
                """
            )
299,183
file name
import logging

from django.conf import settings
from django.db import models, router, transaction
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import force_str

from sentry.backup.scopes import RelocationScope
from sentry.db.models import (
    BoundedBigIntegerField,
    BoundedPositiveIntegerField,
    FlexibleForeignKey,
    JSONField,
    Model,
    region_silo_only_model,
    sane_repr,
)
from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
from sentry.services.hybrid_cloud.user.service import user_service
from sentry.utils import json

from .base import DEFAULT_EXPIRATION, ExportQueryType, ExportStatus

logger = logging.getLogger(__name__)


@region_silo_only_model
class ExportedData(Model):
    """
    Stores references to asynchronous data export jobs
    """

    __relocation_scope__ = RelocationScope.Excluded

    organization = FlexibleForeignKey("sentry.Organization")
    # The requesting user; nulled (not cascaded) if the user is deleted.
    user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
    # Soft reference to the File row holding the exported CSV; see _get_file().
    file_id = BoundedBigIntegerField(null=True)
    date_added = models.DateTimeField(default=timezone.now)
    date_finished = models.DateTimeField(null=True)
    date_expired = models.DateTimeField(null=True, db_index=True)
    query_type = BoundedPositiveIntegerField(choices=ExportQueryType.as_choices())
    query_info = JSONField()

    @property
    def status(self):
        """Derived lifecycle state: Early (running), Expired, or Valid."""
        if self.date_finished is None:
            return ExportStatus.Early
        elif self.date_expired is not None and self.date_expired < timezone.now():
            return ExportStatus.Expired
        else:
            return ExportStatus.Valid

    @property
    def payload(self):
        """query_info plus the human-readable export type (copied, not mutated)."""
        payload = self.query_info.copy()
        payload["export_type"] = ExportQueryType.as_str(self.query_type)
        return payload

    @property
    def METHOD_NAME(self):
        """Download file name derived from type, creation date and row id."""
        date = self.date_added.strftime("%Y-%B-%d")
        export_type = ExportQueryType.as_str(self.query_type)
        # Example: Discover_2020-July-21_27.csv
        return f"{export_type}_{date}_{self.id}.csv"

    @staticmethod
    def format_date(date):
        """Format a datetime for emails; None passes through."""
        # Example: 12:21 PM on July 21, 2020 (UTC)
        return None if date is None else force_str(date.strftime("%-I:%M %p on %B %d, %Y (%Z)"))

    def delete_file(self):
        """Delete the backing File row, if one exists."""
        file = self._get_file()
        if file:
            file.delete()

    def delete(self, *args, **kwargs):
        # Remove the stored file before deleting the export row itself.
        self.delete_file()
        super().delete(*args, **kwargs)

    def finalize_upload(self, file, expiration=DEFAULT_EXPIRATION):
        """Attach the finished file, stamp finish/expiry times, and email the user."""
        self.delete_file()  # If a file is present, remove it

        current_time = timezone.now()
        expire_time = current_time + expiration
        self.update(file_id=file.id, date_finished=current_time, date_expired=expire_time)
        # Defer the email until the surrounding DB transaction commits.
        transaction.on_commit(lambda: self.email_success(), router.db_for_write(ExportedData))

    def email_success(self):
        """Send the 'data is ready' email with a link to the export."""
        from sentry.utils.email import MessageBuilder

        user_email = None
        user = user_service.get_user(user_id=self.user_id)
        if user:
            user_email = user.email

        # The following condition should never be true, but it's a safeguard in case someone manually calls this method
        if self.date_finished is None or self.date_expired is None or self._get_file() is None:
            logger.warning(
                "Notification email attempted on incomplete dataset",
                extra={"data_export_id": self.id, "organization_id": self.organization_id},
            )
            return
        url = self.organization.absolute_url(
            reverse("sentry-data-export-details", args=[self.organization.slug, self.id])
        )
        msg = MessageBuilder(
            subject="Your data is ready.",
            context={"url": url, "expiration": self.format_date(self.date_expired)},
            type="organization.export-data",
            template="sentry/emails/data-export-success.txt",
            html_template="sentry/emails/data-export-success.html",
        )
        if user_email is not None:
            msg.send_async([user_email])

    def email_failure(self, message):
        """Send the failure email, then delete this export row."""
        from sentry.utils.email import MessageBuilder

        user = user_service.get_user(user_id=self.user_id)
        if user is None:
            return
        msg = MessageBuilder(
            subject="We couldn't export your data.",
            context={
                "creation": self.format_date(self.date_added),
                "error_message": message,
                "payload": json.dumps(self.payload),
            },
            type="organization.export-data",
            template="sentry/emails/data-export-failure.txt",
            html_template="sentry/emails/data-export-failure.html",
        )
        msg.send_async([user.email])
        # Failed exports are not kept around once the user is notified.
        self.delete()

    def _get_file(self):
        """Resolve file_id to a File row; clears a stale reference on miss."""
        from sentry.models import File

        if self.file_id:
            try:
                return File.objects.get(pk=self.file_id)
            except File.DoesNotExist:
                self.update(file_id=None)
        return None

    class Meta:
        app_label = "sentry"
        db_table = "sentry_exporteddata"

    __repr__ = sane_repr("query_type", "query_info")


@region_silo_only_model
class ExportedDataBlob(Model):
    """Maps an export to the ordered file blobs that make up its contents."""

    __relocation_scope__ = RelocationScope.Excluded

    data_export = FlexibleForeignKey("sentry.ExportedData")
    blob_id = BoundedBigIntegerField()
    offset = BoundedBigIntegerField()

    class Meta:
        app_label = "sentry"
        db_table = "sentry_exporteddatablob"
        unique_together = (("data_export", "blob_id", "offset"),)
299,184
get model name
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks.
"""

import numpy as np

from astropy.logger import log

# Map the numeric function codes IRAF stores in database records to the
# corresponding model names.
iraf_models_map = {1.0: "Chebyshev", 2.0: "Legendre", 3.0: "Spline3", 4.0: "Spline1"}


def get_records(fname):
    """
    Read the records of an IRAF database file into a python list.

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    A list of records
    """
    # Context manager guarantees the handle is closed even if reading fails
    # (the previous open/read/close leaked the handle on error).
    with open(fname) as f:
        dtb = f.read()
    # Records are delimited by the keyword "begin"; drop the preamble.
    recs = dtb.split("begin")[1:]
    return [Record(r) for r in recs]


def get_database_string(fname):
    """
    Read an IRAF database file.

    Parameters
    ----------
    fname : str
        name of an IRAF database file

    Returns
    -------
    the database file as a string
    """
    with open(fname) as f:
        return f.read()


class Record:
    """
    A base class for all records - represents an IRAF database record.

    Attributes
    ----------
    recstr: string
        the record as a string
    fields: dict
        the fields in the record
    taskname: string
        the name of the task which created the database file
    """

    def __init__(self, recstr):
        self.recstr = recstr
        self.fields = self.get_fields()
        self.taskname = self.get_task_name()

    def aslist(self):
        """Return the record as a list of stripped, non-empty lines.

        Rebuilding the list (rather than removing while iterating, as the
        previous implementation did) guarantees consecutive blank lines are
        all dropped.
        """
        reclist = self.recstr.split("\n")
        return [entry.strip() for entry in reclist if entry.strip()]

    def get_fields(self):
        """Parse the record lines into a {field_name: value} dict.

        A field whose following line does not start with a letter is an
        array field spanning several lines; otherwise the value is the
        rest of the field's own line.
        """
        # read record fields as an array
        fields = {}
        flist = self.aslist()
        numfields = len(flist)
        for i in range(numfields):
            line = flist[i]
            if line and line[0].isalpha():
                field = line.split()
                if i + 1 < numfields:
                    if not flist[i + 1][0].isalpha():
                        # field[1] is the declared number of array lines
                        fields[field[0]] = self.read_array_field(
                            flist[i : i + int(field[1]) + 1]
                        )
                    else:
                        fields[field[0]] = " ".join(s for s in field[1:])
                else:
                    fields[field[0]] = " ".join(s for s in field[1:])
            else:
                continue
        return fields

    def get_task_name(self):
        """Return the 'task' field, or None when the record has none."""
        try:
            return self.fields["task"]
        except KeyError:
            return None

    def read_array_field(self, fieldlist):
        """Convert an IRAF record array field into a float64 numpy array.

        Raises
        ------
        Exception
            Re-raises whatever ``np.array`` raised after logging. The
            previous version swallowed the exception and then crashed with
            an unrelated NameError because ``farr`` was never assigned.
        """
        # Turn an iraf record array field into a numpy array
        fieldline = [entry.split() for entry in fieldlist[1:]]
        # take only the first 3 columns
        # identify writes also strings at the end of some field lines
        xyz = [entry[:3] for entry in fieldline]
        try:
            farr = np.array(xyz)
        except Exception:
            log.debug(f"Could not read array field {fieldlist[0].split()[0]}")
            raise
        return farr.astype(np.float64)


class IdentifyRecord(Record):
    """
    Represents a database record for the onedspec.identify task.

    Attributes
    ----------
    x: array
        the X values of the identified features
        this represents values on axis1 (image rows)
    y: int
        the Y values of the identified features
        (image columns)
    z: array
        the values which X maps into
    modelname: string
        the function used to fit the data
    nterms: int
        degree of the polynomial which was fit to the data
        in IRAF this is the number of coefficients, not the order
    mrange: list
        the range of the data
    coeff: array
        function (modelname) coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        # The 'coefficients' array packs [model_code, nterms, low, high, c0...]
        self._flatcoeff = self.fields["coefficients"].flatten()
        self.x = self.fields["features"][:, 0]
        self.y = self.get_ydata()
        self.z = self.fields["features"][:, 1]
        self.modelname = self.METHOD_NAME()
        self.nterms = self.get_nterms()
        self.mrange = self.get_range()
        self.coeff = self.get_coeff()

    def METHOD_NAME(self):
        """Return the model name encoded in the first coefficient slot."""
        return iraf_models_map[self._flatcoeff[0]]

    def get_nterms(self):
        """Return the number of fit coefficients."""
        return self._flatcoeff[1]

    def get_range(self):
        """Return the [low, high] data range of the fit."""
        low = self._flatcoeff[2]
        high = self._flatcoeff[3]
        return [low, high]

    def get_coeff(self):
        """Return the model coefficients (everything after the header slots)."""
        return self._flatcoeff[4:]

    def get_ydata(self):
        """Extract the image column from the 'image' section string.

        Handles both "name[sec]" and "name[x,y]" style sections.
        """
        image = self.fields["image"]
        left = image.find("[") + 1
        right = image.find("]")
        section = image[left:right]
        if "," in section:
            yind = image.find(",") + 1
            return int(image[yind:-1])
        else:
            return int(section)


class FitcoordsRecord(Record):
    """
    Represents a database record for the longslit.fitccords task.

    Attributes
    ----------
    modelname: string
        the function used to fit the data
    xorder: int
        number of terms in x
    yorder: int
        number of terms in y
    xbounds: list
        data range in x
    ybounds: list
        data range in y
    coeff: array
        function coefficients
    """

    def __init__(self, recstr):
        super().__init__(recstr)
        # 'surface' packs [model_code, xorder, yorder, ?, xlo, xhi, ylo, yhi, c...]
        self._surface = self.fields["surface"].flatten()
        self.modelname = iraf_models_map[self._surface[0]]
        self.xorder = self._surface[1]
        self.yorder = self._surface[2]
        self.xbounds = [self._surface[4], self._surface[5]]
        self.ybounds = [self._surface[6], self._surface[7]]
        self.coeff = self.get_coeff()

    def get_coeff(self):
        """Return the surface coefficients following the 8 header slots."""
        return self._surface[8:]


class IDB:
    """
    Base class for an IRAF identify database.

    Attributes
    ----------
    records: list
        a list of all `IdentifyRecord` in the database
    numrecords: int
        number of records
    """

    def __init__(self, dtbstr):
        self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
        self.numrecords = len(self.records)

    def aslist(self, dtb):
        # return a list of records
        # if the first one is a comment remove it from the list
        rl = dtb.split("begin")
        try:
            rl0 = rl[0].split("\n")
        except Exception:
            return rl
        if len(rl0) == 2 and rl0[0].startswith("#") and not rl0[1].strip():
            return rl[1:]
        else:
            return rl


class ReidentifyRecord(IDB):
    """
    Represents a database record for the onedspec.reidentify task.
    """

    def __init__(self, databasestr):
        super().__init__(databasestr)
        self.x = np.array([r.x for r in self.records])
        self.y = self.get_ydata()
        self.z = np.array([r.z for r in self.records])

    def get_ydata(self):
        """Broadcast each record's scalar y over that record's row of x."""
        y = np.ones(self.x.shape)
        y = y * np.array([r.y for r in self.records])[:, np.newaxis]
        return y
299,185
cleanup
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runs ping.

This benchmark runs ping using the internal, and optionally external,
ips of vms in the same zone.
"""

import logging
import re

from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'ping'
BENCHMARK_CONFIG = """
ping:
  description: Benchmarks ping latency over internal IP addresses
  vm_groups:
    vm_1:
      vm_spec: *default_single_core
    vm_2:
      vm_spec: *default_single_core
"""

# Names for the four statistics ping reports on its summary line, in order.
METRICS = ('Min Latency', 'Average Latency', 'Max Latency', 'Latency Std Dev')


def GetConfig(user_config):
  """Merge the user's config on top of the benchmark defaults."""
  return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)


def Prepare(benchmark_spec):  # pylint: disable=unused-argument
  """Install ping on the target vm.

  Checks that there are exactly two vms specified, and opens ICMP on both
  when the external IP path will be exercised.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm_count = len(benchmark_spec.vms)
  if vm_count != 2:
    raise ValueError(
        f'Ping benchmark requires exactly two machines, found {vm_count}')

  if vm_util.ShouldRunOnExternalIpAddress():
    for vm in benchmark_spec.vms:
      vm.AllowIcmp()


def Run(benchmark_spec):
  """Run ping on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vms = benchmark_spec.vms
  results = []

  # Ping in both directions. Tuple unpacking enforces exactly two VMs per
  # direction, matching the check in Prepare.
  for sending_vm, receiving_vm in (tuple(vms), tuple(reversed(vms))):
    if vm_util.ShouldRunOnExternalIpAddress():
      results.extend(
          _RunPing(sending_vm, receiving_vm, receiving_vm.ip_address,
                   vm_util.IpAddressMetadata.EXTERNAL))
    if vm_util.ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
      results.extend(
          _RunPing(sending_vm, receiving_vm, receiving_vm.internal_ip,
                   vm_util.IpAddressMetadata.INTERNAL))
  return results


def _RunPing(sending_vm, receiving_vm, receiving_ip, ip_type):
  """Run ping using 'sending_vm' to connect to 'receiving_ip'.

  Args:
    sending_vm: The VM issuing the ping request.
    receiving_vm: The VM receiving the ping.  Needed for metadata.
    receiving_ip: The IP address to be pinged.
    ip_type: The type of 'receiving_ip',
        (either 'vm_util.IpAddressSubset.INTERNAL
         or vm_util.IpAddressSubset.EXTERNAL')

  Returns:
    A list of samples, with one sample for each metric.
  """
  if (ip_type == vm_util.IpAddressMetadata.INTERNAL
      and not sending_vm.IsReachable(receiving_vm)):
    logging.warning('%s is not reachable from %s', receiving_vm, sending_vm)
    return []

  logging.info('Ping results (ip_type = %s):', ip_type)
  stdout, _ = sending_vm.RemoteCommand(f'ping -c 100 {receiving_ip}')
  # Pull min/avg/max/mdev off ping's final summary line.
  stats = re.findall('([0-9]*\\.[0-9]*)', stdout.splitlines()[-1])
  assert len(stats) == len(METRICS), stats

  metadata = {
      'ip_type': ip_type,
      'receiving_zone': receiving_vm.zone,
      'sending_zone': sending_vm.zone,
  }
  return [
      sample.Sample(metric, float(value), 'ms', metadata)
      for metric, value in zip(METRICS, stats)
  ]


def METHOD_NAME(benchmark_spec):  # pylint: disable=unused-argument
  """Cleanup ping on the target vm (by uninstalling).

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  pass
299,186
verify columns
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Module for converting DataFrame to Networkx graph."""
from typing import Callable, Dict, Iterable, Optional, Union

import networkx as nx
import pandas as pd
from typing_extensions import Literal

from .._version import VERSION

__version__ = VERSION
__author__ = "Ian Hellen"

# Literal aliases constraining the string arguments used below.
GraphType = Literal["graph", "digraph"]
NodeRole = Literal["source", "target"]
GraphItem = Literal["node", "edge"]


def df_to_networkx(
    data: pd.DataFrame,
    source_col: str,
    target_col: str,
    source_attrs: Optional[Iterable[str]] = None,
    target_attrs: Optional[Iterable[str]] = None,
    edge_attrs: Optional[Iterable[str]] = None,
    graph_type: GraphType = "graph",
):
    """
    Create a networkx graph from a DataFrame.

    Parameters
    ----------
    data : pd.DataFrame
        Input data
    source_col : str
        Column for source nodes.
    target_col : str
        Column for target nodes.
    source_attrs : Optional[List[str]], optional
        Optional list of columns to use as source node attributes, by default None
    target_attrs : Optional[List[str]], optional
        Optional list of columns to use as target node attributes, by default None
    edge_attrs : Optional[List[str]], optional
        Optional list of columns to use as edge node attributes, by default None
    graph_type : str
        "graph" or "digraph" (for nx.DiGraph)

    Returns
    -------
    nx.Graph
        The networkx graph object

    Raises
    ------
    ValueError
        If any of the named columns are not in ``data``.
    """
    create_as = nx.DiGraph if graph_type == "digraph" else nx.Graph
    # Fail fast with a consolidated error if any named column is missing.
    METHOD_NAME(
        data, source_col, target_col, source_attrs, target_attrs, edge_attrs
    )
    # remove any source or target rows that are NaN
    data = data.dropna(axis=0, subset=[source_col, target_col])
    nx_graph = nx.from_pandas_edgelist(
        data,
        source=source_col,
        target=target_col,
        edge_attr=edge_attrs,
        create_using=create_as,
    )

    # Node attributes are not handled by from_pandas_edgelist; attach them
    # separately for each side of the edge.
    _set_node_attributes(data, nx_graph, source_col, source_attrs, node_role="source")
    _set_node_attributes(data, nx_graph, target_col, target_attrs, node_role="target")
    return nx_graph


def _set_node_attributes(
    data: pd.DataFrame,
    graph: nx.Graph,
    column: str,
    attrib_cols: Optional[Iterable[str]],
    node_role: NodeRole,
):
    """Set node attributes from column values."""
    all_cols = [column, *attrib_cols] if attrib_cols else [column]
    # Create an 'agg' dictionary to apply to DataFrame
    agg_dict: Dict[str, Union[str, Callable]] = (
        {col: _pd_unique_list for col in attrib_cols} if attrib_cols else {}
    )
    # Add these two items as attributes
    agg_dict.update({"node_role": "first", "node_type": "first"})
    # Group by the column value and apply the agg dictionary
    # to_dict(orient="index") produces a dict like
    # { column_val: { attrib_name: attrib_val, attrib2_name: ....}}
    source_attrib_dict = (
        data[all_cols]
        .assign(node_role=node_role, node_type=column)
        .groupby(column)
        .agg(agg_dict)
        .to_dict(orient="index")
    )
    # we can use the dict to directly set the attributes for all graph items.
    nx.set_node_attributes(graph, source_attrib_dict)


def _pd_unique_list(series: pd.Series):
    """Return either a value or a string if item not unique."""
    unique_vals = series.unique()
    if len(unique_vals) == 1:
        return unique_vals[0]
    # Multiple distinct values collapse into a comma-separated string.
    return ", ".join([str(attrib) for attrib in unique_vals])


def METHOD_NAME(
    data, source_col, target_col, source_attrs, target_attrs, edge_attrs
):
    """Check specified columns are in data."""
    # Accumulate {column: parameter_name} for every missing column so the
    # caller sees all problems in a single error.
    missing_columns = {
        **_verify_column(data, "source_col", source_col),
        **_verify_column(data, "target_col", target_col),
    }
    for col in source_attrs or []:
        missing_columns.update(_verify_column(data, "source_attrs", col))
    for col in target_attrs or []:
        missing_columns.update(_verify_column(data, "target_attrs", col))
    for col in edge_attrs or []:
        missing_columns.update(_verify_column(data, "edge_attrs", col))

    if missing_columns:
        raise ValueError(
            "The following parameters had columns that are missing from the input data",
            ", ".join(f"{col} ({param})" for col, param in missing_columns.items()),
        )


def _verify_column(data, param, column):
    """Verify individual column."""
    return {column: param} if column not in data.columns else {}
299,187
test 001 detect
#!/usr/bin/env python # # Copyright 2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # import random import numpy from gnuradio import gr, gr_unittest, blocks, analog, channels from gnuradio import digital from gnuradio.digital.utils import tagged_streams from gnuradio.digital.ofdm_txrx import ofdm_tx def make_bpsk_burst(fft_len, cp_len, num_bits): """ Create a burst of a sync symbol and some BPSK bits """ sync_symbol = [ (random.randint(0, 1) * 2) - 1 for x in range(fft_len // 2) ] * 2 sync_symbols = sync_symbol[-cp_len:] + sync_symbol mod_symbols = [ (random.randint(0, 1) * 2) - 1 for x in range(num_bits) ] return sync_symbols + mod_symbols class qa_ofdm_sync_sc_cfb (gr_unittest.TestCase): def setUp(self): random.seed(0) self.tb = gr.top_block() def tearDown(self): self.tb = None def METHOD_NAME(self): """ Send two bursts, with zeros in between, and check they are both detected at the correct position and no false alarms occur """ n_zeros = 15 fft_len = 32 cp_len = 4 sig_len = (fft_len + cp_len) * 10 tx_signal = [0, ] * n_zeros + make_bpsk_burst(fft_len, cp_len, sig_len) tx_signal = tx_signal * 2 add = blocks.add_cc() sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len) sink_freq = blocks.vector_sink_f() sink_detect = blocks.vector_sink_b() self.tb.connect(blocks.vector_source_c(tx_signal), (add, 0)) self.tb.connect( analog.noise_source_c( analog.GR_GAUSSIAN, .01), (add, 1)) self.tb.connect(add, sync) self.tb.connect((sync, 0), sink_freq) self.tb.connect((sync, 1), sink_detect) self.tb.run() sig1_detect = sink_detect.data()[0:len(tx_signal) // 2] sig2_detect = sink_detect.data()[len(tx_signal) // 2:] self.assertAlmostEqual(sig1_detect.index(1), n_zeros + fft_len + cp_len, delta=cp_len - 1) self.assertAlmostEqual(sig2_detect.index(1), n_zeros + fft_len + cp_len, delta=cp_len - 1) self.assertEqual(numpy.sum(sig1_detect), 1) self.assertEqual(numpy.sum(sig2_detect), 1) def test_002_freq(self): """ 
Add a fine frequency offset and see if that gets detected properly """ fft_len = 32 cp_len = 4 # This frequency offset is normalized to rads, i.e. \pi == f_s/2 max_freq_offset = 2 * numpy.pi / fft_len # Otherwise, it's coarse freq_offset = ((2 * random.random()) - 1) * max_freq_offset sig_len = (fft_len + cp_len) * 10 tx_signal = make_bpsk_burst(fft_len, cp_len, sig_len) sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len, True) sink_freq = blocks.vector_sink_f() sink_detect = blocks.vector_sink_b() channel = channels.channel_model(0.005, freq_offset / 2.0 / numpy.pi) self.tb.connect(blocks.vector_source_c(tx_signal), channel, sync) self.tb.connect((sync, 0), sink_freq) self.tb.connect((sync, 1), sink_detect) self.tb.run() phi_hat = sink_freq.data()[sink_detect.data().index(1)] est_freq_offset = 2 * phi_hat / fft_len self.assertAlmostEqual(est_freq_offset, freq_offset, places=2) def test_003_multiburst(self): """ Send several bursts, see if the number of detects is correct. Burst lengths and content are random. The channel is assumed AWGN for this test. 
""" n_bursts = 42 fft_len = 32 cp_len = 4 tx_signal = [] for _ in range(n_bursts): gap = [0, ] * random.randint(0, 2 * fft_len) tx_signal += gap + \ make_bpsk_burst(fft_len, cp_len, fft_len * random.randint(5, 23)) # Very loose definition of SNR here snr = 20 # dB sigma = 10**(-snr / 10) # Add noise -- we don't use the channel model blocks, we want to keep # this test as self-contained as possible, and all randomness should # derive from random.seed() above def complex_randn(N): return (numpy.random.randn( N) + 1j * numpy.random.randn(N)) * sigma / numpy.sqrt(2) tx_signal += complex_randn(len(tx_signal)) sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len) sink_freq = blocks.vector_sink_f() sink_detect = blocks.vector_sink_b() self.tb.connect(blocks.vector_source_c(tx_signal), sync) self.tb.connect((sync, 0), sink_freq) self.tb.connect((sync, 1), sink_detect) self.tb.run() n_bursts_detected = numpy.sum(sink_detect.data()) self.assertEqual( n_bursts_detected, n_bursts, msg="Detection error (missed bursts): {}".format( (numpy.sum(sink_detect.data()) - n_bursts)) ) def test_004_ofdm_packets(self): """ Send several bursts using ofdm_tx, see if the number of detects is correct. Burst lengths and content are random. 
""" n_bursts = 42 fft_len = 64 cp_len = 16 # Here, coarse freq offset is allowed max_freq_offset = 2 * numpy.pi / fft_len * 4 freq_offset = ((2 * random.random()) - 1) * max_freq_offset packets = [] tagname = "packet_length" min_packet_length = 10 max_packet_length = 50 for _ in range(n_bursts): packet_length = random.randint(min_packet_length, max_packet_length + 1) packet = [random.randint(0, 255) for i in range(packet_length)] packets.append(packet) data, tags = tagged_streams.packets_to_vectors( packets, tagname, vlen=1) src = blocks.vector_source_b(data, False, 1, tags) mod = ofdm_tx(packet_length_tag_key=tagname) sync = digital.ofdm_sync_sc_cfb(fft_len, cp_len) sink_freq = blocks.vector_sink_f() sink_detect = blocks.vector_sink_b() noise_level = 0.005 channel = channels.channel_model( noise_level, freq_offset / 2 / numpy.pi) self.tb.connect(src, mod, channel, sync, sink_freq) self.tb.connect((sync, 1), sink_detect) self.tb.run() self.assertEqual(numpy.sum(sink_detect.data()), n_bursts) if __name__ == '__main__': gr_unittest.run(qa_ofdm_sync_sc_cfb)
299,188
f1
# encoding: utf-8 import pytest import requests import json import responses import six from ckan.tests.helpers import _get_test_app from ckan.common import config import ckan.model as model import ckan.plugins as p import ckan.lib.create_test_data as create_test_data import ckanext.resourceproxy.blueprint as blueprint import ckanext.resourceproxy.plugin as proxy JSON_STRING = json.dumps({ "a": "foo", "bar": "yes, I'm proxied", "b": 42}) def set_resource_url(url): testpackage = model.Package.get('annakarenina') context = { 'model': model, 'session': model.Session, 'user': model.User.get('testsysadmin').name, 'use_cache': False, } resource = p.toolkit.get_action('resource_show')( context, {'id': testpackage.resources[0].id}) package = p.toolkit.get_action('package_show')( context, {'id': testpackage.id}) resource['url'] = url p.toolkit.get_action('resource_update')(context, resource) testpackage = model.Package.get('annakarenina') assert testpackage.resources[0].url == resource['url'] return {'resource': resource, 'package': package} @pytest.mark.ckan_config('ckan.plugins', 'resource_proxy') @pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context") class TestProxyPrettyfied(object): serving = False @pytest.fixture(autouse=True) def initial_data(self, clean_db, with_request_context): create_test_data.CreateTestData.create() self.url = 'http://www.ckan.org/static/example.json' self.data_dict = set_resource_url(self.url) def mock_out_urls(self, *args, **kwargs): responses.add(responses.GET, *args, **kwargs) responses.add(responses.HEAD, *args, **kwargs) @responses.activate def test_resource_proxy_on_200(self): self.mock_out_urls( self.url, content_type='application/json', body=six.ensure_binary(JSON_STRING)) url = self.data_dict['resource']['url'] result = requests.get(url, timeout=30) assert result.status_code == 200, result.status_code assert "yes, I'm proxied" in six.ensure_str(result.content) @responses.activate def 
test_resource_proxy_on_404(self, app): self.mock_out_urls( self.url, body=six.ensure_binary("I'm not here"), content_type='application/json', status=404) url = self.data_dict['resource']['url'] result = requests.get(url, timeout=30) assert result.status_code == 404, result.status_code proxied_url = proxy.get_proxified_resource_url(self.data_dict) result = app.get(proxied_url) # we expect a 409 because the resourceproxy got an error (404) # from the server assert result.status_code == 409 assert '404' in result.body @responses.activate def test_large_file(self, app): cl = blueprint.MAX_FILE_SIZE + 1 self.mock_out_urls( self.url, headers={'Content-Length': six.text_type(cl)}, body='c' * cl) proxied_url = proxy.get_proxified_resource_url(self.data_dict) result = app.get(proxied_url) assert result.status_code == 409 assert six.b('too large') in result.data @responses.activate def test_large_file_streaming(self, app): cl = blueprint.MAX_FILE_SIZE + 1 self.mock_out_urls( self.url, stream=True, body='c' * cl) proxied_url = proxy.get_proxified_resource_url(self.data_dict) result = app.get(proxied_url) assert result.status_code == 409 assert six.b('too large') in result.data @responses.activate def test_invalid_url(self, app): responses.add_passthru(config['solr_url']) self.data_dict = set_resource_url('http:invalid_url') proxied_url = proxy.get_proxified_resource_url(self.data_dict) result = app.get(proxied_url) assert result.status_code == 409 assert six.b('Invalid URL') in result.data def test_non_existent_url(self, app): self.data_dict = set_resource_url('http://nonexistent.example.com') def METHOD_NAME(): url = self.data_dict['resource']['url'] requests.get(url, timeout=30) with pytest.raises(requests.ConnectionError): METHOD_NAME() proxied_url = proxy.get_proxified_resource_url(self.data_dict) result = app.get(proxied_url) assert result.status_code == 502 assert six.b('connection error') in result.data def 
test_proxied_resource_url_proxies_http_and_https_by_default(self): http_url = 'http://ckan.org' https_url = 'https://ckan.org' for url in [http_url, https_url]: data_dict = set_resource_url(url) proxied_url = proxy.get_proxified_resource_url(data_dict) assert proxied_url != url, proxied_url def test_resource_url_doesnt_proxy_non_http_or_https_urls_by_default(self): schemes = ['file', 'ws'] for scheme in schemes: url = '%s://ckan.org' % scheme data_dict = set_resource_url(url) non_proxied_url = proxy.get_proxified_resource_url(data_dict) proxied_url = proxy.get_proxified_resource_url(data_dict, scheme) assert non_proxied_url == url, non_proxied_url assert proxied_url != url, proxied_url
299,189
test unlink single layer
import numpy as np import pytest from napari import layers from napari.layers.utils._link_layers import ( layers_linked, link_layers, unlink_layers, ) BASE_ATTRS = {} BASE_ATTRS = { 'opacity': 0.75, 'blending': 'additive', 'visible': False, 'editable': False, 'shear': [30], } IM_ATTRS = { 'rendering': 'translucent', 'iso_threshold': 0.34, 'interpolation2d': 'linear', 'contrast_limits': [0.25, 0.75], 'gamma': 0.5, } @pytest.mark.parametrize('key, value', {**BASE_ATTRS, **IM_ATTRS}.items()) def test_link_image_layers_all_attributes(key, value): """Test linking common attributes across layers of similar types.""" l1 = layers.Image(np.random.rand(10, 10), contrast_limits=(0, 0.8)) l2 = layers.Image(np.random.rand(10, 10), contrast_limits=(0.1, 0.9)) link_layers([l1, l2]) # linking does (currently) apply to things that were unequal before linking assert l1.contrast_limits != l2.contrast_limits # once we set either... they will both be changed assert getattr(l1, key) != value setattr(l2, key, value) assert getattr(l1, key) == getattr(l2, key) == value @pytest.mark.parametrize('key, value', BASE_ATTRS.items()) def test_link_different_type_layers_all_attributes(key, value): """Test linking common attributes across layers of different types.""" l1 = layers.Image(np.random.rand(10, 10)) l2 = layers.Points(None) link_layers([l1, l2]) # once we set either... 
they will both be changed assert getattr(l1, key) != value setattr(l2, key, value) assert getattr(l1, key) == getattr(l2, key) == value def test_link_invalid_param(): """Test that linking non-shared attributes raises.""" l1 = layers.Image(np.random.rand(10, 10)) l2 = layers.Points(None) with pytest.raises(ValueError) as e: link_layers([l1, l2], ('rendering',)) assert "Cannot link attributes that are not shared by all layers" in str(e) def test_double_linking_noop(): """Test that linking already linked layers is a noop.""" l1 = layers.Points(None) l2 = layers.Points(None) l3 = layers.Points(None) # no callbacks to begin with assert len(l1.events.opacity.callbacks) == 0 # should have two after linking layers link_layers([l1, l2, l3]) assert len(l1.events.opacity.callbacks) == 2 # should STILL have two after linking layers again link_layers([l1, l2, l3]) assert len(l1.events.opacity.callbacks) == 2 def test_removed_linked_target(): """Test that linking already linked layers is a noop.""" l1 = layers.Points(None) l2 = layers.Points(None) l3 = layers.Points(None) link_layers([l1, l2, l3]) l1.opacity = 0.5 assert l1.opacity == l2.opacity == l3.opacity == 0.5 # if we delete layer3 we shouldn't get an error when updating otherlayers del l3 l1.opacity = 0.25 assert l1.opacity == l2.opacity def test_context_manager(): """Test that we can temporarily link layers.""" l1 = layers.Points(None) l2 = layers.Points(None) l3 = layers.Points(None) assert len(l1.events.opacity.callbacks) == 0 with layers_linked([l1, l2, l3], ('opacity',)): assert len(l1.events.opacity.callbacks) == 2 assert len(l1.events.blending.callbacks) == 0 # it's just opacity del l2 # if we lose a layer in the meantime it should be ok assert len(l1.events.opacity.callbacks) == 0 def test_unlink_layers(): """Test that we can unlink layers.""" l1 = layers.Points(None) l2 = layers.Points(None) l3 = layers.Points(None) link_layers([l1, l2, l3]) assert len(l1.events.opacity.callbacks) == 2 unlink_layers([l1, l2], 
('opacity',)) # just unlink opacity on l1/l2 assert len(l1.events.opacity.callbacks) == 1 assert len(l2.events.opacity.callbacks) == 1 # l3 is still connected to them both assert len(l3.events.opacity.callbacks) == 2 # blending was untouched assert len(l1.events.blending.callbacks) == 2 assert len(l2.events.blending.callbacks) == 2 assert len(l3.events.blending.callbacks) == 2 unlink_layers([l1, l2, l3]) # unlink everything assert len(l1.events.blending.callbacks) == 0 assert len(l2.events.blending.callbacks) == 0 assert len(l3.events.blending.callbacks) == 0 def METHOD_NAME(): """Test that we can unlink a single layer from all others.""" l1 = layers.Points(None) l2 = layers.Points(None) l3 = layers.Points(None) link_layers([l1, l2, l3]) assert len(l1.events.opacity.callbacks) == 2 unlink_layers([l1], ('opacity',)) # just unlink L1 opacicity from others assert len(l1.events.opacity.callbacks) == 0 assert len(l2.events.opacity.callbacks) == 1 assert len(l3.events.opacity.callbacks) == 1 # blending was untouched assert len(l1.events.blending.callbacks) == 2 assert len(l2.events.blending.callbacks) == 2 assert len(l3.events.blending.callbacks) == 2 unlink_layers([l1]) # completely unlink L1 from everything assert not l1.events.blending.callbacks def test_mode_recursion(): l1 = layers.Points(None, name='l1') l2 = layers.Points(None, name='l2') link_layers([l1, l2]) l1.mode = 'add'
299,190
get veff
#!/usr/bin/env python # # This code was copied from the data generation program of Tencent Alchemy # project (https://github.com/tencent-alchemy). # # # Copyright 2019 Tencent America LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # from pyscf import lib from pyscf.lib import logger from pyscf.grad import rks as rks_grad from pyscf.df.grad import rhf as df_rhf_grad def METHOD_NAME(ks_grad, mol=None, dm=None): '''Coulomb + XC functional ''' if mol is None: mol = ks_grad.mol if dm is None: dm = ks_grad.base.make_rdm1() t0 = (logger.process_clock(), logger.perf_counter()) mf = ks_grad.base ni = mf._numint grids, nlcgrids = rks_grad._initialize_grids(ks_grad) mem_now = lib.current_memory()[0] max_memory = max(2000, ks_grad.max_memory*.9-mem_now) if ks_grad.grid_response: exc, vxc = rks_grad.get_vxc_full_response( ni, mol, grids, mf.xc, dm, max_memory=max_memory, verbose=ks_grad.verbose) if mf.nlc or ni.libxc.is_nlc(mf.xc): if ni.libxc.is_nlc(mf.xc): xc = mf.xc else: xc = mf.nlc enlc, vnlc = rks_grad.get_nlc_vxc_full_response( ni, mol, nlcgrids, xc, dm, max_memory=max_memory, verbose=ks_grad.verbose) exc += enlc vxc += vnlc logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0)) else: exc, vxc = rks_grad.get_vxc( ni, mol, grids, mf.xc, dm, max_memory=max_memory, verbose=ks_grad.verbose) if mf.nlc or ni.libxc.is_nlc(mf.xc): if ni.libxc.is_nlc(mf.xc): xc = mf.xc else: xc = mf.nlc enlc, vnlc = 
rks_grad.get_nlc_vxc( ni, mol, nlcgrids, xc, dm, max_memory=max_memory, verbose=ks_grad.verbose) vxc += vnlc t0 = logger.timer(ks_grad, 'vxc', *t0) if not ni.libxc.is_hybrid_xc(mf.xc): vj = ks_grad.get_j(mol, dm) vxc += vj if ks_grad.auxbasis_response: e1_aux = vj.aux.sum ((0,1)) else: omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin) vj, vk = ks_grad.get_jk(mol, dm) if ks_grad.auxbasis_response: vk.aux *= hyb vk[:] *= hyb # Don't erase the .aux tags! if omega != 0: # For range separated Coulomb operator # TODO: replaced with vk_sr which is numerically more stable for # inv(int2c2e) vk_lr = ks_grad.get_k(mol, dm, omega=omega) vk[:] += vk_lr * (alpha - hyb) if ks_grad.auxbasis_response: vk.aux[:] += vk_lr.aux * (alpha - hyb) vxc += vj - vk * .5 if ks_grad.auxbasis_response: e1_aux = (vj.aux - vk.aux * .5).sum ((0,1)) if ks_grad.auxbasis_response: logger.debug1(ks_grad, 'sum(auxbasis response) %s', e1_aux.sum(axis=0)) vxc = lib.tag_array(vxc, exc1_grid=exc, aux=e1_aux) else: vxc = lib.tag_array(vxc, exc1_grid=exc) return vxc class Gradients(rks_grad.Gradients): def __init__(self, mf): # Whether to include the response of DF auxiliary basis when computing # nuclear gradients of J/K matrices self.auxbasis_response = True rks_grad.Gradients.__init__(self, mf) get_jk = df_rhf_grad.Gradients.get_jk get_j = df_rhf_grad.Gradients.get_j get_k = df_rhf_grad.Gradients.get_k METHOD_NAME = METHOD_NAME def extra_force(self, atom_id, envs): e1 = rks_grad.Gradients.extra_force(self, atom_id, envs) if self.auxbasis_response: e1 += envs['vhf'].aux[atom_id] return e1 Grad = Gradients
299,191
transform
# Conversion of model from channels_first to channels_last data format # Based on https://github.com/fastmachinelearning/qonnx/blob/ # 12c96a3ded06beacab08e0f554e4ed014476c0aa/src/qonnx/transformation/channels_last.py from hls4ml.model.layers import Concatenate, Input, Reshape from hls4ml.model.optimizer import OptimizerPass class ChannelsLastConverter(OptimizerPass): '''Converts a model from channels_first to channels_last data format by transposing the weights of relevant layers and adding a transpose layer for the inputs and outputs, if necessary''' def match(self, node): if not hasattr(node, 'channels_last_converted'): return True def METHOD_NAME(self, model, node): # If this parameter has not been set, this model does not need to be converted if 'InputsChannelLast' not in model.config.config['HLSConfig']['Model']: node.channels_last_converted = True return False outshape = node.get_output_variable().shape if isinstance(node, Input): # if inputs are not yet transposed into channels_last, add transpose layer if not model.config.config['HLSConfig']['Model']['InputsChannelLast'] and len(outshape) > 1: # Add transpose for input layer input = node.name if len(outshape) == 2: attributes = {'perm': [1, 0]} else: attributes = {'perm': [1, 2, 0]} transpose_node = model.make_node( 'Transpose', f'transpose_input_for_{node.get_attr("name")}', attributes, [input] ) transpose_node.set_attr('name', f'transpose_input_for_{node.get_attr("name")}') transpose_node.channels_last_converted = True model.insert_node(transpose_node) else: input_shape = node.get_output_variable().shape input_shape.append(input_shape.pop(0)) node.get_output_variable().shape = input_shape dim_names = [f'N_INPUT_{i}_{node.index}' for i in range(1, len(input_shape) + 1)] node.get_output_variable().dim_names = dim_names else: # Transpose weight tensors tensors = ['weight', 'depthwise', 'pointwise', 'zero_bias', 'scale', 'recurrent_weight'] for tensor in tensors: try: if len(node.get_weights(tensor).shape) 
== 2: weights_channels_last = node.get_weights(tensor).data.transpose() node.get_weights(tensor).data = weights_channels_last elif len(node.get_weights(tensor).shape) == 3: weights_channels_last = node.get_weights(tensor).data.transpose([2, 1, 0]) node.get_weights(tensor).data = weights_channels_last elif len(node.get_weights(tensor).shape) == 4: weights_channels_last = node.get_weights(tensor).data.transpose([2, 3, 1, 0]) node.get_weights(tensor).data = weights_channels_last except KeyError: pass try: node.set_attr('data_format', 'channels_last') except AttributeError: pass # Adjust axis of operation if isinstance(node, Concatenate): old_axis = node.get_attr('axis') if len(outshape) == 2: if old_axis == -1 or old_axis == 2: node.set_attr('axis', 1) else: node.set_attr('axis', 2) elif len(outshape) == 3: if old_axis == 3 or old_axis == -1: node.set_attr('axis', 1) elif old_axis == 2 or old_axis == -2: node.set_attr('axis', 2) # Not required, but left for clarity else: node.set_attr('axis', 3) # Adjust output shape outdims = node.get_output_variable().dim_names if len(outshape) == 2: shape = [outshape[1], outshape[0]] dims = [outdims[1], outdims[0]] node.add_output_variable(shape, dims) elif len(outshape) == 3: shape = [outshape[1], outshape[2], outshape[0]] dims = [outdims[1], outdims[2], outdims[0]] node.add_output_variable(shape, dims) # Have to transpose back before flattening to get correct order of elements in the flattened tensor if isinstance(node, Reshape) and len(node.attributes['target_shape']) == 1: previous_node = node.get_input_node(node.inputs[0]) input = previous_node.name outshape = previous_node.get_output_variable().shape if len(outshape) == 2: attributes = {'perm': [1, 0]} else: attributes = {'perm': [2, 0, 1]} transpose_node = model.make_node( 'Transpose', f'transpose_input_for_{node.get_attr("name")}', attributes, [input] ) transpose_node.channels_last_converted = True model.insert_node(transpose_node) # Add transpose for output layer elif ( 
node.get_attr('name') in model.outputs and len(outshape) > 1 and model.config.config['HLSConfig']['Model']['TransposeOutputs'] ): input = node.name outshape = node.get_output_variable().shape if len(outshape) == 2: attributes = {'perm': [1, 0]} else: attributes = {'perm': [2, 0, 1]} transpose_node = model.make_node( 'Transpose', f'transpose_ouput_for_{node.get_attr("name")}', attributes, [input] ) transpose_node.channels_last_converted = True model.insert_node(transpose_node) node.channels_last_converted = True return True
299,192
get lr
"""Callback class(es) for using during model training.""" import logging from typing import Dict, List import warnings import numpy as np from tqdm.std import Bar from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.callbacks import TQDMProgressBar from pytorch_lightning.utilities import rank_zero_only from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler from graphnet.utilities.logging import Logger class PiecewiseLinearLR(_LRScheduler): """Interpolate learning rate linearly between milestones.""" def __init__( self, optimizer: Optimizer, milestones: List[int], factors: List[float], last_epoch: int = -1, verbose: bool = False, ): """Construct `PiecewiseLinearLR`. For each milestone, denoting a specified number of steps, a factor multiplying the base learning rate is specified. For steps between two milestones, the learning rate is interpolated linearly between the two closest milestones. For steps before the first milestone, the factor for the first milestone is used; vice versa for steps after the last milestone. Args: optimizer: Wrapped optimizer. milestones: List of step indices. Must be increasing. factors: List of multiplicative factors. Must be same length as `milestones`. last_epoch: The index of the last epoch. verbose: If ``True``, prints a message to stdout for each update. """ # Check(s) if milestones != sorted(milestones): raise ValueError("Milestones must be increasing") if len(milestones) != len(factors): raise ValueError( "Only multiplicative factor must be specified for each milestone." ) self.milestones = milestones self.factors = factors super().__init__(optimizer, last_epoch, verbose) def _get_factor(self) -> np.ndarray: # Linearly interpolate multiplicative factor between milestones. 
return np.interp(self.last_epoch, self.milestones, self.factors) def METHOD_NAME(self) -> List[float]: """Get effective learning rate(s) for each optimizer.""" if not self._get_lr_called_within_step: warnings.warn( "To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning, ) return [base_lr * self._get_factor() for base_lr in self.base_lrs] class ProgressBar(TQDMProgressBar): """Custom progress bar for graphnet. Customises the default progress in pytorch-lightning. """ def _common_config(self, bar: Bar) -> Bar: bar.unit = " batch(es)" bar.colour = "green" return bar def init_validation_tqdm(self) -> Bar: """Override for customisation.""" bar = super().init_validation_tqdm() bar = self._common_config(bar) return bar def init_predict_tqdm(self) -> Bar: """Override for customisation.""" bar = super().init_predict_tqdm() bar = self._common_config(bar) return bar def init_test_tqdm(self) -> Bar: """Override for customisation.""" bar = super().init_test_tqdm() bar = self._common_config(bar) return bar def init_train_tqdm(self) -> Bar: """Override for customisation.""" bar = super().init_train_tqdm() bar = self._common_config(bar) return bar def get_metrics(self, trainer: Trainer, model: LightningModule) -> Dict: """Override to not show the version number in the logging.""" items = super().get_metrics(trainer, model) items.pop("v_num", None) return items def on_train_epoch_start( self, trainer: Trainer, model: LightningModule ) -> None: """Print the results of the previous epoch on a separate line. This allows the user to see the losses/metrics for previous epochs while the current is training. The default behaviour in pytorch- lightning is to overwrite the progress bar from previous epochs. 
""" if trainer.current_epoch > 0: self.train_progress_bar.set_postfix( self.get_metrics(trainer, model) ) print("") super().on_train_epoch_start(trainer, model) self.train_progress_bar.set_description( f"Epoch {trainer.current_epoch:2d}" ) def on_train_epoch_end( self, trainer: Trainer, model: LightningModule ) -> None: """Log the final progress bar for the epoch to file. Don't duplciate to stdout. """ super().on_train_epoch_end(trainer, model) if rank_zero_only.rank == 0: # Construct Logger logger = Logger() # Log only to file, not stream h = logger.handlers[0] assert isinstance(h, logging.StreamHandler) level = h.level h.setLevel(logging.ERROR) logger.info(str(super().train_progress_bar)) h.setLevel(level)
299,193
from hdf
# coding: utf-8 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department # Distributed under the terms of "New BSD License", see the LICENSE file. __author__ = "Yury Lysogorskiy, Jan Janssen, Marvin Poul" __copyright__ = ( "Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - " "Computational Materials Design (CM) Department" ) __version__ = "0.1" __maintainer__ = "Marvin Poul" __email__ = "poul@mpie.de" __status__ = "development" __date__ = "Aug 12, 2020" from pyiron_base import DataContainer, GenericJob, deprecate from pyiron_atomistics.atomistics.job.atomistic import AtomisticGenericJob from pyiron_atomistics.atomistics.structure.atoms import Atoms from pyiron_atomistics.atomistics.structure.has_structure import HasStructure from pyiron_atomistics.atomistics.structure.structurestorage import StructureStorage class StructureContainer(GenericJob, HasStructure): """ Container to save a list of structures in HDF5 together with tags. Add new structures with :meth:`.append`, they are added to :attr:`.structure_lst`. The HDF5 is written when :meth:`.run` is called. """ def __init__(self, project, job_name): super().__init__(project, job_name) self.__version__ = "0.2.0" self.__hdf_version__ = "0.3.0" self._structure_lst = DataContainer(table_name="structures") self._container = StructureStorage() self.server.run_mode.interactive = True @property def structure_lst(self): """ :class:`.DataContainer`: list of :class:`~.Atoms` """ if len(self._structure_lst) != len(self._container): self._structure_lst = DataContainer(list(self._container.iter_structures())) return self._structure_lst @staticmethod def _to_structure(structure_or_job): """ Return structure from structure or atomic job. 
Args: structure_or_job (:class:`~.AtomisticGenericJob`, :class:`~.Atoms`): if :class:`~.AtomisticGenericJob` try to get most recent structure, copy it and set the job_id in :attr:`~.Atoms.info` Returns: :class:`~.Atoms`: structure from the job or given structure Raises: ValueError: if given :class:`~.AtomisticGenericJob` has no structure set TypeError: if structure_or_job is of invalid type """ if isinstance(structure_or_job, AtomisticGenericJob): if structure_or_job.structure: s = structure_or_job.get_structure(-1).copy() s.info["jobid"] = structure_or_job.job_id return s else: raise ValueError("The job does not contain any structure to import.") elif isinstance(structure_or_job, Atoms): return structure_or_job else: raise TypeError( f"structure_or_job must be of type {Atoms} or {AtomisticGenericJob}, not {type(structure_or_job)}" ) def append(self, structure_or_job): """ Add new structure to structure list. The added structure will available in :attr:`~.structure_lst`. If the structure is added via a job, retrieve the latest structure and add its id to :attr:`pyiron_atomistics.atomistics.generic.Atoms.info`. Args: structure_or_job (:class:`~.AtomisticGenericJob`/:class:`~.Atoms`): if :class:`~.AtomisticGenericJob` add from :meth:`~.AtomisticGenericJob.get_structure`, otherwise add just the given :class:`~.Atoms` Returns: dict: item added to :attr:`~.structure_lst` """ struct = self._to_structure(structure_or_job) self._container.add_structure(struct) return struct def add_structure(self, structure: Atoms, identifier: str = None, **kwargs): """ Add a new structure. 
Args: structure (:class:`~.Atoms`): structure to add identifier (str, optional): optional identifier for the structure **kwargs: passed through to the underlying :meth:`.StructureStorage.add_structure` """ self._container.add_structure(structure, identifier=identifier, **kwargs) def run_static(self): self.status.finished = True def run_if_interactive(self): self.to_hdf() self.status.finished = True def write_input(self): pass def collect_output(self): pass @property @deprecate("use get_structure()") def structure(self): return self._get_structure(frame=0) @structure.setter @deprecate("use append()") def structure(self, struct): self.append(struct) def _number_of_structures(self): return len(self._container) def _get_structure(self, frame=-1, wrap_atoms=True): return self._container._get_structure(frame=frame, wrap_atoms=wrap_atoms) def to_hdf(self, hdf=None, group_name=None): super().to_hdf(hdf=hdf, group_name=group_name) self._container.to_hdf(hdf=self.project_hdf5, group_name="structures") def METHOD_NAME(self, hdf=None, group_name=None): # keep hdf structure for version peeking in separate variable, so that # the inherited from_hdf() can properly deal with it h5 = hdf or self.project_hdf5 if group_name: h5 = h5[group_name] if "HDF_VERSION" in h5.list_nodes(): hdf_version = h5["HDF_VERSION"] else: # old versions didn't use to set a HDF version hdf_version = "0.1.0" if hdf_version == "0.1.0": super().METHOD_NAME(hdf=hdf, group_name=group_name) with self.project_hdf5.open("input") as hdf5_input: self.append(Atoms().METHOD_NAME(hdf5_input)) elif hdf_version == "0.2.0": GenericJob.METHOD_NAME(self, hdf=hdf, group_name=group_name) hdf = self.project_hdf5["structures"] for group in sorted(hdf.list_groups()): self.append(Atoms().METHOD_NAME(hdf=hdf, group_name=group)) else: super().METHOD_NAME(hdf=hdf, group_name=group_name) self._container.METHOD_NAME(hdf=self.project_hdf5, group_name="structures") @property def plot(self): """ Accessor for :class:`~.StructurePlots` 
instance using these structures. """ return self._container.plot
299,194
assert style
from selenium.webdriver.common.by import By

from nicegui import ui

from .screen import Screen


def test_classes(screen: Screen):
    """Class list accumulates, deduplicates, removes and replaces entries."""
    label = ui.label('Some label')

    def expect_classes(classes: str) -> None:
        # locate the label via its normalized class attribute and its text
        xpath = f'//*[normalize-space(@class)="{classes}" and text()="Some label"]'
        assert screen.selenium.find_element(By.XPATH, xpath)

    screen.open('/')
    screen.wait(0.5)
    expect_classes('')

    label.classes('one')
    expect_classes('one')

    # adding the same class again must not duplicate it
    label.classes('one')
    expect_classes('one')

    label.classes('two three')
    expect_classes('one two three')

    label.classes(remove='two')
    expect_classes('one three')

    label.classes(replace='four')
    expect_classes('four')


def test_style_parsing():
    """_parse_style splits a CSS declaration string into a property dict."""
    # pylint: disable=protected-access
    cases = [
        (None, {}),
        ('color: red; background-color: blue', {'color': 'red', 'background-color': 'blue'}),
        ('width:12em;height:34.5em', {'width': '12em', 'height': '34.5em'}),
        ('transform: translate(120.0px, 50%)', {'transform': 'translate(120.0px, 50%)'}),
        ('box-shadow: 0 0 0.5em #1976d2', {'box-shadow': '0 0 0.5em #1976d2'}),
    ]
    for declaration, expected in cases:
        assert ui.element._parse_style(declaration) == expected


def test_props_parsing():
    """_parse_props turns a Quasar props string into a name-to-value dict."""
    # pylint: disable=protected-access
    cases = [
        (None, {}),
        ('one two=1 three="abc def"', {'one': True, 'two': '1', 'three': 'abc def'}),
        ('loading percentage=12.5', {'loading': True, 'percentage': '12.5'}),
        ('size=50%', {'size': '50%'}),
        ('href=http://192.168.42.100/', {'href': 'http://192.168.42.100/'}),
        ('hint="Your \\"given\\" name"', {'hint': 'Your "given" name'}),
        ('input-style="{ color: #ff0000 }"', {'input-style': '{ color: #ff0000 }'}),
    ]
    for text, expected in cases:
        assert ui.element._parse_props(text) == expected


def test_style(screen: Screen):
    """Style declarations accumulate, deduplicate, remove and replace."""
    label = ui.label('Some label')

    def METHOD_NAME(style: str) -> None:
        # locate the label via its normalized style attribute and its text
        xpath = f'//*[normalize-space(@style)="{style}" and text()="Some label"]'
        assert screen.selenium.find_element(By.XPATH, xpath)

    screen.open('/')
    screen.wait(0.5)
    METHOD_NAME('')

    label.style('color: red')
    METHOD_NAME('color: red;')

    # applying the same declaration again must not duplicate it
    label.style('color: red')
    METHOD_NAME('color: red;')

    # a new value for an existing property overwrites the old one
    label.style('color: blue')
    METHOD_NAME('color: blue;')

    label.style('font-weight: bold')
    METHOD_NAME('color: blue; font-weight: bold;')

    label.style(remove='color: blue')
    METHOD_NAME('font-weight: bold;')

    label.style(replace='text-decoration: underline')
    METHOD_NAME('text-decoration: underline;')

    label.style('color: blue;')
    METHOD_NAME('text-decoration: underline; color: blue;')


def test_props(screen: Screen):
    """Props accumulate, deduplicate and can be removed again."""
    input_ = ui.input()

    def assert_props(*props: str) -> None:
        # every prop shows up as a "q-field--<prop>" class on the wrapping label
        conditions = [f'contains(@class, "q-field--{prop}")' for prop in props]
        assert screen.selenium.find_element(By.XPATH, f'//label[{" and ".join(conditions)}]')

    screen.open('/')
    screen.wait(0.5)
    assert_props('standard')

    input_.props('dark')
    assert_props('standard', 'dark')

    # setting the same prop again must not duplicate it
    input_.props('dark')
    assert_props('standard', 'dark')

    input_.props(remove='dark')
    assert_props('standard')


def test_move(screen: Screen):
    """Elements can be moved between containers and to a given index."""
    with ui.card() as a:
        ui.label('A')
        x = ui.label('X')
    with ui.card() as b:
        ui.label('B')
    ui.button('Move X to A', on_click=lambda: x.move(a))
    ui.button('Move X to B', on_click=lambda: x.move(b))
    ui.button('Move X to top', on_click=lambda: x.move(target_index=0))

    def y(text: str) -> int:
        # vertical position of the element showing the given text
        return screen.find(text).location['y']

    screen.open('/')
    assert y('A') < y('X') < y('B')

    screen.click('Move X to B')
    screen.wait(0.5)
    assert y('A') < y('B') < y('X')

    screen.click('Move X to A')
    screen.wait(0.5)
    assert y('A') < y('X') < y('B')

    screen.click('Move X to top')
    screen.wait(0.5)
    assert y('X') < y('A') < y('B')


def test_xss(screen: Screen):
    """Label content is rendered as text, never executed as markup."""
    ui.label('</script><script>alert(1)</script>')
    ui.label('<b>Bold 1</b>, `code`, copy&paste, multi\nline')
    ui.button('Button', on_click=lambda: (
        ui.label('</script><script>alert(2)</script>'),
        ui.label('<b>Bold 2</b>, `code`, copy&paste, multi\nline'),
    ))

    screen.open('/')
    screen.click('Button')
    screen.should_contain('</script><script>alert(1)</script>')
    screen.should_contain('</script><script>alert(2)</script>')
    screen.should_contain('<b>Bold 1</b>, `code`, copy&paste, multi\nline')
    screen.should_contain('<b>Bold 2</b>, `code`, copy&paste, multi\nline')
299,195
audio tokenize
# Copyright (c) 2017-2019 NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
import yaml

from . import autodecode
from . import cache
from . import filters
from . import shardlists
from . import tariterators
from .filters import reraise_exception
from .paddle_utils import DataLoader
from .paddle_utils import IterableDataset
from .pipeline import DataPipeline


class FluidInterface:
    """Mixin providing a fluent (chainable) interface on top of a pipeline.

    Each method wraps a stage from :mod:`filters` (or a decoder) in
    ``self.compose(...)`` and returns the resulting pipeline, so calls can
    be chained: ``dataset.shuffle(100).decode().batched(16)``.
    Host classes must provide a ``compose`` method (see ``DataPipeline``).
    """

    def batched(self, batchsize):
        """Group consecutive samples into batches of ``batchsize``."""
        return self.compose(filters.batched(batchsize))

    def dynamic_batched(self, max_frames_in_batch):
        """Group samples into batches bounded by a total frame budget."""
        # bug fix: this previously called ``filter.dynamic_batched`` -- the
        # builtin ``filter`` has no such attribute, so the call raised
        # AttributeError; the intended module is ``filters``.
        return self.compose(filters.dynamic_batched(max_frames_in_batch))

    def unbatched(self):
        """Flatten batches back into individual samples."""
        return self.compose(filters.unbatched())

    def listed(self, batchsize, partial=True):
        """Batch samples into plain lists (no collation)."""
        # bug fix: the batching options used to be passed as keyword
        # arguments to ``compose`` instead of to ``filters.batched``, so
        # they were never applied; forward them to the filter itself.
        return self.compose(
            filters.batched(batchsize, collation_fn=None, partial=partial))

    def unlisted(self):
        """Flatten lists produced by :meth:`listed` back into samples."""
        return self.compose(filters.unlisted())

    def log_keys(self, logfile=None):
        """Log the keys of passing samples to ``logfile`` (debugging aid)."""
        return self.compose(filters.log_keys(logfile))

    def shuffle(self, size, **kw):
        """Shuffle samples with a buffer of ``size``; a size < 1 is a no-op."""
        if size < 1:
            return self
        else:
            return self.compose(filters.shuffle(size, **kw))

    def map(self, f, handler=reraise_exception):
        """Apply ``f`` to every sample; ``handler`` deals with exceptions."""
        return self.compose(filters.map(f, handler=handler))

    def decode(self,
               *args,
               pre=None,
               post=None,
               only=None,
               partial=False,
               handler=reraise_exception):
        """Decode samples; string arguments name image decoding specs."""
        handlers = [
            autodecode.ImageHandler(x) if isinstance(x, str) else x
            for x in args
        ]
        decoder = autodecode.Decoder(
            handlers, pre=pre, post=post, only=only, partial=partial)
        return self.map(decoder, handler=handler)

    def map_dict(self, handler=reraise_exception, **kw):
        """Apply per-key functions (given as keyword args) to each sample."""
        return self.compose(filters.map_dict(handler=handler, **kw))

    def select(self, predicate, **kw):
        """Keep only samples for which ``predicate`` is true."""
        return self.compose(filters.select(predicate, **kw))

    def to_tuple(self, *args, handler=reraise_exception):
        """Convert dict samples to tuples of the named fields."""
        return self.compose(filters.to_tuple(*args, handler=handler))

    def map_tuple(self, *args, handler=reraise_exception):
        """Apply the given functions element-wise to tuple samples."""
        return self.compose(filters.map_tuple(*args, handler=handler))

    def slice(self, *args):
        """Take a slice of the sample stream (like ``itertools.islice``)."""
        return self.compose(filters.slice(*args))

    def rename(self, **kw):
        """Rename sample fields according to keyword arguments."""
        return self.compose(filters.rename(**kw))

    def rsample(self, p=0.5):
        """Randomly subsample the stream, keeping each sample with prob. ``p``."""
        return self.compose(filters.rsample(p))

    def rename_keys(self, *args, **kw):
        """Rename sample keys by pattern."""
        return self.compose(filters.rename_keys(*args, **kw))

    def extract_keys(self, *args, **kw):
        """Extract the named keys from each sample."""
        return self.compose(filters.extract_keys(*args, **kw))

    def xdecode(self, *args, **kw):
        """Decode samples using extension-based decoders."""
        return self.compose(filters.xdecode(*args, **kw))

    def audio_data_filter(self, *args, **kw):
        """Filter audio samples by data properties (length etc.)."""
        return self.compose(filters.audio_data_filter(*args, **kw))

    def METHOD_NAME(self, *args, **kw):
        """Tokenize the transcripts of audio samples."""
        return self.compose(filters.METHOD_NAME(*args, **kw))

    def resample(self, *args, **kw):
        """Resample audio waveforms to a target sample rate."""
        return self.compose(filters.resample(*args, **kw))

    def audio_compute_fbank(self, *args, **kw):
        """Compute filterbank features from audio waveforms."""
        return self.compose(filters.audio_compute_fbank(*args, **kw))

    def audio_spec_aug(self, *args, **kw):
        """Apply SpecAugment-style augmentation to audio features."""
        return self.compose(filters.audio_spec_aug(*args, **kw))

    def sort(self, size=500):
        """Locally sort samples within a buffer of ``size``."""
        return self.compose(filters.sort(size))

    def audio_padding(self):
        """Pad audio feature batches to a common length."""
        return self.compose(filters.audio_padding())

    def audio_cmvn(self, cmvn_file):
        """Apply cepstral mean and variance normalization from ``cmvn_file``."""
        return self.compose(filters.audio_cmvn(cmvn_file))


class WebDataset(DataPipeline, FluidInterface):
    """Small fluid-interface wrapper for DataPipeline."""

    def __init__(
            self,
            urls,
            handler=reraise_exception,
            resampled=False,
            repeat=False,
            shardshuffle=None,
            cache_size=0,
            cache_dir=None,
            detshuffle=False,
            nodesplitter=shardlists.single_node_only,
            verbose=False, ):
        super().__init__()
        # ``urls`` may be a ready-made dataset, a YAML spec file, a spec
        # dict, or a shard URL pattern; build the shard source accordingly.
        if isinstance(urls, IterableDataset):
            assert not resampled
            self.append(urls)
        elif isinstance(urls, str) and (urls.endswith(".yaml") or
                                        urls.endswith(".yml")):
            with (open(urls)) as stream:
                spec = yaml.safe_load(stream)
            assert "datasets" in spec
            self.append(shardlists.MultiShardSample(spec))
        elif isinstance(urls, dict):
            assert "datasets" in urls
            self.append(shardlists.MultiShardSample(urls))
        elif resampled:
            self.append(shardlists.ResampledShards(urls))
        else:
            self.append(shardlists.SimpleShardList(urls))
            self.append(nodesplitter)
            self.append(shardlists.split_by_worker)
            if shardshuffle is True:
                shardshuffle = 100
            if shardshuffle is not None:
                if detshuffle:
                    self.append(filters.detshuffle(shardshuffle))
                else:
                    self.append(filters.shuffle(shardshuffle))
        # cache_size == 0 disables the on-disk cache; -1 means unlimited
        if cache_size == 0:
            self.append(tariterators.tarfile_to_samples(handler=handler))
        else:
            assert cache_size == -1 or cache_size > 0
            self.append(
                cache.cached_tarfile_to_samples(
                    handler=handler,
                    verbose=verbose,
                    cache_size=cache_size,
                    cache_dir=cache_dir, ))


class FluidWrapper(DataPipeline, FluidInterface):
    """Small fluid-interface wrapper for DataPipeline."""

    def __init__(self, initial):
        super().__init__()
        self.append(initial)


class WebLoader(DataPipeline, FluidInterface):
    """Fluent wrapper around a ``DataLoader`` built from the given args."""

    def __init__(self, *args, **kw):
        super().__init__(DataLoader(*args, **kw))
299,196
batchrm framemd5
#!/usr/bin/env python3
"""Create framemd5 manifests for a batch of SIPs, verify them against the
md5 manifests of the matching PSM packages, then delete the framemd5 files.
Written by Yazhou He.
"""
import os
import sys
import time
import argparse
import subprocess
import platform
import ififuncs
import framemd5


def parse_args():
    '''
    Parse command line arguments.
    '''
    parser = argparse.ArgumentParser(
        description='create framemd5 for a batch of SIP objects. check if all the hashes match to the md5 manifest in the PSM packages.'
        ' Written by Yazhou He'
    )
    parser.add_argument(
        '-sip',
        help='Path to the parent folder containing a batch of SIPs.'
    )
    parser.add_argument(
        '-psm',
        help='Path to the parent folder containing a batch of PSMs.'
    )
    parsed_args = parser.parse_args()
    return parsed_args


def batchframemd5(sip_path):
    """Generate a framemd5 manifest for every object in every SIP.

    Walks ``sip_path`` for SIP folders (names starting with 'oe'), runs
    framemd5 on each object that is not a .qctools.mkv sidecar, and returns
    the list of generated .framemd5 file paths.
    """
    print("\n\nPART 1 - framemd5 creation")
    oe = os.listdir(sip_path)
    framemd5_f = []
    for sip in oe:
        if sip[:2] == 'oe':
            print("\n**** SIP has been found: %s" % sip)
            sip_d = os.path.join(sip_path, sip)
            uuid = os.listdir(sip_d)
            for item in uuid:
                if os.path.isdir(os.path.join(sip_d, item)):
                    uuid_d = os.path.join(sip_d, item)
                    obj = os.path.join(uuid_d, 'objects')
                    obj_d = os.listdir(obj)
                    for item in obj_d:
                        if not item.endswith('.qctools.mkv'):
                            v = os.path.join(obj, item)
                            print("**** Object has been found: %s" % v)
                            try:
                                # call framemd5 in-process rather than via a
                                # subprocess (old subprocess variant removed)
                                framemd5_cmd = ['-i', v]
                                framemd5.main(framemd5_cmd)
                            except Exception as e:
                                print(e)
                                print("!!!! framemd5.py failed!")
                    # re-list the objects folder to pick up the new manifests
                    obj_d = os.listdir(obj)
                    for item in obj_d:
                        if item.endswith('.framemd5'):
                            framemd5_sip = os.path.join(obj, item)
                            framemd5_f.append(framemd5_sip)
                            print("**** Framemd5 has been genreated: " + framemd5_sip)
    return framemd5_f


def diff_framemd5(framemd5_f, psm_path, txt_name_source):
    """Compare each SIP framemd5 manifest to the corresponding PSM md5.

    Pairs manifests by list order (framemd5_f[i] vs. the i-th .md5 found
    under ``psm_path``) and logs matches/mismatches to ``txt_name_source``.
    NOTE(review): if fewer .md5 files exist than framemd5 manifests the
    original index-based pairing raises IndexError; behavior kept as-is.
    """
    print("\n\nPART 2 - framemd5 vs md5 fixity check")
    print('Framemd5 exists:')
    for f in framemd5_f:
        print('\t' + f)
    framemd5_count = len(framemd5_f)
    print('Md5 exists:')
    psms = os.listdir(psm_path)
    md5_f = []
    for psm in psms:
        psm_d = os.path.join(psm_path, psm)
        for root, dir, file in os.walk(psm_d):
            for f in file:
                if f.endswith('.md5'):
                    # bug fix: the path used to be joined onto psm_d, which is
                    # wrong for md5 files found in subdirectories by os.walk;
                    # join onto the directory the file was actually found in.
                    md5 = os.path.join(root, f)
                    md5_f.append(md5)
                    print('\t' + md5)
    md5_count = len(md5_f)
    if framemd5_count == md5_count:
        print('**** SIPs/framemd5 count matches PSMs/md5 count %s\n' % framemd5_count)
        ififuncs.generate_txt('',txt_name_source, 'SIPs/framemd5 count matches PSMs/md5 count %s' % framemd5_count)
    else:
        print('!!!! SIPs/framemd5 count %s DOES NOT match PSMs/md5 count %s' % (framemd5_count, md5_count))
        ififuncs.generate_txt('',txt_name_source, 'SIPs/framemd5 count %s DOES NOT match PSMs/md5 count %s' % (framemd5_count, md5_count))
        # sys.exit()
    i = 0
    while i < framemd5_count:
        with open(framemd5_f[i], 'r') as f1, open(md5_f[i], 'r') as f2:
            flag = True
            # compare line by line, skipping header/comment lines
            for l1, l2 in zip(f1, f2):
                if not l1.startswith('#') and not l2.startswith('#'):
                    if l1 != l2:
                        flag = False
                        print('-----\n!!!! %s DOES NOT match %s\n\tFrom %s:\n\t%s\tFrom %s:\n\t%sMISMATCH FOUND - GOING TO THE NEXT MANIFEST...' % (framemd5_f[i], md5_f[i], framemd5_f[i], l1, md5_f[i], l2))
                        ififuncs.generate_txt('',txt_name_source, '! %s DOES NOT match %s\n\tFrom %s:\n\t%s\tFrom %s:\n\t%s' % (framemd5_f[i], md5_f[i], framemd5_f[i], l1, md5_f[i], l2))
                        # mismatch_list.append()
                        break
            if flag:
                print('-----\n**** All checksums match between %s and %s' % (framemd5_f[i], md5_f[i]))
                ififuncs.generate_txt('',txt_name_source, '* All checksums match between %s and %s' % (framemd5_f[i], md5_f[i]))
        i = i + 1
    else:
        print('-----\nFixity check completed')


def METHOD_NAME(sip_path):
    """Delete every .framemd5 file under the SIPs' objects folders.

    Returns the list of deleted file paths (empty if none were found).
    """
    print("\n\nPART 3 - framemd5 deletation")
    framemd5_f = []
    for root, dir, file in os.walk(sip_path):
        if root.endswith('objects'):
            for f in file:
                f_abspath = os.path.join(root, f)
                if f.endswith('.framemd5') and os.path.isfile(f_abspath):
                    os.remove(f_abspath)
                    framemd5_f.append(f_abspath)
    return framemd5_f


def main():
    """Run the three phases: create, verify, delete."""
    args = parse_args()
    sip_path = args.sip
    psm_path = args.psm
    framemd5_f = batchframemd5(sip_path)
    time.sleep(1)
    desktop_logs_dir = ififuncs.make_desktop_logs_dir()
    txt_name_filename = (os.path.basename(sys.argv[0]).split(".")[0]) + time.strftime("_%Y_%m_%dT%H_%M_%S")
    txt_name_source = "%s/%s.txt" % (desktop_logs_dir, txt_name_filename)
    ififuncs.generate_txt('',txt_name_source, 'SIP Directory: %s' % sip_path)
    ififuncs.generate_txt('',txt_name_source, 'PSM Directory: %s' % psm_path)
    diff_framemd5(framemd5_f, psm_path, txt_name_source)
    time.sleep(1)
    del_framemd5_f = METHOD_NAME(sip_path)
    if not del_framemd5_f:
        print('!!! Cannot delete framemd5 files!')
    else:
        print("**** Below framemd5 files have been deleted:")
        for f in del_framemd5_f:
            print('\t' + f)


if __name__ == '__main__':
    main()
299,197
test detail
from pathlib import Path

import pytest
from django.conf import settings
from django.urls import reverse

from rdmo.core.constants import VALUE_TYPE_FILE

from ..models import Project, Snapshot

# (username, password) pairs used to log in for each parametrized run;
# the anonymous user has no password
users = (
    ('owner', 'owner'),
    ('manager', 'manager'),
    ('author', 'author'),
    ('guest', 'guest'),
    ('api', 'api'),
    ('user', 'user'),
    ('site', 'site'),
    ('anonymous', None),
)

# which project ids each user may view snapshots of
view_snapshot_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5],
    'author': [1, 3, 5],
    'guest': [1, 3, 5],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5]
}

# add/change/delete permissions are identical, hence the chained assignment
add_snapshot_permission_map = change_snapshot_permission_map = delete_snapshot_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5]
}

urlnames = {
    'list': 'v1-projects:project-snapshot-list',
    'detail': 'v1-projects:project-snapshot-detail'
}

projects = [1, 2, 3, 4, 5]
snapshots = [1, 3, 7, 4, 5, 6]


@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_list(db, client, username, password, project_id):
    """Listing snapshots returns exactly the project's snapshots for permitted users, 404 otherwise."""
    client.login(username=username, password=password)

    url = reverse(urlnames['list'], args=[project_id])
    response = client.get(url)

    if project_id in view_snapshot_permission_map.get(username, []):
        assert response.status_code == 200
        assert isinstance(response.json(), list)

        if username == 'user':
            assert sorted([item['id'] for item in response.json()]) == []
        else:
            values_list = Snapshot.objects.filter(project_id=project_id) \
                                          .order_by('id').values_list('id', flat=True)
            assert sorted([item['id'] for item in response.json()]) == list(values_list)
    else:
        assert response.status_code == 404


@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('snapshot_id', snapshots)
def METHOD_NAME(db, client, username, password, project_id, snapshot_id):
    """Retrieving a single snapshot returns it for permitted users, 404 otherwise."""
    client.login(username=username, password=password)
    # bug fix: this used to end in a no-op ``.filter()`` which returned a
    # queryset; use ``.first()`` (as test_update does) to get the instance
    # or None, so the existence check below is explicit
    snapshot = Snapshot.objects.filter(project_id=project_id, id=snapshot_id).first()

    url = reverse(urlnames['detail'], args=[project_id, snapshot_id])
    response = client.get(url)

    if snapshot and project_id in view_snapshot_permission_map.get(username, []):
        assert response.status_code == 200
        assert isinstance(response.json(), dict)
        assert response.json().get('id') == snapshot_id
    else:
        assert response.status_code == 404


@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create(db, client, files, username, password, project_id):
    """Creating a snapshot copies the project's current values for permitted users."""
    client.login(username=username, password=password)
    project = Project.objects.get(id=project_id)
    snapshot_count = project.snapshots.count()
    values_count = project.values.count()
    current_values_count = project.values.filter(snapshot=None).count()

    url = reverse(urlnames['list'], args=[project_id])
    data = {
        'title': 'A new snapshot',
        'description': 'Some description'
    }
    response = client.post(url, data)

    if project_id in add_snapshot_permission_map.get(username, []):
        assert response.status_code == 201
        assert isinstance(response.json(), dict)
        assert response.json().get('id') in project.snapshots.values_list('id', flat=True)

        # snapshotting duplicates the current (snapshot=None) values
        assert project.snapshots.count() == snapshot_count + 1
        assert project.values.count() == values_count + current_values_count

        for file_value in project.values.filter(value_type=VALUE_TYPE_FILE):
            assert Path(settings.MEDIA_ROOT).joinpath(file_value.file.name).exists()
    else:
        if project_id in view_snapshot_permission_map.get(username, []):
            assert response.status_code == 403
        else:
            assert response.status_code == 404

        assert project.snapshots.count() == snapshot_count
        assert project.values.count() == values_count


@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('snapshot_id', snapshots)
def test_update(db, client, username, password, project_id, snapshot_id):
    """Updating a snapshot changes metadata only; values and files are untouched."""
    client.login(username=username, password=password)
    project = Project.objects.get(id=project_id)
    snapshot = Snapshot.objects.filter(project_id=project_id, id=snapshot_id).first()
    snapshot_count = project.snapshots.count()
    values_count = project.values.count()
    values_files = [value.file.name for value in project.values.filter(value_type=VALUE_TYPE_FILE)]

    url = reverse(urlnames['detail'], args=[project_id, snapshot_id])
    data = {
        'title': 'A new title',
        'description': 'A new description'
    }
    response = client.put(url, data, content_type='application/json')

    if snapshot and project_id in change_snapshot_permission_map.get(username, []):
        assert response.status_code == 200
        assert isinstance(response.json(), dict)
        assert response.json().get('id') in project.snapshots.values_list('id', flat=True)
    elif snapshot and project_id in view_snapshot_permission_map.get(username, []):
        assert response.status_code == 403
    else:
        assert response.status_code == 404

    assert project.snapshots.count() == snapshot_count
    assert project.values.count() == values_count
    for file_value in values_files:
        assert Path(settings.MEDIA_ROOT).joinpath(file_value).exists()


@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('snapshot_id', snapshots)
def test_delete(db, client, username, password, project_id, snapshot_id):
    """Deleting snapshots is never allowed: 405 for viewers, 404 for everyone else."""
    client.login(username=username, password=password)
    project = Project.objects.get(id=project_id)
    snapshot_count = project.snapshots.count()
    values_count = project.values.count()
    values_files = [value.file.name for value in project.values.filter(value_type=VALUE_TYPE_FILE)]

    url = reverse(urlnames['detail'], args=[project_id, snapshot_id])
    response = client.delete(url)

    if project_id in view_snapshot_permission_map.get(username, []):
        assert response.status_code == 405
    else:
        assert response.status_code == 404

    assert project.snapshots.count() == snapshot_count
    assert project.values.count() == values_count
    for file_value in values_files:
        assert Path(settings.MEDIA_ROOT).joinpath(file_value).exists()
299,198
deactivate command
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2019 The OctoPrint Project - Released under terms of the AGPLv3 License"

import logging

import click

from octoprint import init_settings
from octoprint.access.groups import FilebasedGroupManager
from octoprint.access.users import (
    CorruptUserStorage,
    FilebasedUserManager,
    InvalidUsername,
    UnknownUser,
    UserAlreadyExists,
)
from octoprint.cli import get_ctx_obj_option
from octoprint.util import get_class, sv

click.disable_unicode_literals_warning = True

# ~~ "octoprint user" commands


@click.group()
@click.pass_context
def cli(ctx):
    """
    User management.

    Note that this currently only supports managing user accounts stored in the configured user manager, not any
    user managers added through plugins and the "octoprint.users.factory" hook.
    """
    try:
        logging.basicConfig(
            level=logging.DEBUG
            if get_ctx_obj_option(ctx, "verbosity", 0) > 0
            else logging.WARN
        )
        settings = init_settings(
            get_ctx_obj_option(ctx, "basedir", None),
            get_ctx_obj_option(ctx, "configfile", None),
            overlays=get_ctx_obj_option(ctx, "overlays", None),
        )

        # instantiate the configured group manager, falling back to the
        # file-based default if the configured class cannot be resolved
        group_manager_name = settings.get(["accessControl", "groupManager"])
        try:
            clazz = get_class(group_manager_name)
            group_manager = clazz()
        except AttributeError:
            click.echo(
                "Could not instantiate group manager {}, "
                "falling back to FilebasedGroupManager!".format(group_manager_name),
                err=True,
            )
            group_manager = FilebasedGroupManager()
        ctx.obj.group_manager = group_manager

        # instantiate the configured user manager; corrupt storage is fatal
        # and re-raised, any other failure falls back to the file-based one
        name = settings.get(["accessControl", "userManager"])
        try:
            clazz = get_class(name)
            user_manager = clazz(group_manager=group_manager, settings=settings)
        except CorruptUserStorage:
            raise
        except Exception:
            click.echo(
                "Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(
                    name
                ),
                err=True,
            )
            user_manager = FilebasedUserManager(group_manager, settings=settings)

        ctx.obj.user_manager = user_manager
    except Exception:
        click.echo("Could not instantiate user manager", err=True)
        ctx.exit(-1)


@cli.command(name="list")
@click.pass_context
def list_users_command(ctx):
    """Lists user information"""
    users = ctx.obj.user_manager.get_all_users()
    _print_list(users)


@cli.command(name="add")
@click.argument("username", type=click.STRING, required=True)
@click.password_option("--password", "password", help="Password for the user")
@click.option("-g", "--group", "groups", multiple=True, help="Groups to set on the user")
@click.option(
    "-p",
    "--permission",
    "permissions",
    multiple=True,
    help="Individual permissions to set on the user",
)
@click.option(
    "--admin",
    "is_admin",
    type=click.BOOL,
    is_flag=True,
    default=False,
    help="Adds user to admin group",
)
@click.pass_context
def add_user_command(ctx, username, password, groups, permissions, is_admin):
    """Add a new user."""
    # bug fix: click passes a tuple for multiple=True options, which has no
    # ``append``; the old ``if not groups: groups = []`` only handled the
    # empty case, so combining -g with --admin raised AttributeError
    groups = list(groups)

    if is_admin:
        groups.append(ctx.obj.group_manager.admin_group)

    try:
        ctx.obj.user_manager.add_user(
            username, password, groups=groups, permissions=permissions, active=True
        )

        user = ctx.obj.user_manager.find_user(username)
        if user:
            click.echo("User created:")
            click.echo(f"\t{_user_to_line(user.as_dict())}")
    except UserAlreadyExists:
        click.echo(f"A user with the name {username} does already exist!", err=True)
    except InvalidUsername:
        click.echo(f"The username '{username}' is invalid!", err=True)


@cli.command(name="remove")
@click.argument("username", type=click.STRING)
@click.pass_context
def remove_user_command(ctx, username):
    """Remove an existing user."""
    # bug fix: the prompt used to read "This is will irreversibly ..."
    confirm = click.prompt(
        "This will irreversibly destroy the user account! Enter 'yes' to confirm",
        type=click.STRING,
    )

    if confirm.lower() == "yes":
        ctx.obj.user_manager.remove_user(username)
        click.echo(f"User {username} removed.")
    else:
        click.echo(f"User {username} not removed.")


@cli.command(name="password")
@click.argument("username", type=click.STRING)
@click.password_option("--password", "password", help="New password for user")
@click.pass_context
def change_password_command(ctx, username, password):
    """Change an existing user's password."""
    try:
        ctx.obj.user_manager.change_user_password(username, password)
        click.echo(f"Password changed for user {username}.")
    except UnknownUser:
        click.echo(f"User {username} does not exist!", err=True)


@cli.command(name="activate")
@click.argument("username", type=click.STRING)
@click.pass_context
def activate_command(ctx, username):
    """Activate a user account."""
    try:
        ctx.obj.user_manager.change_user_activation(username, True)
        click.echo(f"User {username} activated.")

        user = ctx.obj.user_manager.find_user(username)
        if user:
            # bug fix: this used to print "User created:" (copy-paste from
            # the add command) and call the non-snake-case user.asDict(),
            # inconsistent with as_dict() used everywhere else in this file
            click.echo(f"\t{_user_to_line(user.as_dict())}")
    except UnknownUser:
        click.echo(f"User {username} does not exist!", err=True)


@cli.command(name="deactivate")
@click.argument("username", type=click.STRING)
@click.pass_context
def METHOD_NAME(ctx, username):
    """Deactivate a user account."""
    # bug fix: docstring and messages used to say "activated" and
    # "User created:" -- copy-paste from the activate/add commands
    try:
        ctx.obj.user_manager.change_user_activation(username, False)
        click.echo(f"User {username} deactivated.")

        user = ctx.obj.user_manager.find_user(username)
        if user:
            click.echo(f"\t{_user_to_line(user.as_dict())}")
    except UnknownUser:
        click.echo(f"User {username} does not exist!", err=True)


def _print_list(users):
    """Print all users, one indented detail block per user, sorted by name."""
    click.echo(f"{len(users)} users registered in the system:")
    for user in sorted(
        map(lambda x: x.as_dict(), users), key=lambda x: sv(x.get("name"))
    ):
        click.echo(f"\t{_user_to_line(user)}")


def _user_to_line(user):
    """Format one user dict as a multi-line summary (name, active, groups, permissions)."""
    return (
        "{name}"
        "\n\t\tactive: {active}"
        "\n\t\tgroups: {groups}"
        "\n\t\tpermissions: {permissions}".format(
            name=user.get("name"),
            active=user.get("active", "False"),
            groups=", ".join(user.get("groups", [])),
            permissions=", ".join(user.get("permissions", [])),
        )
    )
299,199
sample
from __future__ import annotations import itertools import logging import typing as t from typing import TYPE_CHECKING import anyio from bentoml.exceptions import InvalidArgument logger = logging.getLogger(__name__) if TYPE_CHECKING: from ..runner.container import Payload T = t.TypeVar("T") To = t.TypeVar("To") Ti = t.TypeVar("Ti") CUDA_SUCCESS = 0 def pass_through(i: T) -> T: return i class Params(t.Generic[T]): """ A container for */** parameters. It helps to perform an operation on all the params values at the same time. """ args: tuple[T, ...] kwargs: dict[str, T] def __init__(self, *args: T, **kwargs: T): self.args = args self.kwargs = kwargs def items(self) -> t.Iterator[t.Tuple[t.Union[int, str], T]]: return itertools.chain(enumerate(self.args), self.kwargs.items()) @classmethod def from_dict(cls, data: dict[str | int, T]) -> Params[T]: return cls( *(data[k] for k in sorted(k for k in data if isinstance(k, int))), **{k: v for k, v in data.items() if isinstance(k, str)}, ) def all_equal(self) -> bool: value_iter = iter(self.items()) _, first = next(value_iter) return all(v == first for _, v in value_iter) def map(self, function: t.Callable[[T], To]) -> Params[To]: """ Apply a function to all the values in the Params and return a Params of the return values. """ args = tuple(function(a) for a in self.args) kwargs = {k: function(v) for k, v in self.kwargs.items()} return Params[To](*args, **kwargs) def map_enumerate( self, function: t.Callable[[T, Ti], To], iterable: t.Iterable[Ti] ) -> Params[To]: """ Apply function that takes two arguments with given iterable as index to all none empty field in Params. """ if self.args: return Params[To]( *tuple(function(a, b) for a, b in zip(self.args, iterable)) ) return Params[To]( **{k: function(self.kwargs[k], b) for k, b in zip(self.kwargs, iterable)} ) def iter(self: Params[tuple[t.Any, ...]]) -> t.Iterator[Params[t.Any]]: """ Iter over a Params of iterable values into a list of Params. 
All values should have the same length. """ iter_params = self.map(iter) try: while True: args = tuple(next(a) for a in iter_params.args) kwargs = {k: next(v) for k, v in iter_params.kwargs.items()} yield Params[To](*args, **kwargs) except StopIteration: pass @classmethod def agg( cls, params_list: t.Sequence[Params[T]], agg_func: t.Callable[[t.Sequence[T]], To] = pass_through, ) -> Params[To]: """ Aggregate a list of Params into a single Params by performing the aggregate function on the list of values at the same position. """ if not params_list: return Params() args = tuple( agg_func(tuple(params.args[i] for params in params_list)) for i, _ in enumerate(params_list[0].args) ) kwargs = { k: agg_func(tuple(params.kwargs[k] for params in params_list)) for k in params_list[0].kwargs } return Params(*args, **kwargs) @property def METHOD_NAME(self) -> T: """ Return a sample value (the first value of args or kwargs if args is empty) of the Params. """ if self.args: return self.args[0] return next(iter(self.kwargs.values())) PAYLOAD_META_HEADER = "Bento-Payload-Meta" def payload_paramss_to_batch_params( paramss: t.Sequence[Params[Payload]], batch_dim: int, # TODO: support mapping from arg to batch dimension ) -> tuple[Params[t.Any], list[int]]: from ..runner.container import AutoContainer _converted_params = Params.agg( paramss, agg_func=lambda i: AutoContainer.from_batch_payloads( i, batch_dim=batch_dim, ), ).iter() batched_params = next(_converted_params) indice_params: Params[list[int]] = next(_converted_params) # considering skip this check if the CPU overhead of each inference is too high if not indice_params.all_equal(): raise InvalidArgument( f"argument lengths for parameters do not matchs: {tuple(indice_params.items())}" ) return batched_params, indice_params.METHOD_NAME # This is a reference to starlette.concurrency class _StopIteration(Exception): pass def _next(iterator: t.Iterator[T]) -> T: # We can't raise `StopIteration` from within the threadpool iterator 
# and catch it outside that context, so we coerce them into a different # exception type. try: return next(iterator) except StopIteration: raise _StopIteration async def iterate_in_threadpool( iterator: t.Iterator[T], ) -> t.AsyncIterator[T]: while True: try: yield await anyio.to_thread.run_sync(_next, iterator) except _StopIteration: break