Dataset schema (28 columns):

| Column | Dtype | Length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 – 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 – 2023-08-10 11:14:58, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |

The sample records below are listed one field per line, in the column order above, with `|` separating consecutive fields.
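A minimal sketch of slicing a preview like this once the rows are loaded into a pandas DataFrame; the two rows below are placeholders for illustration, not records from the dataset:

```python
import pandas as pd

# Placeholder rows using the column names from the schema above (values are made up).
df = pd.DataFrame([
    {"repo_name": "org/example-a", "path": "/pkg/module.py", "language": "Python",
     "license_type": "permissive", "length_bytes": 4510},
    {"repo_name": "org/example-b", "path": "/scripts/tool.py", "language": "Python",
     "license_type": "no_license", "length_bytes": 912},
])

# Keep only permissively licensed Python files under 10 kB.
subset = df[(df["license_type"] == "permissive")
            & (df["language"] == "Python")
            & (df["length_bytes"] < 10_000)]
print(subset[["repo_name", "path", "length_bytes"]])
```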
f287477ff2b04066c5a7dec2d7bf312e952dfe5e
|
26060f5ea4d7efee2d03cbbd0b49c099e0f5f38a
|
/sharpy/linear/dev/linsym_uc_dncdzeta.py
|
1cbee5cabfced8dfc929bdf5af75603fa94bc091
|
[
"BSD-3-Clause"
] |
permissive
|
ImperialCollegeLondon/sharpy
|
0fcd1fba9ed2181dabc1124f9800aa75521bfc3d
|
58ddceb985bef13af3ea199a1764c8dc9b088907
|
refs/heads/main
| 2023-08-19T03:04:26.044857
| 2023-07-17T07:05:06
| 2023-07-17T07:05:06
| 70,235,936
| 106
| 55
|
BSD-3-Clause
| 2023-08-16T02:27:58
| 2016-10-07T10:11:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,041
|
py
|
linsym_uc_dncdzeta.py
|
'''
Analytical linearisation of uc*dnc/dzeta
Naming convention:
- scalar quantities are lower case, e.g. zeta
- arrays begin with upper case, e.g. Zeta_i
- 2D matrices are all upper case, e.g. AW, ZETA=[Zeta_i]
- 3D arrays (tensors) are labelled with a 3 in the name, e.g. A3
'''
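# Overview of the computation below: the panel normal is
#   n = (r02 x r13) / |r02 x r13|,  with r02 = Zeta02 - Zeta00 and r13 = Zeta03 - Zeta01,
# and the quantity being linearised is uc . n, differentiated with respect to the four
# corner points Zeta00..Zeta03 (first directly, then through r02 and r13 via the chain rule).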
import numpy as np
import sympy as sm
import sympy.tensor.array as smarr
import linfunc
##### Define symbols
### vertices vectors
# coordinates
zeta00_x,zeta00_y,zeta00_z=sm.symbols('zeta00_x,zeta00_y,zeta00_z', real=True)
zeta01_x,zeta01_y,zeta01_z=sm.symbols('zeta01_x,zeta01_y,zeta01_z', real=True)
zeta02_x,zeta02_y,zeta02_z=sm.symbols('zeta02_x,zeta02_y,zeta02_z', real=True)
zeta03_x,zeta03_y,zeta03_z=sm.symbols('zeta03_x,zeta03_y,zeta03_z', real=True)
# vectors
Zeta00,Zeta01,Zeta02,Zeta03=sm.symbols('Zeta00 Zeta01 Zeta02 Zeta03', real=True)
Zeta00=smarr.MutableDenseNDimArray([zeta00_x,zeta00_y,zeta00_z])
Zeta01=smarr.MutableDenseNDimArray([zeta01_x,zeta01_y,zeta01_z])
Zeta02=smarr.MutableDenseNDimArray([zeta02_x,zeta02_y,zeta02_z])
Zeta03=smarr.MutableDenseNDimArray([zeta03_x,zeta03_y,zeta03_z])
### external velocity at nodes - not required here
# coordinates
u00_x,u00_y,u00_z=sm.symbols('u00_x u00_y u00_z', real=True)
u01_x,u01_y,u01_z=sm.symbols('u01_x u01_y u01_z', real=True)
u02_x,u02_y,u02_z=sm.symbols('u02_x u02_y u02_z', real=True)
u03_x,u03_y,u03_z=sm.symbols('u03_x u03_y u03_z', real=True)
# vectors
U00,U01,U02,U03=sm.symbols('U00 U01 U02 U03', real=True)
U00=smarr.MutableDenseNDimArray([u00_x,u00_y,u00_z])
U01=smarr.MutableDenseNDimArray([u01_x,u01_y,u01_z])
U02=smarr.MutableDenseNDimArray([u02_x,u02_y,u02_z])
U03=smarr.MutableDenseNDimArray([u03_x,u03_y,u03_z])
### velocity at collocation point
uc_x, uc_y, uc_z=sm.symbols('uc_x uc_y uc_z', real=True)
Uc=smarr.MutableDenseNDimArray([uc_x,uc_y,uc_z])
### Compute normal to panel
# see surface.AeroGridSurface.get_panel_normal
R02=Zeta02-Zeta00
R13=Zeta03-Zeta01
Norm=linfunc.cross_product(R02,R13)
Norm=Norm/linfunc.norm2(Norm)
### check norm
assert linfunc.scalar_product(Norm,R02).simplify()==0, 'Norm is wrong'
assert linfunc.scalar_product(Norm,R13).simplify()==0, 'Norm is wrong'
assert linfunc.scalar_product(Norm,Norm).simplify()==1, 'Normal is not unit length'
### Compute normal velocity at panel
Unorm=linfunc.scalar_product(Norm,Uc)
Unorm=sm.simplify(Unorm)
### Compute derivative
dUnorm_dZeta=sm.derive_by_array(Unorm,[Zeta00,Zeta01,Zeta02,Zeta03])
#dUnorm_dZeta=linfunc.simplify(dUnorm_dZeta)
################################################################################
### exploit combined derivatives
################################################################################
dR_dZeta=sm.derive_by_array([R02,R13],[Zeta00,Zeta01,Zeta02,Zeta03])
### redefine R02,R13
r02_x,r02_y,r02_z=sm.symbols('r02_x r02_y r02_z', real=True)
r13_x,r13_y,r13_z=sm.symbols('r13_x r13_y r13_z', real=True)
R02=smarr.MutableDenseNDimArray([r02_x,r02_y,r02_z])
R13=smarr.MutableDenseNDimArray([r13_x,r13_y,r13_z])
Norm=linfunc.cross_product(R02,R13)
Norm=Norm/linfunc.norm2(Norm)
### check norm
assert linfunc.scalar_product(Norm,R02).simplify()==0, 'Norm is wrong'
assert linfunc.scalar_product(Norm,R13).simplify()==0, 'Norm is wrong'
assert linfunc.scalar_product(Norm,Norm).simplify()==1, 'Normal is not unit length'
### Compute normal velocity at panel
Unorm=linfunc.scalar_product(Norm,Uc)
Unorm=sm.simplify(Unorm)
# derivative
dUnorm_dR=sm.derive_by_array(Unorm,[R02,R13])
### shorten equations
Der=dUnorm_dR
eq_crR13Uc=linfunc.cross_product(R13,Uc)
eq_crR02Uc=linfunc.cross_product(R02,Uc)
eq_crR02R13=linfunc.cross_product(R02,R13)
crR13Uc_x,crR13Uc_y,crR13Uc_z=sm.symbols('crR13Uc_x crR13Uc_y crR13Uc_z',real=True)
crR02Uc_x,crR02Uc_y,crR02Uc_z=sm.symbols('crR02Uc_x crR02Uc_y crR02Uc_z',real=True)
crR02R13_x,crR02R13_y,crR02R13_z=sm.symbols('crR02R13_x crR02R13_y crR02R13_z',real=True)
crR13Uc=smarr.MutableDenseNDimArray([crR13Uc_x,crR13Uc_y,crR13Uc_z])
crR02Uc=smarr.MutableDenseNDimArray([crR02Uc_x,crR02Uc_y,crR02Uc_z])
crR02R13=smarr.MutableDenseNDimArray([crR02R13_x,crR02R13_y,crR02R13_z])
for cc in range(3):
    Der=Der.subs(eq_crR02Uc[cc],crR02Uc[cc])
    Der=Der.subs(eq_crR13Uc[cc],crR13Uc[cc])
    Der=Der.subs(eq_crR02R13[cc],crR02R13[cc])
norm_crR02R13=sm.symbols('norm_crR02R13',real=True)
cub_crR02R13=sm.symbols('cub_crR02R13',real=True)
Der=Der.subs(sm.sqrt(crR02R13_x**2 + crR02R13_y**2 + crR02R13_z**2),norm_crR02R13)
Der=Der.subs(norm_crR02R13**3,cub_crR02R13)
# other products
eq_Acr=linfunc.cross_product(crR02R13,R13)
Acr_x,Acr_y,Acr_z=sm.symbols('Acr_x Acr_y Acr_z',real=True)
Acr=sm.MutableDenseNDimArray([Acr_x,Acr_y,Acr_z])
for cc in range(3):
    Der=Der.subs(eq_Acr[cc],Acr[cc])
eq_Bcr=linfunc.cross_product(crR02R13,R02)
Bcr_x,Bcr_y,Bcr_z=sm.symbols('Bcr_x Bcr_y Bcr_z',real=True)
Bcr=sm.MutableDenseNDimArray([Bcr_x,Bcr_y,Bcr_z])
for cc in range(3):
    Der=Der.subs(eq_Bcr[cc],Bcr[cc])
eq_Cdot=linfunc.scalar_product(crR02R13,Uc)
Cdot=sm.symbols('Cdot',real=True)
Der=Der.subs(eq_Cdot,Cdot)
|
bfc3a671bed4bf3e1d24505f769887418cd2815f
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Social_Network_Analysis_for_Starupts/chapter4/hijackers.py
|
33f1aa8e0f9dae850bdc919c67ff17b5784e4d31
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
hijackers.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
hijackers.py
Created by Maksim Tsvetovat on 2011-08-16.
Copyright (c) 2011 Maksim Tsvetovat. All rights reserved.
"""
import sys
import os
import csv ## we'll use the built-in CSV library
import networkx as net
import multimode as mm
import triadic
# open the file
in_file=csv.reader(open('9_11_edgelist.txt','rb'))
g=net.Graph()
for line in in_file:
    g.add_edge(line[0],line[1],weight=line[2],conf=line[3])
#first, let's make sure that all nodes in the graph have the 'flight' attribute
for n in g.nodes_iter(): g.node[n]['flight']='None'
attrb=csv.reader(open('9_11_attrib.txt','rb'))
for line in attrb:
    g.node[line[0]]['flight']=line[1]
# Connected_component_subgraphs() returns a list of components, sorted largest to smallest
components=net.connected_component_subgraphs(g)
# pick the first and largest component
cc = components[0]
# type-string tells the function what attribute to differentiate on
mm.plot_multimode(cc,type_string='flight')
# run triadic analysis
census, node_census = triadic.triadic_census(cc)
|
5cc0b0d4774aa296ae0fd46d219042bd10f6a9ef
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/tests/system/providers/google/cloud/gcs/example_sftp_to_gcs.py
|
80b53774d55f26727a6249fa12e5b5dcd1247cb0
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,510
|
py
|
example_sftp_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google Cloud Storage to SFTP transfer operators.
"""
from __future__ import annotations
import os
from datetime import datetime
from pathlib import Path
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.cloud.transfers.sftp_to_gcs import SFTPToGCSOperator
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT")
DAG_ID = "example_sftp_to_gcs"
BUCKET_NAME = f"bucket-{DAG_ID}-{ENV_ID}"
TMP_PATH = "tmp"
DIR = "tests_sftp_hook_dir"
SUBDIR = "subdir"
OBJECT_SRC_1 = "parent-1.bin"
OBJECT_SRC_2 = "parent-2.bin"
CURRENT_FOLDER = Path(__file__).parent
LOCAL_PATH = str(Path(CURRENT_FOLDER) / "resources")
FILE_LOCAL_PATH = str(Path(LOCAL_PATH) / TMP_PATH / DIR)
FILE_NAME = "tmp.tar.gz"
with models.DAG(
    DAG_ID,
    schedule="@once",
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag:
    create_bucket = GCSCreateBucketOperator(task_id="create_bucket", bucket_name=BUCKET_NAME)
    unzip_file = BashOperator(
        task_id="unzip_data_file", bash_command=f"tar xvf {LOCAL_PATH}/{FILE_NAME} -C {LOCAL_PATH}"
    )
    # [START howto_operator_sftp_to_gcs_copy_single_file]
    copy_file_from_sftp_to_gcs = SFTPToGCSOperator(
        task_id="file-copy-sftp-to-gcs",
        source_path=f"{FILE_LOCAL_PATH}/{OBJECT_SRC_1}",
        destination_bucket=BUCKET_NAME,
    )
    # [END howto_operator_sftp_to_gcs_copy_single_file]
    # [START howto_operator_sftp_to_gcs_move_single_file_destination]
    move_file_from_sftp_to_gcs_destination = SFTPToGCSOperator(
        task_id="file-move-sftp-to-gcs-destination",
        source_path=f"{FILE_LOCAL_PATH}/{OBJECT_SRC_2}",
        destination_bucket=BUCKET_NAME,
        destination_path="destination_dir/destination_filename.bin",
        move_object=True,
    )
    # [END howto_operator_sftp_to_gcs_move_single_file_destination]
    # [START howto_operator_sftp_to_gcs_copy_directory]
    copy_directory_from_sftp_to_gcs = SFTPToGCSOperator(
        task_id="dir-copy-sftp-to-gcs",
        source_path=f"{FILE_LOCAL_PATH}/{SUBDIR}/*",
        destination_bucket=BUCKET_NAME,
    )
    # [END howto_operator_sftp_to_gcs_copy_directory]
    # [START howto_operator_sftp_to_gcs_move_specific_files]
    move_specific_files_from_sftp_to_gcs = SFTPToGCSOperator(
        task_id="dir-move-specific-files-sftp-to-gcs",
        source_path=f"{FILE_LOCAL_PATH}/{SUBDIR}/*.bin",
        destination_bucket=BUCKET_NAME,
        destination_path="specific_files/",
        move_object=True,
    )
    # [END howto_operator_sftp_to_gcs_move_specific_files]
    delete_bucket = GCSDeleteBucketOperator(
        task_id="delete_bucket", bucket_name=BUCKET_NAME, trigger_rule=TriggerRule.ALL_DONE
    )
    chain(
        # TEST SETUP
        create_bucket,
        unzip_file,
        # TEST BODY
        copy_file_from_sftp_to_gcs,
        move_file_from_sftp_to_gcs_destination,
        copy_directory_from_sftp_to_gcs,
        move_specific_files_from_sftp_to_gcs,
        # TEST TEARDOWN
        delete_bucket,
    )

    from tests.system.utils.watcher import watcher

    # This test needs watcher in order to properly mark success/failure
    # when "tearDown" task with trigger rule is part of the DAG
    list(dag.tasks) >> watcher()

from tests.system.utils import get_test_run  # noqa: E402

# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
03b003710097d91372b8d57d6f3cf700654ae4f4
|
5a6ccde5f37cc86b6fc0812b2bf40f42eab23906
|
/A-set/612A.Splitting Text.py
|
7fde6e905d38de66f9879199485d02bc1956d9fa
|
[] |
no_license
|
Waqar-107/Codeforces
|
23f2b1edffb85f6f020107f03e09a455d3e6e792
|
f0d2f25aa6a09c06083b82c39cdf3288ec2eecba
|
refs/heads/master
| 2023-03-09T07:55:46.583363
| 2023-03-04T09:57:44
| 2023-03-04T09:57:44
| 82,915,896
| 196
| 138
| null | 2023-02-11T22:06:20
| 2017-02-23T10:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
612A.Splitting Text.py
|
# from dust i have come dust i will be
n,p,q=map(int,input().split())
s=input()
x=0
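# Search for a split of the n characters into x chunks of length p and y chunks of
# length q: try each x with p*x <= n and take the first one where the remainder
# divides evenly by q; if no such pair exists, print -1.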
while p*x<=n:
    if (n-p*x)%q==0:
        y=(n-p*x)//q
        print(x+y)
        l=0
        while x>0:
            x-=1
            for i in range(p):
                print(s[l],end='')
                l+=1
            print()
        while y>0:
            y-=1
            for i in range(q):
                print(s[l],end='')
                l+=1
            print()
        exit(0)
    x+=1
print(-1)
|
a366e85cde21fe661c307a2eb1506604a8aba75f
|
8d37f57da7c991381c9fc3d7d5d3c9f610ac10d4
|
/tests/samples/variables_classes.py
|
328e3572f71235fb408e729060c8f1acab82fbc7
|
[
"MIT"
] |
permissive
|
alexmojaki/snoop
|
b91615844ed9f8c9f34071774ed4aaeb197d3a6a
|
98102bde87d092640828590927ef144d069dc56f
|
refs/heads/master
| 2023-08-21T11:59:00.497603
| 2022-12-22T13:48:56
| 2022-12-22T13:48:56
| 186,476,740
| 975
| 40
|
MIT
| 2022-12-22T13:47:56
| 2019-05-13T18:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 473
|
py
|
variables_classes.py
|
from collections import OrderedDict
import snoop
class WithSlots(object):
    __slots__ = ('x', 'y')

    def __init__(self):
        self.x = 3
        self.y = 4

@snoop.snoop(watch=(
    snoop.Keys('_d', exclude='c'),
    snoop.Attrs('_s'),
    snoop.Indices('_lst')[-3:],
    snoop.Attrs('_lst'),  # doesn't have attributes
))
def main():
    _d = OrderedDict([('a', 1), ('b', 2), ('c', 'ignore')])
    _s = WithSlots()
    _lst = list(range(1000))
|
835c20a08ad5469a9e28e6a0944731782c7ef693
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_node/test/request_handlers/rich_schema/test_rich_schema_handler.py
|
8704f7e847446e3da4da548010712cbcb56073f7
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 943
|
py
|
test_rich_schema_handler.py
|
import pytest
from indy_common.constants import ENDORSER
from indy_node.server.request_handlers.domain_req_handlers.rich_schema.rich_schema_handler import RichSchemaHandler
from indy_node.test.request_handlers.helper import add_to_idr
from indy_node.test.request_handlers.rich_schema.helper import rich_schema_request
from plenum.common.constants import TRUSTEE
@pytest.fixture()
def rich_schema_handler(db_manager, write_auth_req_validator):
    return RichSchemaHandler(db_manager, write_auth_req_validator)

@pytest.fixture()
def rich_schema_req(rich_schema_handler):
    req = rich_schema_request()
    add_to_idr(rich_schema_handler.database_manager.idr_cache, req.identifier, TRUSTEE)
    add_to_idr(rich_schema_handler.database_manager.idr_cache, req.endorser, ENDORSER)
    return req

def test_schema_dynamic_validation_passes(rich_schema_handler, rich_schema_req):
    rich_schema_handler.dynamic_validation(rich_schema_req, 0)
|
1c501f03cab1ac47f87eb09aaef108a89468f84c
|
d48e09166db1ff0dae2c8a4ddbbe453606689081
|
/panda/tests/safety/test_honda.py
|
a5eb04ad6632c53ad685f06cbce200be4f07a383
|
[
"MIT"
] |
permissive
|
Gernby/raspberry-pilot
|
a8857fdb2a50ade237d657ccd9d1049c5aa65515
|
0909e7594dff1c6fb524b5502ce3258f1969b6a5
|
refs/heads/resonant-rails
| 2023-04-06T10:09:35.457160
| 2023-01-06T01:57:31
| 2023-01-06T01:57:31
| 242,575,807
| 141
| 62
|
MIT
| 2023-03-05T05:22:49
| 2020-02-23T19:15:12
|
C
|
UTF-8
|
Python
| false
| false
| 10,898
|
py
|
test_honda.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
import libpandasafety_py
from panda import Panda
MAX_BRAKE = 255
INTERCEPTOR_THRESHOLD = 328
class TestHondaSafety(unittest.TestCase):
    @classmethod
    def setUp(cls):
        cls.safety = libpandasafety_py.libpandasafety
        cls.safety.safety_set_mode(Panda.SAFETY_HONDA, 0)
        cls.safety.init_tests_honda()

    def _send_msg(self, bus, addr, length):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = addr << 21
        to_send[0].RDTR = length
        to_send[0].RDTR = bus << 4
        return to_send

    def _speed_msg(self, speed):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0x158 << 21
        to_send[0].RDLR = speed
        return to_send

    def _button_msg(self, buttons, msg):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = msg << 21
        to_send[0].RDLR = buttons << 5
        is_panda_black = self.safety.get_hw_type() == 3  # black_panda
        honda_bosch_hardware = self.safety.get_honda_bosch_hardware()
        bus = 1 if is_panda_black and honda_bosch_hardware else 0
        to_send[0].RDTR = bus << 4
        return to_send

    def _brake_msg(self, brake):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0x17C << 21
        to_send[0].RDHR = 0x200000 if brake else 0
        return to_send

    def _alt_brake_msg(self, brake):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0x1BE << 21
        to_send[0].RDLR = 0x10 if brake else 0
        return to_send

    def _gas_msg(self, gas):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0x17C << 21
        to_send[0].RDLR = 1 if gas else 0
        return to_send

    def _send_brake_msg(self, brake):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0x1FA << 21
        to_send[0].RDLR = ((brake & 0x3) << 14) | ((brake & 0x3FF) >> 2)
        return to_send

    def _send_interceptor_msg(self, gas, addr):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = addr << 21
        to_send[0].RDTR = 6
        gas2 = gas * 2
        to_send[0].RDLR = ((gas & 0xff) << 8) | ((gas & 0xff00) >> 8) | \
                          ((gas2 & 0xff) << 24) | ((gas2 & 0xff00) << 8)
        return to_send

    def _send_steer_msg(self, steer):
        to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
        to_send[0].RIR = 0xE4 << 21
        to_send[0].RDLR = steer
        return to_send

    def test_default_controls_not_allowed(self):
        self.assertFalse(self.safety.get_controls_allowed())

    def test_resume_button(self):
        RESUME_BTN = 4
        self.safety.set_controls_allowed(0)
        self.safety.safety_rx_hook(self._button_msg(RESUME_BTN, 0x1A6))
        self.assertTrue(self.safety.get_controls_allowed())

    def test_set_button(self):
        SET_BTN = 3
        self.safety.set_controls_allowed(0)
        self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x1A6))
        self.assertTrue(self.safety.get_controls_allowed())

    def test_cancel_button(self):
        CANCEL_BTN = 2
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._button_msg(CANCEL_BTN, 0x1A6))
        self.assertFalse(self.safety.get_controls_allowed())

    def test_sample_speed(self):
        self.assertEqual(0, self.safety.get_honda_moving())
        self.safety.safety_rx_hook(self._speed_msg(100))
        self.assertEqual(1, self.safety.get_honda_moving())

    def test_prev_brake(self):
        self.assertFalse(self.safety.get_honda_brake_pressed_prev())
        self.safety.safety_rx_hook(self._brake_msg(True))
        self.assertTrue(self.safety.get_honda_brake_pressed_prev())

    def test_disengage_on_brake(self):
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._brake_msg(1))
        self.assertFalse(self.safety.get_controls_allowed())

    def test_alt_disengage_on_brake(self):
        self.safety.set_honda_alt_brake_msg(1)
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._alt_brake_msg(1))
        self.assertFalse(self.safety.get_controls_allowed())
        self.safety.set_honda_alt_brake_msg(0)
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._alt_brake_msg(1))
        self.assertTrue(self.safety.get_controls_allowed())

    def test_allow_brake_at_zero_speed(self):
        # Brake was already pressed
        self.safety.safety_rx_hook(self._brake_msg(True))
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._brake_msg(True))
        self.assertTrue(self.safety.get_controls_allowed())
        self.safety.safety_rx_hook(self._brake_msg(False))  # reset no brakes

    def test_not_allow_brake_when_moving(self):
        # Brake was already pressed
        self.safety.safety_rx_hook(self._brake_msg(True))
        self.safety.safety_rx_hook(self._speed_msg(100))
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._brake_msg(True))
        self.assertFalse(self.safety.get_controls_allowed())

    def test_prev_gas(self):
        self.safety.safety_rx_hook(self._gas_msg(False))
        self.assertFalse(self.safety.get_honda_gas_prev())
        self.safety.safety_rx_hook(self._gas_msg(True))
        self.assertTrue(self.safety.get_honda_gas_prev())

    def test_prev_gas_interceptor(self):
        self.safety.safety_rx_hook(self._send_interceptor_msg(0x0, 0x201))
        self.assertFalse(self.safety.get_gas_interceptor_prev())
        self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
        self.assertTrue(self.safety.get_gas_interceptor_prev())
        self.safety.safety_rx_hook(self._send_interceptor_msg(0x0, 0x201))
        self.safety.set_gas_interceptor_detected(False)

    def test_disengage_on_gas(self):
        for long_controls_allowed in [0, 1]:
            self.safety.set_long_controls_allowed(long_controls_allowed)
            self.safety.safety_rx_hook(self._gas_msg(0))
            self.safety.set_controls_allowed(1)
            self.safety.safety_rx_hook(self._gas_msg(1))
            if long_controls_allowed:
                self.assertFalse(self.safety.get_controls_allowed())
            else:
                self.assertTrue(self.safety.get_controls_allowed())
        self.safety.set_long_controls_allowed(True)

    def test_allow_engage_with_gas_pressed(self):
        self.safety.safety_rx_hook(self._gas_msg(1))
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._gas_msg(1))
        self.assertTrue(self.safety.get_controls_allowed())

    def test_disengage_on_gas_interceptor(self):
        for long_controls_allowed in [0, 1]:
            for g in range(0, 0x1000):
                self.safety.set_long_controls_allowed(long_controls_allowed)
                self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
                self.safety.set_controls_allowed(True)
                self.safety.safety_rx_hook(self._send_interceptor_msg(g, 0x201))
                remain_enabled = (not long_controls_allowed or g <= INTERCEPTOR_THRESHOLD)
                self.assertEqual(remain_enabled, self.safety.get_controls_allowed())
                self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
                self.safety.set_gas_interceptor_detected(False)
        self.safety.set_long_controls_allowed(True)

    def test_allow_engage_with_gas_interceptor_pressed(self):
        self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
        self.assertTrue(self.safety.get_controls_allowed())
        self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
        self.safety.set_gas_interceptor_detected(False)

    def test_brake_safety_check(self):
        for fwd_brake in [False, True]:
            self.safety.set_honda_fwd_brake(fwd_brake)
            for long_controls_allowed in [0, 1]:
                self.safety.set_long_controls_allowed(long_controls_allowed)
                for brake in np.arange(0, MAX_BRAKE + 10, 1):
                    for controls_allowed in [True, False]:
                        self.safety.set_controls_allowed(controls_allowed)
                        if fwd_brake:
                            send = False  # block openpilot brake msg when fwd'ing stock msg
                        elif controls_allowed and long_controls_allowed:
                            send = MAX_BRAKE >= brake >= 0
                        else:
                            send = brake == 0
                        self.assertEqual(send, self.safety.safety_tx_hook(self._send_brake_msg(brake)))
        self.safety.set_long_controls_allowed(True)
        self.safety.set_honda_fwd_brake(False)

    def test_gas_interceptor_safety_check(self):
        for long_controls_allowed in [0, 1]:
            self.safety.set_long_controls_allowed(long_controls_allowed)
            for gas in np.arange(0, 4000, 100):
                for controls_allowed in [True, False]:
                    self.safety.set_controls_allowed(controls_allowed)
                    if controls_allowed and long_controls_allowed:
                        send = True
                    else:
                        send = gas == 0
                    self.assertEqual(send, self.safety.safety_tx_hook(self._send_interceptor_msg(gas, 0x200)))
        self.safety.set_long_controls_allowed(True)

    def test_steer_safety_check(self):
        self.safety.set_controls_allowed(0)
        self.assertTrue(self.safety.safety_tx_hook(self._send_steer_msg(0x0000)))
        self.assertFalse(self.safety.safety_tx_hook(self._send_steer_msg(0x1000)))

    def test_spam_cancel_safety_check(self):
        RESUME_BTN = 4
        SET_BTN = 3
        CANCEL_BTN = 2
        BUTTON_MSG = 0x296
        self.safety.set_honda_bosch_hardware(1)
        self.safety.set_controls_allowed(0)
        self.assertTrue(self.safety.safety_tx_hook(self._button_msg(CANCEL_BTN, BUTTON_MSG)))
        self.assertFalse(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))
        self.assertFalse(self.safety.safety_tx_hook(self._button_msg(SET_BTN, BUTTON_MSG)))
        # do not block resume if we are engaged already
        self.safety.set_controls_allowed(1)
        self.assertTrue(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))

    def test_fwd_hook(self):
        buss = list(range(0x0, 0x3))
        msgs = list(range(0x1, 0x800))
        long_controls_allowed = [0, 1]
        fwd_brake = [False, True]
        self.safety.set_honda_bosch_hardware(0)
        for f in fwd_brake:
            self.safety.set_honda_fwd_brake(f)
            for l in long_controls_allowed:
                self.safety.set_long_controls_allowed(l)
                blocked_msgs = [0xE4, 0x194, 0x33D]
                if l:
                    blocked_msgs += [0x30C, 0x39F]
                if not f:
                    blocked_msgs += [0x1FA]
                for b in buss:
                    for m in msgs:
                        if b == 0:
                            fwd_bus = 2
                        elif b == 1:
                            fwd_bus = -1
                        elif b == 2:
                            fwd_bus = -1 if m in blocked_msgs else 0
                        # assume len 8
                        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, self._send_msg(b, m, 8)))
        self.safety.set_long_controls_allowed(True)
        self.safety.set_honda_fwd_brake(False)

if __name__ == "__main__":
    unittest.main()
|
4136638bf46777bf1cc168db6b898f7ad60bc4ce
|
f4393e02de84da11a2c4562d44a0f5b9996f5150
|
/hard_negative_mining.py
|
4d0498125102dec96f7e3234dcff971befc02532
|
[] |
no_license
|
CuongNN218/zalo_ltr_2021
|
809637ac246c95e857808893dc96895e917ebe47
|
b63aa805052c068ab3a18811ec344598bfe845c4
|
refs/heads/main
| 2023-05-22T13:43:49.015440
| 2021-12-20T17:25:29
| 2021-12-20T17:25:29
| 437,156,924
| 162
| 56
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,137
|
py
|
hard_negative_mining.py
|
import pickle
import os
import numpy as np
import json
import torch
from tqdm import tqdm
from rank_bm25 import *
import argparse
import warnings
from sentence_transformers import SentenceTransformer, util
warnings.filterwarnings("ignore")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default="saved_model/bm25_Plus_04_06_model_full_manual_stopword", type=str)
    parser.add_argument("--sentence_bert_path", default="", type=str, help="path to round 1 sentence bert model")
    parser.add_argument("--data_path", default="zac2021-ltr-data", type=str, help="path to input data")
    parser.add_argument("--save_path", default="pair_data", type=str)
    parser.add_argument("--top_k", default=20, type=int, help="top k hard negative mining")
    parser.add_argument("--path_doc_refer", default="generated_data/doc_refers_saved.pkl", type=str, help="path to doc refers")
    parser.add_argument("--path_legal", default="generated_data/legal_dict.json", type=str, help="path to legal dict")
    args = parser.parse_args()

    # load training data from json
    data = json.load(open(os.path.join(args.data_path, "train_question_answer.json")))
    training_data = data["items"]
    print(len(training_data))

    # load bm25 model
    with open(args.model_path, "rb") as bm_file:
        bm25 = pickle.load(bm_file)
    with open(args.path_doc_refer, "rb") as doc_refer_file:
        doc_refers = pickle.load(doc_refer_file)
    doc_path = os.path.join(args.path_legal)
    df = open(doc_path)
    doc_data = json.load(df)

    # load hard negative model
    model = SentenceTransformer(args.sentence_bert_path)

    # add embedding for data
    # if you already have data with encoded sentence uncomment line 47 - 54
    import pickle
    embed_list = []
    for k, v in tqdm(doc_data.items()):
        embed = model.encode(v['title'] + ' ' + v['text'])
        doc_data[k]['embedding'] = embed
    with open('legal_corpus_vibert_embedding.pkl', 'wb') as pkl:
        pickle.dump(doc_data, pkl)
    with open('legal_corpus_vibert_embedding.pkl', 'rb') as pkl:
        data = pickle.load(pkl)

    pred_list = []
    top_k = args.top_k
    save_pairs = []
    for idx, item in tqdm(enumerate(training_data)):
        question_id = item["question_id"]
        question = item["question"]
        relevant_articles = item["relevant_articles"]
        actual_positive = len(relevant_articles)
        for article in relevant_articles:
            save_dict = {}
            save_dict["question"] = question
            concat_id = article["law_id"] + "_" + article["article_id"]
            save_dict["document"] = doc_data[concat_id]["title"] + " " + doc_data[concat_id]["text"]
            save_dict["relevant"] = 1
            save_pairs.append(save_dict)
        encoded_question = model.encode(question)
        list_embs = []
        for k, v in data.items():
            emb_2 = torch.tensor(v['embedding']).unsqueeze(0)
            list_embs.append(emb_2)
        matrix_emb = torch.cat(list_embs, dim=0)
        all_cosine = util.cos_sim(encoded_question, matrix_emb).numpy().squeeze(0)
        predictions = np.argpartition(all_cosine, len(all_cosine) - top_k)[-top_k:]
        for idx, idx_pred in enumerate(predictions):
            pred = doc_refers[idx_pred]
            check = 0
            for article in relevant_articles:
                check += 1 if pred[0] == article["law_id"] and pred[1] == article["article_id"] else 0
            if check == 0:
                save_dict = {}
                save_dict["question"] = question
                concat_id = pred[0] + "_" + pred[1]
                save_dict["document"] = doc_data[concat_id]["title"] + " " + doc_data[concat_id]["text"]
                save_dict["relevant"] = 0
                save_pairs.append(save_dict)

    save_path = args.save_path
    os.makedirs(save_path, exist_ok=True)
    with open(os.path.join(save_path, f"save_pairs_vibert_top{top_k}.pkl"), "wb") as pair_file:
        pickle.dump(save_pairs, pair_file)
|
db2a3eb1a4d100654846eaebe6b6c27d9d58832c
|
862ecba769c82b7e4dd83f14c6e99aedbbef7e98
|
/fnss/__init__.py
|
ebd581c63a6efce40a533873c13e8fe5a9a1e2b7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
fnss/fnss
|
3f1611066227c9f4acc2b394f53667d008bf84fa
|
cf4cec4ceb82c0bfe3032c7cef1754d451de8216
|
refs/heads/master
| 2023-03-10T00:44:03.803279
| 2023-02-22T01:41:28
| 2023-02-22T01:41:28
| 7,737,956
| 128
| 38
|
NOASSERTION
| 2023-02-22T01:41:29
| 2013-01-21T18:36:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
__init__.py
|
"""
The Fast Network Simulation Setup (FNSS) core library is a Python library
providing a set of features allowing network researchers and engineers to
simplify the setup of a network simulation.
These features include the ability to:
* Parse a topology from a dataset, a topology generator or generate it
according to a number of synthetic models.
* Apply link capacities, link weights, link delays and buffer sizes.
* Deploy protocol stacks and applications on network nodes.
* Generate traffic matrices.
* Generate event schedules.
This core library includes adapters that allow users to export topologies to
ns-2, Mininet and Autonetkit.
In addition, the core library can be used in conjunction with the FNSS Java and
C++ API or the ns-3 adapter to export topologies, traffic matrices and event
schedules to the desired target simulator or emulator.
"""
# check Python version
import sys
if sys.version_info[:2] < (2, 7):
    m = "Python version 2.7 or later is required for FNSS (%d.%d detected)."
    raise ImportError(m % sys.version_info[:2])
del sys
# Import release information
import fnss.release as release
__author__ = release.author
__version__ = release.version
__license__ = release.license_short
# import all subpackages and modules
from fnss.topologies import *
from fnss.netconfig import *
from fnss.traffic import *
from fnss.adapters import *
from fnss.units import *
|
64ec8770b74a2717905e00eaaf39db7c143271e6
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_apis/H2OAssembly/pyunit_h2oassembly_fit.py
|
124049cee8e59def7db3a0b90872c83942cdcac4
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
pyunit_h2oassembly_fit.py
|
import sys
sys.path.insert(1,"../../")
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from h2o.assembly import *
from h2o.transforms.preprocessing import *
def h2oassembly_fit():
    """
    Python API test: H2OAssembly.fit(fr)
    Copied from pyunit_assembly_demo.py
    """
    fr = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_wheader.csv"),
                         col_types=["numeric","numeric","numeric","numeric","string"])  # import data
    assembly = H2OAssembly(steps=[("col_select", H2OColSelect(["sepal_len", "petal_len", "class"])),
                                  ("cos_sep_len", H2OColOp(op=H2OFrame.cos, col="sepal_len", inplace=True)),
                                  ("str_cnt_species", H2OColOp(op=H2OFrame.countmatches, col="class", inplace=False,
                                                               pattern="s"))])  # string operation
    assert_is_type(assembly, H2OAssembly)
    result = assembly.fit(fr)  # fit the assembly, which performs column select
    assert_is_type(result, H2OFrame)
    assert result.ncol==4, "H2OAssembly.fit() command is not working"  # selects 3 columns, added one from countmatches.

if __name__ == "__main__":
    pyunit_utils.standalone_test(h2oassembly_fit)
else:
    h2oassembly_fit()
|
90eb736cd81231aead116d9a5f903411be83c4c1
|
684df684759bfbef64b0fbcde9eb2b898a2e2061
|
/swagger-gen/python/test/test_linear_market_api.py
|
be4d9e8add06a6e28d556205cdfb47e68c097bdf
|
[] |
no_license
|
bybit-exchange/api-connectors
|
ae13caecb98c82460c0a24b910f2e9c1eb80b9bc
|
cc021a371bde30c2fd282be9fdc8eef0ed0e362e
|
refs/heads/master
| 2021-12-31T10:23:24.429638
| 2021-11-24T16:37:18
| 2021-11-24T16:37:18
| 213,896,494
| 192
| 185
| null | 2023-03-03T12:50:12
| 2019-10-09T11:10:19
|
C#
|
UTF-8
|
Python
| false
| false
| 912
|
py
|
test_linear_market_api.py
|
# coding: utf-8
"""
Bybit API
## REST API for the Bybit Exchange. Base URI: [https://api.bybit.com] # noqa: E501
OpenAPI spec version: 0.2.11
Contact: support@bybit.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.linear_market_api import LinearMarketApi # noqa: E501
from swagger_client.rest import ApiException
class TestLinearMarketApi(unittest.TestCase):
    """LinearMarketApi unit test stubs"""

    def setUp(self):
        self.api = swagger_client.api.linear_market_api.LinearMarketApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_linear_market_trading(self):
        """Test case for linear_market_trading

        Get recent trades  # noqa: E501
        """
        pass

if __name__ == '__main__':
    unittest.main()
|
74a53bb2575fb64827af397e94e74acc9e638dde
|
bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
|
/aries_cloudagent/ledger/endpoint_type.py
|
2f5f7da59247b5366708190e9bf8b41c6f0af12e
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
hyperledger/aries-cloudagent-python
|
f25d961e0717a4d703bf43df3e4b4bc8ec07b908
|
39cac36d8937ce84a9307ce100aaefb8bc05ec04
|
refs/heads/main
| 2023-09-01T15:37:05.353674
| 2023-08-31T14:13:06
| 2023-08-31T14:13:06
| 193,556,007
| 370
| 530
|
Apache-2.0
| 2023-09-14T17:59:34
| 2019-06-24T18:12:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
endpoint_type.py
|
"""Ledger utilities."""
from collections import namedtuple
from enum import Enum
EndpointTypeName = namedtuple("EndpointTypeName", "w3c indy")
class EndpointType(Enum):
    """Enum for endpoint/service types."""

    ENDPOINT = EndpointTypeName("Endpoint", "endpoint")
    PROFILE = EndpointTypeName("Profile", "profile")
    LINKED_DOMAINS = EndpointTypeName("LinkedDomains", "linked_domains")

    @staticmethod
    def get(name: str) -> "EndpointType":
        """Return enum instance corresponding to input string."""
        if name is None:
            return None
        for endpoint_type in EndpointType:
            if name.replace("_", "").lower() == endpoint_type.w3c.lower():
                return endpoint_type
        return None

    @property
    def w3c(self):
        """W3C name of endpoint type: externally-facing."""
        return self.value.w3c

    @property
    def indy(self):
        """Indy name of endpoint type: internally-facing, on ledger and in wallet."""
        return self.value.indy
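# Usage sketch (illustrative, following the mapping above): EndpointType.get("linked_domains")
# and EndpointType.get("LinkedDomains") both resolve to EndpointType.LINKED_DOMAINS, whose
# .w3c is "LinkedDomains" and .indy is "linked_domains"; unrecognised names return None.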
|
9cb03e49709c619b614f4abf8a637511a1716295
|
0b98732dcd3dd94a97555a8f3e8dd3524bb8ec86
|
/mmdet/datasets/loader/build_loader.py
|
1843713738ef417b17aa090b2d69562b131a8626
|
[
"Apache-2.0"
] |
permissive
|
hasanirtiza/Pedestron
|
e89fea2ec676f150a7266f6b65963dd6c4ec35c9
|
8ab23ec38982cfaf0ae82c77c30f10b2fff62d12
|
refs/heads/master
| 2023-08-06T02:53:06.368937
| 2023-04-06T13:46:27
| 2023-04-06T13:46:27
| 247,410,025
| 723
| 161
|
Apache-2.0
| 2022-10-02T10:17:44
| 2020-03-15T05:52:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
build_loader.py
|
import platform
from functools import partial
from mmcv.runner import get_dist_info
from mmcv.parallel import collate
from torch.utils.data import DataLoader
from .sampler import GroupSampler, DistributedGroupSampler, DistributedSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))

def build_dataloader(dataset,
                     imgs_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     **kwargs):
    shuffle = kwargs.get('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        **kwargs)
    return data_loader
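# Illustrative call (the dataset name is a placeholder, not part of this module): for
# single-GPU, non-distributed training something like
#   loader = build_dataloader(my_dataset, imgs_per_gpu=2, workers_per_gpu=2, num_gpus=1, dist=False)
# yields a DataLoader with batch_size = num_gpus * imgs_per_gpu = 2 and a GroupSampler
# when shuffling is enabled.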
|
f2ea3f33a920d9dab10435de1ce94a26ef5288e8
|
ceeb1dd2c71c6c94cf0c4581218fd02f64b526dd
|
/extruct/microformat.py
|
dab6a11478f22b32fa12d50865de7ce4ae4952d4
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
scrapinghub/extruct
|
f2a79c45117bda0a819f50f51c4edb97eb0c75f7
|
60538125f743c7c357fd170abcf94440ea45a132
|
refs/heads/master
| 2023-09-03T01:49:58.318356
| 2023-07-07T11:18:01
| 2023-07-07T11:18:01
| 44,965,223
| 788
| 122
|
BSD-3-Clause
| 2023-07-04T10:08:31
| 2015-10-26T11:51:21
|
Python
|
UTF-8
|
Python
| false
| false
| 372
|
py
|
microformat.py
|
# mypy: disallow_untyped_defs=False
import mf2py
class MicroformatExtractor:
    def extract(self, htmlstring, base_url=None, encoding="UTF-8"):
        return list(self.extract_items(htmlstring, base_url=base_url))

    def extract_items(self, html, base_url=None):
        for obj in mf2py.parse(html, html_parser="lxml", url=base_url)["items"]:
            yield obj
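# Usage sketch (illustrative): MicroformatExtractor().extract('<a class="h-card" href="/">Ann</a>')
# returns the mf2py "items" list for the markup, e.g. a single h-card item for this fragment.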
|
5f0796fcf7d040ad674bf02f01fdc9cb327211e6
|
71f97e5a7a9a41090cc160fd3e77bbdb05fbc43c
|
/bids/variables/tests/test_entities.py
|
fcf0ecae3ff31429852169aae06f6ef44620fce0
|
[
"MIT"
] |
permissive
|
bids-standard/pybids
|
731a86c4d4a28b383da347010be5535bf52dbd4b
|
013067e4ddabb3f8d9e84b0f63675791a0c3becf
|
refs/heads/master
| 2023-08-25T13:50:38.525154
| 2023-08-21T15:58:26
| 2023-08-21T15:58:26
| 62,954,915
| 138
| 78
|
MIT
| 2023-09-11T13:24:43
| 2016-07-09T15:08:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
test_entities.py
|
from bids.layout import BIDSLayout
from bids.variables.entities import RunNode, Node, NodeIndex
from bids.variables import load_variables
from bids.variables import BIDSRunVariableCollection
import pytest
from os.path import join
from bids.tests import get_test_data_path
@pytest.fixture(scope="module")
def layout1():
    path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(path)
    return layout

@pytest.fixture(scope="module")
def layout2():
    path = join(get_test_data_path(), '7t_trt')
    layout = BIDSLayout(path)
    return layout

def test_run(layout1):
    img = layout1.get(subject='01', task='mixedgamblestask', suffix='bold',
                      run=1, return_type='obj')[0]
    run = RunNode(None, img.filename, 480, 2, 480/2)
    assert run.image_file == img.filename
    assert run.duration == 480
    assert run.repetition_time == 2
    assert run.n_vols == 480 / 2

def test_get_or_create_node(layout1):
    img = layout1.get(subject='01', task='mixedgamblestask', suffix='bold',
                      run=1, return_type='obj')[0]
    index = NodeIndex()
    entities = {'subject': '01', 'session': 1}
    sess = index.get_or_create_node('session', entities)
    assert sess.__class__ == Node
    sess2 = index.get_or_create_node('session', entities)
    assert sess2 == sess
    run = index.get_or_create_node('run', img.entities,
                                   image_file=img.filename, duration=480,
                                   repetition_time=2,
                                   n_vols=480/2)
    assert run.__class__ == RunNode
    assert run.duration == 480
    assert run.n_vols == 480 / 2

def test_get_nodes(layout1):
    index = load_variables(layout1, scan_length=480)
    # scans.tsv
    nodes = index.get_nodes('session')
    assert len(nodes) == 2
    # participants.tsv
    nodes = index.get_nodes('dataset')
    assert len(nodes) == 1
    assert all([isinstance(n, Node) for n in nodes])
    nodes = index.get_nodes('run', {'subject': ['01', '02', '03']})
    assert len(nodes) == 9
    assert all([isinstance(n, RunNode) for n in nodes])

def test_get_collections_merged(layout1):
    dataset = load_variables(layout1, scan_length=480)
    collection = dataset.get_collections('run', merge=True)
    assert isinstance(collection, BIDSRunVariableCollection)
    assert len(collection.variables) == 8
    vals = collection.variables['RT'].values
    ents = collection.variables['RT'].index
    assert len(ents) == len(vals) == 4096
    assert set(ents.columns) == {'task', 'run', 'subject', 'suffix', 'datatype'}

def test_get_collections_unmerged(layout2):
    dataset = load_variables(layout2, types=['sessions'], scan_length=480)
    colls = dataset.get_collections('subject', merge=False)
    assert len(colls) == 10
    assert len(colls[0].variables) == 94
    assert colls[0]['panas_at_ease'].values.shape == (2,)
|
3396e6cefb081b51c26dcea1e6069e4aff7e0668
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/ecolite/infer/sdk/main.py
|
ef1796f19076bbf520a03cbb063d215a4170b7fa
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,130
|
py
|
main.py
|
# -*- coding:utf-8 -*-
# Copyright(C) 2021. Huawei Technologies Co.,Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Use this file for sdk running
"""
import os
import time
import argparse
from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, MxProtobufIn, StringVector
import MxpiDataType_pb2 as MxpiDataType
import numpy as np
def get_args():
    """ get args"""
    parser = argparse.ArgumentParser(description='get ecolite train dataset')
    parser.add_argument('type', type=str, default="1", choices=['1', '2'],
                        help='1 is used for single record infer, 2 is used for full data infer')
    parser.add_argument('root_data_dir', type=str)
    parser.add_argument('batch_size', default=16, type=int,
                        metavar='N', help='mini-batch size (default: 16)')
    return parser.parse_args()

args = get_args()

def getData(path):
    """ get data"""
    data = np.fromfile(path, dtype=np.float32)
    data = data.reshape(args.batch_size, 12, 224, 224)
    return data

def send_and_infer(filename, stream_manager):
    """
    Construct the input of the stream,
    send inputs data to a specified stream based on streamName.
    Returns:
        bool: send data success or not
    """
    stream_name = b'ecolite'
    tensor = getData(filename)
    tensor_package_list = MxpiDataType.MxpiTensorPackageList()
    tensor_package = tensor_package_list.tensorPackageVec.add()
    array_bytes = tensor.tobytes()
    data_input = MxDataInput()
    data_input.data = array_bytes
    tensor_vec = tensor_package.tensorVec.add()
    tensor_vec.deviceId = 0
    tensor_vec.memType = 0
    for i_ in tensor.shape:
        tensor_vec.tensorShape.append(i_)
    tensor_vec.dataStr = data_input.data
    tensor_vec.tensorDataSize = len(array_bytes)
    key = "appsrc0".encode('utf-8')
    protobuf_vec = InProtobufVector()
    protobuf = MxProtobufIn()
    protobuf.key = key
    protobuf.type = b'MxTools.MxpiTensorPackageList'
    protobuf.protobuf = tensor_package_list.SerializeToString()
    protobuf_vec.push_back(protobuf)
    ret_ = stream_manager.SendProtobuf(stream_name, 0, protobuf_vec)
    if ret_ < 0:
        print("Failed to send data to stream.")
        return False
    key_vec = StringVector()
    key_vec.push_back(b'mxpi_tensorinfer0')
    infer_result = stream_manager.GetProtobuf(stream_name, 0, key_vec)
    if infer_result.size() == 0:
        print("inferResult is null")
        return False
    if infer_result[0].errorCode != 0:
        print("GetProtobuf error. errorCode=%d" % (infer_result[0].errorCode))
        return False
    result_ = MxpiDataType.MxpiTensorPackageList()
    result_.ParseFromString(infer_result[0].messageBuf)
    res = np.frombuffer(result_.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')
    res = res.reshape((16, 101))
    return res

if __name__ == '__main__':
    # init stream manager
    streamManagerApi = StreamManagerApi()
    ret = streamManagerApi.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        exit()
    # create streams by pipeline config file
    with open("./config/ecolite.pipeline", 'rb') as f:
        pipeline = f.read()
    ret = streamManagerApi.CreateMultipleStreams(pipeline)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()
    time_begin = time.time()
    if args.type == '2':
        if not os.path.exists('./result'):
            os.makedirs('./result')
        root_dir = args.root_data_dir
        datapath = os.listdir(root_dir)
        for i, path_ in enumerate(datapath):
            datapath = os.path.join(root_dir, path_)
            result = send_and_infer(datapath, streamManagerApi)
            videoid = datapath.split('/')[-1].split('_')[-2]
            dataname = 'eval_predict_' + str(videoid) + '_.bin'
            result.tofile(f'./result/{dataname}')
    if args.type == '1':
        if not os.path.exists('./result_single'):
            os.makedirs('./result_single')
        dirname = args.root_data_dir
        datapath = os.listdir(dirname)
        datapath = os.path.join(dirname, datapath[-1])
        result = send_and_infer(datapath, streamManagerApi)
        videoid = datapath.split('/')[-1].split('_')[-2]
        dataname = 'eval_predict_' + str(videoid) + '_.bin'
        result.tofile(f'./result_single/{dataname}')
    time_end = time.time()
    print("infer time cost:", time_end - time_begin)
    # destroy streams
    streamManagerApi.DestroyAllStreams()
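# Illustrative invocation (the data path is a placeholder): full-dataset inference writes one
# .bin prediction per input record, e.g.
#   python main.py 2 /path/to/preprocessed_bins 16
# while mode 1 runs inference on a single record taken from the given directory.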
|
46bb608da1047495a2b6b5ae9e751ddaa4936722
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/ProcessModifiers/python/ecal_deepsc_cff.py
|
997decae5b524032d31d6026e81f3322fb21e6e1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 131
|
py
|
ecal_deepsc_cff.py
|
import FWCore.ParameterSet.Config as cms
# This modifier is for ECAL SuperCluster with ML studies
ecal_deepsc = cms.Modifier()
|
5b4bdb57cdc4dfec4601875c66bf257f383cb049
|
b11113d4e20d5c2dca31a7e9a1fe33c8242c9416
|
/slack-bot/app.py
|
df3dba4dc1bcf982264d39d84ae62cfb6c85670a
|
[] |
no_license
|
MoralisWeb3/youtube-tutorials
|
ce182b6b70443af36891ea3f3c25bde6914b9dd9
|
4f6fa634344614840b80f7a3942976f09837a77d
|
refs/heads/main
| 2023-07-06T10:22:32.342053
| 2023-07-04T22:26:39
| 2023-07-04T22:26:39
| 346,786,715
| 639
| 1,821
| null | 2023-05-21T12:00:47
| 2021-03-11T17:40:31
|
C#
|
UTF-8
|
Python
| false
| false
| 2,639
|
py
|
app.py
|
from flask import Flask, request
from dotenv import load_dotenv
import json
import locale
import os
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
load_dotenv()
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
app = Flask(__name__)
SLACK_BOT_TOKEN = os.getenv('SLACK_BOT_TOKEN')
SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')
slack_client = WebClient(token=SLACK_BOT_TOKEN)
@app.route('/webhook', methods=['POST'])
def webhook():
    webhook = request.data.decode('utf-8')
    json_object = json.loads(webhook)
    try:
        transfer = json_object['erc20Transfers'][0]
    except IndexError:
        return 'No transfers'
    sender = transfer["from"]
    receiver = transfer["to"]
    value = transfer["value"]
    token_name = transfer["tokenName"]
    transaction_hash = transfer["transactionHash"]
    handle_response_and_chat(sender, receiver, value,
                             token_name, transaction_hash)
    return 'OK'

def send_massage(channel, attachment):
    try:
        response = slack_client.chat_postMessage(
            channel=channel,
            attachments=[attachment]
        )
        print(f"Message sent to channel {channel}")
    except SlackApiError as e:
        print(f"Error: {e}")

def handle_response_and_chat(sender, receiver, value, token_name, transaction_hash):
    sender = sender[:7] + "..." + sender[-3:]
    receiver = receiver[:7] + "..." + receiver[-3:]
    value = "${:,.6f}".format(float(value)/1000000)
    transaction_hash = 'https://etherscan.io/tx/' + transaction_hash
    attachment = {
        "fallback": f"Transfer Details:\nSender: {sender}\nReceiver: {receiver}\nValue: {value}\nToken Name: {token_name}\nTransaction Hash: {transaction_hash}",
        "color": "#36a64f",
        "pretext": "New Whale Transfer!",
        "fields": [
            {
                "title": "Sender",
                "value": sender,
                "short": True
            },
            {
                "title": "Receiver",
                "value": receiver,
                "short": True
            },
            {
                "title": "Value",
                "value": value,
                "short": True
            },
            {
                "title": "Token Name",
                "value": token_name,
                "short": True
            },
            {
                "title": "Transaction Hash",
                "value": f"<{transaction_hash}|{transaction_hash}>",
                "short": False
            }
        ]
    }
    send_massage(SLACK_CHANNEL, attachment)

if __name__ == "__main__":
    app.run(port=5002)
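# Shape of the payload the /webhook route expects (field names taken from the accesses
# above; the values here are placeholders, not real data):
#   {"erc20Transfers": [{"from": "0xabc...", "to": "0xdef...", "value": "123456789",
#                        "tokenName": "ExampleToken", "transactionHash": "0x..."}]}
# The value is divided by 1e6 before display, i.e. it is assumed to be expressed in the
# smallest unit of a 6-decimal token.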
|
aa27752f2bef4526394816c616ee6a9e0c85d90a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Validation/RecoEgamma/test/PhotonValidator_SingleGammaFlatPt10To100.py
|
b922d8caead8ff60c76d320da5b751f27c5cd27d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 5,160
|
py
|
PhotonValidator_SingleGammaFlatPt10To100.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TestPhotonValidator")
process.load('Configuration/StandardSequences/GeometryPilot2_cff')
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi")
process.load("RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi")
process.load("SimGeneral.MixingModule.mixNoPU_cfi")
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
process.load("DQMServices.Components.MEtoEDMConverter_cfi")
process.load("Validation.RecoEgamma.photonValidationSequence_cff")
process.load("Validation.RecoEgamma.photonPostprocessing_cfi")
process.load("Validation.RecoEgamma.conversionPostprocessing_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'MC_38Y_V7::All'
process.DQMStore = cms.Service("DQMStore");
process.load("DQMServices.Components.DQMStoreStats_cfi")
from DQMServices.Components.DQMStoreStats_cfi import *
dqmStoreStats.runOnEndJob = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
#input = cms.untracked.int32(10)
)
from Validation.RecoEgamma.photonValidationSequence_cff import *
from Validation.RecoEgamma.photonPostprocessing_cfi import *
from Validation.RecoEgamma.conversionPostprocessing_cfi import *
photonValidation.OutputFileName = 'PhotonValidationRelVal380_SingleGammaFlatPt10To100.root'
photonPostprocessing.standalone = cms.bool(True)
photonPostprocessing.InputFileName = photonValidation.OutputFileName
photonPostprocessing.OuputFileName = photonValidation.OutputFileName
conversionPostprocessing.standalone = cms.bool(True)
conversionPostprocessing.InputFileName = tkConversionValidation.OutputFileName
conversionPostprocessing.OuputFileName = tkConversionValidation.OutputFileName
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring(
# official RelVal 380 single Photons pt=10to100 GeV
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-RECO/MC_38Y_V7-v1/0007/E0129BE0-C495-DF11-9BB9-003048678D52.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-RECO/MC_38Y_V7-v1/0007/5ACA3FEB-C595-DF11-AEBB-00304867926C.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-RECO/MC_38Y_V7-v1/0007/2ADCCE62-C495-DF11-9B1B-00261894396A.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-RECO/MC_38Y_V7-v1/0007/08B89B62-C595-DF11-B204-003048678D52.root'
),
secondaryFileNames = cms.untracked.vstring(
# official RelVal 380 single Photons pt=10to100GeV
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/E2526065-C595-DF11-A3C5-003048678B06.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/D033B2E0-C495-DF11-8E41-002618943937.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/D0218DE0-C495-DF11-9CF7-003048678C06.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/BC3B6364-C495-DF11-841B-0018F3D095F8.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/5CCBAAEB-C595-DF11-BB53-0018F3D0968C.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/5AD17AEA-C595-DF11-9C4A-002618943886.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/4E2D2565-C495-DF11-A6AF-001A92810AD2.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/44FF7B64-C495-DF11-B9B9-0018F3D096DA.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0007/34A1F2C6-4E96-DF11-9FE0-001A92971B56.root',
'/store/relval/CMSSW_3_8_0/RelValSingleGammaFlatPt10To100/GEN-SIM-DIGI-RAW-HLTDEBUG/MC_38Y_V7-v1/0006/8EB149DD-C295-DF11-9D54-0018F3C3E3A6.root'
)
)
photonPostprocessing.rBin = 48
## For single gamma pt = 35
photonValidation.eMax = 300
photonValidation.etMax = 50
photonValidation.etScale = 0.20
photonValidation.dCotCutOn = False
photonValidation.dCotCutValue = 0.15
process.FEVT = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring("keep *_MEtoEDMConverter_*_*"),
fileName = cms.untracked.string('pippo.root')
)
process.p1 = cms.Path(process.tpSelection*process.photonValidationSequence*process.photonPostprocessing*process.conversionPostprocessing*process.dqmStoreStats)
process.schedule = cms.Schedule(process.p1)
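# Usage sketch (editor's note, not part of the original RelVal configuration):
# a config like this is run inside a CMSSW environment with cmsRun, e.g.
#
#     cmsRun photonValidation_SingleGammaFlatPt10To100_cfg.py
#
# (the file name is illustrative). The MEtoEDMConverter products kept by the
# FEVT output module go to 'pippo.root', while the standalone validation and
# postprocessing histograms are written to photonValidation.OutputFileName.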
|
dfd05440404d7c829d0671f195cc223c2940bf3c
|
8488fa51bd937bc9403d636279ba03ee5b1bd4c0
|
/sample-plugins/permissions/debug_perm.py
|
d3e43b7ec9190b4d268605ce10e932f22ce22a5e
|
[
"BSD-3-Clause"
] |
permissive
|
edgewall/trac
|
e7ecd994121c6e30b39e98dc6ad9b9edf5be4559
|
f7eba7b121c9ff227b062e9d032ff4d4582adc39
|
refs/heads/trunk
| 2023-08-17T00:13:12.555838
| 2023-07-02T15:13:51
| 2023-07-02T15:13:51
| 615,096
| 399
| 173
|
NOASSERTION
| 2023-03-07T13:46:40
| 2010-04-17T15:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
debug_perm.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2023 Edgewall Software
# Copyright (C) 2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
from trac.core import *
from trac.perm import IPermissionPolicy, PermissionCache
from trac.resource import Resource
revision = "$Rev$"
url = "$URL$"
class DebugPolicy(Component):
"""Verify the well-formedness of the permission checks.
**This plugin is only useful for Trac Development.**
Once this plugin is enabled, you'll have to insert it at the appropriate
place in your list of permission policies, e.g.
{{{
[trac]
permission_policies = DebugPolicy, SecurityTicketsPolicy, AuthzPolicy,
DefaultPermissionPolicy, LegacyAttachmentPolicy
}}}
"""
implements(IPermissionPolicy)
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
if resource:
assert resource is None or isinstance(resource, Resource)
assert isinstance(perm, PermissionCache)
self.log.info("does '%s' have %s on %r?", username, action, resource)
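# Editor's sketch (not part of the original plugin): check_permission() above
# returns None implicitly. In Trac's IPermissionPolicy protocol, None means
# "no opinion", so the decision falls through to the next entry in
# [trac] permission_policies; only an explicit True or False settles the check.
# A minimal policy that actually decides might look like this hypothetical
# example:
#
#     class AdminWritePolicy(Component):
#         implements(IPermissionPolicy)
#
#         def check_permission(self, action, username, resource, perm):
#             if action.endswith('_VIEW'):
#                 return None                # abstain -> ask the next policy
#             return username == 'admin'     # only 'admin' may modify anything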
|
ba6662f2fa00dd00da6f2a22bd001a5b1a9df11c
|
5e66707ccdea0c000e6e269fce6907ee3cfcdbde
|
/galaxy/main/migrations/0123_fix_importtaskmessage_constraints.py
|
77da91b227f2868e1665852f75dcfc250d74bd13
|
[
"Apache-2.0"
] |
permissive
|
ansible/galaxy
|
f629046d579d7cd4e484cdf1e27ad68fe7b170a2
|
6a374cacdf0f04de94486913bba5285e24e178d3
|
refs/heads/devel
| 2023-09-04T09:21:43.542346
| 2023-08-25T16:58:09
| 2023-08-25T16:58:09
| 24,333,272
| 972
| 419
|
Apache-2.0
| 2023-08-25T17:38:20
| 2014-09-22T15:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
0123_fix_importtaskmessage_constraints.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0122_auto_20181015_1802'),
]
operations = [
migrations.AlterField(
model_name='importtaskmessage',
name='message_text',
field=models.TextField(),
),
migrations.AlterField(
model_name='importtaskmessage',
name='rule_desc',
field=models.TextField(null=True),
),
]
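# Editor's note (not part of the original migration): AlterField to a plain
# TextField drops the length limit previously enforced on these columns. A
# migration like this is applied with the standard Django command, e.g.:
#
#     python manage.py migrate main 0123_fix_importtaskmessage_constraints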
|
f4014b8272e5885612ff6444b75e4419449c8fdd
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/dynalite/common.py
|
446cdc74c0bd01cd16ba71a515a056f94156dab4
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
common.py
|
"""Common functions for tests."""
from unittest.mock import AsyncMock, Mock, call, patch
from homeassistant.components import dynalite
from homeassistant.const import ATTR_SERVICE
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry
ATTR_METHOD = "method"
ATTR_ARGS = "args"
def create_mock_device(platform, spec):
"""Create a dynalite mock device for a platform according to a spec."""
device = Mock(spec=spec)
device.category = platform
device.unique_id = "UNIQUE"
device.name = "NAME"
device.device_class = "Device Class"
return device
async def get_entry_id_from_hass(hass):
"""Get the config entry id from hass."""
ent_reg = er.async_get(hass)
assert ent_reg
conf_entries = hass.config_entries.async_entries(dynalite.DOMAIN)
assert len(conf_entries) == 1
return conf_entries[0].entry_id
async def create_entity_from_device(hass, device):
"""Set up the component and platform and create a light based on the device provided."""
host = "1.2.3.4"
entry = MockConfigEntry(domain=dynalite.DOMAIN, data={dynalite.CONF_HOST: host})
entry.add_to_hass(hass)
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices"
) as mock_dyn_dev:
mock_dyn_dev().async_setup = AsyncMock(return_value=True)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
new_device_func = mock_dyn_dev.mock_calls[1][2]["new_device_func"]
new_device_func([device])
await hass.async_block_till_done()
return mock_dyn_dev.mock_calls[1][2]["update_device_func"]
async def run_service_tests(hass, device, platform, services):
"""Run a series of service calls and check that the entity and device behave correctly."""
for cur_item in services:
service = cur_item[ATTR_SERVICE]
args = cur_item.get(ATTR_ARGS, {})
service_data = {"entity_id": f"{platform}.name", **args}
await hass.services.async_call(platform, service, service_data, blocking=True)
await hass.async_block_till_done()
for check_item in services:
check_method = getattr(device, check_item[ATTR_METHOD])
if check_item[ATTR_SERVICE] == service:
check_method.assert_called_once()
assert check_method.mock_calls == [call(**args)]
check_method.reset_mock()
else:
check_method.assert_not_called()
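# Example usage (editor's sketch, not part of the original helpers): a platform
# test typically combines the helpers above roughly as follows. The spec list
# and the "turn_on"/"turn_off" method names are illustrative assumptions, not
# the real dynalite device API.
#
#     async def test_light_services(hass):
#         device = create_mock_device(
#             "light",
#             spec=["category", "unique_id", "name", "device_class",
#                   "turn_on", "turn_off"],
#         )
#         await create_entity_from_device(hass, device)
#         await run_service_tests(
#             hass,
#             device,
#             "light",
#             [
#                 {ATTR_SERVICE: "turn_on", ATTR_METHOD: "turn_on"},
#                 {ATTR_SERVICE: "turn_off", ATTR_METHOD: "turn_off"},
#             ],
#         )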
|
f645bf788a38a8e13116492fdc46810f37aa8661
|
9d7ea04b3e0a0edf85fa6f7e09ccf8cbd958d291
|
/examples/bench/bench.py
|
ab813ed9d8736d01b3c06a7eee6d505fdef24659
|
[
"MIT"
] |
permissive
|
mrjoes/sockjs-tornado
|
321557657c0cdef409f0790930e34334b84bfaf6
|
59f99e02ee210764f0cdefc93d11e6060aa937d1
|
refs/heads/master
| 2023-09-04T00:22:59.375813
| 2023-05-10T16:47:01
| 2023-05-10T16:47:01
| 2,927,833
| 437
| 126
|
MIT
| 2023-08-04T15:50:55
| 2011-12-06T20:41:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
bench.py
|
# -*- coding: utf-8 -*-
"""
sockjs-tornado benchmarking server. Works as a simple chat server
without HTML frontend and listens on port 8080 by default.
"""
import sys
import weakref
from tornado import web, ioloop
from sockjs.tornado import SockJSRouter, SockJSConnection
class EchoConnection(SockJSConnection):
"""Echo connection implementation"""
clients = set()
weak_clients = weakref.WeakSet([])
def on_open(self, info):
# When new client comes in, will add it to the clients list
self.clients.add(self)
self.weak_clients.add(self)
def on_message(self, msg):
# For every incoming message, broadcast it to all clients
self.broadcast(self.clients, msg)
def on_close(self):
# If a client disconnects, remove it from the clients list
self.clients.remove(self)
@classmethod
def dump_stats(cls):
# Print current client count
print('Clients: %d' % (len(cls.clients)))
print('Weak Clients: %d' % (len(cls.weak_clients)))
if __name__ == '__main__':
options = dict()
if len(sys.argv) > 1:
options['immediate_flush'] = False
# 1. Create SockJSRouter
EchoRouter = SockJSRouter(EchoConnection, '/broadcast', options)
# 2. Create Tornado web.Application
app = web.Application(EchoRouter.urls)
# 3. Make application listen on port 8080
app.listen(8080)
# 4. Every 1 second dump current client count
ioloop.PeriodicCallback(EchoConnection.dump_stats, 1000).start()
# 5. Start IOLoop
ioloop.IOLoop.instance().start()
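# Client-side sketch (editor's addition, not part of the benchmark): SockJS
# clients connect to the '/broadcast' endpoint. A quick manual check from
# Python can use the raw WebSocket transport that the SockJS protocol exposes
# under '<prefix>/websocket' (URL layout assumed here):
#
#     from tornado import ioloop, websocket
#
#     async def ping():
#         conn = await websocket.websocket_connect(
#             "ws://localhost:8080/broadcast/websocket")
#         await conn.write_message("hello")
#         print(await conn.read_message())   # echoed back by the broadcast
#
#     ioloop.IOLoop.current().run_sync(ping)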
|
b9b61060d270b66039586168be10048f70b77433
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/spm/tests/test_auto_RealignUnwarp.py
|
bb27419547f5bb478ea308af8a131ea763c3c767
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,311
|
py
|
test_auto_RealignUnwarp.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import RealignUnwarp
def test_RealignUnwarp_inputs():
input_map = dict(
est_basis_func=dict(
field="uweoptions.basfcn",
),
est_first_order_effects=dict(
field="uweoptions.fot",
),
est_jacobian_deformations=dict(
field="uweoptions.jm",
),
est_num_of_iterations=dict(
field="uweoptions.noi",
maxlen=1,
minlen=1,
usedefault=True,
),
est_re_est_mov_par=dict(
field="uweoptions.rem",
),
est_reg_factor=dict(
field="uweoptions.lambda",
maxlen=1,
minlen=1,
usedefault=True,
),
est_reg_order=dict(
field="uweoptions.regorder",
),
est_second_order_effects=dict(
field="uweoptions.sot",
),
est_taylor_expansion_point=dict(
field="uweoptions.expround",
usedefault=True,
),
est_unwarp_fwhm=dict(
field="uweoptions.uwfwhm",
),
fwhm=dict(
field="eoptions.fwhm",
),
in_files=dict(
copyfile=True,
field="data.scans",
mandatory=True,
),
interp=dict(
field="eoptions.einterp",
),
matlab_cmd=dict(),
mfile=dict(
usedefault=True,
),
out_prefix=dict(
field="uwroptions.prefix",
usedefault=True,
),
paths=dict(),
phase_map=dict(
copyfile=False,
extensions=None,
field="data.pmscan",
),
quality=dict(
field="eoptions.quality",
),
register_to_mean=dict(
field="eoptions.rtm",
),
reslice_interp=dict(
field="uwroptions.rinterp",
),
reslice_mask=dict(
field="uwroptions.mask",
),
reslice_which=dict(
field="uwroptions.uwwhich",
maxlen=2,
minlen=2,
usedefault=True,
),
reslice_wrap=dict(
field="uwroptions.wrap",
),
separation=dict(
field="eoptions.sep",
),
use_mcr=dict(),
use_v8struct=dict(
min_ver="8",
usedefault=True,
),
weight_img=dict(
extensions=None,
field="eoptions.weight",
),
wrap=dict(
field="eoptions.ewrap",
),
)
inputs = RealignUnwarp.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_RealignUnwarp_outputs():
output_map = dict(
mean_image=dict(
extensions=None,
),
modified_in_files=dict(),
realigned_unwarped_files=dict(),
realignment_parameters=dict(),
)
outputs = RealignUnwarp.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
aca9da847da3807b3014235f5ea2194ebb0e3882
|
2181883c8faac55bfc969a97d22d9b24a3e81ab3
|
/com/win32comext/shell/demos/servers/context_menu.py
|
b92109d738022a3c17ad22552b445efef87163ae
|
[
"PSF-2.0"
] |
permissive
|
mhammond/pywin32
|
574bf121cfeac8c7a9d28f94ee0f2069a425e8ab
|
2a7137f21965013020ef9e4f27565db6dea59003
|
refs/heads/main
| 2023-09-02T13:16:52.307262
| 2023-08-17T19:42:26
| 2023-08-17T19:42:26
| 108,187,130
| 4,757
| 907
| null | 2023-08-23T01:45:49
| 2017-10-24T21:44:27
|
C++
|
UTF-8
|
Python
| false
| false
| 4,413
|
py
|
context_menu.py
|
# A sample context menu handler.
# Adds a 'Hello from Python' menu entry to .py files. When clicked, a
# simple message box is displayed.
#
# To demonstrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer, and browse to a directory with a .py file.
# * Right-Click on a .py file - locate and click on 'Hello from Python' on
# the context menu.
import pythoncom
import win32con
import win32gui
from win32com.shell import shell, shellcon
class ShellExtension:
_reg_progid_ = "Python.ShellExtension.ContextMenu"
_reg_desc_ = "Python Sample Shell Extension (context menu)"
_reg_clsid_ = "{CED0336C-C9EE-4a7f-8D7F-C660393C381F}"
_com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu]
_public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods
def Initialize(self, folder, dataobj, hkey):
print("Init", folder, dataobj, hkey)
self.dataobj = dataobj
def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
print("QCM", hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags)
# Query the items clicked on
format_etc = win32con.CF_HDROP, None, 1, -1, pythoncom.TYMED_HGLOBAL
sm = self.dataobj.GetData(format_etc)
num_files = shell.DragQueryFile(sm.data_handle, -1)
if num_files > 1:
msg = "&Hello from Python (with %d files selected)" % num_files
else:
fname = shell.DragQueryFile(sm.data_handle, 0)
msg = "&Hello from Python (with '%s' selected)" % fname
idCmd = idCmdFirst
items = ["First Python content menu item"]
if (
uFlags & 0x000F
) == shellcon.CMF_NORMAL: # Check == here, since CMF_NORMAL=0
print("CMF_NORMAL...")
items.append(msg)
elif uFlags & shellcon.CMF_VERBSONLY:
print("CMF_VERBSONLY...")
items.append(msg + " - shortcut")
elif uFlags & shellcon.CMF_EXPLORE:
print("CMF_EXPLORE...")
items.append(msg + " - normal file, right-click in Explorer")
elif uFlags & CMF_DEFAULTONLY:
print("CMF_DEFAULTONLY...\r\n")
else:
print("** unknown flags", uFlags)
win32gui.InsertMenu(
hMenu, indexMenu, win32con.MF_SEPARATOR | win32con.MF_BYPOSITION, 0, None
)
indexMenu += 1
for item in items:
win32gui.InsertMenu(
hMenu,
indexMenu,
win32con.MF_STRING | win32con.MF_BYPOSITION,
idCmd,
item,
)
indexMenu += 1
idCmd += 1
win32gui.InsertMenu(
hMenu, indexMenu, win32con.MF_SEPARATOR | win32con.MF_BYPOSITION, 0, None
)
indexMenu += 1
return idCmd - idCmdFirst # Must return number of menu items we added.
def InvokeCommand(self, ci):
mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
win32gui.MessageBox(hwnd, "Hello", "Wow", win32con.MB_OK)
def GetCommandString(self, cmd, typ):
# If GetCommandString returns the same string for all items then
# the shell seems to ignore all but one. This is even true in
# Win7 etc where there is no status bar (and hence this string seems
# ignored)
return "Hello from Python (cmd=%d)!!" % (cmd,)
def DllRegisterServer():
import winreg
key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT, "Python.File\\shellex")
subkey = winreg.CreateKey(key, "ContextMenuHandlers")
subkey2 = winreg.CreateKey(subkey, "PythonSample")
winreg.SetValueEx(subkey2, None, 0, winreg.REG_SZ, ShellExtension._reg_clsid_)
print(ShellExtension._reg_desc_, "registration complete.")
def DllUnregisterServer():
import winreg
try:
key = winreg.DeleteKey(
winreg.HKEY_CLASSES_ROOT,
"Python.File\\shellex\\ContextMenuHandlers\\PythonSample",
)
except WindowsError as details:
import errno
if details.errno != errno.ENOENT:
raise
print(ShellExtension._reg_desc_, "unregistration complete.")
if __name__ == "__main__":
from win32com.server import register
register.UseCommandLine(
ShellExtension,
finalize_register=DllRegisterServer,
finalize_unregister=DllUnregisterServer,
)
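# Registration sketch (editor's note): register.UseCommandLine drives the two
# callbacks above from the command line, so from an elevated prompt (the
# handler writes under HKEY_CLASSES_ROOT):
#
#     python context_menu.py               # register the context-menu handler
#     python context_menu.py --unregister  # remove it again
#
# The exact flag spelling comes from win32com.server.register, not this file.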
|
b745e8c69c5374129e8a07365203983cce755d7b
|
c1ab5fc6d37749cf7dd693a8f6d5475dfa54cd45
|
/kubernetes/client/api/events_v1_api.py
|
94c411ed03d9cee74ce7cb0334ab9452b82e4cca
|
[
"Apache-2.0"
] |
permissive
|
kubernetes-client/python
|
2d10e5d7c1358aa4473c1fcd54d2c5a1085cf56e
|
68d5a1479e7d735ea454021bc54e453c9b31baf7
|
refs/heads/master
| 2023-09-01T11:23:54.508420
| 2023-08-31T21:04:31
| 2023-08-31T21:04:31
| 72,473,727
| 5,792
| 3,654
|
Apache-2.0
| 2023-09-13T18:34:16
| 2016-10-31T20:08:03
|
Python
|
UTF-8
|
Python
| false
| false
| 114,411
|
py
|
events_v1_api.py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class EventsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_event(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_event # noqa: E501
create an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_event(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param EventsV1Event body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1Event
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_event_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_event_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_event # noqa: E501
create an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_event_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param EventsV1Event body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_event`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1Event', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
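# Usage sketch (editor's addition, not generated code): a caller would normally
# build the request body from the kubernetes.client models before invoking
# create_namespaced_event(). Field names below follow the python client's
# snake_case conventions and are assumptions as far as this module is
# concerned:
#
#     import datetime
#     from kubernetes import client, config
#
#     config.load_kube_config()
#     api = client.EventsV1Api()
#     event = client.EventsV1Event(
#         metadata=client.V1ObjectMeta(generate_name="demo-"),
#         reason="Demo",
#         action="Created",
#         type="Normal",
#         reporting_controller="example.com/demo",
#         reporting_instance="demo-1",
#         regarding=client.V1ObjectReference(
#             kind="Pod", name="demo", namespace="default"),
#         event_time=datetime.datetime.now(datetime.timezone.utc),
#     )
#     created = api.create_namespaced_event("default", event)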
def delete_collection_namespaced_event(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_event # noqa: E501
delete collection of Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_event(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_event # noqa: E501
delete collection of Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_event_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_event # noqa: E501
delete an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_event(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_event_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_event # noqa: E501
delete an Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_event_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_event`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_event_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_event_for_all_namespaces # noqa: E501
list or watch objects of kind Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_event_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1EventList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
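    # Example usage (a sketch): listing Events across all namespaces.  The
    # field selector shown is only illustrative; `metadata.namespace` is one
    # of the generically supported selector fields.
    #
    #     api = client.EventsV1Api()
    #     events = api.list_event_for_all_namespaces(
    #         field_selector="metadata.namespace!=kube-system", limit=50)
    #     for ev in events.items:
    #         print(ev.metadata.namespace, ev.reason, ev.note)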
def list_event_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_event_for_all_namespaces # noqa: E501
list or watch objects of kind Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_event_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1EventList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_event_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/events', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1EventList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_event(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_event # noqa: E501
list or watch objects of kind Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_event(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1EventList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_event_with_http_info(namespace, **kwargs) # noqa: E501
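    # Example usage (a sketch): chunked listing of Events in one namespace.
    # "default" is a placeholder; the continue token lives on the returned
    # list metadata (`_continue` in the Python client, `continue` on the wire).
    #
    #     events = api.list_namespaced_event("default", limit=100)
    #     for ev in events.items:
    #         print(ev.metadata.name, ev.type, ev.reason)
    #     next_chunk = api.list_namespaced_event(
    #         "default", limit=100, _continue=events.metadata._continue)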
def list_namespaced_event_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_event # noqa: E501
list or watch objects of kind Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_event_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
        :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is sent when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is sent when the state is synced at least to the moment when the request started being processed. - `resourceVersionMatch` set to any other value or unset: an Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1EventList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'send_initial_events',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501
query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1EventList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
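    # Example usage (a sketch): streaming Event changes with the helper in
    # `kubernetes.watch`, which repeatedly drives this list call with
    # `watch=True`.  The namespace and timeout are placeholders.
    #
    #     from kubernetes import watch
    #     w = watch.Watch()
    #     for change in w.stream(api.list_namespaced_event,
    #                             namespace="default", timeout_seconds=30):
    #         print(change["type"], change["object"].metadata.name)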
def patch_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_event # noqa: E501
partially update the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_event(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1Event
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501
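    # Example usage (a sketch): patching an Event with a JSON Patch document,
    # which matches the first patch content type advertised below
    # (application/json-patch+json).  Names are placeholders and the patch
    # assumes the Event already has a `note` field set.
    #
    #     body = [{"op": "replace", "path": "/note", "value": "acknowledged"}]
    #     patched = api.patch_namespaced_event("my-event", "default", body)
    #     print(patched.note)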
def patch_namespaced_event_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_event # noqa: E501
partially update the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_event_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_event`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_event`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1Event', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_event(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_event # noqa: E501
read the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_event(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1Event
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_event_with_http_info(name, namespace, **kwargs) # noqa: E501
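    # Example usage (a sketch): reading a single Event by name; the names are
    # placeholders and the call returns an EventsV1Event object.
    #
    #     ev = api.read_namespaced_event(name="my-event", namespace="default")
    #     print(ev.type, ev.reason, ev.note)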
def read_namespaced_event_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_event # noqa: E501
read the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_event_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_event`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1Event', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_event # noqa: E501
replace the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param EventsV1Event body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: EventsV1Event
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501
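    # Example usage (a sketch): replace follows the usual read-modify-write
    # cycle; the PUT is rejected with a conflict if the resourceVersion read
    # earlier has meanwhile changed.  Names are placeholders.
    #
    #     ev = api.read_namespaced_event("my-event", "default")
    #     ev.note = "rewritten note"
    #     api.replace_namespaced_event("my-event", "default", ev)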
def replace_namespaced_event_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_event # noqa: E501
replace the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_event_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param EventsV1Event body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(EventsV1Event, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_event" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_event`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_event`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_event`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EventsV1Event', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
# --- fluiddyn/transonic :: doc/examples/bench_proj_perp/bench.py (Python, BSD-3-Clause) ---
import numpy as np
from transonic import boost, Array, Type
A = Array[Type(np.float64, np.complex128), "3d"]
Af = "float[:,:,:]"
A = Af  # workaround: the Cython backend has an issue with this fused type
def proj(vx: A, vy: A, vz: A, kx: Af, ky: Af, kz: Af, inv_k_square_nozero: Af):
tmp = (kx * vx + ky * vy + kz * vz) * inv_k_square_nozero
vx -= kx * tmp
vy -= ky * tmp
vz -= kz * tmp
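# Both variants remove, in place, the component of (vx, vy, vz) parallel to
# the wavevector (kx, ky, kz): v <- v - k (k.v) / |k|^2, i.e. they project the
# velocity onto divergence-free fields in spectral space.  `proj` is the
# vectorised NumPy form; `proj_loop` below spells out the same computation
# with explicit loops, which the compiled backends tend to optimise better.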
def proj_loop(
vx: A, vy: A, vz: A, kx: Af, ky: Af, kz: Af, inv_k_square_nozero: Af
):
# type annotations only useful for Cython
n0: int
n1: int
n2: int
i0: int
i1: int
i2: int
tmp: float
n0, n1, n2 = kx.shape[0], kx.shape[1], kx.shape[2]
for i0 in range(n0):
for i1 in range(n1):
for i2 in range(n2):
tmp = (
kx[i0, i1, i2] * vx[i0, i1, i2]
+ ky[i0, i1, i2] * vy[i0, i1, i2]
+ kz[i0, i1, i2] * vz[i0, i1, i2]
) * inv_k_square_nozero[i0, i1, i2]
vx[i0, i1, i2] -= kx[i0, i1, i2] * tmp
vy[i0, i1, i2] -= ky[i0, i1, i2] * tmp
vz[i0, i1, i2] -= kz[i0, i1, i2] * tmp
proj_pythran = boost(backend="pythran")(proj)
proj_numba = boost(backend="numba")(proj)
proj_cython = boost(backend="cython")(proj)
proj_loop_pythran = boost(backend="pythran")(proj_loop)
proj_loop_numba = boost(backend="numba")(proj_loop)
proj_loop_cython = boost(backend="cython", boundscheck=False, wraparound=False)(
proj_loop
)
if __name__ == "__main__":
from textwrap import dedent
from transonic.util import print_versions, timeit_verbose
loc = locals()
print_versions()
setup = dedent(
"""
shape = n0, n1, n2 = 64, 512, 512
k0 = np.linspace(0, 100, n0)
k1 = np.linspace(0, 100, n1)
k2 = np.linspace(0, 100, n2)
K1, K0, K2 = np.meshgrid(k1, k0, k2, copy=False)
kz = np.ascontiguousarray(K0)
ky = np.ascontiguousarray(K1)
kx = np.ascontiguousarray(K2)
k_square_nozero = K0 ** 2 + K1 ** 2 + K2 ** 2
k_square_nozero[0, 0, 0] = 1e-14
inv_k_square_nozero = 1.0 / k_square_nozero
vx = np.ones(shape)
vy = np.ones(shape)
vz = np.ones(shape)
"""
)
print()
norm = timeit_verbose(
"proj(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
)
for backend in ("cython", "numba", "pythran"):
timeit_verbose(
f"proj_{backend}(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
norm=norm,
)
timeit_verbose(
f"proj_loop_{backend}(vx, vy, vz, kx, ky, kz, inv_k_square_nozero)",
setup=setup,
globals=loc,
norm=norm,
)
|
4f1a37bdcad92827e8289d7159e62eef8ac6ef9b
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/aws-cdi-sdk/all/conanfile.py
|
5fba77e00e10af17890c762f9d9b7eb837144668
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
conanfile.py
|
import os
import re
from conans import AutoToolsBuildEnvironment, CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.35.0"
class AwsCdiSdkConan(ConanFile):
name = "aws-cdi-sdk"
description = "AWS Cloud Digital Interface (CDI) SDK"
topics = ("aws", "communication", "framework", "service")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/aws/aws-cdi-sdk"
license = "BSD-2-Clause"
settings = "os", "arch", "compiler", "build_type"
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake", "cmake_find_package"
@property
def _source_subfolder(self):
return "source_subfolder"
_autotools = None
_cmake = None
def requirements(self):
self.requires("aws-libfabric/1.9.1amzncdi1.0")
self.requires("aws-sdk-cpp/1.8.130")
def configure(self):
self.options["aws-libfabric"].shared = True
self.options["aws-sdk-cpp"].shared = True
def validate(self):
if self.settings.os != "Linux":
raise ConanInvalidConfiguration("This recipe currently only supports Linux. Feel free to contribute other platforms!")
if not self.options["aws-libfabric"].shared or not self.options["aws-sdk-cpp"].shared:
raise ConanInvalidConfiguration("Cannot build with static dependencies")
if not getattr(self.options["aws-sdk-cpp"], "monitoring"):
raise ConanInvalidConfiguration("This package requires the monitoring AWS SDK")
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self)
return self._autotools
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.configure()
return self._cmake
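# Read the C and C++ compilers chosen by CMake out of CMakeCache.txt so that
# the autotools-driven make in build() uses the same toolchain.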
def _detect_compilers(self):
cmake_cache = tools.load(os.path.join(self.build_folder, "CMakeCache.txt"))
cc = re.search("CMAKE_C_COMPILER:FILEPATH=(.*)", cmake_cache)[1]
cxx = re.search("CMAKE_CXX_COMPILER:FILEPATH=(.*)", cmake_cache)[1]
return cc, cxx
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
# build aws-cpp-sdk-cdi
cmake = self._configure_cmake()
cmake.build()
autotools = self._configure_autotools()
with tools.chdir(self._source_subfolder):
# configure autotools to find aws-cpp-sdk-cdi
autotools.include_paths.append(os.path.join(self.build_folder, self._source_subfolder, "aws-cpp-sdk-cdi", "include"))
autotools.library_paths.append(os.path.join(self.build_folder, "lib"))
autotools.libs.append("aws-cpp-sdk-cdi")
vars = autotools.vars
cc, cxx = self._detect_compilers()
vars["CC"] = cc
vars["CXX"] = cxx
if self.settings.build_type == "Debug":
vars["DEBUG"] = "y"
args = ["require_aws_sdk=no"]
autotools.make(target="libsdk", vars=vars, args=args)
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
self.copy(pattern="*", dst="include", src=os.path.join(self._source_subfolder, "include"))
config = "debug" if self.settings.build_type == "Debug" else "release"
self.copy(pattern="*", dst="lib", src=os.path.join(self._source_subfolder, "build", config, "lib"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "aws-cdi-sdk")
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
# TODO: Remove the namespace on CMake targets
self.cpp_info.names["cmake_find_package"] = "AWS"
self.cpp_info.names["cmake_find_package_multi"] = "AWS"
self.cpp_info.filenames["cmake_find_package"] = "aws-cdi-sdk"
self.cpp_info.filenames["cmake_find_package_multi"] = "aws-cdi-sdk"
cppSdk = self.cpp_info.components["aws-cpp-sdk-cdi"]
cppSdk.libs = ["aws-cpp-sdk-cdi"]
cppSdk.requires = ["aws-sdk-cpp::monitoring", "aws-libfabric::aws-libfabric"]
cppSdk.set_property("cmake_target_name", "AWS::aws-cpp-sdk-cdi")
cppSdk.set_property("pkg_config_name", "aws-cpp-sdk-cdi")
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
# TODO: Remove the namespace on CMake targets
cppSdk.names["cmake_find_package"] = "aws-cpp-sdk-cdi"
cppSdk.names["cmake_find_package_multi"] = "aws-cpp-sdk-cdi"
cppSdk.names["pkg_config"] = "aws-cpp-sdk-cdi"
cSdk = self.cpp_info.components["cdisdk"]
cSdk.libs = ["cdisdk"]
cSdk.requires = ["aws-cpp-sdk-cdi"]
if self.settings.os == "Linux":
cSdk.defines = ["_LINUX"]
cSdk.set_property("cmake_target_name", "AWS::aws-cdi-sdk")
cSdk.set_property("pkg_config_name", "aws-cdi-sdk")
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
# TODO: Remove the namespace on CMake targets
cSdk.names["cmake_find_package"] = "aws-cdi-sdk"
cSdk.names["cmake_find_package_multi"] = "aws-cdi-sdk"
cSdk.names["pkg_config"] = "aws-cdi-sdk"
|
da04c577fa422c16b68954d688d51de350c4143a
|
aa2ae30a88361b4b80ffa28c4d8a54600bbee542
|
/Chapter24/libcube/cubes/cube2x2.py
|
fdb01e8eeca00b43469d2ceda58f0b9e507e9da2
|
[
"MIT"
] |
permissive
|
PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition
|
6728fadb38076f6243da3d98b1cf18faf6b287af
|
d5a421d63c6d3ebbdfa54537fa5ce485bc2b9220
|
refs/heads/master
| 2023-07-05T23:08:32.621622
| 2022-01-17T12:18:54
| 2022-01-17T12:18:54
| 195,020,985
| 963
| 491
|
MIT
| 2023-03-25T01:00:07
| 2019-07-03T09:21:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,766
|
py
|
cube2x2.py
|
import enum
import collections
from . import _env
from . import _common
State = collections.namedtuple("State", field_names=['corner_pos', 'corner_ort'])
RenderedState = collections.namedtuple("RenderedState", field_names=['top', 'front', 'left',
'right', 'back', 'bottom'])
initial_state = State(corner_pos=tuple(range(8)), corner_ort=tuple([0]*8))
def is_initial(state):
assert isinstance(state, State)
return state.corner_pos == initial_state.corner_pos and \
state.corner_ort == initial_state.corner_ort
# available actions. Capital actions denote clockwise rotation
class Action(enum.Enum):
R = 0
L = 1
T = 2
D = 3
F = 4
B = 5
r = 6
l = 7
t = 8
d = 9
f = 10
b = 11
_inverse_action = {
Action.R: Action.r,
Action.r: Action.R,
Action.L: Action.l,
Action.l: Action.L,
Action.T: Action.t,
Action.t: Action.T,
Action.D: Action.d,
Action.d: Action.D,
Action.F: Action.f,
Action.f: Action.F,
Action.B: Action.b,
Action.b: Action.B
}
def inverse_action(action):
assert isinstance(action, Action)
return _inverse_action[action]
_transform_map = {
Action.R: [
((1, 2), (2, 6), (6, 5), (5, 1)), # corner map
((1, 2), (2, 1), (5, 1), (6, 2)), # corner rotate
],
Action.L: [
((3, 0), (7, 3), (0, 4), (4, 7)),
((0, 1), (3, 2), (4, 2), (7, 1)),
],
Action.T: [
((0, 3), (1, 0), (2, 1), (3, 2)),
(),
],
Action.D: [
((4, 5), (5, 6), (6, 7), (7, 4)),
(),
],
Action.F: [
((0, 1), (1, 5), (5, 4), (4, 0)),
((0, 2), (1, 1), (4, 1), (5, 2)),
],
Action.B: [
((2, 3), (3, 7), (7, 6), (6, 2)),
((2, 2), (3, 1), (6, 1), (7, 2)),
]
}
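# Apply an action to a state. Counter-clockwise (lowercase) actions are not
# stored in _transform_map: they reuse the clockwise entry of their inverse,
# with the permutation applied in reverse.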
def transform(state, action):
assert isinstance(state, State)
assert isinstance(action, Action)
global _transform_map
is_inv = action not in _transform_map
if is_inv:
action = inverse_action(action)
c_map, c_rot = _transform_map[action]
corner_pos = _common._permute(state.corner_pos, c_map, is_inv)
corner_ort = _common._permute(state.corner_ort, c_map, is_inv)
corner_ort = _common._rotate(corner_ort, c_rot)
return State(corner_pos=tuple(corner_pos), corner_ort=tuple(corner_ort))
# create initial sides in the right order
def _init_sides():
return [
[None for _ in range(4)]
for _ in range(6) # top, left, back, front, right, bottom
]
# corner cubelet colors (clockwise from the main label). Cubelets are ordered
# top layer first, going counter-clockwise, starting from the front left
corner_colors = (
('W', 'R', 'G'), ('W', 'B', 'R'), ('W', 'O', 'B'), ('W', 'G', 'O'),
('Y', 'G', 'R'), ('Y', 'R', 'B'), ('Y', 'B', 'O'), ('Y', 'O', 'G')
)
# map every 3-sided cubelet to its projection onto the sides;
# sides are indexed in the order returned by _init_sides()
corner_maps = (
# top layer
((0, 2), (3, 0), (1, 1)),
((0, 3), (4, 0), (3, 1)),
((0, 1), (2, 0), (4, 1)),
((0, 0), (1, 0), (2, 1)),
# bottom layer
((5, 0), (1, 3), (3, 2)),
((5, 1), (3, 3), (4, 2)),
((5, 3), (4, 3), (2, 2)),
((5, 2), (2, 3), (1, 2))
)
# render state into human readable form
def render(state):
assert isinstance(state, State)
global corner_colors, corner_maps
sides = _init_sides()
for corner, orient, maps in zip(state.corner_pos, state.corner_ort, corner_maps):
cols = corner_colors[corner]
cols = _common._map_orient(cols, orient)
for (arr_idx, index), col in zip(maps, cols):
sides[arr_idx][index] = col
return RenderedState(top=sides[0], left=sides[1], back=sides[2], front=sides[3],
right=sides[4], bottom=sides[5])
encoded_shape = (8, 24)
def encode_inplace(target, state):
"""
Encode the cube into an existing zeroed numpy array.
Follows the encoding described in the paper https://arxiv.org/abs/1805.07470
:param target: numpy array
:param state: state to be encoded
"""
assert isinstance(state, State)
# handle corner cubelets: find their permuted position
for corner_idx in range(8):
perm_pos = state.corner_pos.index(corner_idx)
corn_ort = state.corner_ort[perm_pos]
target[corner_idx, perm_pos * 3 + corn_ort] = 1
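# A minimal usage sketch (illustrative only): allocate a zeroed float array of
# `encoded_shape` and fill it in place, e.g.
#     target = np.zeros(encoded_shape, dtype=np.float32)  # assumes numpy imported as np
#     encode_inplace(target, initial_state)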
# register env
_env.register(_env.CubeEnv(
name="cube2x2", state_type=State, initial_state=initial_state,
is_goal_pred=is_initial, action_enum=Action,
transform_func=transform, inverse_action_func=inverse_action,
render_func=render, encoded_shape=encoded_shape,
encode_func=encode_inplace))
|
b3b802dfa822aea2a1862a18eb00cadd1f77d7b4
|
8f7320c10f2c5fc8475753dc5256d1a66067e15c
|
/pykeops/pykeops/tutorials/a_LazyTensors/plot_lazytensors_a.py
|
a736fe8124be019eaa284f5cb5f41e9568ff30e2
|
[
"MIT"
] |
permissive
|
getkeops/keops
|
947a5409710379893c6c7a46d0a256133a6d8aff
|
52ed22a7fbbcf4bd02dbdf5dc2b00bf79cceddf5
|
refs/heads/main
| 2023-08-25T12:44:22.092925
| 2023-08-09T13:33:58
| 2023-08-09T13:33:58
| 182,054,091
| 910
| 69
|
MIT
| 2023-09-03T20:35:44
| 2019-04-18T09:04:07
|
Python
|
UTF-8
|
Python
| false
| false
| 13,029
|
py
|
plot_lazytensors_a.py
|
r"""
================================================
A wrapper for NumPy and PyTorch arrays
================================================
KeOps brings **semi-symbolic** calculus
to modern computing libraries:
it alleviates the need for **huge intermediate variables**
such as *kernel* or *distance* matrices in machine
learning and computational geometry.
"""
#########################################################################
# First steps
# -----------
#
# A simple interface to the KeOps inner routines is provided by
# the :class:`pykeops.numpy.LazyTensor` or :class:`pykeops.torch.LazyTensor`
# **symbolic wrapper**, to be used with **NumPy arrays** or **PyTorch
# tensors** respectively.
#
# To illustrate its main features on a **simple example**, let's generate two point
# clouds :math:`(x_i)_{i\in[1,M]}` and :math:`(y_j)_{j\in[1,N]}` in the unit square:
import numpy as np
M, N = 1000, 2000
x = np.random.rand(M, 2)
y = np.random.rand(N, 2)
##########################################################################
# With NumPy, an efficient way of computing the index of the **nearest y-neighbor**
#
# .. math::
# \sigma(i) = \arg \min_{j\in [1,N]} \| x_i - y_j\|^2
#
# for all points :math:`x_i` is to perform a :func:`numpy.argmin()`
# reduction on the **M-by-N matrix** of squared distances
#
# .. math::
# D_{i,j} = \|x_i-y_j\|^2,
#
# computed using **tensorized**, broadcasted operators:
#
x_i = x[:, None, :] # (M, 1, 2) numpy array
y_j = y[None, :, :] # (1, N, 2) numpy array
D_ij = ((x_i - y_j) ** 2).sum(-1) # (M, N) array of squared distances |x_i-y_j|^2
s_i = np.argmin(D_ij, axis=1) # (M,) array of integer indices
print(s_i[:10])
###########################################################################
# That's good! Going further, we can speed up these computations
# using the **CUDA routines** of the PyTorch library:
import torch
use_cuda = torch.cuda.is_available()
tensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
x_i = tensor(x[:, None, :]) # (M, 1, 2) torch tensor
y_j = tensor(y[None, :, :]) # (1, N, 2) torch tensor
D_ij = ((x_i - y_j) ** 2).sum(-1) # (M, N) tensor of squared distances |x_i-y_j|^2
s_i = D_ij.argmin(dim=1) # (M,) tensor of integer indices
print(s_i[:10])
###########################################################################
# But **can we scale to larger point clouds?**
# Unfortunately, tensorized codes will throw an exception
# as soon as the **M-by-N matrix** :math:`(D_{i,j})` stops fitting
# contiguously on the device memory. This generally happens
# when :math:`\sqrt{MN}` goes past a hardware-dependent threshold
# in the [5,000; 50,000] range:
M, N = (100000, 200000) if use_cuda else (1000, 2000)
x = np.random.rand(M, 2)
y = np.random.rand(N, 2)
x_i = tensor(x[:, None, :]) # (M, 1, 2) torch tensor
y_j = tensor(y[None, :, :]) # (1, N, 2) torch tensor
try:
D_ij = ((x_i - y_j) ** 2).sum(-1) # (M, N) tensor of squared distances |x_i-y_j|^2
except RuntimeError as err:
print(err)
###########################################################################
# **That's unfortunate...** And unexpected!
# After all, modern GPUs routinely handle
# `the real-time rendering of scenes with millions of triangles moving around <https://www.youtube.com/watch?v=RaLQuJtQ-gc>`_.
# So how do `graphics <https://www.siggraph.org/>`_ programmers achieve such
# a level of performance?
#
# The key to efficient numerical schemes is to remark that
# even though the distance matrix :math:`(D_{i,j})` is not **sparse** in the
# traditional sense, it definitely is **compact from a computational perspective**.
# Since its coefficients are fully described by two lists of points
# and a **symbolic formula**, sensible implementations should
# compute required values on-the-fly...
# and bypass, **lazily**, the cumbersome pre-computation and storage
# of all pairwise distances :math:`\|x_i-y_j\|^2`.
#
#
from pykeops.numpy import LazyTensor as LazyTensor_np
x_i = LazyTensor_np(
x[:, None, :]
) # (M, 1, 2) KeOps LazyTensor, wrapped around the numpy array x
y_j = LazyTensor_np(
y[None, :, :]
) # (1, N, 2) KeOps LazyTensor, wrapped around the numpy array y
D_ij = ((x_i - y_j) ** 2).sum(-1) # **Symbolic** (M, N) matrix of squared distances
print(D_ij)
##########################################################
# With KeOps, implementing **lazy** numerical schemes really
# is **that simple**!
# Our :class:`LazyTensor<pykeops.torch.LazyTensor>` variables
# are encoded as a list of data arrays plus an arbitrary
# symbolic formula, written with a :doc:`custom mathematical syntax <../../api/math-operations>`
# that is modified after each "pythonic" operation such as ``-``, ``**2`` or ``.exp()``.
#
# We can then perform a :meth:`pykeops.torch.LazyTensor.argmin` reduction with
# an efficient Map-Reduce scheme, implemented
# as a `templated CUDA kernel <https://github.com/getkeops/keops/blob/main/keops/core/GpuConv1D.cu>`_ around
# our custom formula.
# As evidenced by our :doc:`benchmarks <../../_auto_benchmarks/index>`,
# the KeOps routines have a **linear memory footprint**
# and generally **outperform tensorized GPU implementations by two orders of magnitude**.
s_i = D_ij.argmin(dim=1).ravel() # genuine (M,) array of integer indices
print("s_i is now a {} of shape {}.".format(type(s_i), s_i.shape))
print(s_i[:10])
##########################################################
# Going further, we can combine :class:`LazyTensors <pykeops.torch.LazyTensor>`
# using a **wide range of mathematical operations**.
# For instance, with data arrays stored directly on the GPU,
# an exponential kernel dot product
#
# .. math::
# a_i = \sum_{j=1}^N \exp(-\|x_i-y_j\|)\cdot b_j
#
# in dimension D=10 can be performed with:
from pykeops.torch import LazyTensor
D = 10
x = torch.randn(M, D).type(tensor) # M target points in dimension D, stored on the GPU
y = torch.randn(N, D).type(tensor) # N source points in dimension D, stored on the GPU
b = torch.randn(N, 4).type(
tensor
) # N values of the 4D source signal, stored on the GPU
x.requires_grad = True # In the next section, we'll compute gradients wrt. x!
x_i = LazyTensor(x[:, None, :]) # (M, 1, D) LazyTensor
y_j = LazyTensor(y[None, :, :]) # (1, N, D) LazyTensor
D_ij = ((x_i - y_j) ** 2).sum(-1).sqrt() # Symbolic (M, N) matrix of distances
K_ij = (-D_ij).exp() # Symbolic (M, N) Laplacian (aka. exponential) kernel matrix
a_i = K_ij @ b # The matrix-vector product "@" can be used on "raw" PyTorch tensors!
print("a_i is now a {} of shape {}.".format(type(a_i), a_i.shape))
#############################################################################
#
# .. note::
# KeOps LazyTensors have two symbolic or
# "virtual" axes at positions -3 and -2.
# Operations on the last "vector" dimension (-1)
# or on optional "batch" dimensions (-4 and beyond)
# are evaluated **lazily**.
# On the other hand, a reduction on one of the two symbolic axes
# (-2 or -3) triggers an **explicit computation**:
# we return a standard dense array with no symbolic axes.
#
#
#
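# For instance (a small sketch, reusing the variables defined above), summing
# our kernel matrix over its symbolic "j" axis returns a genuine, dense
# PyTorch tensor:
row_sums = K_ij.sum(dim=1)  # reduction over a symbolic axis -> dense (M, 1) tensor
print("row_sums is now a {} of shape {}.".format(type(row_sums), row_sums.shape))
#############################################################################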
# Automatic differentiation
# -----------------------------------------------
#
# KeOps
# **fully supports** the :mod:`torch.autograd` engine:
# we can backprop through KeOps reductions as easily as through
# vanilla PyTorch operations.
# For instance, coming back to the kernel dot product above,
# we can compute the gradient
#
# .. math::
# g_i ~=~ \frac{\partial \sum_i \|a_i\|^2}{\partial x_i}
#
# with:
[g_i] = torch.autograd.grad((a_i**2).sum(), [x], create_graph=True)
print("g_i is now a {} of shape {}.".format(type(g_i), g_i.shape))
#############################################################################
# As usual with PyTorch, having set the ``create_graph=True`` option
# allows us to compute higher-order derivatives as needed:
[h_i] = torch.autograd.grad(g_i.exp().sum(), [x], create_graph=True)
print("h_i is now a {} of shape {}.".format(type(h_i), h_i.shape))
############################################################################
# .. warning::
# As of today, backpropagation is **not supported** through
# the :meth:`pykeops.torch.LazyTensor.min`, :meth:`pykeops.torch.LazyTensor.max` or :meth:`pykeops.torch.LazyTensor.Kmin` reductions:
# we're working on it, but are not there just yet.
# Until then, a simple workaround is to use
# the indices computed by the
# :meth:`pykeops.torch.LazyTensor.argmin`, :meth:`pykeops.torch.LazyTensor.argmax` or :meth:`pykeops.torch.LazyTensor.argKmin`
# reductions to define a fully differentiable PyTorch tensor as we now explain.
#
# Coming back to our example about nearest neighbors in the unit cube:
x = torch.randn(M, 3).type(tensor)
y = torch.randn(N, 3).type(tensor)
x.requires_grad = True
x_i = LazyTensor(x[:, None, :]) # (M, 1, 3) LazyTensor
y_j = LazyTensor(y[None, :, :]) # (1, N, 3) LazyTensor
D_ij = ((x_i - y_j) ** 2).sum(-1) # Symbolic (M, N) matrix of squared distances
#####################################################################
# We could compute the ``(M,)`` vector of squared distances to the **nearest y-neighbor** with:
to_nn = D_ij.min(dim=1).view(-1)
################################################################
# But instead, using:
s_i = D_ij.argmin(dim=1).view(-1) # (M,) integer Torch tensor
to_nn_alt = ((x - y[s_i, :]) ** 2).sum(-1)
##########################################################
# outputs the same result, while also allowing us to **compute arbitrary gradients**:
print(
"Difference between the two vectors: {:.2e}".format((to_nn - to_nn_alt).abs().max())
)
[g_i] = torch.autograd.grad(to_nn_alt.sum(), [x])
print("g_i is now a {} of shape {}.".format(type(g_i), g_i.shape))
###########################################################
# The only real downside here is that we had to write the
# "squared distance" formula that specifies our computation **twice**.
# We hope to fix this (minor) inconvenience sooner rather than later!
#
#############################################################################
# Batch processing
# -----------------------------------------------
#
# As should be expected, :class:`LazyTensors<pykeops.torch.LazyTensor>`
# also provide full support of **batch processing**,
# with broadcasting over dummy (=1) batch dimensions:
A, B = 7, 3 # Batch dimensions
x_i = LazyTensor(torch.randn(A, B, M, 1, D))
l_i = LazyTensor(torch.randn(1, 1, M, 1, D))
y_j = LazyTensor(torch.randn(1, B, 1, N, D))
s = LazyTensor(torch.rand(A, 1, 1, 1, 1))
D_ij = ((l_i * x_i - y_j) ** 2).sum(-1) # Symbolic (A, B, M, N, 1) LazyTensor
K_ij = -1.6 * D_ij / (1 + s**2) # Some arbitrary (A, B, M, N, 1) Kernel matrix
a_i = K_ij.sum(dim=3)
print("a_i is now a {} of shape {}.".format(type(a_i), a_i.shape))
##################################################################
# Everything works just fine, with two major caveats:
#
# - The structure of KeOps computations is still a little bit **rigid**:
# :class:`LazyTensors<pykeops.torch.LazyTensor>` should only
# be used in situations where the **large** dimensions M and N
# over which the main reduction
# is performed are in positions
# -3 and -2 (respectively), with **vector** variables in position
# -1 and an arbitrary number of batch dimensions beforehand.
# We're working towards a full support of **tensor** variables,
# but this will probably take some time to implement and test properly...
#
# - KeOps :class:`LazyTensors<pykeops.torch.LazyTensor>` never collapse
# their last "dimension", even after a ``.sum(-1)`` reduction
# whose **keepdim** argument is implicitly set to **True**.
print("Convenient, numpy-friendly shape: ", K_ij.shape)
print("Actual shape, used internally by KeOps: ", K_ij._shape)
##################################################################
# This is the reason why in the example above,
# **a_i** is a 4D Tensor of shape ``(7, 3, 1000, 1)`` and **not**
# a 3D Tensor of shape ``(7, 3, 1000)``.
#
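# If a plain 3D tensor is what you are after, a standard ``squeeze`` on the
# dense result does the trick (a small sketch, reusing **a_i** from above):
a_i_3d = a_i.squeeze(-1)  # (A, B, M) dense tensor, dummy last axis removed
print("a_i_3d is now a {} of shape {}.".format(type(a_i_3d), a_i_3d.shape))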
#############################################################################
# Supported formulas
# ------------------------------------
#
# The full range of mathematical operations supported by
# :class:`LazyTensors<pykeops.torch.LazyTensor>` is described
# in our API documentation.
# Let's just mention that the lines below define valid computations:
#
x_i = LazyTensor(torch.randn(A, B, M, 1, D))
l_i = LazyTensor(torch.randn(1, 1, M, 1, D))
y_j = LazyTensor(torch.randn(1, B, 1, N, D))
s = LazyTensor(torch.rand(A, 1, 1, 1, 1))
F_ij = (
(x_i**1.5 + y_j / l_i).cos() - (x_i | y_j) + (x_i[:, :, :, :, 2] * s.relu() * y_j)
)
print(F_ij)
a_j = F_ij.sum(dim=2)
print("a_j is now a {} of shape {}.".format(type(a_j), a_j.shape))
#############################################################################
# Enjoy! And feel free to check the next tutorial for a discussion
# of the varied reduction operations that can be applied to
# KeOps :class:`LazyTensors<pykeops.torch.LazyTensor>`.
|
ccc0d50a13ca967957094fed4c3563929ed6e0f3
|
0ec2b0a2caf2cc5e0ec2bbb89aefc10fc5c63047
|
/tests/test_datetimes.py
|
30fa3b026c833059cef5d18ad0d6f2b6dd78defc
|
[
"Apache-2.0"
] |
permissive
|
spulec/freezegun
|
3d8406eaf4c3ffa72f884162a278a332eaee4a94
|
4f4496380deefceead7bef23bccaca17c2bdecfa
|
refs/heads/master
| 2023-08-30T22:29:16.153344
| 2023-02-22T05:49:22
| 2023-02-22T05:49:22
| 7,106,250
| 3,480
| 294
|
Apache-2.0
| 2023-08-24T21:19:36
| 2012-12-11T05:11:00
|
Python
|
UTF-8
|
Python
| false
| false
| 27,795
|
py
|
test_datetimes.py
|
import time
import calendar
import datetime
import unittest
import locale
import sys
from unittest import SkipTest
from dateutil.tz import UTC
import pytest
from tests import utils
from freezegun import freeze_time
from freezegun.api import FakeDatetime, FakeDate
try:
import maya
except ImportError:
maya = None
# time.clock was removed in Python 3.8
HAS_CLOCK = hasattr(time, 'clock')
HAS_TIME_NS = hasattr(time, 'time_ns')
HAS_MONOTONIC_NS = hasattr(time, 'monotonic_ns')
HAS_PERF_COUNTER_NS = hasattr(time, 'perf_counter_ns')
class temp_locale:
"""Temporarily change the locale."""
def __init__(self, *targets):
self.targets = targets
def __enter__(self):
self.old = locale.setlocale(locale.LC_ALL)
for target in self.targets:
try:
locale.setlocale(locale.LC_ALL, target)
return
except locale.Error:
pass
msg = 'could not set locale to any of: %s' % ', '.join(self.targets)
raise SkipTest(msg)
def __exit__(self, *args):
locale.setlocale(locale.LC_ALL, self.old)
# Small sample of locales where '%x' expands to a dd/mm/yyyy string,
# which can cause trouble when parsed with dateutil.
_dd_mm_yyyy_locales = ['da_DK.UTF-8', 'de_DE.UTF-8', 'fr_FR.UTF-8']
def test_simple_api():
# time to freeze is always provided in UTC
freezer = freeze_time("2012-01-14")
# expected timestamp must be a timestamp, corresponding to 2012-01-14 UTC
local_time = datetime.datetime(2012, 1, 14)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert time.time() == expected_timestamp
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14)
assert datetime.date.today() == datetime.date(2012, 1, 14)
assert datetime.datetime.now().today() == datetime.datetime(2012, 1, 14)
freezer.stop()
assert time.time() != expected_timestamp
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
assert datetime.datetime.now() != datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() != datetime.datetime(2012, 1, 14)
freezer = freeze_time("2012-01-10 13:52:01")
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 10, 13, 52, 1)
freezer.stop()
def test_tz_offset():
freezer = freeze_time("2012-01-14 03:21:34", tz_offset=-4)
# expected timestamp must be a timestamp,
# corresponding to 2012-01-14 03:21:34 UTC
# and it doesn't depend on tz_offset
local_time = datetime.datetime(2012, 1, 14, 3, 21, 34)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 13, 23, 21, 34)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14, 3, 21, 34)
assert time.time() == expected_timestamp
freezer.stop()
def test_timedelta_tz_offset():
freezer = freeze_time("2012-01-14 03:21:34",
tz_offset=-datetime.timedelta(hours=3, minutes=30))
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 13, 23, 51, 34)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14, 3, 21, 34)
freezer.stop()
def test_tz_offset_with_today():
freezer = freeze_time("2012-01-14", tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(2012, 1, 13)
freezer.stop()
assert datetime.date.today() != datetime.date(2012, 1, 13)
def test_zero_tz_offset_with_time():
# we expect the system to behave like a system with UTC timezone
# at the beginning of the Epoch
freezer = freeze_time('1970-01-01')
freezer.start()
assert datetime.date.today() == datetime.date(1970, 1, 1)
assert datetime.datetime.now() == datetime.datetime(1970, 1, 1)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
assert time.monotonic() >= 0.0
assert time.perf_counter() >= 0.0
freezer.stop()
def test_tz_offset_with_time():
# we expect the system to behave like a system with UTC-4 timezone
# at the beginning of the Epoch (wall clock should be 4 hrs late)
freezer = freeze_time('1970-01-01', tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(1969, 12, 31)
assert datetime.datetime.now() == datetime.datetime(1969, 12, 31, 20)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
assert time.monotonic() >= 0
assert time.perf_counter() >= 0
freezer.stop()
def test_time_with_microseconds():
freezer = freeze_time(datetime.datetime(1970, 1, 1, 0, 0, 1, 123456))
freezer.start()
assert time.time() == 1.123456
freezer.stop()
def test_time_with_dst():
freezer = freeze_time(datetime.datetime(1970, 6, 1, 0, 0, 1, 123456))
freezer.start()
assert time.time() == 13046401.123456
freezer.stop()
def test_manual_increment():
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
frozen_datetime.tick()
initial_datetime += datetime.timedelta(seconds=1)
assert frozen_datetime() == initial_datetime
frozen_datetime.tick(delta=datetime.timedelta(seconds=10))
initial_datetime += datetime.timedelta(seconds=10)
assert frozen_datetime() == initial_datetime
def test_manual_increment_seconds():
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
frozen_datetime.tick()
initial_datetime += datetime.timedelta(seconds=1)
assert frozen_datetime() == initial_datetime
frozen_datetime.tick(10)
initial_datetime += datetime.timedelta(seconds=10)
assert frozen_datetime() == initial_datetime
def test_move_to():
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
other_datetime = datetime.datetime(year=2, month=8, day=13,
hour=14, minute=5, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
assert frozen_datetime() == initial_datetime
frozen_datetime.move_to(other_datetime)
assert frozen_datetime() == other_datetime
frozen_datetime.move_to(initial_datetime)
assert frozen_datetime() == initial_datetime
def test_bad_time_argument():
try:
freeze_time("2012-13-14", tz_offset=-4)
except ValueError:
pass
else:
assert False, "Bad values should raise a ValueError"
@pytest.mark.parametrize("func_name, has_func, tick_size", (
("monotonic", True, 1.0),
("monotonic_ns", HAS_MONOTONIC_NS, int(1e9)),
("perf_counter", True, 1.0),
("perf_counter_ns", HAS_PERF_COUNTER_NS, int(1e9)),)
)
def test_time_monotonic(func_name, has_func, tick_size):
initial_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
if not has_func:
pytest.skip("%s does not exist in current version" % func_name)
with freeze_time(initial_datetime) as frozen_datetime:
func = getattr(time, func_name)
t0 = func()
frozen_datetime.tick()
t1 = func()
assert t1 == t0 + tick_size
frozen_datetime.tick(10)
t11 = func()
assert t11 == t1 + 10 * tick_size
def test_time_gmtime():
with freeze_time('2012-01-14 03:21:34'):
time_struct = time.gmtime()
assert time_struct.tm_year == 2012
assert time_struct.tm_mon == 1
assert time_struct.tm_mday == 14
assert time_struct.tm_hour == 3
assert time_struct.tm_min == 21
assert time_struct.tm_sec == 34
assert time_struct.tm_wday == 5
assert time_struct.tm_yday == 14
assert time_struct.tm_isdst == -1
@pytest.mark.skipif(not HAS_CLOCK,
reason="time.clock was removed in Python 3.8")
def test_time_clock():
with freeze_time('2012-01-14 03:21:34'):
assert time.clock() == 0
with freeze_time('2012-01-14 03:21:35'):
assert time.clock() == 1
with freeze_time('2012-01-14 03:21:36'):
assert time.clock() == 2
class modify_timezone:
def __init__(self, new_timezone):
self.new_timezone = new_timezone
self.original_timezone = time.timezone
def __enter__(self):
time.timezone = self.new_timezone
def __exit__(self, *args):
time.timezone = self.original_timezone
def test_time_localtime():
with modify_timezone(-3600): # -3600 seconds west of UTC, i.e. local time is UTC+1
with freeze_time('2012-01-14 03:21:34'):
time_struct = time.localtime()
assert time_struct.tm_year == 2012
assert time_struct.tm_mon == 1
assert time_struct.tm_mday == 14
assert time_struct.tm_hour == 4 # offset of 1 hour due to time zone
assert time_struct.tm_min == 21
assert time_struct.tm_sec == 34
assert time_struct.tm_wday == 5
assert time_struct.tm_yday == 14
assert time_struct.tm_isdst == -1
assert time.localtime().tm_year != 2012
def test_strftime():
with modify_timezone(0):
with freeze_time('1970-01-01'):
assert time.strftime("%Y") == "1970"
def test_real_strftime_fall_through():
this_real_year = datetime.datetime.now().year
with freeze_time():
assert time.strftime('%Y') == str(this_real_year)
assert time.strftime('%Y', (2001, 1, 1, 1, 1, 1, 1, 1, 1)) == '2001'
def test_date_object():
frozen_date = datetime.date(year=2012, month=11, day=10)
date_freezer = freeze_time(frozen_date)
regular_freezer = freeze_time('2012-11-10')
assert date_freezer.time_to_freeze == regular_freezer.time_to_freeze
def test_old_date_object():
frozen_date = datetime.date(year=1, month=1, day=1)
with freeze_time(frozen_date):
assert datetime.date.today() == frozen_date
def test_date_with_locale():
with temp_locale(*_dd_mm_yyyy_locales):
frozen_date = datetime.date(year=2012, month=1, day=2)
date_freezer = freeze_time(frozen_date)
assert date_freezer.time_to_freeze.date() == frozen_date
def test_invalid_type():
try:
freeze_time(int(4))
except TypeError:
pass
else:
assert False, "Bad types should raise a TypeError"
def test_datetime_object():
frozen_datetime = datetime.datetime(year=2012, month=11, day=10,
hour=4, minute=15, second=30)
datetime_freezer = freeze_time(frozen_datetime)
regular_freezer = freeze_time('2012-11-10 04:15:30')
assert datetime_freezer.time_to_freeze == regular_freezer.time_to_freeze
def test_function_object():
frozen_datetime = datetime.datetime(year=2012, month=11, day=10,
hour=4, minute=15, second=30)
def function(): return frozen_datetime
with freeze_time(function):
assert frozen_datetime == datetime.datetime.now()
def test_lambda_object():
frozen_datetime = datetime.datetime(year=2012, month=11, day=10,
hour=4, minute=15, second=30)
with freeze_time(lambda: frozen_datetime):
assert frozen_datetime == datetime.datetime.now()
def test_generator_object():
frozen_datetimes = (datetime.datetime(year=y, month=1, day=1)
for y in range(2010, 2012))
with freeze_time(frozen_datetimes):
assert datetime.datetime(2010, 1, 1) == datetime.datetime.now()
with freeze_time(frozen_datetimes):
assert datetime.datetime(2011, 1, 1) == datetime.datetime.now()
with pytest.raises(StopIteration):
freeze_time(frozen_datetimes)
def test_maya_datetimes():
if not maya:
raise SkipTest("maya is optional since it's not supported for "
"enough python versions")
with freeze_time(maya.when("October 2nd, 1997")):
assert datetime.datetime.now() == datetime.datetime(
year=1997,
month=10,
day=2
)
def test_old_datetime_object():
frozen_datetime = datetime.datetime(year=1, month=7, day=12,
hour=15, minute=6, second=3)
with freeze_time(frozen_datetime):
assert datetime.datetime.now() == frozen_datetime
def test_datetime_with_locale():
with temp_locale(*_dd_mm_yyyy_locales):
frozen_datetime = datetime.datetime(year=2012, month=1, day=2)
date_freezer = freeze_time(frozen_datetime)
assert date_freezer.time_to_freeze == frozen_datetime
@freeze_time("2012-01-14")
def test_decorator():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
def test_decorator_wrapped_attribute():
def to_decorate():
pass
wrapped = freeze_time("2014-01-14")(to_decorate)
assert wrapped.__wrapped__ is to_decorate
class Callable:
def __call__(self, *args, **kws):
return (args, kws)
@freeze_time("2012-01-14")
class Tester:
def test_the_class(self):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
def test_still_the_same(self):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
def test_class_name_preserved_by_decorator(self):
assert self.__class__.__name__ == "Tester"
class NotATestClass:
def perform_operation(self):
return datetime.date.today()
@freeze_time('2001-01-01')
def test_class_decorator_ignores_nested_class(self):
not_a_test = self.NotATestClass()
assert not_a_test.perform_operation() == datetime.date(2001, 1, 1)
a_mock = Callable()
def test_class_decorator_wraps_callable_object_py3(self):
assert self.a_mock.__wrapped__.__class__ == Callable
@staticmethod
def helper():
return datetime.date.today()
def test_class_decorator_respects_staticmethod(self):
assert self.helper() == datetime.date(2012, 1, 14)
@freeze_time("Jan 14th, 2012")
def test_nice_datetime():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
@freeze_time("2012-01-14")
def test_datetime_date_method():
now = datetime.datetime.now()
assert now.date() == FakeDate(2012, 1, 14)
def test_context_manager():
with freeze_time("2012-01-14"):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
assert datetime.datetime.now() != datetime.datetime(2012, 1, 14)
def test_nested_context_manager():
with freeze_time("2012-01-14"):
with freeze_time("2012-12-25"):
_assert_datetime_date_and_time_are_all_equal(datetime.datetime(2012, 12, 25))
_assert_datetime_date_and_time_are_all_equal(datetime.datetime(2012, 1, 14))
assert datetime.datetime.now() > datetime.datetime(2013, 1, 1)
def _assert_datetime_date_and_time_are_all_equal(expected_datetime):
assert datetime.datetime.now() == expected_datetime
assert datetime.date.today() == expected_datetime.date()
assert datetime.datetime.fromtimestamp(time.time()) == expected_datetime
def test_nested_context_manager_with_tz_offsets():
with freeze_time("2012-01-14 23:00:00", tz_offset=2):
with freeze_time("2012-12-25 19:00:00", tz_offset=6):
assert datetime.datetime.now() == datetime.datetime(2012, 12, 26, 1)
assert datetime.date.today() == datetime.date(2012, 12, 26)
# no assertion for time.time() since it's not affected by tz_offset
assert datetime.datetime.now() == datetime.datetime(2012, 1, 15, 1)
assert datetime.date.today() == datetime.date(2012, 1, 15)
assert datetime.datetime.now() > datetime.datetime(2013, 1, 1)
@freeze_time("Jan 14th, 2012")
def test_isinstance_with_active():
now = datetime.datetime.now()
assert utils.is_fake_datetime(now)
assert utils.is_fake_date(now.date())
today = datetime.date.today()
assert utils.is_fake_date(today)
def test_isinstance_without_active():
now = datetime.datetime.now()
assert isinstance(now, datetime.datetime)
assert isinstance(now, datetime.date)
assert isinstance(now.date(), datetime.date)
today = datetime.date.today()
assert isinstance(today, datetime.date)
class TestUnitTestMethodDecorator(unittest.TestCase):
@freeze_time('2013-04-09')
def test_method_decorator_works_on_unittest(self):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
@freeze_time('2013-04-09', as_kwarg='frozen_time')
def test_method_decorator_works_on_unittest_kwarg_frozen_time(self, frozen_time):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
self.assertEqual(datetime.date(2013, 4, 9), frozen_time.time_to_freeze.today())
@freeze_time('2013-04-09', as_kwarg='hello')
def test_method_decorator_works_on_unittest_kwarg_hello(self, **kwargs):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
self.assertEqual(datetime.date(2013, 4, 9), kwargs.get('hello').time_to_freeze.today())
@freeze_time(lambda: datetime.date(year=2013, month=4, day=9), as_kwarg='frozen_time')
def test_method_decorator_works_on_unittest_kwarg_frozen_time_with_func(self, frozen_time):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
self.assertEqual(datetime.date(2013, 4, 9), frozen_time.time_to_freeze.today())
@freeze_time('2013-04-09')
class TestUnitTestClassDecorator(unittest.TestCase):
@classmethod
def setUpClass(cls):
assert datetime.date(2013, 4, 9) == datetime.date.today()
def setUp(self):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
def tearDown(self):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
@classmethod
def tearDownClass(cls):
assert datetime.date(2013, 4, 9) == datetime.date.today()
def test_class_decorator_works_on_unittest(self):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
def test_class_name_preserved_by_decorator(self):
self.assertEqual(self.__class__.__name__, "TestUnitTestClassDecorator")
@freeze_time('2013-04-09')
class TestUnitTestClassDecoratorWithNoSetUpOrTearDown(unittest.TestCase):
def test_class_decorator_works_on_unittest(self):
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
class TestUnitTestClassDecoratorSubclass(TestUnitTestClassDecorator):
@classmethod
def setUpClass(cls):
# the super() call can fail if the class decoration was done wrong
super().setUpClass()
@classmethod
def tearDownClass(cls):
# the super() call can fail if the class decoration was done wrong
super().tearDownClass()
def test_class_name_preserved_by_decorator(self):
self.assertEqual(self.__class__.__name__,
"TestUnitTestClassDecoratorSubclass")
class BaseInheritanceFreezableTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
class UnfrozenInheritedTests(BaseInheritanceFreezableTests):
def test_time_is_not_frozen(self):
# In this class, time should not be frozen - and the below decorated
# class shouldn't affect that
self.assertNotEqual(datetime.date(2013, 4, 9), datetime.date.today())
@freeze_time('2013-04-09')
class FrozenInheritedTests(BaseInheritanceFreezableTests):
def test_time_is_frozen(self):
# In this class, time should be frozen
self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today())
class TestOldStyleClasses:
def test_direct_method(self):
# Make sure old style classes (not inheriting from object) are supported
@freeze_time('2013-04-09')
class OldStyleClass:
def method(self):
return datetime.date.today()
assert OldStyleClass().method() == datetime.date(2013, 4, 9)
def test_inherited_method(self):
class OldStyleBaseClass:
def inherited_method(self):
return datetime.date.today()
@freeze_time('2013-04-09')
class OldStyleClass(OldStyleBaseClass):
pass
assert OldStyleClass().inherited_method() == datetime.date(2013, 4, 9)
def test_min_and_max():
freezer = freeze_time("2012-01-14")
real_datetime = datetime.datetime
real_date = datetime.date
freezer.start()
assert datetime.datetime.min.__class__ == FakeDatetime
assert datetime.datetime.max.__class__ == FakeDatetime
assert datetime.date.min.__class__ == FakeDate
assert datetime.date.max.__class__ == FakeDate
assert datetime.datetime.min.__class__ != real_datetime
assert datetime.datetime.max.__class__ != real_datetime
assert datetime.date.min.__class__ != real_date
assert datetime.date.max.__class__ != real_date
freezer.stop()
assert datetime.datetime.min.__class__ == datetime.datetime
assert datetime.datetime.max.__class__ == datetime.datetime
assert datetime.date.min.__class__ == datetime.date
assert datetime.date.max.__class__ == datetime.date
assert datetime.datetime.min.__class__ != FakeDatetime
assert datetime.datetime.max.__class__ != FakeDatetime
assert datetime.date.min.__class__ != FakeDate
assert datetime.date.max.__class__ != FakeDate
@freeze_time("2014-07-30T01:00:00Z")
def test_freeze_with_timezone_aware_datetime_in_utc():
"""
utcnow() should always return a timezone naive datetime
"""
utc_now = datetime.datetime.utcnow()
assert utc_now.tzinfo is None
@freeze_time("1970-01-01T00:00:00-04:00")
def test_freeze_with_timezone_aware_datetime_in_non_utc():
"""
we expect the system to behave like a system with UTC-4 timezone
at the beginning of the Epoch (wall clock should be 4 hrs late)
"""
utc_now = datetime.datetime.utcnow()
assert utc_now.tzinfo is None
assert utc_now == datetime.datetime(1970, 1, 1, 4)
@freeze_time('2015-01-01')
def test_time_with_nested():
from time import time
first = 1420070400.0
second = 1420070760.0
assert time() == first
with freeze_time('2015-01-01T00:06:00'):
assert time() == second
@pytest.mark.parametrize("func_name",
("monotonic", "perf_counter")
)
def test_monotonic_with_nested(func_name):
__import__("time", fromlist=[func_name])
invoke_time_func = lambda: getattr(time, func_name)()
with freeze_time('2015-01-01') as frozen_datetime_1:
initial_t1 = invoke_time_func()
with freeze_time('2015-12-25') as frozen_datetime_2:
initial_t2 = invoke_time_func()
frozen_datetime_2.tick()
assert invoke_time_func() == initial_t2 + 1
assert invoke_time_func() == initial_t1
frozen_datetime_1.tick()
assert invoke_time_func() == initial_t1 + 1
def test_should_use_real_time():
frozen = datetime.datetime(2015, 3, 5)
expected_frozen = 1425513600.0
# TODO: local time seems to leak the local timezone, so this test fails in CI
# expected_frozen_local = (2015, 3, 5, 1, 0, 0, 3, 64, -1)
expected_frozen_gmt = (2015, 3, 5, 0, 0, 0, 3, 64, -1)
expected_clock = 0
from freezegun import api
api.call_stack_inspection_limit = 100 # just to increase coverage
timestamp_to_convert = 1579602312
time_tuple = time.gmtime(timestamp_to_convert)
with freeze_time(frozen):
assert time.time() == expected_frozen
# assert time.localtime() == expected_frozen_local
assert time.gmtime() == expected_frozen_gmt
if HAS_CLOCK:
assert time.clock() == expected_clock
if HAS_TIME_NS:
assert time.time_ns() == expected_frozen * 1e9
assert calendar.timegm(time.gmtime()) == expected_frozen
assert calendar.timegm(time_tuple) == timestamp_to_convert
with freeze_time(frozen, ignore=['_pytest']):
assert time.time() != expected_frozen
# assert time.localtime() != expected_frozen_local
assert time.gmtime() != expected_frozen_gmt
if HAS_CLOCK:
assert time.clock() != expected_clock
if HAS_TIME_NS:
assert time.time_ns() != expected_frozen * 1e9
assert calendar.timegm(time.gmtime()) != expected_frozen
assert calendar.timegm(time_tuple) == timestamp_to_convert
@pytest.mark.skipif(not HAS_TIME_NS,
reason="time.time_ns is present only on 3.7 and above")
def test_time_ns():
freezer = freeze_time("2012-01-14")
local_time = datetime.datetime(2012, 1, 14)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert time.time() == expected_timestamp
assert time.time_ns() == expected_timestamp * 1e9
freezer.stop()
assert time.time() != expected_timestamp
assert time.time_ns() != expected_timestamp * 1e9
def test_compare_datetime_and_time_with_timezone(monkeypatch):
"""
Compare the result of datetime.datetime.now() and time.time() in a non-UTC timezone. These
should be consistent.
"""
try:
with monkeypatch.context() as m, freeze_time("1970-01-01 00:00:00"):
m.setenv("TZ", "Europe/Berlin")
time.tzset()
now = datetime.datetime.now()
assert now == datetime.datetime.fromtimestamp(time.time())
assert now == datetime.datetime.utcfromtimestamp(time.time())
assert now == datetime.datetime.utcnow()
assert now.timestamp() == time.time()
finally:
time.tzset() # set the timezone back to what it was before
def test_timestamp_with_tzoffset():
with freeze_time("2000-01-01", tz_offset=6):
utcnow = datetime.datetime(2000, 1, 1, 0)
nowtz = datetime.datetime(2000, 1, 1, 0, tzinfo=UTC)
now = datetime.datetime(2000, 1, 1, 6)
assert now == datetime.datetime.now()
assert now == datetime.datetime.fromtimestamp(time.time())
assert now.timestamp() == time.time()
assert nowtz.timestamp() == time.time()
assert utcnow == datetime.datetime.utcfromtimestamp(time.time())
assert utcnow == datetime.datetime.utcnow()
@pytest.mark.skip("timezone handling is currently incorrect")
def test_datetime_in_timezone(monkeypatch):
"""
It is assumed that the argument passed to freeze_time is in UTC, unless explicitly indicated
otherwise. Therefore datetime.now() should return the frozen time with an offset.
"""
try:
with monkeypatch.context() as m, freeze_time("1970-01-01 00:00:00"):
m.setenv("TZ", "Europe/Berlin")
time.tzset()
assert datetime.datetime.now() == datetime.datetime(1970, 1, 1, 1, 0, 0)
finally:
time.tzset() # set the timezone back to what it was before
|
d69c09e262e76661b35b5ac37b500ce3330d8db3
|
8ef61b6b4cd0c533524d23c3e77a9caf70077023
|
/mnamer/tty.py
|
e9aa2db47d323e72db8073f83eec974a8733ec21
|
[
"MIT"
] |
permissive
|
jkwill87/mnamer
|
73e68d27d184193218605484a0db1c935f256ca6
|
82da0712ab99e9d80c60adda3db6a3225fdf49b4
|
refs/heads/main
| 2023-08-30T21:48:34.567239
| 2023-08-15T19:31:04
| 2023-08-15T19:31:04
| 107,178,928
| 656
| 69
|
MIT
| 2023-09-11T15:02:41
| 2017-10-16T20:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,192
|
py
|
tty.py
|
"""Provides an interface for handling user input and printing output."""
import traceback
from typing import Any, Callable
from teletype import codes
from teletype.components import ChoiceHelper, SelectOne
from teletype.io import style_format, style_print
from mnamer.const import SYSTEM
from mnamer.exceptions import MnamerAbortException, MnamerException, MnamerSkipException
from mnamer.language import Language
from mnamer.metadata import Metadata
from mnamer.setting_store import SettingStore
from mnamer.types import MessageType
from mnamer.utils import format_dict, format_exception, format_iter
no_style: bool = False
verbose: bool = False
def _chars() -> dict[str, str]:
if no_style:
chars = codes.CHARS_ASCII
else:
chars = codes.CHARS_DEFAULT
chars["arrow"] = style_format(chars["arrow"], "magenta")
return chars
def _abort_helpers() -> (
tuple[ChoiceHelper[MnamerSkipException], ChoiceHelper[MnamerAbortException]]
):
if no_style:
style = None
skip_mnemonic = "[s]"
quit_mnemonic = "[q]"
else:
style = "dark"
skip_mnemonic = "s"
quit_mnemonic = "q"
return (
ChoiceHelper(MnamerSkipException(), "skip", style, skip_mnemonic),
ChoiceHelper(MnamerAbortException(), "quit", style, quit_mnemonic),
)
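# Convert an arbitrary message body into a printable string, using a
# type-specific formatter (dict / iterable / exception) when one is registered.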
def _msg_format(body: Any):
converter_map: dict[type, Callable] = {
dict: format_dict,
list: format_iter,
tuple: format_iter,
set: format_iter,
MnamerException: format_exception,
}
converter: Callable | None = converter_map.get(type(body), str)
if converter:
body = converter(body)
else:
body = getattr(body, "value", body)
return body
def configure(settings: SettingStore):
"""Sets class variables using a settings instance."""
global verbose, no_style
verbose = settings.verbose
no_style = settings.no_style
def msg(
body: Any,
message_type: MessageType = MessageType.INFO,
debug: bool = False,
):
if debug and not verbose:
return
if no_style:
print(_msg_format(body))
else:
style_print(_msg_format(body), style=message_type.value)
def error(body: Any):
msg(body, message_type=MessageType.ERROR, debug=False)
def metadata_prompt(matches: list[Metadata]) -> Metadata: # pragma: no cover
"""Prompts user to choose a match from a list of matches."""
msg("select match")
selector = SelectOne([*matches, *_abort_helpers()], **_chars())
choice = selector.prompt()
if isinstance(choice, (MnamerAbortException, MnamerSkipException)):
raise choice
return choice
def metadata_guess(metadata: Metadata) -> Metadata: # pragma: no cover
"""Prompts user to confirm a single match."""
label = str(metadata)
if no_style:
label += " (best guess)"
else:
label += style_format(" (best guess)", "blue")
option = ChoiceHelper(metadata, label)
selector = SelectOne([option, *_abort_helpers()], **_chars())
choice = selector.prompt()
if isinstance(choice, (MnamerAbortException, MnamerSkipException)):
raise choice
else:
return choice
def subtitle_prompt() -> Language:
msg("select language")
choices = [ChoiceHelper(language, language.name) for language in Language.all()]
selector = SelectOne([*choices, *_abort_helpers()], **_chars())
choice = selector.prompt()
if isinstance(choice, (MnamerAbortException, MnamerSkipException)):
raise choice
else:
return choice
def crash_report(): # pragma: no cover
s = f"""
============================== CRASH REPORT BEGIN ==============================
--------------------------------- environment ----------------------------------
{_msg_format(SYSTEM)}
--------------------------------- stack trace ----------------------------------
{traceback.format_exc()}
=============================== CRASH REPORT END ===============================
Dang, it looks like mnamer crashed! Please consider filing an issue at
https://github.com/jkwill87/mnamer/issues along with this report.
"""
print(s)
raise SystemExit(1)
|
8d92255c290faad2206aac7634cc7fd8d2d3fe5c
|
d3031d50874a592cca6bbd6296a7f8aea734e48f
|
/Tests/test_file_wal.py
|
4be46e9d673f1b8d93fa7ffcc7a1d31f162fe108
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND"
] |
permissive
|
python-pillow/Pillow
|
97abc2f04646ae702aca971d59c738a8239a0053
|
601324bf8915a6180f5616c817e63e2e7816b892
|
refs/heads/main
| 2023-09-03T22:55:27.094813
| 2023-09-02T04:20:46
| 2023-09-02T04:20:46
| 5,171,600
| 10,807
| 2,470
|
NOASSERTION
| 2023-09-14T13:41:58
| 2012-07-24T21:38:39
|
Python
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
test_file_wal.py
|
from PIL import WalImageFile
from .helper import assert_image_equal_tofile
TEST_FILE = "Tests/images/hopper.wal"
def test_open():
with WalImageFile.open(TEST_FILE) as im:
assert im.format == "WAL"
assert im.format_description == "Quake2 Texture"
assert im.mode == "P"
assert im.size == (128, 128)
assert isinstance(im, WalImageFile.WalImageFile)
assert_image_equal_tofile(im, "Tests/images/hopper_wal.png")
def test_load():
with WalImageFile.open(TEST_FILE) as im:
assert im.load()[0, 0] == 122
# Test again now that it has already been loaded once
assert im.load()[0, 0] == 122
|
2eb20f8cfc37f505abdf4e9b12c5cf48b880359b
|
99199db3f78a344e72b281c71c690518ae07375a
|
/octavia/amphorae/backends/agent/api_server/certificate_update.py
|
79510a13d24ba241a9d15975500dc74c0c44de73
|
[
"Apache-2.0"
] |
permissive
|
openstack/octavia
|
3faf2afe2ade5bd3978bb3a0558d2eeefc648ba2
|
0426285a41464a5015494584f109eed35a0d44db
|
refs/heads/master
| 2023-09-01T20:12:48.272344
| 2023-08-31T17:24:04
| 2023-08-31T17:24:04
| 21,018,188
| 147
| 180
|
Apache-2.0
| 2021-03-30T12:34:30
| 2014-06-19T22:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
certificate_update.py
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
import flask
from oslo_config import cfg
import webob
BUFFER = 1024
CONF = cfg.CONF
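# Stream the request body to the agent's server certificate path in 1 KiB
# chunks, (re)creating the file with owner-only (0600) permissions.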
def upload_server_cert():
stream = flask.request.stream
file_path = CONF.amphora_agent.agent_server_cert
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
# mode 00600
mode = stat.S_IRUSR | stat.S_IWUSR
with os.fdopen(os.open(file_path, flags, mode), 'wb') as crt_file:
b = stream.read(BUFFER)
while b:
crt_file.write(b)
b = stream.read(BUFFER)
return webob.Response(json={'message': 'OK'}, status=202)
|
e6c2f576ac52f9bbc26ffbc7b371e34fff64e190
|
f52b8606074c2e0cc0c60b30b51c015fd932b0a2
|
/tests/v1/pull.py
|
5e7e3f2bcc3b7d6e6dd05229f0942a77c051ae84
|
[
"MIT"
] |
permissive
|
CiscoDevNet/virlutils
|
81af101bcca6a28fd584ab4b8f3e1aed5a6345c3
|
84afb01a6366d54febb9481c364a378f891327f4
|
refs/heads/master
| 2023-04-30T12:00:05.233334
| 2023-03-06T14:46:48
| 2023-03-06T14:46:48
| 114,168,527
| 144
| 47
|
MIT
| 2023-04-25T18:32:39
| 2017-12-13T21:02:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
pull.py
|
from . import BaseTest
from .mocks.github import MockGitHub
from click.testing import CliRunner
import requests_mock
from virl.cli.main import virl
class Tests(BaseTest):
def test_virl_pull(self):
with requests_mock.mock() as m:
# Mock the request to return what we expect from the API.
topo_url = "https://raw.githubusercontent.com/"
topo_url += "foo/bar/master/topology.virl"
m.get(topo_url, json=MockGitHub.get_topology())
runner = CliRunner()
result = runner.invoke(virl, ["pull", "foo/bar"])
self.assertEqual(0, result.exit_code)
def test_virl_pull_invalid_repo(self):
with requests_mock.mock() as m:
# Mock the request to return what we expect from the API.
topo_url = "https://raw.githubusercontent.com/"
topo_url += "doesnt/exist/master/topology.virl"
m.get(topo_url, status_code=400)
runner = CliRunner()
result = runner.invoke(virl, ["pull", "doesnt/exist"])
expected = "Pulling from doesnt/exist on branch master\nError pulling " "doesnt/exist - repo or file not found\n"
self.assertEqual(result.output, expected)
|
dfc4b4bfe70f2d9c598ecb9cb1eef0b71e1fd88f
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/simulation/tests/test_evoked.py
|
0209f08876bff380f64157588eecc9cc538bbc38
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 7,244
|
py
|
test_evoked.py
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
from pathlib import Path
import numpy as np
from numpy.testing import (
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_allclose,
)
import pytest
from mne import (
read_cov,
read_forward_solution,
convert_forward_solution,
pick_types_forward,
read_evokeds,
pick_types,
EpochsArray,
compute_covariance,
compute_raw_covariance,
pick_channels_cov,
)
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, simulate_evoked, add_noise
from mne.io import read_raw_fif
from mne.cov import regularize, whiten_evoked
from mne.utils import catch_logging
data_path = testing.data_path(download=False)
fwd_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif"
raw_fname = (
Path(__file__).parent.parent.parent / "io" / "tests" / "data" / "test_raw.fif"
)
ave_fname = (
Path(__file__).parent.parent.parent / "io" / "tests" / "data" / "test-ave.fif"
)
cov_fname = (
Path(__file__).parent.parent.parent / "io" / "tests" / "data" / "test-cov.fif"
)
@testing.requires_testing_data
def test_simulate_evoked():
"""Test simulation of evoked data."""
raw = read_raw_fif(raw_fname)
fwd = read_forward_solution(fwd_fname)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=False)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info["bads"])
cov = read_cov(cov_fname)
evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info["bads"])
cov = regularize(cov, evoked_template.info)
nave = evoked_template.nave
tmin = -0.1
sfreq = 1000.0 # Hz
tstep = 1.0 / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series for 2 dipoles
stc = simulate_sparse_stc(fwd["src"], n_dipoles=2, times=times, random_state=42)
# Generate noisy evoked data
iir_filter = [1, -0.9]
evoked = simulate_evoked(
fwd,
stc,
evoked_template.info,
cov,
iir_filter=iir_filter,
nave=nave,
random_state=0,
)
assert_array_almost_equal(evoked.times, stc.times)
assert len(evoked.data) == len(fwd["sol"]["data"])
assert_equal(evoked.nave, nave)
assert len(evoked.info["projs"]) == len(cov["projs"])
evoked_white = whiten_evoked(evoked, cov)
assert abs(evoked_white.data[:, 0].std() - 1.0) < 0.1
# make a vertex that doesn't exist in fwd, should throw error
stc_bad = stc.copy()
mv = np.max(fwd["src"][0]["vertno"][fwd["src"][0]["inuse"]])
stc_bad.vertices[0][0] = mv + 1
pytest.raises(ValueError, simulate_evoked, fwd, stc_bad, evoked_template.info, cov)
evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, nave=np.inf)
evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, nave=np.inf)
assert_array_equal(evoked_1.data, evoked_2.data)
cov["names"] = cov.ch_names[:-2] # Error channels are different.
with pytest.raises(RuntimeError, match="Not all channels present"):
simulate_evoked(fwd, stc, evoked_template.info, cov)
# We don't use an avg ref here, but let's ignore it. Also we know we have
# few samples, and that our epochs are not baseline corrected.
@pytest.mark.filterwarnings("ignore:No average EEG reference present")
@pytest.mark.filterwarnings("ignore:Too few samples")
@pytest.mark.filterwarnings("ignore:Epochs are not baseline corrected")
def test_add_noise():
"""Test noise addition."""
rng = np.random.default_rng(0)
raw = read_raw_fif(raw_fname)
raw.del_proj()
picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
cov = compute_raw_covariance(raw, picks=picks)
with pytest.raises(RuntimeError, match="to be loaded"):
add_noise(raw, cov)
raw.crop(0, 1).load_data()
with pytest.raises(TypeError, match="Raw, Epochs, or Evoked"):
add_noise(0.0, cov)
with pytest.raises(TypeError, match="Covariance"):
add_noise(raw, 0.0)
# test a no-op (data preserved)
orig_data = raw[:][0]
zero_cov = cov.copy()
zero_cov["data"].fill(0)
add_noise(raw, zero_cov)
new_data = raw[:][0]
assert_allclose(orig_data, new_data, atol=1e-30)
# set to zero to make comparisons easier
raw._data[:] = 0.0
epochs = EpochsArray(np.zeros((1, len(raw.ch_names), 100)), raw.info.copy())
epochs.info["bads"] = []
evoked = epochs.average(picks=np.arange(len(raw.ch_names)))
for inst in (raw, epochs, evoked):
with catch_logging() as log:
add_noise(inst, cov, random_state=rng, verbose=True)
log = log.getvalue()
want = "to {0}/{1} channels ({0}".format(len(cov["names"]), len(raw.ch_names))
assert want in log
if inst is evoked:
inst = EpochsArray(inst.data[np.newaxis], inst.info)
if inst is raw:
cov_new = compute_raw_covariance(inst, picks=picks)
else:
cov_new = compute_covariance(inst)
assert cov["names"] == cov_new["names"]
r = np.corrcoef(cov["data"].ravel(), cov_new["data"].ravel())[0, 1]
assert r > 0.99
def test_rank_deficiency():
"""Test adding noise from M/EEG float32 (I/O) cov with projectors."""
# See gh-5940
evoked = read_evokeds(ave_fname, 0, baseline=(None, 0))
evoked.info["bads"] = ["MEG 2443"]
with evoked.info._unlock():
evoked.info["lowpass"] = 20 # fake for decim
picks = pick_types(evoked.info, meg=True, eeg=False)
picks = picks[::16]
evoked.pick_channels([evoked.ch_names[pick] for pick in picks])
evoked.info.normalize_proj()
cov = read_cov(cov_fname)
cov["projs"] = []
cov = regularize(cov, evoked.info, rank=None)
cov = pick_channels_cov(cov, evoked.ch_names)
evoked.data[:] = 0
add_noise(evoked, cov, random_state=0)
cov_new = compute_covariance(
EpochsArray(evoked.data[np.newaxis], evoked.info), verbose="error"
)
assert cov["names"] == cov_new["names"]
r = np.corrcoef(cov["data"].ravel(), cov_new["data"].ravel())[0, 1]
assert r > 0.98
@testing.requires_testing_data
def test_order():
"""Test that order does not matter."""
fwd = read_forward_solution(fwd_fname)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=False)
evoked = read_evokeds(ave_fname)[0].pick_types(meg=True, eeg=True)
assert "meg" in evoked
assert "eeg" in evoked
meg_picks = pick_types(evoked.info, meg=True)
eeg_picks = pick_types(evoked.info, eeg=True)
# MEG then EEG
assert (eeg_picks > meg_picks.max()).all()
times = np.arange(10) / 1000.0
stc = simulate_sparse_stc(fwd["src"], 1, times=times, random_state=0)
evoked_sim = simulate_evoked(fwd, stc, evoked.info, nave=np.inf)
reorder = np.concatenate([eeg_picks, meg_picks])
evoked.reorder_channels([evoked.ch_names[pick] for pick in reorder])
evoked_sim_2 = simulate_evoked(fwd, stc, evoked.info, nave=np.inf)
want_data = evoked_sim.data[reorder]
assert_allclose(evoked_sim_2.data, want_data)
|
6cd724387de777835fe1c3197a94bf2a70bb50b6
|
9c028c54fcba2d22d48bed063090e35e9af35dc4
|
/ciw/arrival_node.py
|
89163b40c1c2e979f8e03797d02ae3db733efaf1
|
[
"MIT"
] |
permissive
|
CiwPython/Ciw
|
b2612edf361c7c6af58fdf5f710b44c81f417d23
|
85d03a4988f34c1ca05d7781649042ea2761a42b
|
refs/heads/master
| 2023-04-19T03:45:53.599066
| 2023-04-06T17:42:08
| 2023-04-06T17:42:08
| 47,995,577
| 143
| 39
|
MIT
| 2023-02-07T13:06:10
| 2015-12-14T19:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,374
|
py
|
arrival_node.py
|
from __future__ import division
from random import random
from .individual import Individual
class ArrivalNode(object):
"""
Class for the arrival node of the network
"""
def __init__(self, simulation):
"""
        Initialise the arrival node.
"""
self.simulation = simulation
self.number_of_individuals = 0
self.number_of_individuals_per_class = [0] * self.simulation.network.number_of_classes
self.number_accepted_individuals = 0
self.number_accepted_individuals_per_class = [0] * self.simulation.network.number_of_classes
self.event_dates_dict = {nd + 1: {clss: False for clss in range(
self.simulation.network.number_of_classes)}
for nd in range(self.simulation.network.number_of_nodes)}
self.rejection_dict = {nd + 1: {clss:[] for clss in range(
self.simulation.network.number_of_classes)}
for nd in range(self.simulation.network.number_of_nodes)}
self.baulked_dict = {nd + 1: {clss:[] for clss in range(
self.simulation.network.number_of_classes)}
for nd in range(self.simulation.network.number_of_nodes)}
def initialise(self):
self.initialise_event_dates_dict()
self.find_next_event_date()
def __repr__(self):
"""
Representation of an arrival node.
"""
return 'Arrival Node'
def decide_baulk(self, next_node, next_individual):
"""
Either makes an individual baulk, or sends the individual
to the next node.
"""
if next_node.baulking_functions[self.next_class] is None:
self.send_individual(next_node, next_individual)
else:
rnd_num = random()
if rnd_num < next_node.baulking_functions[self.next_class](
next_node.number_of_individuals):
self.record_baulk(next_node)
else:
self.send_individual(next_node, next_individual)
def find_next_event_date(self):
"""
Finds the time of the next arrival.
"""
minnd = None
minclss = None
mindate = float("Inf")
for nd in self.event_dates_dict:
for clss in self.event_dates_dict[nd]:
if self.event_dates_dict[nd][clss] < mindate:
minnd = nd
minclss = clss
mindate = self.event_dates_dict[nd][clss]
self.next_node = minnd
self.next_class = minclss
self.next_event_date = mindate
def have_event(self):
"""
        Finds a batch size. Creates that many Individuals and sends
        them to the relevant node. Then updates the event_dates_dict.
"""
batch = self.batch_size(self.next_node, self.next_class)
for _ in range(batch):
self.number_of_individuals += 1
self.number_of_individuals_per_class[self.next_class] += 1
priority_class = self.simulation.network.priority_class_mapping[
self.next_class]
next_individual = self.simulation.IndividualType(
self.number_of_individuals,
self.next_class,
priority_class,
simulation=self.simulation)
if self.simulation.network.process_based:
next_individual.route = self.simulation.network.customer_classes[
next_individual.customer_class].routing[self.next_node - 1](next_individual)
next_node = self.simulation.transitive_nodes[self.next_node - 1]
self.release_individual(next_node, next_individual)
self.event_dates_dict[self.next_node][
self.next_class] = self.increment_time(
self.event_dates_dict[self.next_node][
self.next_class], self.inter_arrival(
self.next_node, self.next_class))
self.find_next_event_date()
def increment_time(self, original, increment):
"""
Increments the original time by the increment.
"""
return original + increment
def initialise_event_dates_dict(self):
"""
Initialises the next event dates dictionary
with random times for each node and class.
"""
for nd in self.event_dates_dict:
for clss in self.event_dates_dict[nd]:
self.event_dates_dict[nd][clss] = self.inter_arrival(nd, clss)
def inter_arrival(self, nd, clss):
"""
Samples the inter-arrival time for next class and node.
"""
return self.simulation.inter_arrival_times[nd][clss]._sample(t=self.simulation.current_time)
def batch_size(self, nd, clss):
"""
Samples the batch size for next class and node.
Raises error if a positive integer is not sampled.
"""
batch = self.simulation.batch_sizes[nd][clss]._sample(t=self.simulation.current_time)
if isinstance(batch, int) and batch >= 0:
return batch
raise ValueError('Batch sizes must be positive integers.')
def record_baulk(self, next_node):
"""
Adds an individual to the baulked dictionary.
"""
self.baulked_dict[next_node.id_number][self.next_class].append(self.next_event_date)
def record_rejection(self, next_node):
"""
Adds an individual to the rejection dictionary.
"""
self.rejection_dict[next_node.id_number][self.next_class].append(self.next_event_date)
def release_individual(self, next_node, next_individual):
"""
        Either rejects the next_individual due to lack of capacity,
        or passes that individual on to decide whether to baulk.
"""
if next_node.number_of_individuals >= next_node.node_capacity:
self.record_rejection(next_node)
else:
self.decide_baulk(next_node, next_individual)
def send_individual(self, next_node, next_individual):
"""
Sends the next_individual to the next_node.
"""
self.number_accepted_individuals += 1
self.number_accepted_individuals_per_class[next_individual.customer_class] += 1
next_node.accept(next_individual)
def update_next_event_date(self):
"""
Passes, as updating next event happens at time of event.
"""
pass
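# --- Illustrative sketch (not part of Ciw): how the nested event_dates_dict ---
# --- built in __init__ drives find_next_event_date(). The node/class labels ---
# --- and dates below are made up for demonstration only. ---
if __name__ == '__main__':
    # Hypothetical sampled arrival dates, keyed by node then customer class,
    # mirroring the {node: {class: date}} layout used by the ArrivalNode.
    event_dates_dict = {
        1: {0: 3.2, 1: 0.7},
        2: {0: 1.5, 1: 4.9},
    }
    next_node, next_class, next_date = None, None, float("Inf")
    for nd, classes in event_dates_dict.items():
        for clss, date in classes.items():
            if date < next_date:
                next_node, next_class, next_date = nd, clss, date
    # The earliest sampled date wins: node 1, class 1, at t = 0.7.
    print(next_node, next_class, next_date)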
|
364c900467ae12e7e1b0795e3365a78065508a11
|
3b87eaa7f1b8290d1a74ac2bec9573f81aab831d
|
/python/python/examples/try_stmt.py
|
8b926ab56c6ff469ef152b9557f1024bf84c4ac2
|
[
"MIT"
] |
permissive
|
antlr/grammars-v4
|
1f6ba461f9fb2c8f04335ca495249ab6eab8e0ae
|
98c2bc3b68eff9ad4b809d21a6c9d858c5b9ddfa
|
refs/heads/master
| 2023-08-16T13:37:23.691676
| 2023-08-13T15:20:52
| 2023-08-13T15:20:52
| 5,958,314
| 9,255
| 4,577
|
MIT
| 2023-09-13T21:17:22
| 2012-09-25T23:45:11
|
ANTLR
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
try_stmt.py
|
# try_stmt: TRY COLON suite (except_clause+ else_clause? finaly_clause? | finaly_clause)
# TRY COLON suite except_clause
try:
pass
except:
pass
# TRY COLON suite except_clause except_clause else_clause
try:
pass
except Exception as ex:
pass
except:
pass
else:
pass
# TRY COLON suite except_clause finaly_clause
try:
pass
except Exception:
pass
finally:
pass
# TRY COLON suite finaly_clause
try:
pass
finally:
pass
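# Added illustration (not in the original test file):
# TRY COLON suite with except, else and finally clauses combined
try:
    pass
except Exception:
    pass
else:
    pass
finally:
    pass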
|
7978f95215489747b36c0f1177bb0a689384ffeb
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/boto/mws/connection.py
|
687fae74f0baec58395853266b0e596ad88513af
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 49,808
|
py
|
connection.py
|
# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import hashlib
import string
import collections
from boto.connection import AWSQueryConnection
from boto.exception import BotoServerError
import boto.mws.exception
import boto.mws.response
from boto.handler import XmlHandler
from boto.compat import filter, map, six, encodebytes
__all__ = ['MWSConnection']
api_version_path = {
'Feeds': ('2009-01-01', 'Merchant', '/'),
'Reports': ('2009-01-01', 'Merchant', '/'),
'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'),
'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'),
'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'),
'Inbound': ('2010-10-01', 'SellerId',
'/FulfillmentInboundShipment/2010-10-01'),
'Outbound': ('2010-10-01', 'SellerId',
'/FulfillmentOutboundShipment/2010-10-01'),
'Inventory': ('2010-10-01', 'SellerId',
'/FulfillmentInventory/2010-10-01'),
'Recommendations': ('2013-04-01', 'SellerId',
'/Recommendations/2013-04-01'),
'CustomerInfo': ('2014-03-01', 'SellerId',
'/CustomerInformation/2014-03-01'),
'CartInfo': ('2014-03-01', 'SellerId',
'/CartInformation/2014-03-01'),
'Subscriptions': ('2013-07-01', 'SellerId',
'/Subscriptions/2013-07-01'),
'OffAmazonPayments': ('2013-01-01', 'SellerId',
'/OffAmazonPayments/2013-01-01'),
}
content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip()
decorated_attrs = ('action', 'response', 'section',
'quota', 'restore', 'version')
api_call_map = {}
def add_attrs_from(func, to):
for attr in decorated_attrs:
setattr(to, attr, getattr(func, attr, None))
to.__wrapped__ = func
return to
def structured_lists(*fields):
def decorator(func):
def wrapper(self, *args, **kw):
for key, acc in [f.split('.') for f in fields]:
if key in kw:
newkey = key + '.' + acc + (acc and '.' or '')
for i in range(len(kw[key])):
kw[newkey + str(i + 1)] = kw[key][i]
kw.pop(key)
return func(self, *args, **kw)
wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__,
', '.join(fields))
return add_attrs_from(func, to=wrapper)
return decorator
def http_body(field):
def decorator(func):
def wrapper(*args, **kw):
if any([f not in kw for f in (field, 'content_type')]):
message = "{0} requires {1} and content_type arguments for " \
"building HTTP body".format(func.action, field)
raise KeyError(message)
kw['body'] = kw.pop(field)
kw['headers'] = {
'Content-Type': kw.pop('content_type'),
'Content-MD5': content_md5(kw['body']),
}
return func(*args, **kw)
wrapper.__doc__ = "{0}\nRequired HTTP Body: " \
"{1}".format(func.__doc__, field)
return add_attrs_from(func, to=wrapper)
return decorator
def destructure_object(value, into, prefix, members=False):
if isinstance(value, boto.mws.response.ResponseElement):
destructure_object(value.__dict__, into, prefix, members=members)
elif isinstance(value, collections.Mapping):
for name in value:
if name.startswith('_'):
continue
destructure_object(value[name], into, prefix + '.' + name,
members=members)
elif isinstance(value, six.string_types):
into[prefix] = value
elif isinstance(value, collections.Iterable):
for index, element in enumerate(value):
suffix = (members and '.member.' or '.') + str(index + 1)
destructure_object(element, into, prefix + suffix,
members=members)
elif isinstance(value, bool):
into[prefix] = str(value).lower()
else:
into[prefix] = value
def structured_objects(*fields, **kwargs):
def decorator(func):
def wrapper(*args, **kw):
members = kwargs.get('members', False)
for field in filter(lambda i: i in kw, fields):
destructure_object(kw.pop(field), kw, field, members=members)
return func(*args, **kw)
wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \
"(ResponseElement or anything iterable/dict-like)" \
.format(func.__doc__, ', '.join(fields))
return add_attrs_from(func, to=wrapper)
return decorator
def requires(*groups):
def decorator(func):
def requires(*args, **kw):
hasgroup = lambda group: all(key in kw for key in group)
if 1 != len(list(filter(hasgroup, groups))):
message = ' OR '.join(['+'.join(g) for g in groups])
message = "{0} requires {1} argument(s)" \
"".format(func.action, message)
raise KeyError(message)
return func(*args, **kw)
message = ' OR '.join(['+'.join(g) for g in groups])
requires.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
message)
return add_attrs_from(func, to=requires)
return decorator
def exclusive(*groups):
def decorator(func):
def wrapper(*args, **kw):
hasgroup = lambda group: all(key in kw for key in group)
if len(list(filter(hasgroup, groups))) not in (0, 1):
message = ' OR '.join(['+'.join(g) for g in groups])
message = "{0} requires either {1}" \
"".format(func.action, message)
raise KeyError(message)
return func(*args, **kw)
message = ' OR '.join(['+'.join(g) for g in groups])
wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__,
message)
return add_attrs_from(func, to=wrapper)
return decorator
def dependent(field, *groups):
def decorator(func):
def wrapper(*args, **kw):
hasgroup = lambda group: all(key in kw for key in group)
if field in kw and not any(hasgroup(g) for g in groups):
message = ' OR '.join(['+'.join(g) for g in groups])
message = "{0} argument {1} requires {2}" \
"".format(func.action, field, message)
raise KeyError(message)
return func(*args, **kw)
message = ' OR '.join(['+'.join(g) for g in groups])
wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__,
field,
message)
return add_attrs_from(func, to=wrapper)
return decorator
def requires_some_of(*fields):
def decorator(func):
def requires(*args, **kw):
if not any(i in kw for i in fields):
message = "{0} requires at least one of {1} argument(s)" \
"".format(func.action, ', '.join(fields))
raise KeyError(message)
return func(*args, **kw)
requires.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__,
', '.join(fields))
return add_attrs_from(func, to=requires)
return decorator
def boolean_arguments(*fields):
def decorator(func):
def wrapper(*args, **kw):
for field in [f for f in fields if isinstance(kw.get(f), bool)]:
kw[field] = str(kw[field]).lower()
return func(*args, **kw)
wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
', '.join(fields))
return add_attrs_from(func, to=wrapper)
return decorator
def api_action(section, quota, restore, *api):
def decorator(func, quota=int(quota), restore=float(restore)):
version, accesskey, path = api_version_path[section]
action = ''.join(api or map(str.capitalize, func.__name__.split('_')))
def wrapper(self, *args, **kw):
kw.setdefault(accesskey, getattr(self, accesskey, None))
if kw[accesskey] is None:
message = "{0} requires {1} argument. Set the " \
"MWSConnection.{2} attribute?" \
"".format(action, accesskey, accesskey)
raise KeyError(message)
kw['Action'] = action
kw['Version'] = version
response = self._response_factory(action, connection=self)
request = dict(path=path, quota=quota, restore=restore)
return func(self, request, response, *args, **kw)
for attr in decorated_attrs:
setattr(wrapper, attr, locals().get(attr))
wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \
"{4}".format(action, version, quota, restore,
func.__doc__)
api_call_map[action] = func.__name__
return wrapper
return decorator
class MWSConnection(AWSQueryConnection):
ResponseFactory = boto.mws.response.ResponseFactory
ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory
def __init__(self, *args, **kw):
kw.setdefault('host', 'mws.amazonservices.com')
self._sandboxed = kw.pop('sandbox', False)
self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
self.SellerId = kw.pop('SellerId', None) or self.Merchant
kw = self._setup_factories(kw.pop('factory_scopes', []), **kw)
super(MWSConnection, self).__init__(*args, **kw)
def _setup_factories(self, extrascopes, **kw):
for factory, (scope, Default) in {
'response_factory':
(boto.mws.response, self.ResponseFactory),
'response_error_factory':
(boto.mws.exception, self.ResponseErrorFactory),
}.items():
if factory in kw:
setattr(self, '_' + factory, kw.pop(factory))
else:
scopes = extrascopes + [scope]
setattr(self, '_' + factory, Default(scopes=scopes))
return kw
def _sandboxify(self, path):
if not self._sandboxed:
return path
splat = path.split('/')
splat[-2] += '_Sandbox'
return '/'.join(splat)
def _required_auth_capability(self):
return ['mws']
def _post_request(self, request, params, parser, body='', headers=None):
"""Make a POST request, optionally with a content body,
and return the response, optionally as raw text.
"""
headers = headers or {}
path = self._sandboxify(request['path'])
request = self.build_base_http_request('POST', path, None, data=body,
params=params, headers=headers,
host=self.host)
try:
response = self._mexe(request, override_num_retries=None)
except BotoServerError as bs:
raise self._response_error_factory(bs.status, bs.reason, bs.body)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
digest = response.getheader('Content-MD5')
if digest is not None:
assert content_md5(body) == digest
contenttype = response.getheader('Content-Type')
return self._parse_response(parser, contenttype, body)
def _parse_response(self, parser, contenttype, body):
if not contenttype.startswith('text/xml'):
return body
handler = XmlHandler(parser, self)
xml.sax.parseString(body, handler)
return parser
def method_for(self, name):
"""Return the MWS API method referred to in the argument.
The named method can be in CamelCase or underlined_lower_case.
This is the complement to MWSConnection.any_call.action
"""
action = '_' in name and string.capwords(name, '_') or name
if action in api_call_map:
return getattr(self, api_call_map[action])
return None
def iter_call(self, call, *args, **kw):
"""Pass a call name as the first argument and a generator
is returned for the initial response and any continuation
call responses made using the NextToken.
"""
method = self.method_for(call)
assert method, 'No call named "{0}"'.format(call)
return self.iter_response(method(*args, **kw))
def iter_response(self, response):
"""Pass a call's response as the initial argument and a
generator is returned for the initial response and any
continuation call responses made using the NextToken.
"""
yield response
more = self.method_for(response._action + 'ByNextToken')
while more and response._result.HasNext == 'true':
response = more(NextToken=response._result.NextToken)
yield response
@requires(['FeedType'])
@boolean_arguments('PurgeAndReplace')
@http_body('FeedContent')
@structured_lists('MarketplaceIdList.Id')
@api_action('Feeds', 15, 120)
def submit_feed(self, request, response, headers=None, body='', **kw):
"""Uploads a feed for processing by Amazon MWS.
"""
headers = headers or {}
return self._post_request(request, kw, response, body=body,
headers=headers)
@structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
'FeedProcessingStatusList.Status')
@api_action('Feeds', 10, 45)
def get_feed_submission_list(self, request, response, **kw):
"""Returns a list of all feed submissions submitted in the
previous 90 days.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Feeds', 0, 0)
def get_feed_submission_list_by_next_token(self, request, response, **kw):
"""Returns a list of feed submissions using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
@api_action('Feeds', 10, 45)
def get_feed_submission_count(self, request, response, **kw):
"""Returns a count of the feeds submitted in the previous 90 days.
"""
return self._post_request(request, kw, response)
@structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
@api_action('Feeds', 10, 45)
def cancel_feed_submissions(self, request, response, **kw):
"""Cancels one or more feed submissions and returns a
count of the feed submissions that were canceled.
"""
return self._post_request(request, kw, response)
@requires(['FeedSubmissionId'])
@api_action('Feeds', 15, 60)
def get_feed_submission_result(self, request, response, **kw):
"""Returns the feed processing report.
"""
return self._post_request(request, kw, response)
def get_service_status(self, **kw):
"""Instruct the user on how to get service status.
"""
sections = ', '.join(map(str.lower, api_version_path.keys()))
message = "Use {0}.get_(section)_service_status(), " \
"where (section) is one of the following: " \
"{1}".format(self.__class__.__name__, sections)
raise AttributeError(message)
@requires(['ReportType'])
@structured_lists('MarketplaceIdList.Id')
@boolean_arguments('ReportOptions=ShowSalesChannel')
@api_action('Reports', 15, 60)
def request_report(self, request, response, **kw):
"""Creates a report request and submits the request to Amazon MWS.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type',
'ReportProcessingStatusList.Status')
@api_action('Reports', 10, 45)
def get_report_request_list(self, request, response, **kw):
"""Returns a list of report requests that you can use to get the
ReportRequestId for a report.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_request_list_by_next_token(self, request, response, **kw):
"""Returns a list of report requests using the NextToken,
which was supplied by a previous request to either
GetReportRequestListByNextToken or GetReportRequestList, where
the value of HasNext was true in that previous request.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type',
'ReportProcessingStatusList.Status')
@api_action('Reports', 10, 45)
def get_report_request_count(self, request, response, **kw):
"""Returns a count of report requests that have been submitted
to Amazon MWS for processing.
"""
return self._post_request(request, kw, response)
@api_action('Reports', 10, 45)
def cancel_report_requests(self, request, response, **kw):
"""Cancel one or more report requests, returning the count of the
canceled report requests and the report request information.
"""
return self._post_request(request, kw, response)
@boolean_arguments('Acknowledged')
@structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type')
@api_action('Reports', 10, 60)
def get_report_list(self, request, response, **kw):
"""Returns a list of reports that were created in the previous
90 days that match the query parameters.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_list_by_next_token(self, request, response, **kw):
"""Returns a list of reports using the NextToken, which
was supplied by a previous request to either
GetReportListByNextToken or GetReportList, where the
value of HasNext was true in the previous call.
"""
return self._post_request(request, kw, response)
@boolean_arguments('Acknowledged')
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_count(self, request, response, **kw):
"""Returns a count of the reports, created in the previous 90 days,
with a status of _DONE_ and that are available for download.
"""
return self._post_request(request, kw, response)
@requires(['ReportId'])
@api_action('Reports', 15, 60)
def get_report(self, request, response, **kw):
"""Returns the contents of a report.
"""
return self._post_request(request, kw, response)
@requires(['ReportType', 'Schedule'])
@api_action('Reports', 10, 45)
def manage_report_schedule(self, request, response, **kw):
"""Creates, updates, or deletes a report request schedule for
a specified report type.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_schedule_list(self, request, response, **kw):
"""Returns a list of order report requests that are scheduled
to be submitted to Amazon MWS for processing.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_schedule_list_by_next_token(self, request, response, **kw):
"""Returns a list of report requests using the NextToken,
which was supplied by a previous request to either
GetReportScheduleListByNextToken or GetReportScheduleList,
where the value of HasNext was true in that previous request.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_schedule_count(self, request, response, **kw):
"""Returns a count of order report requests that are scheduled
to be submitted to Amazon MWS.
"""
return self._post_request(request, kw, response)
@requires(['ReportIdList'])
@boolean_arguments('Acknowledged')
@structured_lists('ReportIdList.Id')
@api_action('Reports', 10, 45)
def update_report_acknowledgements(self, request, response, **kw):
"""Updates the acknowledged status of one or more reports.
"""
return self._post_request(request, kw, response)
@requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems'])
@structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems')
@api_action('Inbound', 30, 0.5)
def create_inbound_shipment_plan(self, request, response, **kw):
"""Returns the information required to create an inbound shipment.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems'])
@structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
@api_action('Inbound', 30, 0.5)
def create_inbound_shipment(self, request, response, **kw):
"""Creates an inbound shipment.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId'])
@structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
@api_action('Inbound', 30, 0.5)
def update_inbound_shipment(self, request, response, **kw):
"""Updates an existing inbound shipment. Amazon documentation
is ambiguous as to whether the InboundShipmentHeader and
InboundShipmentItems arguments are required.
"""
return self._post_request(request, kw, response)
@requires_some_of('ShipmentIdList', 'ShipmentStatusList')
@structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status')
@api_action('Inbound', 30, 0.5)
def list_inbound_shipments(self, request, response, **kw):
"""Returns a list of inbound shipments based on criteria that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipments_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipments using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipment_items(self, request, response, **kw):
"""Returns a list of items in a specified inbound shipment, or a
list of items that were updated within a specified time frame.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipment_items_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipment items using the
NextToken parameter.
"""
return self._post_request(request, kw, response)
@api_action('Inbound', 2, 300, 'GetServiceStatus')
def get_inbound_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Inbound
Shipment API section.
"""
return self._post_request(request, kw, response)
@requires(['SellerSkus'], ['QueryStartDateTime'])
@structured_lists('SellerSkus.member')
@api_action('Inventory', 30, 0.5)
def list_inventory_supply(self, request, response, **kw):
"""Returns information about the availability of a seller's
inventory.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inventory', 30, 0.5)
def list_inventory_supply_by_next_token(self, request, response, **kw):
"""Returns the next page of information about the availability
of a seller's inventory using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@api_action('Inventory', 2, 300, 'GetServiceStatus')
def get_inventory_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Inventory
API section.
"""
return self._post_request(request, kw, response)
@requires(['PackageNumber'])
@api_action('Outbound', 30, 0.5)
def get_package_tracking_details(self, request, response, **kw):
"""Returns delivery tracking information for a package in
an outbound shipment for a Multi-Channel Fulfillment order.
"""
return self._post_request(request, kw, response)
@requires(['Address', 'Items'])
@structured_objects('Address', 'Items')
@api_action('Outbound', 30, 0.5)
def get_fulfillment_preview(self, request, response, **kw):
"""Returns a list of fulfillment order previews based on items
and shipping speed categories that you specify.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
'ShippingSpeedCategory', 'DisplayableOrderDateTime',
'DestinationAddress', 'DisplayableOrderComment',
'Items'])
@structured_objects('DestinationAddress', 'Items')
@api_action('Outbound', 30, 0.5)
def create_fulfillment_order(self, request, response, **kw):
"""Requests that Amazon ship items from the seller's inventory
to a destination address.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId'])
@api_action('Outbound', 30, 0.5)
def get_fulfillment_order(self, request, response, **kw):
"""Returns a fulfillment order based on a specified
SellerFulfillmentOrderId.
"""
return self._post_request(request, kw, response)
@api_action('Outbound', 30, 0.5)
def list_all_fulfillment_orders(self, request, response, **kw):
"""Returns a list of fulfillment orders fulfilled after (or
at) a specified date or by fulfillment method.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Outbound', 30, 0.5)
def list_all_fulfillment_orders_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipment items using the
NextToken parameter.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId'])
@api_action('Outbound', 30, 0.5)
def cancel_fulfillment_order(self, request, response, **kw):
"""Requests that Amazon stop attempting to fulfill an existing
fulfillment order.
"""
return self._post_request(request, kw, response)
@api_action('Outbound', 2, 300, 'GetServiceStatus')
def get_outbound_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Outbound
API section.
"""
return self._post_request(request, kw, response)
@requires(['CreatedAfter'], ['LastUpdatedAfter'])
@requires(['MarketplaceId'])
@exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
@dependent('CreatedBefore', ['CreatedAfter'])
@exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
@dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
@exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
@structured_objects('OrderTotal', 'ShippingAddress',
'PaymentExecutionDetail')
@structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
'FulfillmentChannel.Channel', 'PaymentMethod.')
@api_action('Orders', 6, 60)
def list_orders(self, request, response, **kw):
"""Returns a list of orders created or updated during a time
frame that you specify.
"""
toggle = set(('FulfillmentChannel.Channel.1',
'OrderStatus.Status.1', 'PaymentMethod.1',
'LastUpdatedAfter', 'LastUpdatedBefore'))
for do, dont in {
'BuyerEmail': toggle.union(['SellerOrderId']),
'SellerOrderId': toggle.union(['BuyerEmail']),
}.items():
if do in kw and any(i in dont for i in kw):
message = "Don't include {0} when specifying " \
"{1}".format(' or '.join(dont), do)
raise AssertionError(message)
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Orders', 6, 60)
def list_orders_by_next_token(self, request, response, **kw):
"""Returns the next page of orders using the NextToken value
that was returned by your previous request to either
ListOrders or ListOrdersByNextToken.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderId'])
@structured_lists('AmazonOrderId.Id')
@api_action('Orders', 6, 60)
def get_order(self, request, response, **kw):
"""Returns an order for each AmazonOrderId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderId'])
@api_action('Orders', 30, 2)
def list_order_items(self, request, response, **kw):
"""Returns order item information for an AmazonOrderId that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Orders', 30, 2)
def list_order_items_by_next_token(self, request, response, **kw):
"""Returns the next page of order items using the NextToken
value that was returned by your previous request to either
ListOrderItems or ListOrderItemsByNextToken.
"""
return self._post_request(request, kw, response)
@api_action('Orders', 2, 300, 'GetServiceStatus')
def get_orders_service_status(self, request, response, **kw):
"""Returns the operational status of the Orders API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Query'])
@api_action('Products', 20, 20)
def list_matching_products(self, request, response, **kw):
"""Returns a list of products and their attributes, ordered
by relevancy, based on a search query that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 20)
def get_matching_product(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of ASIN values that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'IdType', 'IdList'])
@structured_lists('IdList.Id')
@api_action('Products', 20, 20)
def get_matching_product_for_id(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of Product IDs that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetCompetitivePricingForSKU')
def get_competitive_pricing_for_sku(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the SellerSKUs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetCompetitivePricingForASIN')
def get_competitive_pricing_for_asin(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the ASINs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU')
def get_lowest_offer_listings_for_sku(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and SellerSKUs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN')
def get_lowest_offer_listings_for_asin(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and ASINs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKU'])
@api_action('Products', 20, 20, 'GetProductCategoriesForSKU')
def get_product_categories_for_sku(self, request, response, **kw):
"""Returns the product categories that a SellerSKU belongs to.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASIN'])
@api_action('Products', 20, 20, 'GetProductCategoriesForASIN')
def get_product_categories_for_asin(self, request, response, **kw):
"""Returns the product categories that an ASIN belongs to.
"""
return self._post_request(request, kw, response)
@api_action('Products', 2, 300, 'GetServiceStatus')
def get_products_service_status(self, request, response, **kw):
"""Returns the operational status of the Products API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetMyPriceForSKU')
def get_my_price_for_sku(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on SellerSKU.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetMyPriceForASIN')
def get_my_price_for_asin(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on ASIN.
"""
return self._post_request(request, kw, response)
@api_action('Sellers', 15, 60)
def list_marketplace_participations(self, request, response, **kw):
"""Returns a list of marketplaces that the seller submitting
the request can sell in, and a list of participations that
include seller-specific information in that marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Sellers', 15, 60)
def list_marketplace_participations_by_next_token(self, request, response,
**kw):
"""Returns the next page of marketplaces and participations
using the NextToken value that was returned by your
previous request to either ListMarketplaceParticipations
or ListMarketplaceParticipationsByNextToken.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Recommendations', 5, 2)
def get_last_updated_time_for_recommendations(self, request, response,
**kw):
"""Checks whether there are active recommendations for each category
for the given marketplace, and if there are, returns the time when
recommendations were last updated for each category.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@structured_lists('CategoryQueryList.CategoryQuery')
@api_action('Recommendations', 5, 2)
def list_recommendations(self, request, response, **kw):
"""Returns your active recommendations for a specific category or for
all categories for a specific marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Recommendations', 5, 2)
def list_recommendations_by_next_token(self, request, response, **kw):
"""Returns the next page of recommendations using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@api_action('Recommendations', 2, 300, 'GetServiceStatus')
def get_recommendations_service_status(self, request, response, **kw):
"""Returns the operational status of the Recommendations API section.
"""
return self._post_request(request, kw, response)
@api_action('CustomerInfo', 15, 12)
def list_customers(self, request, response, **kw):
"""Returns a list of customer accounts based on search criteria that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CustomerInfo', 50, 3)
def list_customers_by_next_token(self, request, response, **kw):
"""Returns the next page of customers using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@requires(['CustomerIdList'])
@structured_lists('CustomerIdList.CustomerId')
@api_action('CustomerInfo', 15, 12)
def get_customers_for_customer_id(self, request, response, **kw):
"""Returns a list of customer accounts based on search criteria that
you specify.
"""
return self._post_request(request, kw, response)
@api_action('CustomerInfo', 2, 300, 'GetServiceStatus')
def get_customerinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Customer Information API
section.
"""
return self._post_request(request, kw, response)
@requires(['DateRangeStart'])
@api_action('CartInfo', 15, 12)
def list_carts(self, request, response, **kw):
"""Returns a list of shopping carts in your Webstore that were last
updated during the time range that you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CartInfo', 50, 3)
def list_carts_by_next_token(self, request, response, **kw):
"""Returns the next page of shopping carts using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@requires(['CartIdList'])
@structured_lists('CartIdList.CartId')
@api_action('CartInfo', 15, 12)
def get_carts(self, request, response, **kw):
"""Returns shopping carts based on the CartId values that you specify.
"""
return self._post_request(request, kw, response)
@api_action('CartInfo', 2, 300, 'GetServiceStatus')
def get_cartinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Cart Information API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def register_destination(self, request, response, **kw):
"""Specifies a new destination where you want to receive notifications.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def deregister_destination(self, request, response, **kw):
"""Removes an existing destination from the list of registered
destinations.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_registered_destinations(self, request, response, **kw):
"""Lists all current destinations that you have registered.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def send_test_notification_to_destination(self, request, response, **kw):
"""Sends a test notification to an existing destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def create_subscription(self, request, response, **kw):
"""Creates a new subscription for the specified notification type
and destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def get_subscription(self, request, response, **kw):
"""Gets the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def delete_subscription(self, request, response, **kw):
"""Deletes the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_subscriptions(self, request, response, **kw):
"""Returns a list of all your current subscriptions.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def update_subscription(self, request, response, **kw):
"""Updates the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@api_action('Subscriptions', 2, 300, 'GetServiceStatus')
def get_subscriptions_service_status(self, request, response, **kw):
"""Returns the operational status of the Subscriptions API section.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes'])
@structured_objects('OrderReferenceAttributes')
@api_action('OffAmazonPayments', 10, 1)
def set_order_reference_details(self, request, response, **kw):
"""Sets order reference details such as the order total and a
description for the order.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 20, 2)
def get_order_reference_details(self, request, response, **kw):
"""Returns details about the Order Reference object and its current
state.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def confirm_order_reference(self, request, response, **kw):
"""Confirms that the order reference is free of constraints and all
required information has been set on the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def cancel_order_reference(self, request, response, **kw):
"""Cancel an order reference; all authorizations associated with
this order reference are also closed.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def close_order_reference(self, request, response, **kw):
"""Confirms that an order reference has been fulfilled (fully
or partially) and that you do not expect to create any new
authorizations on this order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId',
'AuthorizationAmount'])
@structured_objects('AuthorizationAmount')
@api_action('OffAmazonPayments', 10, 1)
def authorize(self, request, response, **kw):
"""Reserves a specified amount against the payment method(s) stored in
the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 20, 2)
def get_authorization_details(self, request, response, **kw):
"""Returns the status of a particular authorization and the total
amount captured on the authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount'])
@structured_objects('CaptureAmount')
@api_action('OffAmazonPayments', 10, 1)
def capture(self, request, response, **kw):
"""Captures funds from an authorized payment instrument.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId'])
@api_action('OffAmazonPayments', 20, 2)
def get_capture_details(self, request, response, **kw):
"""Returns the status of a particular capture and the total amount
refunded on the capture.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 10, 1)
def close_authorization(self, request, response, **kw):
"""Closes an authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount'])
@structured_objects('RefundAmount')
@api_action('OffAmazonPayments', 10, 1)
def refund(self, request, response, **kw):
"""Refunds a previously captured amount.
"""
return self._post_request(request, kw, response)
@requires(['AmazonRefundId'])
@api_action('OffAmazonPayments', 20, 2)
def get_refund_details(self, request, response, **kw):
"""Returns the status of a particular refund.
"""
return self._post_request(request, kw, response)
@api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus')
def get_offamazonpayments_service_status(self, request, response, **kw):
"""Returns the operational status of the Off-Amazon Payments API
section.
"""
return self._post_request(request, kw, response)
|
8fd57c9c7c260154ace9ffc13ae832d7975922c4
|
5130754859e274cd06f63260439e5203c2000a11
|
/core/domain/image_services_test.py
|
a030cd6771bf56c1d33e74e81a3e65d9a788f353
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,410
|
py
|
image_services_test.py
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the image_services."""
from __future__ import annotations
import io
import os
import re
from core import feconf
from core import utils
from core.domain import image_services
from core.tests import test_utils
from PIL import Image
from PIL import ImageChops
class ImageServicesUnitTests(test_utils.GenericTestBase):
"""Tests for image_services."""
TEST_IMAGE_WIDTH = 3000
TEST_IMAGE_HEIGHT = 2092
def setUp(self) -> None:
super().setUp()
with utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'dummy_large_image.jpg'),
'rb', encoding=None) as f:
self.jpeg_raw_image = f.read()
with utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None) as f:
self.png_raw_image = f.read()
def test_image_dimensions_are_output_correctly(self) -> None:
height, width = (
image_services.get_image_dimensions(self.jpeg_raw_image))
self.assertEqual(self.TEST_IMAGE_HEIGHT, height)
self.assertEqual(self.TEST_IMAGE_WIDTH, width)
def test_compress_image_returns_correct_dimensions(self) -> None:
compressed_image = (
image_services.compress_image(self.jpeg_raw_image, 0.5))
height, width = (
image_services.get_image_dimensions(compressed_image))
self.assertEqual(self.TEST_IMAGE_HEIGHT * 0.5, height)
self.assertEqual(self.TEST_IMAGE_WIDTH * 0.5, width)
def test_invalid_scaling_factor_triggers_value_error(self) -> None:
value_exception = self.assertRaisesRegex(
ValueError,
re.escape(
'Scaling factor should be in the interval (0, 1], '
'received 1.100000.'))
with value_exception:
image_services.compress_image(self.jpeg_raw_image, 1.1)
value_exception = self.assertRaisesRegex(
ValueError,
re.escape(
'Scaling factor should be in the interval (0, 1], '
'received 0.000000.'))
with value_exception:
image_services.compress_image(self.jpeg_raw_image, 0)
value_exception = self.assertRaisesRegex(
ValueError,
re.escape(
'Scaling factor should be in the interval (0, 1], '
'received -1.000000.'))
with value_exception:
image_services.compress_image(self.jpeg_raw_image, -1)
def test_compression_results_in_correct_format(self) -> None:
compressed_image = (
image_services.compress_image(self.jpeg_raw_image, 0.7))
pil_image = Image.open(io.BytesIO(compressed_image))
self.assertEqual(pil_image.format, 'JPEG')
compressed_image = (
image_services.compress_image(self.png_raw_image, 0.7))
pil_image = Image.open(io.BytesIO(compressed_image))
self.assertEqual(pil_image.format, 'PNG')
def test_compression_results_in_identical_files(self) -> None:
with utils.open_file(
os.path.join(
feconf.TESTS_DATA_DIR, 'compressed_image.jpg'),
'rb', encoding=None) as f:
correct_compressed_image = f.read()
correct_height, correct_width = (
image_services.get_image_dimensions(correct_compressed_image))
compressed_image = (
image_services.compress_image(self.jpeg_raw_image, 0.5))
        # To compare the images fairly, the test needs to open and re-save the
        # compressed image with PIL, because the "golden image" (the image the
        # compressed one is compared against) was saved with PIL. Saving with
        # PIL applies a slight quality change that would otherwise be missing.
temp_image = Image.open(io.BytesIO(compressed_image))
image_format = temp_image.format
with io.BytesIO() as output:
temp_image.save(output, format=image_format)
compressed_image_content = output.getvalue()
height, width = image_services.get_image_dimensions(
compressed_image_content)
self.assertEqual(correct_height, height)
self.assertEqual(correct_width, width)
image1 = Image.open(io.BytesIO(correct_compressed_image)).convert('RGB')
image2 = Image.open(io.BytesIO(compressed_image_content)).convert('RGB')
diff = ImageChops.difference(image1, image2)
        # diff.getbbox() returns a bounding box covering all regions of
        # non-zero pixels. In other words, if a bounding box exists, the
        # difference contains non-zero areas, which means the two images
        # are not equal.
self.assertFalse(diff.getbbox())
|
ce1bd08c46698f765bf5cf30df67fb140a95701a
|
d09310d0190b81117c39aa823acf8a568d710997
|
/hough transform/21HT.py
|
062bae289508fee06bf33e1fd0703d301785b1e5
|
[
"MIT"
] |
permissive
|
o0o0o0o0o0o0o/image-processing-from-scratch
|
36a05ed7f7a765a1a0fa0bd8f0902a23728c15e9
|
bd33441263cc9def01e0eaa4bad91574f18a2462
|
refs/heads/master
| 2022-07-09T23:13:39.108767
| 2022-06-22T19:25:46
| 2022-06-22T19:25:46
| 173,010,773
| 1,061
| 334
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,447
|
py
|
21HT.py
|
#coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
def convolve(filter,mat,padding,strides):
    '''
    :param filter: convolution kernel; must be 2-D (2 x 1 also counts as 2-D), otherwise None is returned
    :param mat: input image
    :param padding: padding sizes (top, bottom, left, right)
    :param strides: step sizes of the sliding window
    :return: the convolved image (works for both grayscale and color images)
    @author: bilibili-会飞的吴克
    '''
result = None
filter_size = filter.shape
mat_size = mat.shape
if len(filter_size) == 2:
if len(mat_size) == 3:
channel = []
for i in range(mat_size[-1]):
pad_mat = np.pad(mat[:,:,i], ((padding[0], padding[1]), (padding[2], padding[3])), 'constant')
temp = []
for j in range(0,mat_size[0],strides[1]):
temp.append([])
for k in range(0,mat_size[1],strides[0]):
val = (filter*pad_mat[j:j+filter_size[0],k:k+filter_size[1]]).sum()
temp[-1].append(val)
channel.append(np.array(temp))
channel = tuple(channel)
result = np.dstack(channel)
elif len(mat_size) == 2:
channel = []
pad_mat = np.pad(mat, ((padding[0], padding[1]), (padding[2], padding[3])), 'constant')
for j in range(0, mat_size[0], strides[1]):
channel.append([])
for k in range(0, mat_size[1], strides[0]):
val = (filter * pad_mat[j:j + filter_size[0],k:k + filter_size[1]]).sum()
channel[-1].append(val)
result = np.array(channel)
return result
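# A minimal usage sketch (hypothetical helper, not part of the original script):
# applying convolve() above with a 3x3 Sobel kernel to a 2-D grayscale image.
# With a 3x3 kernel, padding [1,1,1,1] and strides [1,1] keep the output the
# same size as the input.
def _example_sobel_x(gray_img):
    sobel_x = np.array([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]])
    return convolve(sobel_x, gray_img, [1, 1, 1, 1], [1, 1])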
def AHTforCircles(edge,center_threhold_factor = None,score_threhold = None,min_center_dist = None,minRad = None,maxRad = None,center_axis_scale = None,radius_scale = None,halfWindow = None,max_circle_num = None):
    if center_threhold_factor is None:
        center_threhold_factor = 10.0
    if score_threhold is None:
        score_threhold = 15.0
    if min_center_dist is None:
        min_center_dist = 80.0
    if minRad is None:
        minRad = 0.0
    if maxRad is None:
        maxRad = 1e7*1.0
    if center_axis_scale is None:
        center_axis_scale = 1.0
    if radius_scale is None:
        radius_scale = 1.0
    if halfWindow is None:
        halfWindow = 2
    if max_circle_num is None:
        max_circle_num = 6
min_center_dist_square = min_center_dist**2
sobel_kernel_y = np.array([[-1.0, -2.0, -1.0], [0.0, 0.0, 0.0], [1.0, 2.0, 1.0]])
sobel_kernel_x = np.array([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]])
edge_x = convolve(sobel_kernel_x,edge,[1,1,1,1],[1,1])
edge_y = convolve(sobel_kernel_y,edge,[1,1,1,1],[1,1])
center_accumulator = np.zeros((int(np.ceil(center_axis_scale*edge.shape[0])),int(np.ceil(center_axis_scale*edge.shape[1]))))
k = np.array([[r for c in range(center_accumulator.shape[1])] for r in range(center_accumulator.shape[0])])
l = np.array([[c for c in range(center_accumulator.shape[1])] for r in range(center_accumulator.shape[0])])
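    # k and l hold, for every accumulator cell, its row index and its column
    # index respectively; dividing them by center_axis_scale maps accumulator
    # cells back to image coordinates when voting below.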
minRad_square = minRad**2
maxRad_square = maxRad**2
points = [[],[]]
edge_x_pad = np.pad(edge_x,((1,1),(1,1)),'constant')
edge_y_pad = np.pad(edge_y,((1,1),(1,1)),'constant')
Gaussian_filter_3 = 1.0 / 16 * np.array([(1.0, 2.0, 1.0), (2.0, 4.0, 2.0), (1.0, 2.0, 1.0)])
for i in range(edge.shape[0]):
for j in range(edge.shape[1]):
if not edge[i,j] == 0:
dx_neibor = edge_x_pad[i:i+3,j:j+3]
dy_neibor = edge_y_pad[i:i+3,j:j+3]
dx = (dx_neibor*Gaussian_filter_3).sum()
dy = (dy_neibor*Gaussian_filter_3).sum()
if not (dx == 0 and dy == 0):
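                    # Vote for candidate centers: t1/t2 are the row/column offsets
                    # from this edge point (i, j) to every accumulator cell (in image
                    # coordinates); a cell gets a vote when its squared distance t3
                    # lies in (minRad^2, maxRad^2) and it sits approximately on the
                    # line through (i, j) along the local gradient direction,
                    # i.e. dx*t1 - dy*t2 ~ 0.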
t1 = (k/center_axis_scale-i)
t2 = (l/center_axis_scale-j)
t3 = t1**2 + t2**2
temp = (t3 > minRad_square)&(t3 < maxRad_square)&(np.abs(dx*t1-dy*t2) < 1e-4)
center_accumulator[temp] += 1
points[0].append(i)
points[1].append(j)
M = center_accumulator.mean()
for i in range(center_accumulator.shape[0]):
for j in range(center_accumulator.shape[1]):
neibor = \
center_accumulator[max(0, i - halfWindow + 1):min(i + halfWindow, center_accumulator.shape[0]),
max(0, j - halfWindow + 1):min(j + halfWindow, center_accumulator.shape[1])]
if not (center_accumulator[i,j] >= neibor).all():
center_accumulator[i,j] = 0
    # non-maximum suppression
plt.imshow(center_accumulator,cmap='gray')
plt.axis('off')
plt.show()
center_threshold = M * center_threhold_factor
    possible_centers = np.array(np.where(center_accumulator > center_threshold))  # thresholding
sort_centers = []
for i in range(possible_centers.shape[1]):
sort_centers.append([])
sort_centers[-1].append(possible_centers[0,i])
sort_centers[-1].append(possible_centers[1, i])
sort_centers[-1].append(center_accumulator[sort_centers[-1][0],sort_centers[-1][1]])
sort_centers.sort(key=lambda x:x[2],reverse=True)
centers = [[],[],[]]
points = np.array(points)
for i in range(len(sort_centers)):
radius_accumulator = np.zeros(
(int(np.ceil(radius_scale * min(maxRad, np.sqrt(edge.shape[0] ** 2 + edge.shape[1] ** 2)) + 1))),dtype=np.float32)
if not len(centers[0]) < max_circle_num:
break
iscenter = True
for j in range(len(centers[0])):
d1 = sort_centers[i][0]/center_axis_scale - centers[0][j]
d2 = sort_centers[i][1]/center_axis_scale - centers[1][j]
if d1**2 + d2**2 < min_center_dist_square:
iscenter = False
break
if not iscenter:
continue
temp = np.sqrt((points[0,:] - sort_centers[i][0] / center_axis_scale) ** 2 + (points[1,:] - sort_centers[i][1] / center_axis_scale) ** 2)
temp2 = (temp > minRad) & (temp < maxRad)
temp = (np.round(radius_scale * temp)).astype(np.int32)
for j in range(temp.shape[0]):
if temp2[j]:
radius_accumulator[temp[j]] += 1
for j in range(radius_accumulator.shape[0]):
if j == 0 or j == 1:
continue
if not radius_accumulator[j] == 0:
radius_accumulator[j] = radius_accumulator[j]*radius_scale/np.log(j) #radius_accumulator[j]*radius_scale/j
score_i = radius_accumulator.argmax(axis=-1)
if radius_accumulator[score_i] < score_threhold:
iscenter = False
if iscenter:
centers[0].append(sort_centers[i][0]/center_axis_scale)
centers[1].append(sort_centers[i][1]/center_axis_scale)
centers[2].append(score_i/radius_scale)
centers = np.array(centers)
centers = centers.astype(np.float64)
return centers
def drawCircles(circles,edge,color = (0,0,255),err = 600):
if len(edge.shape) == 2:
result = np.dstack((edge,edge,edge))
else:
result = edge
for i in range(edge.shape[0]):
for j in range(edge.shape[1]):
dist_square = (circles[0]-i)**2 + (circles[1]-j)**2
e = np.abs(circles[2]**2 - dist_square)
if (e < err).any():
result[i,j] = color
if (dist_square < 25.0).any():
result[i,j] = (255,0,0)
return result
if __name__=='__main__':
pic_path = './HoughCircleImg/'
pics = os.listdir(pic_path)
params = {
'1.jpeg':{
'center_threhold_factor': 3.33,
'score_threhold':15.0,
'min_center_dist':80.0,
'minRad':0.0,
'maxRad':1e7*1.0,
'center_axis_scale':1.0,
'radius_scale':1.0,
'halfWindow':2,
'max_circle_num':1
},
'4.jpg':{
'center_threhold_factor': 2.0,
'score_threhold': 15.0,
'min_center_dist': 80.0,
'minRad': 0.0,
'maxRad': 1e7 * 1.0,
'center_axis_scale': 1.0,
'radius_scale': 1.0,
'halfWindow': 2,
'max_circle_num': 6
},
'2.jpeg':{
'center_threhold_factor': 3.33,
'score_threhold': 50.0,
'min_center_dist': 80.0,
'minRad': 0.0,
'maxRad': 1e7 * 1.0,
'center_axis_scale': 0.9,
'radius_scale': 1.0,
'halfWindow': 2,
'max_circle_num': 1
},
'3.jpeg':{
'center_threhold_factor': 1.5,
'score_threhold': 56.0,
'min_center_dist': 80.0,
'minRad': 0.0,
'maxRad': 1e7 * 1.0,
'center_axis_scale': 0.8,
'radius_scale': 1.0,
'halfWindow': 2,
'max_circle_num': 1
},
'0.jpg':{
'center_threhold_factor': 1.5,
'score_threhold': 30.0,
'min_center_dist': 80.0,
'minRad': 0.0,
'maxRad': 1e7 * 1.0,
'center_axis_scale': 1.0,
'radius_scale': 1.0,
'halfWindow': 2,
'max_circle_num': 1
}
}
for i in pics:
if i[-5:] == '.jpeg' or i[-4:] == '.jpg':
img = plt.imread(pic_path+i)
blurred = cv2.GaussianBlur(img, (3, 3), 0)
plt.imshow(blurred)
plt.axis('off')
plt.show()
if not len(blurred.shape) == 2:
gray = cv2.cvtColor(blurred, cv2.COLOR_RGB2GRAY)
else:
gray = blurred
            edge = cv2.Canny(gray, 50, 150)  # binary image (0 or 255): result of Canny edge detection
circles = AHTforCircles(edge,
center_threhold_factor=params[i]['center_threhold_factor'],score_threhold=params[i]['score_threhold'],min_center_dist=params[i]['min_center_dist'],minRad=params[i]['minRad'],
maxRad=params[i]['maxRad'],center_axis_scale=params[i]['center_axis_scale'],radius_scale=params[i]['radius_scale'],
halfWindow=params[i]['halfWindow'],max_circle_num=params[i]['max_circle_num'])
final_img = drawCircles(circles,blurred)
plt.imshow(final_img)
plt.axis('off')
plt.show()
|
93e0a3975a6fbd359b16ea361f6ade68aca20bc5
|
4b9029d66c0b041f6b4797fbeb3572084a6c0dbc
|
/tests/plugin/NaomiTestCase.py
|
8df33c31dca62db2f3e9daa9746478fe2b77a442
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
borela/naomi
|
3a087a294a67175891f3283dfc9d4daaad64a326
|
3ffae6beb69788002cb7b86b7e6756e783473f66
|
refs/heads/master
| 2023-07-06T21:54:59.912344
| 2022-07-08T14:34:04
| 2022-07-08T14:34:04
| 56,852,832
| 618
| 30
|
NOASSERTION
| 2022-07-08T14:34:05
| 2016-04-22T12:10:29
|
MQL4
|
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
NaomiTestCase.py
|
# Licensed under the Apache License, Version 2.0 (the “License”); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from os.path import dirname, join
from sublime import active_window, Region, set_clipboard
from unittest import TestCase
import sys
class NaomiTestCase(TestCase):
def loadFixture(self, *relative_path):
if self.fixture_loaded:
raise RuntimeError("Fixture already loaded.")
base_path = dirname(sys.modules[self.__class__.__module__].__file__)
full_path = join(base_path, *relative_path)
# Load fixture.
fixture_content = ""
with open(full_path, "r") as file:
fixture_content = file.read()
self.setViewText(fixture_content)
self.setCursorPosition(0)
self.fixture_loaded = True
def setCursorPosition(self, offset):
self.view.sel().clear()
self.view.sel().add(Region(offset))
def setViewText(self, text):
self.view.run_command("insert", {"characters": text})
def setUp(self):
set_clipboard("")
self.view = active_window().new_file()
self.view.set_scratch(True)
self.fixture_loaded = False
def tearDown(self):
if self.view:
self.view.close()
|
1fe73419240c643672ef7dd5e039ca49d44d1d87
|
31cf77b4c0342c6148b35ae2613d5e2501d5e755
|
/src/encoded/tests/test_types_antibody_lot.py
|
1cdc1cfcc76d5243bf3a05c4d9e066cc0b20f8f7
|
[
"MIT"
] |
permissive
|
ENCODE-DCC/encoded
|
096de8a6d60c959a783cc9517f1d60bd6c21b71f
|
80e05610c79b46d0890228555bb03e436b2fef11
|
refs/heads/dev
| 2023-08-08T15:45:07.493187
| 2023-08-03T20:01:24
| 2023-08-03T20:01:24
| 7,045,549
| 110
| 69
|
MIT
| 2023-09-12T23:59:45
| 2012-12-07T00:52:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 41,761
|
py
|
test_types_antibody_lot.py
|
import pytest
# A single characterization (primary or secondary) associated with an ab that is not submitted
# for review should result in a 'not pursued' antibody lot status.
def test_not_submitted_secondary_missing_primary(testapp, motif_enrichment, antibody_lot):
char = testapp.post_json('/antibody_characterization', motif_enrichment).json['@graph'][0]
testapp.patch_json(char['@id'], {'status': 'not submitted for review by lab'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'not pursued'
def test_have_primary_missing_secondary(testapp,
immunoblot,
antibody_lot,
human,
target,
wrangler,
document,
k562):
char = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
characterization_review = {
'biosample_ontology': k562['uuid'],
'organism': human['@id'],
'lane': 1
}
# An in progress primary and no secondary should result in awaiting characterization
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'awaiting characterization'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting a compliant primary and submission of a secondary characterization.'
# Not yet reviewed primary and no secondary should result in ab status = awaiting characterization
characterization_review['lane_status'] = 'pending dcc review'
testapp.put_json(char['@id'], immunoblot).json['@graph'][0]
testapp.patch_json(char['@id'], {
'characterization_reviews': [characterization_review],
'status': 'pending dcc review'
})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'awaiting characterization'
assert ab['lot_reviews'][0]['detail'] == 'Pending review of primary characterization and ' + \
'awaiting submission of a secondary characterization.'
# No secondary and a primary that is not submitted for review should result in
# ab status = not pursued
testapp.put_json(char['@id'], immunoblot).json['@graph'][0]
testapp.patch_json(char['@id'], {'status': 'not submitted for review by lab'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'not pursued'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting a compliant primary and submission of a secondary characterization.'
'''
# Compliant primary and no secondary should result in ab status = not characterized to standards
characterization_review['lane_status'] = 'compliant'
testapp.patch_json(char['@id'], {
'status': 'compliant',
'characterization_reviews': [characterization_review],
'reviewed_by': wrangler['@id'],
'documents': [document['@id']]
})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'not characterized to standards'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting submission of secondary characterization(s).'
'''
def test_have_secondary_missing_primary(testapp,
mass_spec,
motif_enrichment,
antibody_lot,
human,
target,
wrangler,
document):
# in progress secondary and no primary = awaiting characterization
char1 = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'awaiting characterization'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting submission of a primary characterization ' + \
'and a compliant secondary characterization.'
# Set the secondary for review and the ab status should be awaiting characterization
testapp.patch_json(char1['@id'], {'status': 'pending dcc review'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'awaiting characterization'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting submission of a primary characterization ' + \
'and pending review of a secondary characterization.'
# A compliant secondary without primaries is partially characterized
testapp.patch_json(char1['@id'], {'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'partially characterized'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting submission of a primary characterization.'
# Adding another secondary, regardless of status, should not change the ab status from
# partially characterized.
char2 = testapp.post_json('/antibody_characterization', motif_enrichment).json['@graph'][0]
testapp.patch_json(char2['@id'], {'status': 'not compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'partially characterized'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting submission of a primary characterization.'
# If there are multiple secondary characterizations, the one with the highest status ranking should
# prevail
def test_multiple_secondary_one_primary(testapp,
mass_spec,
motif_enrichment,
immunoblot,
antibody_lot,
human,
target,
wrangler,
document,
k562):
prim_char = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
sec_char1 = testapp.post_json('/antibody_characterization', motif_enrichment).json['@graph'][0]
sec_char2 = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
characterization_review = {
'biosample_ontology': k562['uuid'],
'organism': human['@id'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(prim_char['@id'], {'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]})
testapp.patch_json(sec_char1['@id'], {'status': 'not compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'not characterized to standards'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting a compliant secondary characterization.'
# Add another secondary characterization with exempted status and it should override the
# not compliant of the motif enrichment characterization
testapp.patch_json(sec_char2['@id'], {'status': 'exempt from standards',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'submitter_comment': 'Required plea for exemption',
'notes': 'Required reviewer note'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'characterized to standards with exemption'
assert ab['lot_reviews'][0]['detail'] == 'Fully characterized with exemption.'
# Making the not compliant motif enrichment characterization now compliant should
# override the exempt from standards mass spec
testapp.patch_json(sec_char1['@id'], {'status': 'compliant'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'characterized to standards'
assert ab['lot_reviews'][0]['detail'] == 'Fully characterized.'
# An antibody against histone modifications with primaries in human (compliant) and mouse
# (not compliant) and an exempt secondary should yield exemption in human and not
# characterized to standards in mouse
def test_histone_mod_characterizations(testapp,
immunoblot,
immunoprecipitation,
mass_spec,
antibody_lot,
human,
mouse,
target_H3K9me3,
mouse_target_H3K9me3,
wrangler,
document,
liver,
erythroblast):
prim_char_human = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
prim_char_mouse = testapp.post_json('/antibody_characterization', immunoprecipitation).json['@graph'][0]
sec_char = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
testapp.patch_json(antibody_lot['@id'], {'targets': [target_H3K9me3['@id'], mouse_target_H3K9me3['@id']]})
characterization_review_human = {
'biosample_ontology': liver['uuid'],
'organism': human['@id'],
'lane': 1,
'lane_status': 'compliant'
}
characterization_review_mouse = {
'biosample_ontology': liver['uuid'],
'organism': mouse['@id'],
'lane': 1,
'lane_status': 'not compliant'
}
testapp.patch_json(prim_char_human['@id'], {'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'target': target_H3K9me3['@id'],
'characterization_reviews': [
characterization_review_human]})
testapp.patch_json(prim_char_mouse['@id'], {'status': 'not compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'target': mouse_target_H3K9me3['@id'],
'characterization_reviews': [
characterization_review_mouse]})
testapp.patch_json(sec_char['@id'], {'status': 'exempt from standards',
'submitter_comment': 'Please exempt this.',
'notes': 'OK.',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'target': target_H3K9me3['@id']})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 2
for review in ab['lot_reviews']:
if human['@id'] in review['organisms']:
assert review['status'] == 'characterized to standards with exemption'
assert review['detail'] == 'Fully characterized with exemption.'
if mouse['@id'] in review['organisms']:
assert review['status'] == 'not characterized to standards'
assert review['detail'] == 'Awaiting a compliant primary characterization.'
# Adding another primary in mouse that is exempt from standards should make mouse now exempt
prim_char_mouse2 = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
characterization_review_mouse['lane_status'] = 'not compliant'
characterization_review_mouse2 = characterization_review_mouse.copy()
characterization_review_mouse2.update({'biosample_ontology': erythroblast['uuid'],
'lane_status': 'exempt from standards',
'lane': 2})
testapp.patch_json(prim_char_mouse2['@id'], {'status': 'exempt from standards',
'reviewed_by': wrangler['@id'],
'submitter_comment': 'Please exempt this.',
'notes': 'OK',
'documents': [document['@id']],
'target': mouse_target_H3K9me3['@id'],
'characterization_reviews': [
characterization_review_mouse,
characterization_review_mouse2]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 2
for review in ab['lot_reviews']:
assert review['status'] == 'characterized to standards with exemption'
assert review['detail'] == 'Fully characterized with exemption.'
# A test for multiple lanes to make sure all of the statuses for each cell type and tissue
# are calculated properly.
def test_multi_lane_primary(testapp,
immunoblot,
immunoprecipitation,
mass_spec,
antibody_lot,
human,
mouse,
target,
mouse_target,
wrangler,
document,
k562,
hepg2,
gm12878):
prim_char = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
sec_char = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
testapp.patch_json(antibody_lot['@id'], {'targets': [target['@id'], mouse_target['@id']]})
characterization_review = {
'biosample_ontology': k562['uuid'],
'organism': human['@id'],
'lane': 1,
'lane_status': 'compliant'
}
characterization_review_2 = {'biosample_ontology': hepg2['uuid'],
'organism': human['@id'],
'lane': 2,
'lane_status': 'not compliant'}
characterization_review_3 = {'biosample_ontology': gm12878['uuid'],
'organism': human['@id'],
'lane': 3,
'lane_status': 'exempt from standards'}
testapp.patch_json(prim_char['@id'], {'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'submitter_comment': 'Please exempt this.',
'notes': 'OK.',
'target': target['@id'],
'characterization_reviews': [characterization_review,
characterization_review_2,
characterization_review_3]})
testapp.patch_json(sec_char['@id'], {'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'target': target['@id']})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 3
for review in ab['lot_reviews']:
if review['biosample_term_name'] == 'K562':
assert review['status'] == 'characterized to standards'
assert review['detail'] == 'Fully characterized.'
if review['biosample_term_name'] == 'GM12878':
assert review['status'] == 'characterized to standards with exemption'
assert review['detail'] == 'Fully characterized with exemption.'
if review['biosample_term_name'] == 'HepG2':
assert review['status'] == 'not characterized to standards'
assert review['detail'] == 'Awaiting a compliant primary characterization.'
# Now, if we change the secondary to be not reviewed, the antibody should now only be
# partially characterized on the strength of the compliant and exempt primaries. The
# not compliant primary should now be not characterized to standards.
testapp.patch_json(sec_char['@id'], {'status': 'not reviewed'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 3
for review in ab['lot_reviews']:
if review['biosample_term_name'] in ['K562', 'GM12878']:
assert review['status'] == 'partially characterized'
if review['biosample_term_name'] == 'HepG2':
assert review['status'] == 'not characterized to standards'
assert review['detail'] == 'Awaiting a compliant primary and secondary characterization not reviewed.'
# Status calculation test for when primaries have extraneous characterization_reviews
def test_bonus_char_reviews_in_primary(testapp,
immunoblot,
immunoprecipitation,
mass_spec,
antibody_lot,
human,
target,
wrangler,
document,
k562,
hepg2):
# A not submitted for review primary with no secondary should give status of not pursued
prim_char1 = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
characterization_review1 = {
'biosample_ontology': k562['uuid'],
'organism': human['@id'],
'lane': 1,
'lane_status': 'pending dcc review'
}
testapp.patch_json(prim_char1['@id'], {'status': 'not submitted for review by lab',
'target': target['@id'],
'characterization_reviews': [characterization_review1]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'not pursued'
assert ab['lot_reviews'][0]['detail'] == 'Awaiting a compliant primary and submission of a secondary characterization.'
# Adding an in progress primary in a different cell type should result in the ab awaiting characterization
prim_char2 = testapp.post_json('/antibody_characterization', immunoprecipitation).json['@graph'][0]
characterization_review2 = {
'biosample_ontology': hepg2['uuid'],
'organism': human['@id'],
'lane': 1,
'lane_status': 'pending dcc review'
}
testapp.patch_json(prim_char2['@id'], {'status': 'in progress',
'target': target['@id'],
'characterization_reviews': [characterization_review2]})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 2
for review in ab['lot_reviews']:
if review['biosample_term_name'] == 'K562':
assert review['status'] == 'not pursued'
if review['biosample_term_name'] == 'HepG2':
assert review['status'] == 'awaiting characterization'
assert review['detail'] == 'Awaiting a compliant primary and submission of a secondary characterization.'
# Adding an exempted secondary should make the ab partially characterized
sec_char = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
testapp.patch_json(sec_char['@id'], {'status': 'exempt from standards',
'target': target['@id'],
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'submitter_comment': 'Please exempt this.',
'notes': 'OK.'})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert len(ab['lot_reviews']) == 2
for review in ab['lot_reviews']:
if review['biosample_term_name'] in ['K562', 'HepG2']:
assert review['status'] == 'partially characterized'
assert review['detail'] == 'Awaiting a compliant primary characterization.'
# Status calculation test for when primary and secondary characterizations are both not reviewed
def test_chars_not_reviewed(testapp,
immunoblot,
mass_spec,
antibody_lot,
target,
wrangler):
prim_char = testapp.post_json('/antibody_characterization', immunoblot).json['@graph'][0]
testapp.patch_json(prim_char['@id'], {'status': 'not reviewed',
'reviewed_by': wrangler['@id'],
'target': target['@id']})
sec_char = testapp.post_json('/antibody_characterization', mass_spec).json['@graph'][0]
testapp.patch_json(sec_char['@id'], {'status': 'not reviewed',
'reviewed_by': wrangler['@id'],
'target': target['@id']})
res = testapp.get(antibody_lot['@id'] + '@@index-data')
ab = res.json['object']
assert ab['lot_reviews'][0]['status'] == 'awaiting characterization'
assert ab['lot_reviews'][0]['detail'] == 'Primary and secondary characterizations not reviewed.'
def test_encode4_tagged_ab_review_status(testapp,
encode4_tag_antibody_lot,
biosample_characterization_no_review,
biosample_characterization_2nd_opinion,
biosample_characterization_exempt,
biosample_characterization_not_compliant,
biosample_characterization_compliant,
biosample_2_liver):
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 0
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'NTR:99999999',
'biosample_term_name': 'any cell type or tissue',
'detail': 'Awaiting to be linked to biosample characterizations.',
'organisms': ['/organisms/human/'],
'status': 'awaiting characterization',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_not_compliant['@id'],
{'antibody': encode4_tag_antibody_lot['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 1
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Awaiting compliant biosample characterizations.',
'organisms': ['/organisms/human/'],
'status': 'not characterized to standards',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_no_review['@id'],
{'antibody': encode4_tag_antibody_lot['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 2
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Awaiting to be linked to biosample characterizations.',
'organisms': ['/organisms/human/'],
'status': 'awaiting characterization',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_2nd_opinion['@id'],
{'antibody': encode4_tag_antibody_lot['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 3
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Awaiting to be linked to biosample characterizations.',
'organisms': ['/organisms/human/'],
'status': 'awaiting characterization',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_exempt['@id'],
{'antibody': encode4_tag_antibody_lot['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 4
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Fully characterized with exemption.',
'organisms': ['/organisms/human/'],
'status': 'characterized to standards with exemption',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_compliant['@id'],
{'antibody': encode4_tag_antibody_lot['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 5
assert res.json['object']['lot_reviews'] == [{
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Fully characterized.',
'organisms': ['/organisms/human/'],
'status': 'characterized to standards',
'targets': ['/targets/gfp-human/'],
}]
testapp.patch_json(biosample_characterization_exempt['@id'],
{'characterizes': biosample_2_liver['@id']})
res = testapp.get(encode4_tag_antibody_lot['@id'] + '@@index-data')
assert len(res.json['object']['used_by_biosample_characterizations']) == 5
assert len(res.json['object']['lot_reviews']) == 2
assert {
'biosample_term_id': 'UBERON:0000948',
'biosample_term_name': 'heart',
'detail': 'Fully characterized.',
'organisms': ['/organisms/human/'],
'status': 'characterized to standards',
'targets': ['/targets/gfp-human/'],
} in res.json['object']['lot_reviews']
assert {
'biosample_term_id': 'UBERON:0002107',
'biosample_term_name': 'liver',
'detail': 'Fully characterized with exemption.',
'organisms': ['/organisms/human/'],
'status': 'characterized to standards with exemption',
'targets': ['/targets/gfp-human/'],
} in res.json['object']['lot_reviews']
def test_encode3_nontagged_ab_compliant_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_compliant,
):
# Antibody characterization only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With unreviewed biosample characterization
testapp.patch_json(
biosample_characterization_compliant['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
def test_encode3_tagged_ab_unreviewed_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_no_review,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With unreviewed biosample characterization
testapp.patch_json(
biosample_characterization_no_review['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
def test_encode3_tagged_ab_compliant_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_compliant,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With compliant biosample characterization
testapp.patch_json(
biosample_characterization_compliant['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'characterized to standards'
assert lot_reviews[0]['detail'] == 'Fully characterized.'
def test_encode3_tagged_ab_exempt_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_exempt,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With exempt biosample characterization
testapp.patch_json(
biosample_characterization_exempt['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == (
'characterized to standards with exemption'
)
assert lot_reviews[0]['detail'] == 'Fully characterized with exemption.'
def test_encode3_tagged_ab_secondary_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_2nd_opinion,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With secondary opinion biosample characterization
testapp.patch_json(
biosample_characterization_2nd_opinion['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
def test_encode3_tagged_ab_not_compliant_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_not_compliant,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# With non-compliant biosample characterization
testapp.patch_json(
biosample_characterization_not_compliant['@id'],
{'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
def test_encode3_tagged_ab_other_exempt_biosample_char(
testapp,
antibody_lot,
gfp_target,
immunoblot,
biosample,
wrangler,
document,
biosample_characterization_exempt,
biosample_2_liver,
):
testapp.patch_json(antibody_lot['@id'], {'targets': [gfp_target['@id']]})
# Antibody characterizations only
prim_char = testapp.post_json(
'/antibody_characterization', immunoblot
).json['@graph'][0]
characterization_review = {
'biosample_ontology': biosample['biosample_ontology'],
'organism': biosample['organism'],
'lane': 1,
'lane_status': 'compliant'
}
testapp.patch_json(
prim_char['@id'],
{
'target': gfp_target['@id'],
'status': 'compliant',
'reviewed_by': wrangler['@id'],
'documents': [document['@id']],
'characterization_reviews': [characterization_review]
}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 1
assert lot_reviews[0]['status'] == 'partially characterized'
assert lot_reviews[0]['detail'] == (
'Awaiting submission of a compliant secondary characterization.'
)
# Different biosamples between antibody characterization and biosample
# characterization
testapp.patch_json(
biosample_characterization_exempt['@id'],
{'characterizes': biosample_2_liver['@id'], 'antibody': antibody_lot['@id']}
)
lot_reviews = testapp.get(
antibody_lot['@id'] + '@@index-data'
).json['object']['lot_reviews']
assert len(lot_reviews) == 2
assert {
"biosample_term_id": "UBERON:0000948",
"biosample_term_name": "heart",
"detail": "Awaiting submission of a compliant secondary characterization.",
"organisms": [
"/organisms/human/"
],
"status": "partially characterized",
"targets": [
"/targets/gfp-human/"
]
} in lot_reviews
assert {
"biosample_term_id": "UBERON:0002107",
"biosample_term_name": "liver",
"detail": "Fully characterized with exemption.",
"organisms": [
"/organisms/human/"
],
"status": "characterized to standards with exemption",
"targets": [
"/targets/gfp-human/"
]
} in lot_reviews
|
e59f3a1c1a5d6961eb974c836a4c6d02a68f94de
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/ExifRead/exifread/utils.pyi
|
d534019c3683cbf7f56e444d53ab82583b4a139c
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 677
|
pyi
|
utils.pyi
|
from collections.abc import Mapping
from fractions import Fraction
from typing import Any, TypeVar, overload
from typing_extensions import Self
_T = TypeVar("_T")
@overload
def ord_(dta: str) -> int: ... # type: ignore[misc]
@overload
def ord_(dta: _T) -> _T: ...
def make_string(seq: str | list[int]) -> str: ...
def make_string_uc(seq: str | list[int]) -> str: ...
def get_gps_coords(tags: Mapping[str, Any]) -> tuple[float, float]: ...
class Ratio(Fraction):
def __new__(cls, numerator: int = ..., denominator: int | None = ...) -> Self: ...
@property
def num(self) -> int: ...
@property
def den(self) -> int: ...
def decimal(self) -> float: ...
|
ca60d0cd595a4ed62989a28b93c942a85b224994
|
cb9556a6cd54c480d59814540c42d96fa989b290
|
/beeprint/utils.py
|
93046340cc0c29cc1b1e950f8f1e0b8e9bc74619
|
[] |
no_license
|
panyanyany/beeprint
|
b48c9cdf4d737fc751373b51bea2997e66392592
|
54c38876b3c8f2009bb74dec1dffbc5ee8086498
|
refs/heads/master
| 2023-09-05T09:09:35.016032
| 2023-08-29T02:46:03
| 2023-08-29T02:46:03
| 17,399,131
| 433
| 21
| null | 2016-12-19T05:31:54
| 2014-03-04T10:59:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,478
|
py
|
utils.py
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import inspect
import sys
import types
import traceback
_unicode = None
if sys.version_info < (3, 0):
pyv = 2
_unicode = unicode
else:
pyv = 3
_unicode = str
def is_class_method(name, val):
if pyv == 2:
return isinstance(val, types.MethodType) and val.im_self is None
elif pyv == 3:
        # in python 2, a class method is an unbound method type
        # in python 3, a class method is just a function type, the same as a plain function
raise Exception("python 3 only has function type and bound method type")
def is_instance_method(name, val):
if pyv == 3:
return inspect.ismethod(val)
elif pyv == 2:
return isinstance(val, types.MethodType) and val.im_self is not None
def is_pan_function(name, val):
"""detect pan-function which including function and bound method in python 3
function and unbound method and bound method in python 2
"""
return inspect.isfunction(val) or inspect.ismethod(val) or inspect.isbuiltin(val)
def print_exc_plus():
"""
Print the usual traceback information, followed by a listing of all the
local variables in each frame.
"""
tb = sys.exc_info()[2]
while 1:
if not tb.tb_next:
break
tb = tb.tb_next
stack = []
f = tb.tb_frame
while f:
stack.append(f)
f = f.f_back
stack.reverse()
traceback.print_exc()
print("Locals by frame, innermost last")
for frame in stack:
print()
print("Frame %s in %s at line %s" % (frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
for key, value in frame.f_locals.items():
print("\t%20s = " % key, end='')
# We have to be careful not to cause a new error in our error
# printer! Calling str() on an unknown object could cause an
# error we don't want.
try:
print(value)
except:
print("<ERROR WHILE PRINTING VALUE>")
def is_newline_obj(o):
if hasattr(o, '__module__'):
return True
return False
def is_class_instance(o):
# print('%s: class instance: %s' % (inspect.isclass(o), o))
if o is None:
return False
try:
# to detect:
# old-style class & new-style class
# instance of old-style class and of new-style class
# method of instance of both class
# function
# o.__module__ in python 3.5 some instance has no this attribute
if (inspect.isclass(o)
or inspect.isfunction(o)
or inspect.ismethod(o)):
return False
if isinstance(o, (int, float, list, tuple, dict, str, _unicode)):
return False
return True
except:
pass
return False
def is_class(o):
# print('%s: class: %s' % (inspect.isclass(o), o))
return inspect.isclass(o)
def get_name(o):
if hasattr(o, '__name__'):
return o.__name__
if hasattr(o, '__class__'):
return o.__class__.__name__
raise Exception("%s" % type(o))
def has_parent_class(obj, parent_name):
if hasattr(obj, '__mro__'):
mro = obj.__mro__
elif hasattr(obj, '__class__'):
mro = obj.__class__.__mro__
else:
return False
for cls in mro:
cls_name = get_name(cls)
if cls_name.endswith(parent_name):
return True
return False
def has_custom_repr(o):
repr_typ_name = lambda o: type(o.__repr__).__name__
builtin_repr_names = ['method-wrapper', 'wrapper_descriptor', 'method-wrapper']
return hasattr(o, '__repr__') and (repr_typ_name(o) not in builtin_repr_names)
def get_type(o):
if is_class_instance(o):
label = 'instance'
elif inspect.isfunction(o):
label = 'function'
elif inspect.isbuiltin(o):
label = 'builtin'
elif inspect.ismethod(o):
label = 'method'
else:
        # o is a class itself, not an instance
label = 'class'
return label
def is_base_type(o):
return isinstance(o, (list, tuple, dict, int, float, str, _unicode))
|
32ad2741f09341d65aaa56a1134323ffb9120d6c
|
99d79ada2d3b7746573f071823ec61f5f853d7a3
|
/examples/riscv_mini/conftest.py
|
1b4e91567b99c8c0c087f9d7e68e0ef0ab592058
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
phanrahan/magma
|
d8062c6163e2c2c2cedef82317dc8cc40038220a
|
b05fe5303ed17e668c6ec2ec3558cd5a52eff787
|
refs/heads/master
| 2023-08-23T18:08:22.494869
| 2023-08-08T18:53:05
| 2023-08-17T16:16:44
| 84,332,281
| 227
| 21
|
NOASSERTION
| 2023-09-14T21:32:19
| 2017-03-08T14:57:09
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
conftest.py
|
import pytest
from magma.config import config as magma_config
from magma.util import reset_global_context
@pytest.fixture(autouse=True)
def riscv_mini_test():
magma_config.compile_dir = 'normal'
reset_global_context()
|
875d5aa21f31cc534c7971c41d23f3e995f8f5b8
|
8a7507d2a51e85085114e685f052def13062d365
|
/tests/lib/canonical.py
|
a8b4e3a7d611d32fe4fe578b4c782d0f6d06cf69
|
[
"MIT"
] |
permissive
|
yaml/pyyaml
|
cc0715b0e72d792217403ef99e59368da175722a
|
155ec463f6a854ac14ccd5e2dda8017ce42a508a
|
refs/heads/main
| 2023-09-03T22:40:59.106106
| 2023-08-29T22:06:59
| 2023-08-29T22:07:06
| 2,700,147
| 2,334
| 541
|
MIT
| 2023-09-05T17:02:28
| 2011-11-03T05:09:49
|
Python
|
UTF-8
|
Python
| false
| false
| 12,371
|
py
|
canonical.py
|
import yaml, yaml.composer, yaml.constructor, yaml.resolver
class CanonicalError(yaml.YAMLError):
pass
class CanonicalScanner:
def __init__(self, data):
if isinstance(data, bytes):
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
raise CanonicalError("utf-8 stream is expected")
self.data = data+'\0'
self.index = 0
self.tokens = []
self.scanned = False
def check_token(self, *choices):
if not self.scanned:
self.scan()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
if not self.scanned:
self.scan()
if self.tokens:
return self.tokens[0]
def get_token(self, choice=None):
if not self.scanned:
self.scan()
token = self.tokens.pop(0)
if choice and not isinstance(token, choice):
raise CanonicalError("unexpected token "+repr(token))
return token
def get_token_value(self):
token = self.get_token()
return token.value
def scan(self):
self.tokens.append(yaml.StreamStartToken(None, None))
while True:
self.find_token()
ch = self.data[self.index]
if ch == '\0':
self.tokens.append(yaml.StreamEndToken(None, None))
break
elif ch == '%':
self.tokens.append(self.scan_directive())
elif ch == '-' and self.data[self.index:self.index+3] == '---':
self.index += 3
self.tokens.append(yaml.DocumentStartToken(None, None))
elif ch == '[':
self.index += 1
self.tokens.append(yaml.FlowSequenceStartToken(None, None))
elif ch == '{':
self.index += 1
self.tokens.append(yaml.FlowMappingStartToken(None, None))
elif ch == ']':
self.index += 1
self.tokens.append(yaml.FlowSequenceEndToken(None, None))
elif ch == '}':
self.index += 1
self.tokens.append(yaml.FlowMappingEndToken(None, None))
elif ch == '?':
self.index += 1
self.tokens.append(yaml.KeyToken(None, None))
elif ch == ':':
self.index += 1
self.tokens.append(yaml.ValueToken(None, None))
elif ch == ',':
self.index += 1
self.tokens.append(yaml.FlowEntryToken(None, None))
elif ch == '*' or ch == '&':
self.tokens.append(self.scan_alias())
elif ch == '!':
self.tokens.append(self.scan_tag())
elif ch == '"':
self.tokens.append(self.scan_scalar())
else:
raise CanonicalError("invalid token")
self.scanned = True
DIRECTIVE = '%YAML 1.1'
def scan_directive(self):
if self.data[self.index:self.index+len(self.DIRECTIVE)] == self.DIRECTIVE and \
self.data[self.index+len(self.DIRECTIVE)] in ' \n\0':
self.index += len(self.DIRECTIVE)
return yaml.DirectiveToken('YAML', (1, 1), None, None)
else:
raise CanonicalError("invalid directive")
def scan_alias(self):
if self.data[self.index] == '*':
TokenClass = yaml.AliasToken
else:
TokenClass = yaml.AnchorToken
self.index += 1
start = self.index
while self.data[self.index] not in ', \n\0':
self.index += 1
value = self.data[start:self.index]
return TokenClass(value, None, None)
def scan_tag(self):
self.index += 1
start = self.index
while self.data[self.index] not in ' \n\0':
self.index += 1
value = self.data[start:self.index]
if not value:
value = '!'
elif value[0] == '!':
value = 'tag:yaml.org,2002:'+value[1:]
elif value[0] == '<' and value[-1] == '>':
value = value[1:-1]
else:
value = '!'+value
return yaml.TagToken(value, None, None)
QUOTE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
QUOTE_REPLACES = {
'\\': '\\',
'\"': '\"',
' ': ' ',
'a': '\x07',
'b': '\x08',
'e': '\x1B',
'f': '\x0C',
'n': '\x0A',
'r': '\x0D',
't': '\x09',
'v': '\x0B',
'N': '\u0085',
'L': '\u2028',
'P': '\u2029',
'_': '_',
'0': '\x00',
}
def scan_scalar(self):
self.index += 1
chunks = []
start = self.index
ignore_spaces = False
while self.data[self.index] != '"':
if self.data[self.index] == '\\':
ignore_spaces = False
chunks.append(self.data[start:self.index])
self.index += 1
ch = self.data[self.index]
self.index += 1
if ch == '\n':
ignore_spaces = True
elif ch in self.QUOTE_CODES:
length = self.QUOTE_CODES[ch]
code = int(self.data[self.index:self.index+length], 16)
chunks.append(chr(code))
self.index += length
else:
if ch not in self.QUOTE_REPLACES:
raise CanonicalError("invalid escape code")
chunks.append(self.QUOTE_REPLACES[ch])
start = self.index
elif self.data[self.index] == '\n':
chunks.append(self.data[start:self.index])
chunks.append(' ')
self.index += 1
start = self.index
ignore_spaces = True
elif ignore_spaces and self.data[self.index] == ' ':
self.index += 1
start = self.index
else:
ignore_spaces = False
self.index += 1
chunks.append(self.data[start:self.index])
self.index += 1
return yaml.ScalarToken(''.join(chunks), False, None, None)
def find_token(self):
found = False
while not found:
while self.data[self.index] in ' \t':
self.index += 1
if self.data[self.index] == '#':
while self.data[self.index] != '\n':
self.index += 1
if self.data[self.index] == '\n':
self.index += 1
else:
found = True
class CanonicalParser:
def __init__(self):
self.events = []
self.parsed = False
def dispose(self):
pass
# stream: STREAM-START document* STREAM-END
def parse_stream(self):
self.get_token(yaml.StreamStartToken)
self.events.append(yaml.StreamStartEvent(None, None))
while not self.check_token(yaml.StreamEndToken):
if self.check_token(yaml.DirectiveToken, yaml.DocumentStartToken):
self.parse_document()
else:
raise CanonicalError("document is expected, got "+repr(self.tokens[0]))
self.get_token(yaml.StreamEndToken)
self.events.append(yaml.StreamEndEvent(None, None))
# document: DIRECTIVE? DOCUMENT-START node
def parse_document(self):
node = None
if self.check_token(yaml.DirectiveToken):
self.get_token(yaml.DirectiveToken)
self.get_token(yaml.DocumentStartToken)
self.events.append(yaml.DocumentStartEvent(None, None))
self.parse_node()
self.events.append(yaml.DocumentEndEvent(None, None))
# node: ALIAS | ANCHOR? TAG? (SCALAR|sequence|mapping)
def parse_node(self):
if self.check_token(yaml.AliasToken):
self.events.append(yaml.AliasEvent(self.get_token_value(), None, None))
else:
anchor = None
if self.check_token(yaml.AnchorToken):
anchor = self.get_token_value()
tag = None
if self.check_token(yaml.TagToken):
tag = self.get_token_value()
if self.check_token(yaml.ScalarToken):
self.events.append(yaml.ScalarEvent(anchor, tag, (False, False), self.get_token_value(), None, None))
elif self.check_token(yaml.FlowSequenceStartToken):
self.events.append(yaml.SequenceStartEvent(anchor, tag, None, None))
self.parse_sequence()
elif self.check_token(yaml.FlowMappingStartToken):
self.events.append(yaml.MappingStartEvent(anchor, tag, None, None))
self.parse_mapping()
else:
raise CanonicalError("SCALAR, '[', or '{' is expected, got "+repr(self.tokens[0]))
# sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? SEQUENCE-END
def parse_sequence(self):
self.get_token(yaml.FlowSequenceStartToken)
if not self.check_token(yaml.FlowSequenceEndToken):
self.parse_node()
while not self.check_token(yaml.FlowSequenceEndToken):
self.get_token(yaml.FlowEntryToken)
if not self.check_token(yaml.FlowSequenceEndToken):
self.parse_node()
self.get_token(yaml.FlowSequenceEndToken)
self.events.append(yaml.SequenceEndEvent(None, None))
# mapping: MAPPING-START (map_entry (ENTRY map_entry)*)? ENTRY? MAPPING-END
def parse_mapping(self):
self.get_token(yaml.FlowMappingStartToken)
if not self.check_token(yaml.FlowMappingEndToken):
self.parse_map_entry()
while not self.check_token(yaml.FlowMappingEndToken):
self.get_token(yaml.FlowEntryToken)
if not self.check_token(yaml.FlowMappingEndToken):
self.parse_map_entry()
self.get_token(yaml.FlowMappingEndToken)
self.events.append(yaml.MappingEndEvent(None, None))
# map_entry: KEY node VALUE node
def parse_map_entry(self):
self.get_token(yaml.KeyToken)
self.parse_node()
self.get_token(yaml.ValueToken)
self.parse_node()
def parse(self):
self.parse_stream()
self.parsed = True
def get_event(self):
if not self.parsed:
self.parse()
return self.events.pop(0)
def check_event(self, *choices):
if not self.parsed:
self.parse()
if self.events:
if not choices:
return True
for choice in choices:
if isinstance(self.events[0], choice):
return True
return False
def peek_event(self):
if not self.parsed:
self.parse()
return self.events[0]
class CanonicalLoader(CanonicalScanner, CanonicalParser,
yaml.composer.Composer, yaml.constructor.Constructor, yaml.resolver.Resolver):
def __init__(self, stream):
if hasattr(stream, 'read'):
stream = stream.read()
CanonicalScanner.__init__(self, stream)
CanonicalParser.__init__(self)
yaml.composer.Composer.__init__(self)
yaml.constructor.Constructor.__init__(self)
yaml.resolver.Resolver.__init__(self)
yaml.CanonicalLoader = CanonicalLoader
def canonical_scan(stream):
return yaml.scan(stream, Loader=CanonicalLoader)
yaml.canonical_scan = canonical_scan
def canonical_parse(stream):
return yaml.parse(stream, Loader=CanonicalLoader)
yaml.canonical_parse = canonical_parse
def canonical_compose(stream):
return yaml.compose(stream, Loader=CanonicalLoader)
yaml.canonical_compose = canonical_compose
def canonical_compose_all(stream):
return yaml.compose_all(stream, Loader=CanonicalLoader)
yaml.canonical_compose_all = canonical_compose_all
def canonical_load(stream):
return yaml.load(stream, Loader=CanonicalLoader)
yaml.canonical_load = canonical_load
def canonical_load_all(stream):
return yaml.load_all(stream, Loader=CanonicalLoader)
yaml.canonical_load_all = canonical_load_all
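# --- Illustrative usage sketch (not part of the original test helper) ---
# It shows how the canonical_* wrappers registered above would typically be exercised.
# The sample document below is hypothetical, written in the canonical YAML form this
# scanner expects: an explicit %YAML directive, flow collections, and tagged,
# double-quoted scalars.
if __name__ == '__main__':
    _sample = '%YAML 1.1\n---\n!!map { ? !!str "key" : !!str "value" }\n'
    print(yaml.canonical_load(_sample))           # expected: {'key': 'value'}
    for _event in yaml.canonical_parse(_sample):  # low-level event stream
        print(_event)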
|
39e0bbf61880bfc8ac46d607fec14c8bf8f4d189
|
d9b749e8a4982b8bcd66a19d4ee189fc607ab79e
|
/libunbound/python/doc/examples/example8-1.py
|
79060167d83c35a47ab9b2566f2706e578470c44
|
[
"LicenseRef-scancode-free-unknown",
"GPL-3.0-or-later",
"LicenseRef-scancode-autoconf-simple-exception",
"BSD-3-Clause"
] |
permissive
|
NLnetLabs/unbound
|
26312ae0b3b963f2ab700c4be3d9c45587bcf45e
|
10843805ac37002f1d9293c9835a3e68e41d392d
|
refs/heads/master
| 2023-08-31T23:34:52.610625
| 2023-08-31T11:54:03
| 2023-08-31T11:54:03
| 94,195,301
| 2,526
| 395
|
BSD-3-Clause
| 2023-09-07T14:37:56
| 2017-06-13T09:27:49
|
C
|
UTF-8
|
Python
| false
| false
| 918
|
py
|
example8-1.py
|
#!/usr/bin/python
# vim:fileencoding=utf-8
#
# Lookup for MX and NS records
#
import unbound
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_MX, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.mx_list:
print " priority:%d address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.address_list:
print " address:%s" % k
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_NS, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print "Result:"
print " raw data:", result.data
for k in result.data.domain_list:
print " host: %s" % k
|
6253994bff4f5c8033e2679175970a9306ec5cd6
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/vision/efficientdet/tensorflow2/ipu_utils/dataset.py
|
b61500f045572dbe05f8dff89fdb73821cf66593
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,065
|
py
|
dataset.py
|
# Copyright (c) 2022 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from functools import reduce
from pathlib import Path
from typing import List, Text, Tuple, Union
import numpy as np
import tensorflow as tf
import utils
from ipu_automl_io import preprocess_resize
from PIL import Image
T_DatasetScaleRawImages = Tuple[tf.data.Dataset, int, tf.Tensor]
def global_batch_size(args: argparse.Namespace) -> int:
return args.micro_batch_size
def input_tensor_shape(
args: argparse.Namespace, image_size: Union[Text, int, Tuple[int, int]], num_channels: int = 3
) -> Tuple[int, int]:
return utils.parse_image_size(image_size) + (num_channels,)
def _configure_dataset(dataset: tf.data.Dataset, args: argparse.Namespace, dataset_repeats: int = 1) -> tf.data.Dataset:
dataset = dataset.map(lambda x: tf.cast(x, dtype=args.io_precision))
dataset = dataset.cache()
# Repeat the image "benchmark_repeats" times
dataset = dataset.repeat(dataset_repeats)
dataset = dataset.batch(args.micro_batch_size, drop_remainder=True)
# Now repeat the entire dataset "num_repeats" times
dataset = dataset.repeat(args.num_repeats)
if args.dataset_prefetch_buffer > 0:
dataset = dataset.prefetch(args.dataset_prefetch_buffer)
return dataset
def generated_inference_dataset(
args: argparse.Namespace,
image_size: Union[Text, int, Tuple[int, int]],
) -> T_DatasetScaleRawImages:
num_samples = global_batch_size(args)
inputs = tf.random.uniform((num_samples,) + input_tensor_shape(args, image_size))
dataset = tf.data.Dataset.from_tensor_slices(inputs)
return _configure_dataset(dataset, args, args.benchmark_repeats), 1, inputs
def repeated_image_dataset(
args: argparse.Namespace, image_size: Union[Text, int, Tuple[int, int]]
) -> T_DatasetScaleRawImages:
num_samples = global_batch_size(args)
imgs = [np.array(Image.open(args.image_path))] * num_samples
imgs = tf.convert_to_tensor(imgs)
print("Preprocessing")
inputs, scales = preprocess_resize(imgs, image_size)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
return _configure_dataset(dataset, args, args.benchmark_repeats), scales, imgs
def image_directory_dataset(
args: argparse.Namespace, image_size: Union[Text, int, Tuple[int, int]]
) -> T_DatasetScaleRawImages:
img_height, img_width = utils.parse_image_size(image_size)
extensions = ["jpg", "png", "jpeg"]
def load_imgs_from_ext(acc: List[Text], ext: Text) -> List[Text]:
imgs = [p for p in Path(args.image_path).rglob("*." + ext)]
return acc + imgs
img_paths = reduce(load_imgs_from_ext, extensions, [])
imgs = [tf.convert_to_tensor(Image.open(p)) for p in img_paths]
print("Preprocessing")
inputs, scales = preprocess_resize(imgs, image_size)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
return _configure_dataset(dataset, args), scales, imgs
def get_dataset(args: argparse.Namespace, image_size: Union[Text, int, Tuple[int, int]]) -> T_DatasetScaleRawImages:
if args.dataset_type == "repeated-image" or args.dataset_type == "single-image":
return repeated_image_dataset(args, image_size)
elif args.dataset_type == "generated":
return generated_inference_dataset(args, image_size)
elif args.dataset_type == "image-directory":
return image_directory_dataset(args, image_size)
else:
raise NotImplementedError(f"Dataset type {args.dataset_type} has not been implemented")
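# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the pipeline assembled by ``_configure_dataset``: cast, cache,
# per-sample repeat, batch, whole-dataset repeat, optional prefetch. The argument
# values below are hypothetical and only show which namespace fields are read.
def _example_pipeline():
    args = argparse.Namespace(
        micro_batch_size=2,           # batch size used with drop_remainder=True
        io_precision=tf.float16,      # dtype the samples are cast to
        num_repeats=1,                # repeats of the entire dataset
        dataset_prefetch_buffer=2,    # prefetch depth; 0 disables prefetching
    )
    images = tf.random.uniform((4, 64, 64, 3))
    dataset = _configure_dataset(tf.data.Dataset.from_tensor_slices(images), args, dataset_repeats=3)
    return dataset.element_spec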
|
089fa2891b027a48f9dd248d6e12484f394faccd
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/move/fromImportAliases/before/src/lib.py
|
279b5630edd805dffcbd59dc1ebf67cc173c4e42
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
lib.py
|
def function1():
pass
def function2():
pass
|
47a5d41a29b48ea7f8ea501cfa2ebf4aa5d6e4b2
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/ntp_sources.py
|
8b5b48ca05cf72034dbb869dbcd74fd5f6737f75
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
ntp_sources.py
|
"""
NTP sources - remote clock info from ``ntpq`` and ``chronyc``
=============================================================
The parsers here provide information about the time sources used by
``ntpd`` and ``chronyd``. These are gathered from the output of the
``ntpq -pn`` and ``chronyc sources`` commands respectively.
There is also a parser for the output of the ``ntpq -c 'rv 0 leap'``
command, which reports the leap second status.
Parsers in this module are:
ChronycSources - command ``/usr/bin/chronyc sources``
-----------------------------------------------------
NtpqPn - command ``/usr/sbin/ntpq -pn``
---------------------------------------
NtpqLeap - command ``/usr/sbin/ntpq -c 'rv 0 leap'``
----------------------------------------------------
"""
from insights.core import CommandParser
from insights.core.exceptions import SkipComponent
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.chronyc_sources)
class ChronycSources(CommandParser, list):
"""
Chronyc Sources parser
Parses the list of NTP time sources in use by ``chronyd``. So far only
    the source IP address and the mode and state flags are retrieved.
Sample input::
210 Number of sources = 6
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^- 10.20.30.40 2 9 377 95 -1345us[-1345us] +/- 87ms
^- 10.56.72.8 2 10 377 949 -3449us[-3483us] +/- 120ms
^* 10.64.108.95 2 10 377 371 -91us[ -128us] +/- 30ms
^- 10.8.205.17 2 8 377 27 +7161us[+7161us] +/- 52ms
Examples:
>>> type(chrony_sources)
<class 'insights.parsers.ntp_sources.ChronycSources'>
>>> len(chrony_sources)
4
>>> chrony_sources[0]['source']
'10.20.30.40'
>>> chrony_sources[0]['mode']
'^'
>>> chrony_sources[0]['state']
'-'
"""
def parse_content(self, content):
"""
Get source, mode and state for chrony
"""
data = []
if len(content) > 3:
for row in content[3:]:
if row.strip():
values = row.split(" ", 2)
data.append(
{
"source": values[1],
"mode": values[0][0],
"state": values[0][1]
}
)
if not data:
raise SkipComponent()
self.extend(data)
@property
def data(self):
"""
        Expose the parsed data as a property to keep backward compatibility
"""
return self
@parser(Specs.ntpq_leap)
class NtpqLeap(CommandParser, dict):
"""
Converts the output of ``ntpq -c 'rv 0 leap'`` into a dictionary in the
``data`` property, and sets the ``leap`` property to the value of the
'leap' key if found.
Sample input::
leap=00
Examples:
>>> type(ntpq)
<class 'insights.parsers.ntp_sources.NtpqLeap'>
>>> ntpq.leap
'00'
"""
def parse_content(self, content):
if content and "Connection refused" in content[0]:
raise SkipComponent("NTP service is down and connection refused")
leap = None
for line in content:
if 'leap=' in line:
leap = line.split('leap=')[1].rstrip()
if leap is None:
raise SkipComponent()
self.update(leap=leap)
@property
def data(self):
"""
        Expose the parsed data as a property to keep backward compatibility
"""
return self
@property
def leap(self):
"""
Return the value of the 'leap'
"""
return self.get('leap')
@parser(Specs.ntpq_pn)
class NtpqPn(CommandParser, list):
"""
Get source and flag for each NTP time source from the output of
``/usr/sbin/ntpq -pn``.
    Currently, this only captures the source IP address and the 'flag'
    character in the first column. Therefore it will need to be extended
    should you wish to determine the stratum, polling rate or other
    properties of the source.
Sample input::
remote refid st t when poll reach delay offset jitter
==============================================================================
+10.20.30.40 192.231.203.132 3 u 638 1024 377 0.242 2.461 1.886
*2001:388:608c:8 .GPS. 1 u 371 1024 377 29.323 1.939 1.312
-2001:44b8:1::1 216.218.254.202 2 u 396 1024 377 37.869 -3.340 6.458
+150.203.1.10 202.6.131.118 2 u 509 1024 377 20.135 0.800 3.260
Examples:
>>> type(ntp_sources)
<class 'insights.parsers.ntp_sources.NtpqPn'>
>>> len(ntp_sources)
4
>>> ntp_sources[0]['source']
'10.20.30.40'
"""
def parse_content(self, content):
if content and "Connection refused" in content[0]:
raise SkipComponent("NTP service is down and connection refused")
data = []
if len(content) > 2:
for row in content[2:]:
if row.strip():
values = row.split(" ", 2)
if row.startswith(" "):
data.append({"source": values[1], "flag": " "})
else:
data.append({"source": values[0][1:], "flag": values[0][0]})
if not data:
raise SkipComponent()
self.extend(data)
@property
def data(self):
"""
        Expose the parsed data as a property to keep backward compatibility
"""
return self
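# --- Illustrative usage sketch (not part of the original parser module) ---
# It assumes the insights test helper ``insights.tests.context_wrap`` is available
# (it is what the doctest fixtures for these parsers are normally built from); the
# sample chronyc output below is made up.
def _example_parse_chronyc_sources():
    from insights.tests import context_wrap
    sample = """
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* 10.64.108.95                  2  10   377   371    -91us[ -128us] +/-   30ms
""".strip()
    sources = ChronycSources(context_wrap(sample))
    return [(s['source'], s['mode'], s['state']) for s in sources]  # [('10.64.108.95', '^', '*')]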
|
c109820e2837d95fa311edae724a2a6320ec4919
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/framework/docstr/index_add.py
|
625aeb8f8dad4953d7d7a7f061da4d50e4c3548d
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 3,014
|
py
|
index_add.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.Tensor.index_add_,
r"""
index_add_(dim, index, source, *, alpha=1) -> Tensor
The interface is consistent with PyTorch.
Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
``source`` is subtracted from the ``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
For a 3-D tensor the output is given as::
self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
Args:
dim (int): dimension along which to index
index (Tensor): indices of ``source`` to select from,
should have dtype either `oneflow.int64` or `oneflow.int32`
source (Tensor): the tensor containing values to add
Keyword args:
alpha (Number): the scalar multiplier for ``source``
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.ones(5, 3)
>>> t = flow.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=flow.float)
>>> index = flow.tensor([0, 4, 2])
>>> x.index_add_(0, index, t)
tensor([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]], dtype=oneflow.float32)
>>> x.index_add_(0, index, t, alpha=-1)
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow._C.index_add,
r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
See :meth:`oneflow.Tensor.index_add_` for function description.
""",
)
add_docstr(
oneflow._C.index_add_,
r"""
index_add_(dim, index, source, *, alpha=1) -> Tensor
    In-place version; see :meth:`oneflow.Tensor.index_add_` for function description.
""",
)
|
6037693a6a5bd5a8ea5237fd6e522fdb8a656fe0
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/xfel/ui/components/xfel_gui_init.py
|
9cdccb62530fecb0ec51c8dcc43b9208bc6f8d45
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 148,318
|
py
|
xfel_gui_init.py
|
from __future__ import absolute_import, division, print_function
from six.moves import range, zip, map
'''
Author : Lyubimov, A.Y.
Created : 06/02/2016
Last Changed: 02/09/2017
Description : XFEL UI Initialization module
'''
import os
import wx
import time
import numpy as np
from threading import Thread
from wx.lib.scrolledpanel import ScrolledPanel
from libtbx import easy_run
try:
from MySQLdb import OperationalError # test import
except ImportError:
from libtbx.utils import Sorry
raise Sorry('Mysql not available')
from xfel.clustering.cluster import Cluster
import xfel.ui.components.xfel_gui_controls as gctr
import xfel.ui.components.xfel_gui_dialogs as dlg
import xfel.ui.components.xfel_gui_plotter as pltr
from xfel.ui import load_cached_settings, save_cached_settings
from xfel.ui.db import get_run_path
from xfel.ui.db.xfel_db import xfel_db_application
from prime.postrefine.mod_gui_frames import PRIMEInputWindow, PRIMERunWindow
from prime.postrefine.mod_input import master_phil
from iota.utils.utils import Capturing, set_base_dir
icons = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons/')
import getpass
user = getpass.getuser()
import libtbx.load_env
license = libtbx.env.under_root(os.path.join("modules","cctbx_project","LICENSE.txt"))
if not os.path.exists(license):
license = libtbx.env.under_root(os.path.join("cctbx", "LICENSE.txt")) # conda installations
description = 'The cctbx.xfel UI is developed for use during data collection ' \
'and initial processing of serial crystallographic data from ' \
'XFELs and synchrotrons.'
class TagSet(object):
def __init__(self, tag_selection_mode, tags):
assert tag_selection_mode in ['union', 'intersection']
self.mode = tag_selection_mode
self.tags = tags
def __str__(self):
if len(self.tags) > 1:
return ", ".join([t.name for t in self.tags]) + (' (%s)' % self.mode[0])
else:
return ", ".join([t.name for t in self.tags])
# ------------------------------- Run Sentinel ------------------------------- #
# Set up events for finishing all cycles
tp_EVT_RUN_REFRESH = wx.NewEventType()
EVT_RUN_REFRESH = wx.PyEventBinder(tp_EVT_RUN_REFRESH, 1)
class RefreshRuns(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid):
wx.PyCommandEvent.__init__(self, etype, eid)
class RunSentinel(Thread):
''' Worker thread for runs; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
if self.parent.params.facility.name == 'standalone':
if self.parent.params.facility.standalone.monitor_for == 'folders' and \
self.parent.params.facility.standalone.folders.method == 'status_file':
from xfel.ui.db.xfel_db import cheetah_run_finder
self.finder = cheetah_run_finder(self.parent.params)
else:
from xfel.ui.db.xfel_db import standalone_run_finder
self.finder = standalone_run_finder(self.parent.params)
def post_refresh(self):
evt = RefreshRuns(tp_EVT_RUN_REFRESH, -1)
wx.PostEvent(self.parent.run_window.runs_tab, evt)
wx.PostEvent(self.parent.run_window.trials_tab, evt)
def run(self):
# one time post for an initial update
self.post_refresh()
db = xfel_db_application(self.parent.params)
use_ids = self.parent.params.facility.name not in ['lcls']
while self.active:
# Find the delta
known_runs = [r.run for r in db.get_all_runs()]
if self.parent.params.facility.name == 'lcls':
unknown_run_runs = [str(run['run']) for run in db.list_lcls_runs() if
str(run['run']) not in known_runs]
unknown_run_paths = [''] * len(unknown_run_runs)
elif self.parent.params.facility.name == 'standalone':
standalone_runs = [run for run in self.finder.list_runs() if
run[0] not in known_runs]
unknown_run_runs = [r[0] for r in standalone_runs]
unknown_run_paths = [r[1] for r in standalone_runs]
if len(unknown_run_runs) > 0:
for run_run, run_path in zip(unknown_run_runs, unknown_run_paths):
db.create_run(run = run_run, path = run_path)
new_runs = [r for r in db.get_all_runs() if r.run in unknown_run_runs]
if len(self.parent.run_window.runs_tab.persistent_tags) > 0:
tags = [t for t in db.get_all_tags() if t.name in self.parent.run_window.runs_tab.persistent_tags]
for r in new_runs:
for t in tags:
r.add_tag(t)
# Sync new runs to rungroups
for rungroup in db.get_all_rungroups(only_active=True):
first_run, last_run = rungroup.get_first_and_last_runs()
# HACK: to get working -- TODO: make nice
if use_ids:
first_run = first_run.id
last_run = last_run.id if last_run is not None else None
else:
first_run = int(first_run.run)
last_run = int(last_run.run) if last_run is not None else None
rungroup.sync_runs(first_run, last_run, use_ids=use_ids)
print("%d new runs" % len(unknown_run_runs))
self.post_refresh()
time.sleep(10)
# ------------------------------- Job Monitor ------------------------------- #
# Set up events for finishing all cycles
tp_EVT_JOB_MONITOR = wx.NewEventType()
EVT_JOB_MONITOR = wx.PyEventBinder(tp_EVT_JOB_MONITOR, 1)
class MonitorJobs(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, trials = None, jobs = None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.trials = trials
self.jobs = jobs
class JobMonitor(Thread):
''' Monitor thread for jobs; generated so that the GUI does not lock up when
monitoring is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
self.only_active_jobs = True
def post_refresh(self, trials = None, jobs = None):
evt = MonitorJobs(tp_EVT_JOB_MONITOR, -1, trials, jobs)
wx.PostEvent(self.parent.run_window.jobs_tab, evt)
def run(self):
from xfel.ui.components.submission_tracker import TrackerFactory
# one time post for an initial update
self.post_refresh()
db = xfel_db_application(self.parent.params)
tracker = TrackerFactory.from_params(self.parent.params)
while self.active:
self.parent.run_window.jmn_light.change_status('idle')
trials = db.get_all_trials()
jobs = db.get_all_jobs(active = self.only_active_jobs)
for job in jobs:
if job.status in ['DONE', 'EXIT', 'SUBMIT_FAIL', 'DELETED']:
continue
new_status = tracker.track(job.submission_id, job.get_log_path())
# Handle the case where the job was submitted but no status is available yet
if job.status == "SUBMITTED" and new_status == "ERR":
pass
elif job.status != new_status:
job.status = new_status
self.post_refresh(trials, jobs)
self.parent.run_window.jmn_light.change_status('on')
time.sleep(5)
# ------------------------------- Job Sentinel ------------------------------- #
# Set up events for finishing all cycles
tp_EVT_JOB_REFRESH = wx.NewEventType()
EVT_JOB_REFRESH = wx.PyEventBinder(tp_EVT_JOB_REFRESH, 1)
class RefreshJobs(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid):
wx.PyCommandEvent.__init__(self, etype, eid)
class JobSentinel(Thread):
''' Worker thread for jobs; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
def post_refresh(self):
pass
def run(self):
# one time post for an initial update
self.post_refresh()
db = xfel_db_application(self.parent.params)
from xfel.ui.db.job import submit_all_jobs
while self.active:
submit_all_jobs(db)
self.post_refresh()
time.sleep(2)
# ----------------------------- Progress Sentinel ---------------------------- #
# Set up events for finishing all cycles
tp_EVT_PRG_REFRESH = wx.NewEventType()
EVT_PRG_REFRESH = wx.PyEventBinder(tp_EVT_PRG_REFRESH, 1)
class RefreshStats(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, result=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.result = result
def GetValue(self):
return self.result
class ProgressSentinel(Thread):
''' Worker thread for jobs; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
self.output = self.parent.params.output_folder
self.number_of_pickles = 0
self.info = {}
self.noiso_cells = []
# on initialization (and restart), make sure stats drawn from scratch
self.parent.run_window.status_tab.redraw_windows = True
def post_refresh(self):
evt = RefreshStats(tp_EVT_PRG_REFRESH, -1, self.info)
wx.PostEvent(self.parent.run_window.status_tab, evt)
def run(self):
# one time post for an initial update
self.post_refresh()
db = xfel_db_application(self.parent.params)
while self.active:
self.parent.run_window.prg_light.change_status('idle')
if len(db.get_all_trials()) > 0:
trial = db.get_trial(
trial_number=self.parent.run_window.status_tab.trial_no)
trial_has_isoforms = len(trial.isoforms) > 0
tags = self.parent.run_window.status_tab.selected_tags
tag_ids = [tag.id for tag in tags]
cells = db.get_stats(trial=trial, tags=tags, isigi_cutoff = self.parent.run_window.status_tab.isigi_cutoff)()
if self.parent.run_window.status_tab.tag_trial_changed:
self.parent.run_window.status_tab.redraw_windows = True
self.parent.run_window.status_tab.tag_trial_changed = False
run_numbers = []
runs = []
for rb in trial.rungroups:
for run in rb.runs:
if run.run not in run_numbers:
if len(tags) > 0:
for tag in run.tags:
if tag.id in tag_ids:
run_numbers.append(run.run)
runs.append(run)
else:
run_numbers.append(run.run)
runs.append(run)
if not trial_has_isoforms:
n_img = len(db.get_all_events(trial, runs))
for cell in cells:
# Check for cell isoform
if cell.isoform is None:
self.noiso_cells.append({'a':cell.cell_a,
'b':cell.cell_b,
'c':cell.cell_c,
'alpha':cell.cell_alpha,
'beta':cell.cell_beta,
'gamma':cell.cell_gamma,
'n_img':n_img})
else:
current_rows = self.parent.run_window.status_tab.rows
if current_rows != {}:
if cell.isoform._db_dict['name'] in current_rows:
bins = cell.bins[
:int(current_rows[cell.isoform._db_dict['name']]['high_bin'])]
highest_bin = cell.bins[int(current_rows[cell.isoform._db_dict['name']]['high_bin'])]
else:
assert False, "This isoform is not available yet"
else:
bins = cell.bins
d_mins = [b.d_min for b in bins]
highest_bin = bins[d_mins.index(min(d_mins))]
counts_all = [int(i.count) for i in bins]
totals_all = [int(i.total_hkl) for i in bins]
counts_highest = int(highest_bin.count)
totals_highest = int(highest_bin.total_hkl)
# Apply throttle to multiplicity calculation
if trial.process_percent is None:
process_percent = 100
else:
process_percent = trial.process_percent
n_img = len(db.get_all_events(trial, runs, isoform = cell.isoform))
# Generate multiplicity graph for isoforms
mult_all = sum(counts_all) / sum(totals_all) / (process_percent / 100)
mult_highest = counts_highest / totals_highest / (process_percent / 100)
self.info[cell.isoform._db_dict['name']] = {'multiplicity_all':mult_all,
'multiplicity_highest':mult_highest,
'bins':bins,
'isoform':cell.isoform._db_dict['name'],
'a':cell.cell_a,
'b':cell.cell_b,
'c':cell.cell_c,
'alpha':cell.cell_alpha,
'beta':cell.cell_beta,
'gamma':cell.cell_gamma,
'n_img':n_img}
#if len(self.noiso_cells) > 0:
if len(self.info) == 0 and len(self.noiso_cells) > 0:
sum_n_img = sum([cell['n_img'] for cell in self.noiso_cells])
mean_a = sum([cell['n_img']*cell['a'] for cell in self.noiso_cells])/sum_n_img
mean_b = sum([cell['n_img']*cell['b'] for cell in self.noiso_cells])/sum_n_img
mean_c = sum([cell['n_img']*cell['c'] for cell in self.noiso_cells])/sum_n_img
mean_alpha = sum([cell['n_img']*cell['alpha'] for cell in self.noiso_cells])/sum_n_img
mean_beta = sum([cell['n_img']*cell['beta'] for cell in self.noiso_cells])/sum_n_img
mean_gamma = sum([cell['n_img']*cell['gamma'] for cell in self.noiso_cells])/sum_n_img
noiso_entry = {'multiplicity_all':0,
'multiplicity_highest':0,
'bins':None,
'isoform':None,
'a':mean_a,
'b':mean_b,
'c':mean_c,
'alpha':mean_alpha,
'beta':mean_beta,
'gamma':mean_gamma,
'n_img':sum_n_img}
self.info['noiso'] = noiso_entry
self.post_refresh()
self.info = {}
self.parent.run_window.prg_light.change_status('on')
time.sleep(5)
# ----------------------------- Run Stats Sentinel ---------------------------- #
# Set up events for monitoring hitrate, indexing rate and I/sig(I)
tp_EVT_RUNSTATS_REFRESH = wx.NewEventType()
EVT_RUNSTATS_REFRESH = wx.PyEventBinder(tp_EVT_RUNSTATS_REFRESH, 1)
class RefreshRunStats(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, result=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.result = result
def GetValue(self):
return self.result
class RunStatsSentinel(Thread):
''' Worker thread for run stats; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
self.output = self.parent.params.output_folder
self.number_of_pickles = 0
self.info = {}
self.run_numbers = []
self.stats = []
self.run_tags = []
self.run_statuses = []
def post_refresh(self):
evt = RefreshRunStats(tp_EVT_RUNSTATS_REFRESH, -1, self.info)
wx.PostEvent(self.parent.run_window.runstats_tab, evt)
def run(self):
# one time post for an initial update
self.post_refresh()
self.db = xfel_db_application(self.parent.params)
while self.active:
self.parent.run_window.runstats_light.change_status('idle')
self.plot_stats()
self.fetch_timestamps(indexed=True)
self.fetch_timestamps(indexed=False)
self.post_refresh()
self.info = {}
self.parent.run_window.runstats_light.change_status('on')
time.sleep(5)
def refresh_stats(self):
#from xfel.ui.components.timeit import duration
from xfel.ui.db.stats import HitrateStats
import copy, time
t1 = time.time()
if self.parent.run_window.runstats_tab.trial_no is not None:
trial = self.db.get_trial(
trial_number=self.parent.run_window.runstats_tab.trial_no)
selected_runs = copy.deepcopy(self.parent.run_window.runstats_tab.selected_runs)
self.run_numbers = []
trial_ids = []
rungroup_ids = []
self.stats = []
self.trgr = {}
self.run_tags = []
self.run_statuses = []
for rg in trial.rungroups:
for run in rg.runs:
if run.run not in self.run_numbers and run.run in selected_runs:
self.run_numbers.append(run.run)
trial_ids.append(trial.id)
rungroup_ids.append(rg.id)
self.trgr[run.run] = (trial, rg, run)
self.stats.append(HitrateStats(self.db, run.run, trial.trial, rg.id,
i_sigi_cutoff=self.parent.run_window.runstats_tab.i_sigi,
d_min=self.parent.run_window.runstats_tab.d_min)())
self.run_tags.append([tag.name for tag in run.tags])
jobs = self.db.get_all_jobs()
for idx in range(len(self.run_numbers)):
run_no = self.run_numbers[idx]
rg_id = rungroup_ids[idx]
t_id = trial_ids[idx]
found_it = False
for job in jobs:
try:
ok = job.run.run == run_no and job.rungroup.id == rg_id and job.trial.id == t_id
except AttributeError:
pass
else:
if ok:
self.run_statuses.append(job.status)
found_it = True; break
if not found_it: self.run_statuses.append('UNKWN')
t2 = time.time()
def get_xtc_process_params_for_run(self, trial, rg, run):
params = {}
params['experiment'] = str(self.parent.db.params.facility.lcls.experiment)
try:
params['output_dir'] = os.path.join(str(self.parent.db.params.output_folder),
"r%04d"%(int(run.run)), "%03d_rg%03d"%(trial.trial, rg.rungroup_id), "all")
except ValueError:
params['output_dir'] = os.path.join(str(self.parent.db.params.output_folder),
"r%s"%(run.run), "%03d_rg%03d"%(trial.trial, rg.rungroup_id), "all")
params['run'] = run.run
params['address'] = rg.detector_address
params['format'] = rg.format
params['config'] = rg.config_str if hasattr(rg, 'config_str') else None
params['beamx'] = rg.beamx if hasattr(rg, 'beamx') else None
params['beamy'] = rg.beamy if hasattr(rg, 'beamy') else None
params['distance'] = rg.detz_parameter if hasattr(rg, 'detz_parameter') else None
params['bin_size'] = rg.binning if hasattr(rg, 'binning') else None
params['energy'] = rg.energy if hasattr(rg, 'energy') else None
params['gain_mask_level'] = rg.gain_mask_level if hasattr(rg, 'gain_mask_level') else None
return params
def fetch_timestamps(self, indexed=False):
from xfel.ui.components.run_stats_plotter import \
get_multirun_should_have_indexed_timestamps, get_paths_from_timestamps, get_strings_from_timestamps
runs, timestamps = \
get_multirun_should_have_indexed_timestamps(self.stats,
self.run_numbers,
self.parent.run_window.runstats_tab.d_min,
self.parent.run_window.runstats_tab.n_strong,
indexed=indexed)
image_paths_by_run = []
timestamps_and_params_by_run = []
for i in range(len(runs)):
run = runs[i]
ts = timestamps[i]
outdir = "out" if indexed else "all"
prepend = os.path.join(get_run_path(self.output, *self.trgr[run]), outdir)
tag = "idx" if indexed else "shot"
image_paths = get_paths_from_timestamps(ts, prepend=prepend, tag=tag, ext=self.trgr[run][1].format)
image_paths_by_run.append(image_paths)
timestamp_strings = get_strings_from_timestamps(ts, long_form=True)
timestamps_and_params_by_run.append((self.get_xtc_process_params_for_run(*self.trgr[run]), timestamp_strings))
if indexed:
self.parent.run_window.runstats_tab.strong_indexed_image_timestamps = \
timestamps_and_params_by_run
self.parent.run_window.runstats_tab.strong_indexed_image_paths = \
image_paths_by_run
else:
self.parent.run_window.runstats_tab.should_have_indexed_timestamps = \
timestamps_and_params_by_run
self.parent.run_window.runstats_tab.should_have_indexed_image_paths = \
image_paths_by_run
def plot_stats(self):
from xfel.ui.components.run_stats_plotter import plot_multirun_stats
self.refresh_stats()
sizex, sizey = self.parent.run_window.runstats_tab.runstats_panelsize
figure = self.parent.run_window.runstats_tab.figure
figure.clear()
plot_multirun_stats(
self.stats, self.run_numbers,
d_min=self.parent.run_window.runstats_tab.d_min,
n_multiples=self.parent.run_window.runstats_tab.n_multiples,
interactive=True,
ratio_cutoff=self.parent.run_window.runstats_tab.ratio,
n_strong_cutoff=self.parent.run_window.runstats_tab.n_strong,
i_sigi_cutoff=self.parent.run_window.runstats_tab.i_sigi,
run_tags=self.run_tags,
run_statuses=self.run_statuses,
minimalist=self.parent.run_window.runstats_tab.entire_expt,
xsize=(sizex-25)/85, ysize=(sizey-25)/95,
high_vis=self.parent.high_vis,
figure=figure)
# convert px to inches with fudge factor for scaling inside borders
figure.canvas.draw_idle()
# ----------------------------- Spotfinder Sentinel ---------------------------- #
# Set up events for monitoring spotfinder results against a set threshold
tp_EVT_SPOTFINDER_REFRESH = wx.NewEventType()
EVT_SPOTFINDER_REFRESH = wx.PyEventBinder(tp_EVT_SPOTFINDER_REFRESH, 1)
class RefreshSpotfinder(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, result=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.result = result
def GetValue(self):
return self.result
class SpotfinderSentinel(Thread):
''' Worker thread for spotfinder stats; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
self.output = self.parent.params.output_folder
self.number_of_pickles = 0
self.info = {}
self.run_numbers = []
self.stats = []
self.spot_length_stats = []
self.run_tags = []
self.run_statuses = []
# on initialization (and restart), make sure spotfinder stats drawn from scratch
self.parent.run_window.spotfinder_tab.redraw_windows = True
def post_refresh(self):
evt = RefreshSpotfinder(tp_EVT_SPOTFINDER_REFRESH, -1, self.info)
wx.PostEvent(self.parent.run_window.spotfinder_tab, evt)
def run(self):
# one time post for an initial update
self.post_refresh()
self.db = xfel_db_application(self.parent.params)
while self.active:
self.parent.run_window.spotfinder_light.change_status('idle')
self.plot_stats_static()
self.post_refresh()
self.info = {}
self.parent.run_window.spotfinder_light.change_status('on')
time.sleep(5)
def refresh_stats(self):
from xfel.ui.db.stats import SpotfinderStats
from xfel.ui.components.spotfinder_scraper import get_spot_length_stats
import copy
if self.parent.run_window.spotfinder_tab.trial_no is not None:
trial = self.db.get_trial(
trial_number=self.parent.run_window.spotfinder_tab.trial_no)
selected_runs = copy.deepcopy(self.parent.run_window.spotfinder_tab.selected_runs)
self.run_numbers = []
trial_ids = []
rungroup_ids = []
self.stats = []
self.spot_length_stats = []
self.trgr = {}
self.run_tags = []
self.run_statuses = []
self.output = self.parent.params.output_folder
for rg in trial.rungroups:
for run in rg.runs:
if run.run not in self.run_numbers and run.run in selected_runs:
self.run_numbers.append(run.run)
trial_ids.append(trial.id)
rungroup_ids.append(rg.id)
self.trgr[run.run] = (trial, rg, run)
# spot count
sf_stats = SpotfinderStats(self.db, run.run, trial.trial, rg.id)()
self.stats.append(sf_stats)
self.run_tags.append([tag.name for tag in run.tags])
# spot lengths
if self.parent.params.dispatcher == "cxi.xtc_process": #LABELIT backend
outdir = "integration"
else:
outdir = "out"
run_outdir = os.path.join(get_run_path(self.output, trial, rg, run), outdir)
try:
self.spot_length_stats.append(get_spot_length_stats(run_outdir, ref_stats=sf_stats))
except OSError:
print("Outdir %s no longer accessible." % run_outdir)
except Exception as e:
print(e)
from dials.array_family import flex
self.spot_length_stats.append((flex.double(), flex.double(), flex.double()))
jobs = self.db.get_all_jobs()
for idx in range(len(self.run_numbers)):
run_no = self.run_numbers[idx]
rg_id = rungroup_ids[idx]
t_id = trial_ids[idx]
for job in jobs:
if job.run.run == run_no and job.rungroup.id == rg_id and job.trial.id == t_id:
self.run_statuses.append(job.status)
def plot_stats_static(self):
from xfel.ui.components.spotfinder_plotter import plot_multirun_spotfinder_stats
self.refresh_stats()
sizex, sizey = self.parent.run_window.spotfinder_tab.spotfinder_panelsize
self.parent.run_window.spotfinder_tab.png = plot_multirun_spotfinder_stats(
self.stats, self.run_numbers,
spot_length_stats=self.spot_length_stats,
interactive=False,
run_tags=self.run_tags,
run_statuses=self.run_statuses,
n_min=self.parent.run_window.spotfinder_tab.n_min,
minimalist=self.parent.run_window.spotfinder_tab.entire_expt,
easy_run=True,
xsize=(sizex-25)/85, ysize=sizey/95,
high_vis=self.parent.high_vis)
# convert px to inches with fudge factor for scaling inside borders
self.parent.run_window.spotfinder_tab.redraw_windows = True
# ---------------------------- Image Dumping Thread ---------------------------- #
class ImageDumpThread(Thread):
def __init__(self,
command):
Thread.__init__(self)
self.active = True
self.command = command
def run(self):
print(self.command)
easy_run.fully_buffered(command=self.command).show_stderr()
# ----------------------------- Unit Cell Sentinel ----------------------------- #
# Set up events for monitoring unit cell statistics
tp_EVT_UNITCELL_REFRESH = wx.NewEventType()
EVT_UNITCELL_REFRESH = wx.PyEventBinder(tp_EVT_UNITCELL_REFRESH, 1)
class RefreshUnitCell(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid):
wx.PyCommandEvent.__init__(self, etype, eid)
class UnitCellSentinel(Thread):
''' Worker thread for unit cell analysis; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
def post_refresh(self):
evt = RefreshUnitCell(tp_EVT_UNITCELL_REFRESH, -1)
wx.PostEvent(self.parent.run_window.unitcell_tab, evt)
def run(self):
import xfel.ui.components.xfel_gui_plotter as pltr
feature_vectors = {
"Triclinic": None,
"Monoclinic": "a,b,c",
"Orthorhombic": "a,b,c",
"Tetragonal": "a,c",
"Hexagonal": "a,c",
"Cubic": None,
}
# one time post for an initial update
self.post_refresh()
self.db = xfel_db_application(self.parent.params)
while self.active:
self.parent.run_window.unitcell_light.change_status('idle')
trial = self.parent.run_window.unitcell_tab.trial
tag_sets = self.parent.run_window.unitcell_tab.tag_sets
sizex, sizey = self.parent.run_window.unitcell_tab.unit_cell_panelsize
info_list = []
legend_list = []
for tag_set in tag_sets:
legend_list.append(str(tag_set))
cells = self.db.get_stats(trial=trial,
tags=tag_set.tags,
isigi_cutoff=1.0,
tag_selection_mode=tag_set.mode)()
info = []
for cell in cells:
info.append({'a':cell.cell_a,
'b':cell.cell_b,
'c':cell.cell_c,
'alpha':cell.cell_alpha,
'beta':cell.cell_beta,
'gamma':cell.cell_gamma,
'n_img':0})
info_list.append(info)
iqr_ratio = 1.5 if self.parent.run_window.unitcell_tab.reject_outliers else None
figure = self.parent.run_window.unitcell_tab.figure
plotter = pltr.PopUpCharts(interactive=True, figure=figure)
if not self.parent.run_window.unitcell_tab.plot_clusters:
figure.clear()
plotter.plot_uc_histogram(
info_list=info_list,
legend_list=legend_list,
xsize=(sizex-115)/82, ysize=(sizey-115)/82,
high_vis=self.parent.high_vis,
iqr_ratio=iqr_ratio)
figure.canvas.draw_idle()
elif len(info_list) > 0:
from uc_metrics.clustering.step1 import phil_scope
from uc_metrics.clustering.step_dbscan3d import dbscan_plot_manager
from cctbx.sgtbx import space_group_info
if len(info_list) > 1:
print("Warning, only first tag set will be plotted")
params = phil_scope.extract()
try:
sg = self.parent.run_window.unitcell_tab.trial.cell.lookup_symbol
except AttributeError:
sg = "P1"
sg = "".join(sg.split()) # remove spaces
params.input.space_group = sg
iterable = ["{a} {b} {c} {alpha} {beta} {gamma} ".format(**c) + sg for c in info_list[0]]
params.input.__inject__('iterable', iterable)
params.file_name = None
params.cluster.dbscan.eps = float(self.parent.run_window.unitcell_tab.plot_eps.eps.GetValue())
params.show_plot = True
params.plot.legend = legend_list[0]
reject_outliers = self.parent.run_window.unitcell_tab.chk_reject_outliers.GetValue()
params.plot.outliers = not reject_outliers
sginfo = space_group_info(params.input.space_group)
cs = sginfo.group().crystal_system()
params.input.feature_vector = feature_vectors.get(cs)
if params.input.feature_vector:
figure = self.parent.run_window.unitcell_tab.figure
figure.clear()
plots = dbscan_plot_manager(params)
plots.wrap_3D_features(fig = figure, embedded = True)
figure.canvas.draw_idle()
else:
print("Unsupported crystal system", cs)
self.post_refresh()
self.parent.run_window.unitcell_light.change_status('on')
time.sleep(5)
# ------------------------------- Frames Sentinel ------------------------------- #
# Set up events for FramesSentinel
tp_EVT_FRAMES_REFRESH = wx.NewEventType()
EVT_FRAMES_REFRESH = wx.PyEventBinder(tp_EVT_FRAMES_REFRESH, 1)
class RefreshFrames(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid):
wx.PyCommandEvent.__init__(self, etype, eid)
class FramesSentinel(Thread):
''' Worker thread for frames; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
def post_refresh(self):
pass
def run(self):
# one time post for an initial update
self.post_refresh()
db = xfel_db_application(self.parent.parent.params)
while self.active:
trial = db.get_trial(trial_number=int(self.parent.trial_number.ctr.GetStringSelection()))
runs = [db.get_run(run_number=int(r)) for r in self.parent.trial_runs.ctr.GetCheckedStrings()]
print("Total events in trial", trial.trial, end=' ')
if len(runs) == 0:
runs = None
else:
print("runs", ", ".join(sorted([str(r.run) for r in runs])), end=' ')
print(":", len(db.get_all_events(trial, runs)))
self.post_refresh()
time.sleep(2)
# ------------------------------- Clustering --------------------------------- #
# Set up events for finishing all cycles
tp_EVT_CLUSTERING = wx.NewEventType()
EVT_CLUSTERING = wx.PyEventBinder(tp_EVT_CLUSTERING, 1)
class ClusteringResult(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, result=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.result = result
def GetValue(self):
return self.result
class Clusterer():
def __init__(self, trial, runblocks, tags, output, sample_size, threshold):
self.trial = trial
self.runblocks = runblocks
self.tags = tags
self.output = output
self.sample_size = sample_size
self.threshold = threshold
def unit_cell_clustering(self):
# 1. Get all pickle files, check if new ones arrived
run_numbers = []
rb_paths = []
tag_ids = set([t.id for t in self.tags])
for rb in self.runblocks:
for run in rb.runs:
if run.run not in run_numbers:
if len(tag_ids) > 0:
run_tag_ids = set([t.id for t in run.tags])
if len(tag_ids.intersection(run_tag_ids)) == 0:
continue
run_numbers.append(run.run)
# test for integration folder
path = os.path.join(get_run_path(self.output, self.trial, rb, run), "integration")
if not os.path.exists(path):
path = os.path.join(get_run_path(self.output, self.trial, rb, run), "out")
rb_paths.append(path)
all_pickles = []
for path in rb_paths:
try:
pickles = [os.path.join(path, i) for i in os.listdir(path) if
i.endswith('pickle') and 'int-' in i]
all_pickles = all_pickles + pickles
except OSError as error:
print('Folder not found!')
print(error)
if len(all_pickles) == 0:
print('No images integrated (yet)')
return
# If clustering button was pressed, do clustering
# 2. Pick subset
subset = list(np.random.choice(all_pickles, size=int(self.sample_size)))
# 3. Do clustering
ucs = Cluster.from_files(subset, use_b=True)
clusters, _ = ucs.ab_cluster(self.threshold,
log=False, write_file_lists=False,
schnell=False, doplot=False)
return clusters
class ClusteringWorker(Thread):
''' Worker thread for jobs; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
trial,
runblocks,
tags,
output,
sample_size=1000,
threshold=250,):
Thread.__init__(self)
self.parent = parent
self.trial = trial
self.runblocks = runblocks
self.tags = tags
self.output = output
self.sample_size = sample_size
self.threshold = threshold
def run(self):
clusterer = Clusterer(self.trial, self.runblocks, self.tags, self.output,
self.sample_size, self.threshold)
self.clusters = clusterer.unit_cell_clustering()
evt = ClusteringResult(tp_EVT_CLUSTERING, -1, self.clusters)
wx.PostEvent(self.parent, evt)
# ----------------------------- Merging Stats Sentinel ---------------------------- #
# Set up events for monitoring merging stats
tp_EVT_MERGINGSTATS_REFRESH = wx.NewEventType()
EVT_MERGINGSTATS_REFRESH = wx.PyEventBinder(tp_EVT_MERGINGSTATS_REFRESH, 1)
class RefreshMergingStats(wx.PyCommandEvent):
''' Send event when finished all cycles '''
def __init__(self, etype, eid, result=None):
wx.PyCommandEvent.__init__(self, etype, eid)
self.result = result
def GetValue(self):
return self.result
class MergingStatsSentinel(Thread):
''' Worker thread for merging stats; generated so that the GUI does not lock up when
processing is running '''
def __init__(self,
parent,
active=True):
Thread.__init__(self)
self.parent = parent
self.active = active
self.output = self.parent.params.output_folder
# on initialization (and restart), make sure run stats drawn from scratch
self.parent.run_window.mergingstats_tab.redraw_windows = True
def post_refresh(self):
evt = RefreshMergingStats(tp_EVT_MERGINGSTATS_REFRESH, -1)
wx.PostEvent(self.parent.run_window.mergingstats_tab, evt)
def run(self):
# one time post for an initial update
self.post_refresh()
self.db = xfel_db_application(self.parent.params)
while self.active:
self.parent.run_window.mergingstats_light.change_status('idle')
self.plot_stats_static()
self.post_refresh()
self.parent.run_window.mergingstats_light.change_status('on')
time.sleep(5)
def plot_stats_static(self):
from xfel.ui.db.merging_log_scraper import Scraper
if not self.parent.run_window.mergingstats_tab.dataset_versions: return
sizex, sizey = self.parent.run_window.mergingstats_tab.mergingstats_panelsize
sizex = (sizex-25)/85
sizey = (sizey-25)/95
if len(self.parent.run_window.mergingstats_tab.dataset_versions) > 1:
all_results = []
for folder in self.parent.run_window.mergingstats_tab.dataset_versions:
scraper = Scraper(folder, '#')
all_results.append(scraper.scrape())
self.parent.run_window.mergingstats_tab.png = scraper.plot_many_results(all_results,
self.parent.run_window.mergingstats_tab.dataset_name,
sizex, sizey, interactive=False)
else:
scraper = Scraper(self.parent.run_window.mergingstats_tab.dataset_versions[0], '%')
results = scraper.scrape()
self.parent.run_window.mergingstats_tab.png = scraper.plot_single_results(results,
self.parent.run_window.mergingstats_tab.dataset_name,
sizex, sizey, interactive=False)
self.parent.run_window.mergingstats_tab.redraw_windows = True
# ------------------------------- Main Window -------------------------------- #
class MainWindow(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(200, 200))
self.run_sentinel = None
self.job_sentinel = None
self.job_monitor = None
self.spotfinder_sentinel = None
self.runstats_sentinel = None
self.unitcell_sentinel = None
self.mergingstats_sentinel = None
self.params = load_cached_settings()
self.db = None
self.high_vis = False
# Toolbar
self.toolbar = self.CreateToolBar(wx.TB_TEXT)
self.tb_btn_quit = self.toolbar.AddTool(wx.ID_EXIT,
label='Quit',
bitmap=wx.Bitmap('{}/32x32/exit.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Quit',
longHelp='Exit CCTBX.XFEL')
self.toolbar.AddSeparator()
if not self.params.monitoring_mode:
self.tb_btn_watch_new_runs = self.toolbar.AddTool(wx.ID_ANY,
label='Watch for new runs',
bitmap=wx.Bitmap('{}/32x32/quick_restart.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Watch for new runs',
longHelp='Watch for new runs')
self.tb_btn_auto_submit = self.toolbar.AddTool(wx.ID_ANY,
label='Auto-submit jobs',
bitmap=wx.Bitmap('{}/32x32/play.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Auto-submit jobs',
longHelp='Auto-submit all pending jobs')
self.toolbar.AddSeparator()
#self.tb_btn_calibrate = self.toolbar.AddTool(wx.ID_ANY,
# label='Calibration',
# bitmap=wx.Bitmap('{}/32x32/calib.png'.format(icons)),
# bmpDisabled=wx.NullBitmap,
# shortHelp='Calibration',
# longHelp='Detector geometry calibration')
#self.toolbar.AddSeparator()
self.tb_btn_settings = self.toolbar.AddTool(wx.ID_ANY,
label='Settings',
bitmap=wx.Bitmap('{}/32x32/settings.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Settings',
longHelp='Database, user and experiment settings')
self.tb_btn_zoom = self.toolbar.AddCheckTool(wx.ID_ANY,
label='Large text',
bitmap1=wx.Bitmap('{}/32x32/search.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Change text size',
longHelp='Change text size for plots')
self.toolbar.Realize()
# Status bar
self.sb = self.CreateStatusBar()
# Menu bar
menubar = wx.MenuBar()
m_help = wx.Menu()
self.mb_about = m_help.Append(wx.ID_ANY, '&About')
self.mb_docs = m_help.Append(wx.ID_ANY, '&Online help')
menubar.Append(m_help, '&Help')
self.SetMenuBar(menubar)
    # Place elements in the main window
main_box = wx.BoxSizer(wx.VERTICAL)
# Instantiate windows
self.run_window = RunWindow(self)
# Single input window
main_box.Add(self.run_window, 1, flag=wx.ALL | wx.EXPAND, border=10)
main_box.Add((-1, 20))
# Menubar button bindings
self.Bind(wx.EVT_MENU, self.OnAboutBox, self.mb_about)
self.Bind(wx.EVT_MENU, self.OnDocs, self.mb_docs)
# Bindings
self.Bind(wx.EVT_TOOL, self.onQuit, self.tb_btn_quit)
if not self.params.monitoring_mode:
self.Bind(wx.EVT_TOOL, self.onWatchRuns, self.tb_btn_watch_new_runs)
self.Bind(wx.EVT_TOOL, self.onAutoSubmit, self.tb_btn_auto_submit)
#self.Bind(wx.EVT_TOOL, self.onCalibration, self.tb_btn_calibrate)
self.Bind(wx.EVT_TOOL, self.onSettings, self.tb_btn_settings)
self.Bind(wx.EVT_TOOL, self.onZoom, self.tb_btn_zoom)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onTabChange,
self.run_window.main_nbook)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.onLeavingTab,
self.run_window.main_nbook)
# Draw the main window sizer
self.SetSizer(main_box)
def connect_to_db(self, drop_tables = False):
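    # Connect to the experiment database described by self.params, verifying its
    # tables and optionally dropping them first.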
self.db = xfel_db_application(self.params, drop_tables = drop_tables, verify_tables = True)
return True
def stop_sentinels(self):
if not self.params.monitoring_mode:
self.stop_run_sentinel()
self.stop_job_sentinel()
self.stop_job_monitor()
#self.stop_spotfinder_sentinel()
self.stop_runstats_sentinel()
self.stop_unitcell_sentinel()
self.stop_mergingstats_sentinel()
def start_run_sentinel(self):
self.run_sentinel = RunSentinel(self, active=True)
self.run_sentinel.start()
self.run_window.run_light.change_status('on')
self.toolbar.SetToolNormalBitmap(self.tb_btn_watch_new_runs.Id, wx.Bitmap('{}/32x32/pause.png'.format(icons)))
def stop_run_sentinel(self, block = True):
if self.run_sentinel is not None and self.run_sentinel.active:
self.run_sentinel.active = False
if block:
self.run_sentinel.join()
self.run_window.run_light.change_status('off')
self.toolbar.SetToolNormalBitmap(self.tb_btn_watch_new_runs.Id, wx.Bitmap('{}/32x32/quick_restart.png'.format(icons)))
def start_job_monitor(self):
self.job_monitor = JobMonitor(self, active=True)
self.job_monitor.start()
def stop_job_monitor(self, block = True):
if self.job_monitor is not None and self.job_monitor.active:
self.job_monitor.active = False
if block:
self.job_monitor.join()
def start_job_sentinel(self):
self.job_sentinel = JobSentinel(self, active=True)
self.job_sentinel.start()
self.run_window.job_light.change_status('on')
self.toolbar.SetToolNormalBitmap(self.tb_btn_auto_submit.Id, wx.Bitmap('{}/32x32/pause.png'.format(icons)))
def stop_job_sentinel(self, block = True):
if self.job_sentinel is not None and self.job_sentinel.active:
self.job_sentinel.active = False
if block:
self.job_sentinel.join()
self.run_window.job_light.change_status('off')
self.toolbar.SetToolNormalBitmap(self.tb_btn_auto_submit.Id, wx.Bitmap('{}/32x32/play.png'.format(icons)))
def start_prg_sentinel(self):
self.prg_sentinel = ProgressSentinel(self, active=True)
self.prg_sentinel.start()
self.run_window.prg_light.change_status('on')
def stop_prg_sentinel(self, block = True):
if self.prg_sentinel is not None and self.prg_sentinel.active:
self.prg_sentinel.active = False
if block:
self.prg_sentinel.join()
self.run_window.prg_light.change_status('off')
def start_spotfinder_sentinel(self):
self.spotfinder_sentinel = SpotfinderSentinel(self, active=True)
self.spotfinder_sentinel.start()
self.run_window.spotfinder_light.change_status('on')
def stop_spotfinder_sentinel(self, block = True):
if self.spotfinder_sentinel is not None and self.spotfinder_sentinel.active:
self.spotfinder_sentinel.active = False
if block:
self.spotfinder_sentinel.join()
self.run_window.spotfinder_light.change_status('off')
def start_runstats_sentinel(self):
self.runstats_sentinel = RunStatsSentinel(self, active=True)
self.runstats_sentinel.start()
self.run_window.runstats_light.change_status('on')
def stop_runstats_sentinel(self, block = True):
if self.runstats_sentinel is not None and self.runstats_sentinel.active:
self.runstats_sentinel.active = False
if block:
self.runstats_sentinel.join()
self.run_window.runstats_light.change_status('off')
def start_unitcell_sentinel(self):
self.unitcell_sentinel = UnitCellSentinel(self, active=True)
self.unitcell_sentinel.start()
self.run_window.unitcell_light.change_status('on')
def stop_unitcell_sentinel(self, block = True):
if self.unitcell_sentinel is not None and self.unitcell_sentinel.active:
self.unitcell_sentinel.active = False
if block:
self.unitcell_sentinel.join()
self.run_window.unitcell_light.change_status('off')
def start_mergingstats_sentinel(self):
self.mergingstats_sentinel = MergingStatsSentinel(self, active=True)
self.mergingstats_sentinel.start()
self.run_window.mergingstats_light.change_status('on')
def stop_mergingstats_sentinel(self, block = True):
if self.mergingstats_sentinel is not None and self.mergingstats_sentinel.active:
self.mergingstats_sentinel.active = False
if block:
self.mergingstats_sentinel.join()
self.run_window.mergingstats_light.change_status('off')
def OnAboutBox(self, e):
''' About dialog '''
import wx.adv
info = wx.adv.AboutDialogInfo()
info.SetName('cctbx.xfel')
info.SetLicense(open(license).read())
info.SetDescription(description)
info.AddDeveloper('Artem Lyubimov')
info.AddDeveloper('Aaron Brewster')
info.AddDeveloper('Iris Young')
info.AddDeveloper('Asmit Bhowmick')
info.AddDeveloper('Daniel Paley')
info.AddDeveloper('Derek A. Mendez')
info.AddDeveloper('Johannes Blaschke')
info.AddDeveloper('Robert Bolotovsky')
info.AddDeveloper('Axel Brunger')
info.AddDeveloper('Nicholas Sauter')
wx.adv.AboutBox(info)
def OnDocs(self, e):
import webbrowser
url = 'http://cci.lbl.gov/publications/download/CCN_2019_p22_Brewster.pdf'
print('Opening', url)
webbrowser.open(url)
def onSettings(self, e):
settings_dlg = dlg.SettingsDialog(self,
params=self.params)
settings_dlg.db_cred.btn_big.Disable()
settings_dlg.SetTitle('Settings')
if (settings_dlg.ShowModal() == wx.ID_OK):
self.params = settings_dlg.params
save_cached_settings(self.params)
if self.params.facility.name == 'lcls':
self.title = 'CCTBX.XFEL | {} | {}'.format(self.params.experiment_tag,
self.params.facility.lcls.experiment)
else:
self.title = 'CCTBX.XFEL | {}'.format(self.params.experiment_tag)
def onZoom(self, e):
self.high_vis = not self.high_vis
def onCalibration(self, e):
calib_dlg = dlg.CalibrationDialog(self, db=self.db)
calib_dlg.Fit()
calib_dlg.ShowModal()
def onWatchRuns(self, e):
    ''' Toggle watching for new runs '''
if self.run_sentinel is not None and self.run_sentinel.active:
self.stop_run_sentinel(block = True)
else:
self.start_run_sentinel()
def onAutoSubmit(self, e):
''' Toggle autosubmit '''
if self.job_sentinel is not None and self.job_sentinel.active:
self.stop_job_sentinel(block = True)
else:
self.start_job_sentinel()
def onTabChange(self, e):
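    # Start the sentinels and monitors needed by the tab that is being opened.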
name = self.run_window.main_nbook.GetPageText((self.run_window.main_nbook.GetSelection()))
if name == self.run_window.jobs_tab.name:
if self.job_monitor is None or not self.job_monitor.active:
self.start_job_monitor()
self.run_window.jmn_light.change_status('on')
# Disabled
#elif name == self.run_window.spotfinder_tab.name:
# if self.job_monitor is None or not self.job_monitor.active:
# self.start_job_monitor()
# self.run_window.jmn_light.change_status('on')
# if self.spotfinder_sentinel is None or not self.spotfinder_sentinel.active:
# self.start_spotfinder_sentinel()
# self.run_window.spotfinder_light.change_status('on')
elif name == self.run_window.trials_tab.name:
self.run_window.trials_tab.refresh_trials()
elif name == self.run_window.runstats_tab.name:
if not self.params.monitoring_mode and (self.job_monitor is None or not self.job_monitor.active):
self.start_job_monitor()
self.run_window.jmn_light.change_status('on')
if self.run_window.runstats_tab.auto_update and (self.runstats_sentinel is None or not self.runstats_sentinel.active):
self.start_runstats_sentinel()
self.run_window.runstats_light.change_status('on')
elif name == self.run_window.unitcell_tab.name:
if self.run_window.unitcell_tab.auto_update and (self.unitcell_sentinel is None or not self.unitcell_sentinel.active):
self.start_unitcell_sentinel()
self.run_window.unitcell_light.change_status('on')
elif name == self.run_window.datasets_tab.name:
self.run_window.datasets_tab.refresh_datasets()
elif name == self.run_window.mergingstats_tab.name:
self.run_window.mergingstats_tab.refresh_datasets()
if self.mergingstats_sentinel is None or not self.mergingstats_sentinel.active:
self.start_mergingstats_sentinel()
self.run_window.mergingstats_light.change_status('on')
#elif name == self.run_window.merge_tab.name:
# self.run_window.merge_tab.find_trials()
def onLeavingTab(self, e):
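    # Stop (without blocking) the sentinels and monitors that only the tab being left uses.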
name = self.run_window.main_nbook.GetPageText((self.run_window.main_nbook.GetSelection()))
if name == self.run_window.jobs_tab.name:
if self.job_monitor.active:
self.stop_job_monitor(block = False)
self.run_window.jmn_light.change_status('off')
# Disabled
#elif name == self.run_window.spotfinder_tab.name:
# if self.job_monitor.active:
# self.stop_job_monitor(block = False)
# self.run_window.jmn_light.change_status('off')
# if self.spotfinder_sentinel.active:
# self.stop_spotfinder_sentinel(block = False)
# self.run_window.spotfinder_light.change_status('off')
elif name == self.run_window.runstats_tab.name:
if not self.params.monitoring_mode and self.job_monitor.active:
self.stop_job_monitor(block = False)
self.run_window.jmn_light.change_status('off')
if self.runstats_sentinel.active:
self.stop_runstats_sentinel(block = False)
self.run_window.runstats_light.change_status('off')
elif name == self.run_window.unitcell_tab.name:
if self.unitcell_sentinel.active:
self.stop_unitcell_sentinel(block = False)
self.run_window.unitcell_light.change_status('off')
elif name == self.run_window.mergingstats_tab.name:
if self.mergingstats_sentinel.active:
self.stop_mergingstats_sentinel(block = False)
self.run_window.mergingstats_light.change_status('off')
def onQuit(self, e):
self.stop_sentinels()
save_cached_settings(self.params)
self.Destroy()
class RunWindow(wx.Panel):
''' Window panel that will house all the run tabs '''
def __init__(self, parent):
self.parent = parent
super(RunWindow, self).__init__(self.parent)
self.main_panel = wx.Panel(self)
self.main_nbook = wx.Notebook(self.main_panel, style=0)
self.runs_tab = RunTab(self.main_nbook, main=self.parent)
self.trials_tab = TrialsTab(self.main_nbook, main=self.parent)
self.jobs_tab = JobsTab(self.main_nbook, main=self.parent)
#self.spotfinder_tab = SpotfinderTab(self.main_nbook, main=self.parent) # Disabled
self.runstats_tab = RunStatsTab(self.main_nbook, main=self.parent)
self.unitcell_tab = UnitCellTab(self.main_nbook, main=self.parent)
self.datasets_tab = DatasetTab(self.main_nbook, main=self.parent)
#self.merge_tab = MergeTab(self.main_nbook, main=self.parent)
self.mergingstats_tab = MergingStatsTab(self.main_nbook, main=self.parent)
self.main_nbook.AddPage(self.runs_tab, self.runs_tab.name)
self.main_nbook.AddPage(self.trials_tab, self.trials_tab.name)
self.main_nbook.AddPage(self.jobs_tab, self.jobs_tab.name)
#self.main_nbook.AddPage(self.spotfinder_tab, self.spotfinder_tab.name) # Disabled
self.main_nbook.AddPage(self.runstats_tab, self.runstats_tab.name)
self.main_nbook.AddPage(self.unitcell_tab, self.unitcell_tab.name)
self.main_nbook.AddPage(self.datasets_tab, self.datasets_tab.name)
#self.main_nbook.AddPage(self.merge_tab, self.merge_tab.name)
self.main_nbook.AddPage(self.mergingstats_tab, self.mergingstats_tab.name)
self.sentinel_box = wx.FlexGridSizer(1, 6, 0, 20)
self.run_light = gctr.SentinelStatus(self.main_panel, label='Run Sentinel')
self.job_light = gctr.SentinelStatus(self.main_panel, label='Job Sentinel')
self.jmn_light = gctr.SentinelStatus(self.main_panel, label='Job Monitor')
#self.spotfinder_light = gctr.SentinelStatus(self.main_panel, label='Spotfinder Sentinel')
self.runstats_light = gctr.SentinelStatus(self.main_panel, label='Run Stats Sentinel')
self.unitcell_light = gctr.SentinelStatus(self.main_panel, label='Unit Cell Sentinel')
self.mergingstats_light = gctr.SentinelStatus(self.main_panel, label='Merging Stats Sentinel')
self.sentinel_box.Add(self.run_light)
self.sentinel_box.Add(self.job_light)
self.sentinel_box.Add(self.jmn_light)
#self.sentinel_box.Add(self.spotfinder_light)
self.sentinel_box.Add(self.runstats_light)
self.sentinel_box.Add(self.unitcell_light)
self.sentinel_box.Add(self.mergingstats_light)
nb_sizer = wx.BoxSizer(wx.VERTICAL)
nb_sizer.Add(self.main_nbook, 1, flag=wx.EXPAND | wx.ALL, border=3)
nb_sizer.Add((-1, 20))
nb_sizer.Add(self.sentinel_box, flag=wx.ALIGN_CENTER_HORIZONTAL)
self.main_panel.SetSizer(nb_sizer)
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(self.main_panel, 1, flag=wx.EXPAND | wx.ALL, border=3)
self.SetSizer(main_sizer)
if self.parent.params.monitoring_mode:
self.runs_tab.Hide()
self.trials_tab.Hide()
self.jobs_tab.Hide()
self.datasets_tab.Hide()
self.run_light.Hide()
self.job_light.Hide()
self.jmn_light.Hide()
# --------------------------------- UI Tabs ---------------------------------- #
class BaseTab(wx.Panel):
''' Base class for runtime tab '''
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY, size=(200, 200))
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_sizer)
class RunTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Runs'
self.main = main
self.last_run = 0
self.all_runs = []
self.all_tags = []
self.all_tag_buttons = []
self.persistent_tags = []
self.run_panel = ScrolledPanel(self)
self.run_sizer = wx.BoxSizer(wx.VERTICAL)
self.run_panel.SetSizer(self.run_sizer)
self.colname_sizer = wx.FlexGridSizer(1, 2, 0, 10)
run_label = wx.StaticText(self, label='Run', size=(60, -1))
tag_label = wx.StaticText(self, label='Sample Tags', size=(620, -1))
self.colname_sizer.Add(run_label, flag=wx.ALIGN_RIGHT)
self.colname_sizer.Add(tag_label, flag=wx.ALIGN_RIGHT | wx.EXPAND)
self.colname_sizer.AddGrowableCol(1, 1)
self.main_sizer.Add(self.colname_sizer, flag=wx.ALL | wx.EXPAND, border=10)
self.btn_multirun_tags = wx.Button(self, label='Change Tags on Multiple Runs', size=(240, -1))
self.btn_persistent_tags = gctr.Button(self, name='btn_persistent_tags', label='Manage Persistent Tags', size=(240, -1))
self.btn_manage_tags = gctr.Button(self, name='btn_manage_tags', label='Manage Tags', size=(120, -1))
self.main_sizer.Add(self.run_panel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(wx.StaticLine(self), flag=wx.EXPAND | wx.ALL, border=10)
self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.button_sizer.Add(self.btn_multirun_tags,
flag=wx.RIGHT | wx.LEFT | wx.BOTTOM,
border=10)
self.button_sizer.Add(self.btn_persistent_tags,
flag=wx.RIGHT | wx.LEFT | wx.BOTTOM,
border=10)
self.button_sizer.Add(self.btn_manage_tags,
flag=wx.RIGHT | wx.LEFT | wx.BOTTOM,
border=10)
self.main_sizer.Add(self.button_sizer, flag=wx.EXPAND | wx.ALL, border=10)
# Bindings
self.Bind(EVT_RUN_REFRESH, self.onRefresh)
self.Bind(wx.EVT_BUTTON, self.onMultiRunTags, self.btn_multirun_tags)
self.Bind(wx.EVT_BUTTON, self.onManagePersistentTags, self.btn_persistent_tags)
self.Bind(wx.EVT_BUTTON, self.onManageTags, self.btn_manage_tags)
def onRefresh(self, e):
self.refresh_rows()
def onMultiRunTags(self, e):
"""Add or remove tags applied to multiple runs at a time."""
multirun_tag_dialog = dlg.MultiRunTagDialog(self, db=self.main.db)
multirun_tag_dialog.Fit()
multirun_tag_dialog.ShowModal()
multirun_tag_dialog.Destroy()
def onManageTags(self, e):
''' User can add / remove / edit sample tags '''
mtag_dlg = dlg.TagDialog(self, db=self.main.db)
mtag_dlg.Fit()
mtag_dlg.ShowModal()
mtag_dlg.Destroy()
# Update tags on all tag buttons
for btn in self.all_tag_buttons:
btn.tags = btn.run.tags
btn.update_label()
def onManagePersistentTags(self, e):
'''Update which tags are applied automatically to new runs'''
tags = self.main.db.get_all_tags()
tag_names = [i.name for i in tags]
mptag_dlg = wx.MultiChoiceDialog(self,
message='Available tags',
caption='Persistent Tags',
choices=tag_names)
# Get indices of selected items (if any) and set them to checked
persistent_tags = self.persistent_tags
indices = [tag_names.index(i) for i in tag_names if i in persistent_tags]
mptag_dlg.SetSelections(indices)
mptag_dlg.Fit()
if (mptag_dlg.ShowModal() == wx.ID_OK):
indices = mptag_dlg.GetSelections()
tag_names = [t.name for t in tags if tag_names.index(t.name) in indices]
self.persistent_tags = tag_names
mptag_dlg.Destroy()
def refresh_rows(self, all=False):
# Get new runs
old_run_numbers = [run.run for run in self.all_runs]
all_runs = self.main.db.get_all_runs()
new_runs = [run for run in all_runs if run.run not in old_run_numbers]
font = self.GetFont()
dc = wx.ScreenDC()
dc.SetFont(font)
if len(all_runs) > 0:
max_width = max([dc.GetTextExtent(str(run))[0] for run in all_runs])
else:
max_width = None
# Update either all or only new runs
if all:
runs = self.all_runs
else:
runs = new_runs
for run in runs:
run_row = RunEntry(self.run_panel, run=run, params=self.main.params, label_width = max_width)
self.all_tag_buttons.append(run_row.tag_button)
self.run_sizer.Add(run_row, flag=wx.ALL | wx.EXPAND, border=0)
self.all_runs = all_runs
# Update labels on all new tag buttons
self.all_tags = self.main.db.get_all_tags()
for button in self.all_tag_buttons:
button.all_tags = self.all_tags
button.update_label()
self.run_panel.SetupScrolling(scrollToTop=False)
self.run_panel.Refresh()
class TrialsTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Trials'
self.main = main
self.show_active_only = False
self.trial_panel = ScrolledPanel(self, size=(200, 200))
self.trial_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.trial_panel.SetSizer(self.trial_sizer)
self.btn_sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.btn_sizer.AddGrowableCol(0)
self.btn_add_trial = wx.Button(self, label='New Trial', size=(120, -1))
self.btn_active_only = wx.ToggleButton(self,
label='Show Only Active Trials',
size=(180, self.btn_add_trial.GetSize()[1]))
self.btn_sizer.Add(self.btn_active_only, flag=wx.ALIGN_RIGHT)
self.btn_sizer.Add(self.btn_add_trial)
self.main_sizer.Add(self.trial_panel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.btn_sizer, flag=wx.EXPAND | wx.ALL, border=10)
# Bindings
self.Bind(EVT_RUN_REFRESH, self.onRefresh)
self.Bind(wx.EVT_BUTTON, self.onAddTrial, self.btn_add_trial)
self.Bind(wx.EVT_TOGGLEBUTTON, self.onActiveOnly, self.btn_active_only)
def onRefresh(self, e):
self.refresh_trials()
def refresh_trials(self):
self.trial_sizer.Clear(delete_windows=True)
self.all_trials = self.main.db.get_all_trials()
for trial in self.all_trials:
if self.show_active_only:
if trial.active:
self.add_trial(trial=trial)
else:
self.add_trial(trial=trial)
self.trial_panel.SetSizer(self.trial_sizer)
#self.trial_panel.Layout()
self.trial_sizer.Layout()
self.trial_panel.SetupScrolling(scrollToTop=False)
def add_trial(self, trial):
new_trial = TrialPanel(self.trial_panel,
db=self.main.db,
trial=trial,
box_label='Trial {} {}'.format(trial.trial,
trial.comment[:min(len(trial.comment), 20)] if trial.comment is not None else ""))
new_trial.chk_active.SetValue(trial.active)
new_trial.refresh_trial()
self.trial_sizer.Add(new_trial, flag=wx.EXPAND | wx.ALL, border=10)
def onAddTrial(self, e):
new_trial_dlg = dlg.TrialDialog(self, db=self.main.db)
new_trial_dlg.Fit()
if new_trial_dlg.ShowModal() == wx.ID_OK:
self.refresh_trials()
def onActiveOnly(self, e):
self.show_active_only = self.btn_active_only.GetValue()
self.refresh_trials()
class JobsTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Jobs'
self.main = main
self.all_trials = []
self.all_jobs = None
self.filter = 'All jobs'
self.data = {}
self.job_list = gctr.SortableListCtrl(self, style=wx.LC_REPORT|wx.BORDER_SUNKEN)
self.job_list.InsertColumn(0, "Job")
self.job_list.InsertColumn(1, "Type")
self.job_list.InsertColumn(2, "Dataset")
self.job_list.InsertColumn(3, "Trial")
self.job_list.InsertColumn(4, "Run")
self.job_list.InsertColumn(5, "Block")
self.job_list.InsertColumn(6, "Task")
self.job_list.InsertColumn(7, "Subm ID")
self.job_list.InsertColumn(8, "Status")
self.job_list.integer_columns = {0, 6}
self.job_list_sort_flag = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.job_list_col = 0
self.trial_choice = gctr.ChoiceCtrl(self,
label='Filter by:',
label_size=(60, -1),
label_style='normal',
ctrl_size=(100, -1),
choices=[])
self.btn_stop_job = wx.Button(self, label='Stop job', size=(120, -1))
self.btn_delete_job = wx.Button(self, label='Delete job', size=(120, -1))
self.btn_restart_job = wx.Button(self, label='Restart job', size=(120, -1))
self.chk_active = wx.CheckBox(self, label='Only display jobs from active trials/blocks')
self.chk_active.SetValue(True)
self.option_sizer = wx.FlexGridSizer(1, 5, 0, 20)
self.option_sizer.AddMany([(self.trial_choice), (self.chk_active), (self.btn_stop_job), (self.btn_delete_job), (self.btn_restart_job)])
self.main_sizer.Add(self.job_list, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.option_sizer, flag=wx.EXPAND | wx.ALL, border=10)
self.Bind(wx.EVT_BUTTON, self.onStopJob, self.btn_stop_job)
self.Bind(wx.EVT_BUTTON, self.onDeleteJob, self.btn_delete_job)
self.Bind(wx.EVT_BUTTON, self.onRestartJob, self.btn_restart_job)
self.Bind(wx.EVT_CHOICE, self.onTrialChoice, self.trial_choice.ctr)
self.chk_active.Bind(wx.EVT_CHECKBOX, self.onToggleActive)
self.Bind(EVT_JOB_MONITOR, self.onMonitorJobs)
self.Bind(wx.EVT_LIST_COL_CLICK, self.onColClick, self.job_list)
def onToggleActive(self, e):
self.main.job_monitor.only_active_jobs = self.chk_active.GetValue()
def onTrialChoice(self, e):
self.filter = self.trial_choice.ctr.GetString(
self.trial_choice.ctr.GetSelection())
def GetSelectedJobIds(self):
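    # Return the database ids (stored as item data) of the rows currently selected in the job list.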
return [self.job_list.GetItemData(i) for i in range(self.job_list.GetItemCount()) if self.job_list.IsSelected(i)]
def onStopJob(self, e):
if self.all_jobs is None:
return
jobs_to_stop = self.GetSelectedJobIds()
if len(jobs_to_stop) == 0:
return
    if len(jobs_to_stop) == 1:
      message='Are you sure you want to stop job %d?'%jobs_to_stop[0]
    else:
      message='Are you sure you want to stop %d jobs?'%len(jobs_to_stop)
msg = wx.MessageDialog(self,
message=message,
caption='Warning',
style=wx.YES_NO | wx.ICON_EXCLAMATION)
if (msg.ShowModal() == wx.ID_NO):
return
from xfel.ui.components.submission_tracker import JobStopper
stopper = JobStopper(self.main.params.mp.method)
for job in self.all_jobs:
if job.id in jobs_to_stop:
stopper.stop_job(job.submission_id)
def onDeleteJob(self, e):
if self.all_jobs is None:
return
jobs_to_delete = self.GetSelectedJobIds()
if len(jobs_to_delete) == 0:
return
    if len(jobs_to_delete) == 1:
      message='Are you sure you want to delete all processing results from job %d?'%jobs_to_delete[0]
    else:
      message='Are you sure you want to delete all processing results from %d jobs?'%len(jobs_to_delete)
msg = wx.MessageDialog(self,
message=message,
caption='Warning',
style=wx.YES_NO | wx.ICON_EXCLAMATION)
if (msg.ShowModal() == wx.ID_NO):
return
for job in self.all_jobs:
if job.id in jobs_to_delete:
job.delete()
def onRestartJob(self, e):
if self.all_jobs is None:
return
jobs_to_restart = self.GetSelectedJobIds()
if len(jobs_to_restart) == 0:
return
    if len(jobs_to_restart) == 1:
      message='Are you sure you want to restart job %d? This will delete all processing results from this job and re-submit it. Be sure the job has been stopped first.'%jobs_to_restart[0]
    else:
      message='Are you sure you want to restart %d jobs? This will delete all processing results from these jobs and re-submit them. Be sure the jobs have been stopped first.'%len(jobs_to_restart)
msg = wx.MessageDialog(self,
message=message,
caption='Warning',
style=wx.YES_NO | wx.ICON_EXCLAMATION)
if (msg.ShowModal() == wx.ID_NO):
return
for job in self.all_jobs:
if job.id in jobs_to_restart:
job.delete()
if job.status != "DELETED":
print("Couldn't restart job", job.id, "job is not deleted")
continue
job.remove_from_db()
def onMonitorJobs(self, e):
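    # Handle a JobMonitor event: pick up any new trials, then synchronise the job
    # table with the jobs reported by the monitor thread.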
# Find new trials
if e.trials is not None:
all_db_trials = [str(i.trial) for i in e.trials]
new_trials = [i for i in all_db_trials if i not in self.all_trials]
if len(new_trials) > 0:
self.find_trials()
self.all_trials = all_db_trials
if e.jobs is not None:
self.all_jobs = e.jobs
if str(self.filter).lower() == 'all jobs':
selected_trials = [int(t) for t in self.all_trials]
else:
selected_trials = [int(self.filter.split()[-1])]
selected_jobs = self.GetSelectedJobIds()
      if self.job_list.GetFocusedItem() >= 0:  # -1 (wx.NOT_FOUND) means no row is focused
focused_job_id = self.job_list.GetItemData(self.job_list.GetFocusedItem())
else:
focused_job_id = None
self.data = {} # reset contents of the table, with unique row ids
for job in e.jobs:
if job.trial is not None:
if job.trial.trial not in selected_trials: continue
        # Order: job, type, dataset, trial, run, rungroup, task, submission id, status
j = str(job.id)
jt = job.task.type if job.task is not None else "-"
ds = job.dataset.name if job.dataset is not None else "-"
t = "t%03d" % job.trial.trial if job.trial is not None else "-"
try:
r = "r%04d" % int(job.run.run) if job.run is not None else "-"
except ValueError:
r = "r%s" % job.run.run
rg = "rg%03d" % job.rungroup.id if job.rungroup is not None else "-"
tsk = "%d" % job.task.id if job.task is not None else "-"
sid = str(job.submission_id)
short_status = str(job.status).strip("'")
if short_status == "SUBMIT_FAIL":
short_status = "S_FAIL"
elif short_status == "SUBMITTED":
short_status = "SUBMIT"
s = short_status
self.data[job.id] = [j, jt, ds, t, r, rg, tsk, sid, s]
found_it = False
# Look to see if item already in list
for i in range(self.job_list.GetItemCount()):
if self.job_list.GetItemData(i) == job.id:
for k, item in enumerate(self.data[job.id]):
self.job_list.SetItem(i, k, item)
found_it = True
break
if found_it: continue
# Item not present, so deposit the row in the table
local_job_id = self.job_list.Append(self.data[job.id])
self.job_list.SetItemData(local_job_id, job.id)
# Remove items not sent in the event or otherwise filtered out
# Need to do it in reverse order to avoid list re-ordering when deleting items
for i in reversed(range(self.job_list.GetItemCount())):
if self.job_list.GetItemData(i) not in self.data:
self.job_list.DeleteItem(i)
# Initialize sortable column mixin
self.job_list.initialize_sortable_columns(n_col=5, itemDataMap=self.data)
self.job_list.RestoreSortOrder(self.job_list_col, self.job_list_sort_flag)
# Restore selected items
for i in range(self.job_list.GetItemCount()):
job_id = self.job_list.GetItemData(i)
if job_id in selected_jobs:
self.job_list.Select(i)
if job_id == focused_job_id:
self.job_list.Focus(i)
def onColClick(self, e):
# Note: the sortable list binds the event first and activates this method after.
# print "Click recorded in column %s" % str(self.job_list._col) ## DEBUG
# print self.job_list.GetSortState() ## DEBUG
self.job_list_col = self.job_list._col
self.job_list_sort_flag = self.job_list._colSortFlag
def find_trials(self):
print("Found trials")
if self.main.db is not None:
choices = ['All jobs'] + \
['trial {}'.format(i.trial) for i in self.main.db.get_all_trials()]
self.trial_choice.ctr.Clear()
for choice in choices:
self.trial_choice.ctr.Append(choice)
if self.filter == 'All jobs':
self.trial_choice.ctr.SetSelection(0)
else:
        self.trial_choice.ctr.SetSelection(self.trial_choice.ctr.FindString(self.filter))  # look up by name so multi-digit trial numbers work
class SpotfinderTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Spotfinder'
self.main = main
self.all_trials = []
self.trial_no = None
self.trial = None
self.all_runs = []
self.selected_runs = []
self.tag_trial_changed = True
self.tag_runs_changed = True
self.tag_last_five = False
self.entire_expt = False
self.png = None
self.static_bitmap = None
self.redraw_windows = True
self.n_min = 4
self.spotfinder_panel = wx.Panel(self, size=(100, 100))
self.spotfinder_panelsize = self.spotfinder_panel.GetSize()
self.spotfinder_box = wx.StaticBox(self.spotfinder_panel, label='Run Statistics')
self.spotfinder_sizer = wx.StaticBoxSizer(self.spotfinder_box, wx.HORIZONTAL)
self.spotfinder_panel.SetSizer(self.spotfinder_sizer)
self.trial_number = gctr.ChoiceCtrl(self,
label='Trial:',
label_size=(90, -1),
label_style='normal',
ctrl_size=(100, -1),
choices=[])
self.last_five_runs = wx.Button(self,
label='Auto plot last five runs',
size=(200, -1))
self.plot_entire_expt = wx.Button(self,
label='Auto plot entire experiment',
size=(200,-1))
self.n_min_selector = gctr.OptionCtrl(self,
label='minimum # spots:',
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('n_min', 4)])
self.run_numbers = gctr.CheckListCtrl(self,
label='Selected runs:',
label_size=(200, -1),
label_style='normal',
ctrl_size=(150, 224),
direction='vertical',
choices=[])
self.bottom_sizer = wx.FlexGridSizer(1, 2, 0, 4)
options_box = wx.StaticBox(self, label='Display Options')
self.options_box_sizer = wx.StaticBoxSizer(options_box, wx.VERTICAL)
self.options_opt_sizer = wx.GridBagSizer(1, 1)
self.options_opt_sizer.Add(self.trial_number, pos=(0, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.last_five_runs, pos=(1, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.plot_entire_expt, pos=(2, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.n_min_selector, pos=(3, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.run_numbers, pos=(0, 1), span=(8, 1),
flag=wx.BOTTOM | wx.TOP | wx.RIGHT | wx.EXPAND,
border=10)
self.options_box_sizer.Add(self.options_opt_sizer)
self.bottom_sizer.Add(self.options_box_sizer)
self.main_sizer.Add(self.spotfinder_panel, 1,
flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.bottom_sizer, 0,
flag=wx.EXPAND | wx.ALL, border=10)
# Bindings
self.Bind(wx.EVT_CHOICE, self.onTrialChoice, self.trial_number.ctr)
self.Bind(wx.EVT_BUTTON, self.onLastFiveRuns, self.last_five_runs)
self.Bind(wx.EVT_BUTTON, self.onEntireExpt, self.plot_entire_expt)
self.Bind(wx.EVT_TEXT_ENTER, self.onNMin, self.n_min_selector.n_min)
self.Bind(wx.EVT_CHECKLISTBOX, self.onRunChoice, self.run_numbers.ctr)
self.Bind(EVT_SPOTFINDER_REFRESH, self.onRefresh)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnSize(self, e):
self.spotfinder_panelsize = self.spotfinder_panel.GetSize()
e.Skip()
def onTrialChoice(self, e):
trial_idx = self.trial_number.ctr.GetSelection()
if trial_idx == 0:
self.trial_no = None
self.trial = None
self.run_numbers.ctr.Clear()
self.all_runs = []
self.selected_runs = []
else:
trial_no = self.trial_number.ctr.GetClientData(trial_idx)
if trial_no is not None:
self.trial_no = int(trial_no)
self.trial = self.main.db.get_trial(trial_number=int(self.trial_no))
self.spotfinder_box.SetLabel('Spotfinder Results - Trial {}'.format(self.trial_no))
self.find_runs()
def onRunChoice(self, e):
self.tag_last_five = False
self.entire_expt = False
run_numbers_selected = self.run_numbers.ctr.GetCheckedStrings()
if self.trial is not None:
self.selected_runs = [r.run for r in self.trial.runs if r.run in run_numbers_selected]
self.main.run_window.spotfinder_light.change_status('idle')
def find_trials(self):
all_db_trials = [str(i.trial) for i in self.main.db.get_all_trials()]
new_trials = [i for i in all_db_trials if i not in self.all_trials]
if len(new_trials) > 0:
self.trial_number.ctr.Clear()
self.all_trials = [None] + all_db_trials
for trial in self.all_trials:
if trial is None:
entry = 'None'
self.trial_number.ctr.Append(entry)
self.trial_number.ctr.SetClientData(0, None)
else:
entry = trial
self.trial_number.ctr.Append(entry)
item_idx = self.trial_number.ctr.FindString(entry)
self.trial_number.ctr.SetClientData(item_idx, trial)
if self.trial_no is not None:
self.trial_number.ctr.SetSelection(self.trial_no)
else:
self.trial_number.ctr.SetSelection(0)
def find_runs(self):
self.run_numbers.ctr.Clear()
if self.trial is not None:
self.runs_available = [str(r.run) for r in self.trial.runs]
if len(self.all_runs) > 0:
self.run_numbers.ctr.InsertItems(items=self.all_runs, pos=0)
def onRefresh(self, e):
self.refresh_trials()
self.refresh_runs()
if self.tag_last_five:
self.select_last_n_runs(5)
elif self.entire_expt:
self.select_all()
if self.redraw_windows:
self.plot_static_spotfinder_stats()
self.redraw_windows = False
if self.trial is not None:
self.spotfinder_box.SetLabel('Spotfinder Results - Trial {}'.format(self.trial_no))
else:
self.spotfinder_box.SetLabel('Spotfinder Results - No trial selected')
def refresh_trials(self):
if self.all_trials == []:
self.find_trials()
avail_trials = [str(i.trial) for i in self.main.db.get_all_trials()]
for t in avail_trials:
if t not in self.all_trials:
self.trial_number.ctr.Append(t)
self.all_trials.append(t)
item_idx = self.trial_number.ctr.FindString(t)
self.trial_number.ctr.SetClientData(item_idx, t)
def refresh_runs(self):
if self.all_runs == []:
self.find_runs()
if self.trial is not None:
avail_runs = [str(r.run) for r in self.trial.runs]
for r in avail_runs:
if r not in self.all_runs:
self.run_numbers.ctr.Append(r)
self.all_runs.append(r)
def plot_static_spotfinder_stats(self):
if self.png is not None:
if self.static_bitmap is not None:
self.static_bitmap.Destroy()
img = wx.Image(self.png, wx.BITMAP_TYPE_ANY)
self.static_bitmap = wx.StaticBitmap(
self.spotfinder_panel, wx.ID_ANY, wx.Bitmap(img))
self.spotfinder_sizer.Add(self.static_bitmap, 0, wx.EXPAND | wx.ALL, 3)
self.spotfinder_panel.SetSizer(self.spotfinder_sizer)
self.spotfinder_panel.Layout()
def select_last_n_runs(self, n):
if self.trial is not None:
self.selected_runs = [r.run for r in self.trial.runs][-n:]
def select_all(self):
if self.trial is not None:
self.selected_runs = [r.run for r in self.trial.runs]
def onLastFiveRuns(self, e):
self.entire_expt = False
self.tag_last_five = True
self.select_last_n_runs(5)
self.main.run_window.spotfinder_light.change_status('idle')
def onEntireExpt(self, e):
self.entire_expt = True
self.tag_last_five = False
self.select_all()
self.main.run_window.spotfinder_light.change_status('idle')
def onNMin(self, e):
n_min = self.n_min_selector.n_min.GetValue()
if n_min.isdigit():
self.n_min = int(n_min)
class RunStatsTab(SpotfinderTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Run Stats'
self.main = main
self.all_trials = []
self.trial_no = None
self.trial = None
self.all_runs = []
self.selected_runs = []
self.tag_trial_changed = True
self.tag_runs_changed = True
self.tag_last_five = False
self.entire_expt = False
self.d_min = 2.5
self.n_multiples = 2
self.ratio = 1
self.n_strong = 16
self.i_sigi = 1
self.n_dump = 10
self.should_have_indexed_image_paths = None
self.should_have_indexed_timestamps = None
self.strong_indexed_image_paths = None
self.strong_indexed_image_timestamps = None
self.auto_update = True
self.runstats_panel = wx.Panel(self, size=(100, 100))
self.runstats_box = wx.StaticBox(self.runstats_panel, label='Run Statistics')
self.runstats_sizer = wx.StaticBoxSizer(self.runstats_box, wx.HORIZONTAL)
self.runstats_panel.SetSizer(self.runstats_sizer)
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import (
FigureCanvasWxAgg as FigureCanvas,
NavigationToolbar2WxAgg as NavigationToolbar)
self.figure = mpl.figure.Figure()
self.canvas = FigureCanvas(self.runstats_box, -1, self.figure)
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.SetWindowStyle(wx.TB_VERTICAL)
self.toolbar.Realize()
self.runstats_sizer.Add(self.canvas, 1, wx.EXPAND)
self.runstats_sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.options_box = wx.StaticBox(self, label='Statistics Options')
self.trial_number = gctr.ChoiceCtrl(self.options_box,
label='Trial:',
label_size=(90, -1),
label_style='normal',
ctrl_size=(100, -1),
choices=[])
self.last_five_runs = wx.Button(self.options_box,
label='Auto plot last five runs',
size=(200, -1))
self.plot_entire_expt = wx.Button(self.options_box,
label='Auto plot entire experiment',
size=(200,-1))
self.d_min_select = gctr.OptionCtrl(self.options_box,
name='rs_d_min',
label='High resolution limit:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('d_min', 2.5)])
self.n_multiples_selector = gctr.OptionCtrl(self.options_box,
name='rs_multiples',
label='# multiples threshold:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('multiples', 2)])
self.ratio_cutoff = gctr.OptionCtrl(self.options_box,
name='rs_ratio',
label='two theta ratio cutoff:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('ratio', 1)])
self.n_strong_cutoff = gctr.OptionCtrl(self.options_box,
name='rs_n_strong',
label='# strong spots cutoff:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('n_strong', 16)])
self.i_sigi_cutoff = gctr.OptionCtrl(self.options_box,
name='rs_isigi',
label='I/sig(I) cutoff:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('isigi', 1)])
self.n_dump_cutoff = gctr.OptionCtrl(self.options_box,
name='rs_n_dump',
label='# images to dump:',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(30, -1),
items=[('n_dump', 10)])
self.run_numbers = gctr.CheckListCtrl(self.options_box,
label='Selected runs:',
label_size=(200, -1),
label_style='normal',
ctrl_size=(150, 224),
direction='vertical',
choices=[])
self.strong_indexed_box = wx.StaticBox(self, label='Strongest Indexed Images')
self.strong_indexed_list = wx.TextCtrl(self.strong_indexed_box,
style=wx.TE_MULTILINE | wx.TE_READONLY)
self.idx_show_images_button = wx.Button(self.strong_indexed_box,
label='Open images',
size=(200, -1))
self.should_have_indexed_box = wx.StaticBox(self, label='Strong Images that Didn\'t Index')
self.should_have_indexed_list = wx.TextCtrl(self.should_have_indexed_box,
style=wx.TE_MULTILINE | wx.TE_READONLY)
self.shi_dump_images_button = wx.Button(self.should_have_indexed_box,
label='Dump images',
size=(200, -1))
self.bottom_sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.options_box_sizer = wx.StaticBoxSizer(self.options_box, wx.VERTICAL)
self.options_opt_sizer = wx.GridBagSizer(1, 1)
self.options_opt_sizer.Add(self.trial_number, pos=(0, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.last_five_runs, pos=(1, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.plot_entire_expt, pos=(2, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.d_min_select, pos=(3, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.n_multiples_selector, pos=(4, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.ratio_cutoff, pos=(5, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.n_strong_cutoff, pos=(7, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.i_sigi_cutoff, pos=(6, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.n_dump_cutoff, pos=(8, 0),
flag=wx.ALL, border=2)
self.options_opt_sizer.Add(self.run_numbers, pos=(0, 1), span=(8, 1),
flag=wx.BOTTOM | wx.TOP | wx.RIGHT | wx.EXPAND,
border=10)
self.options_box_sizer.Add(self.options_opt_sizer)
self.bottom_sizer.Add(self.options_box_sizer)
self.dump_images_sizer = wx.GridBagSizer(3, 1)
self.strong_indexed_box_sizer = wx.StaticBoxSizer(self.strong_indexed_box, wx.VERTICAL)
self.strong_indexed_results_sizer = wx.GridBagSizer(1, 1)
self.strong_indexed_results_sizer.Add(self.strong_indexed_list, pos=(0, 0),
span=(5, 45),
flag=wx.LEFT | wx.RIGHT | wx.EXPAND,
border=10)
self.strong_indexed_box_sizer.Add(self.strong_indexed_results_sizer)
self.strong_indexed_box_sizer.Add(self.idx_show_images_button,
flag=wx.LEFT | wx.RIGHT | wx.ALL | wx.EXPAND,
border=5)
self.should_have_indexed_box_sizer = wx.StaticBoxSizer(self.should_have_indexed_box, wx.VERTICAL)
self.should_have_indexed_results_sizer = wx.GridBagSizer(1, 1)
self.should_have_indexed_results_sizer.Add(self.should_have_indexed_list, pos=(0, 0),
span=(5, 45),
flag=wx.LEFT | wx.RIGHT | wx.EXPAND,
border=10)
self.should_have_indexed_box_sizer.Add(self.should_have_indexed_results_sizer)
self.should_have_indexed_box_sizer.Add(self.shi_dump_images_button,
flag=wx.LEFT | wx.RIGHT | wx.ALL | wx.EXPAND,
border=5)
self.manage_panel = wx.Panel(self)
self.manage_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.btn_toggle_options = wx.ToggleButton(self.manage_panel,
label='Hide options')
self.chk_auto_update = wx.CheckBox(self.manage_panel, label='Auto update')
self.chk_auto_update.SetValue(True)
self.manage_sizer.Add(self.btn_toggle_options)
self.manage_sizer.Add(self.chk_auto_update)
self.manage_panel.SetSizer(self.manage_sizer)
self.dump_images_sizer.Add(self.manage_panel, pos=(0, 0))
self.dump_images_sizer.Add(self.strong_indexed_box_sizer, pos=(1, 0))
self.dump_images_sizer.Add(self.should_have_indexed_box_sizer, pos=(2, 0))
if self.main.params.dispatcher != "cctbx.xfel.xtc_process":
self.n_dump_cutoff.Hide()
self.strong_indexed_box.Hide()
self.strong_indexed_list.Hide()
self.idx_show_images_button.Hide()
self.should_have_indexed_box.Hide()
self.should_have_indexed_list.Hide()
self.shi_dump_images_button.Hide()
# self.bottom_sizer.Add(self.should_have_indexed_box_sizer, flag=wx.EXPAND | wx.ALL)
# self.bottom_sizer.Add(self.strong_indexed_box_sizer, flag=wx.EXPAND | wx.ALL)
self.bottom_sizer.Add(self.dump_images_sizer, flag=wx.EXPAND | wx.ALL)
self.main_sizer.Add(self.runstats_panel, 1,
flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.bottom_sizer, 0,
flag=wx.EXPAND | wx.ALL, border=10)
self.static_bitmap = wx.StaticBitmap(
self.runstats_panel, wx.ID_ANY)#, wx.Bitmap(img))
self.runstats_sizer.Add(self.static_bitmap, 0, wx.EXPAND | wx.ALL, 3)
self.runstats_panel.SetSizer(self.runstats_sizer)
# Bindings
self.Bind(wx.EVT_CHOICE, self.onTrialChoice, self.trial_number.ctr)
self.Bind(wx.EVT_BUTTON, self.onLastFiveRuns, self.last_five_runs)
self.Bind(wx.EVT_BUTTON, self.onEntireExpt, self.plot_entire_expt)
self.Bind(wx.EVT_TEXT_ENTER, self.onDMin, self.d_min_select.d_min)
self.Bind(wx.EVT_TEXT_ENTER, self.onMultiples, self.n_multiples_selector.multiples)
self.Bind(wx.EVT_TEXT_ENTER, self.onRatioCutoff, self.ratio_cutoff.ratio)
self.Bind(wx.EVT_TEXT_ENTER, self.onHitCutoff, self.n_strong_cutoff.n_strong)
self.Bind(wx.EVT_TEXT_ENTER, self.onIsigICutoff, self.i_sigi_cutoff.isigi)
self.Bind(wx.EVT_TEXT_ENTER, self.onNDump, self.n_dump_cutoff.n_dump)
self.Bind(wx.EVT_CHECKLISTBOX, self.onRunChoice, self.run_numbers.ctr)
self.Bind(wx.EVT_BUTTON, self.onOpenImages, self.idx_show_images_button)
self.Bind(wx.EVT_BUTTON, self.onDumpImages, self.shi_dump_images_button)
self.Bind(wx.EVT_TOGGLEBUTTON, self.onToggleOptions, self.btn_toggle_options)
self.Bind(wx.EVT_CHECKBOX, self.onChkAutoUpdate, self.chk_auto_update)
self.Bind(EVT_RUNSTATS_REFRESH, self.onRefresh)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Layout()
self.Fit()
self.runstats_panelsize = self.runstats_box.GetSize()
def OnSize(self, e):
self.runstats_panelsize = self.runstats_box.GetSize()
e.Skip()
def onTrialChoice(self, e):
trial_idx = self.trial_number.ctr.GetSelection()
self.trial_no = None
self.trial = None
self.run_numbers.ctr.Clear()
self.all_runs = []
self.selected_runs = []
if trial_idx > 0:
trial_no = self.trial_number.ctr.GetClientData(trial_idx)
if trial_no is not None:
self.trial_no = int(trial_no)
self.trial = self.main.db.get_trial(trial_number=int(self.trial_no))
self.runstats_box.SetLabel('Run Statistics - Trial {}'.format(self.trial_no))
self.find_runs()
def onRunChoice(self, e):
self.tag_last_five = False
self.entire_expt = False
run_numbers_selected = self.run_numbers.ctr.GetCheckedStrings()
if self.trial is not None:
self.selected_runs = [r.run for r in self.trial.runs if r.run in run_numbers_selected]
self.main.run_window.runstats_light.change_status('idle')
def onRefresh(self, e):
self.refresh_trials()
self.refresh_runs()
if self.tag_last_five:
self.select_last_n_runs(5)
elif self.entire_expt:
self.select_all()
self.print_strong_indexed_paths()
self.print_should_have_indexed_paths()
if self.trial is not None:
self.runstats_box.SetLabel('Run Statistics - Trial {}'.format(self.trial_no))
else:
self.runstats_box.SetLabel('Run Statistics - No trial selected')
def onToggleOptions(self, e):
if self.btn_toggle_options.GetValue():
self.options_box.Hide()
self.strong_indexed_box.Hide()
self.should_have_indexed_box.Hide()
else:
self.options_box.Show()
if self.main.params.dispatcher == "cctbx.xfel.xtc_process":
self.strong_indexed_box.Show()
self.should_have_indexed_box.Show()
self.Layout()
self.Fit()
def onChkAutoUpdate(self, e):
self.auto_update = self.chk_auto_update.GetValue()
if self.auto_update and (self.main.runstats_sentinel is None or not self.main.runstats_sentinel.active):
self.main.start_runstats_sentinel()
else:
self.main.stop_runstats_sentinel()
def print_strong_indexed_paths(self):
try:
paths = []
for p in self.strong_indexed_image_paths:
paths.extend(p)
image_paths = '\n'.join(paths)
self.strong_indexed_list.SetValue(image_paths)
except TypeError:
print("Error getting list of best indexed images")
pass
def print_should_have_indexed_paths(self):
if self.trial is not None:
try:
paths = []
for p in self.should_have_indexed_image_paths:
paths.extend(p)
image_paths = '\n'.join(paths)
self.should_have_indexed_list.SetValue(image_paths)
except TypeError:
print("Error getting list of images that should have indexed")
pass
def onLastFiveRuns(self, e):
self.entire_expt = False
self.tag_last_five = True
self.select_last_n_runs(5)
self.main.run_window.runstats_light.change_status('idle')
def onEntireExpt(self, e):
self.entire_expt = True
self.tag_last_five = False
self.select_all()
self.main.run_window.runstats_light.change_status('idle')
def onDMin(self, e):
try:
d_min = float(self.d_min_select.d_min.GetValue())
self.d_min = d_min
except ValueError:
pass
def onMultiples(self, e):
try:
mult = int(self.n_multiples_selector.multiples.GetValue())
self.n_multiples = mult
except ValueError:
pass
def onRatioCutoff(self, e):
try:
ratio = float(self.ratio_cutoff.ratio.GetValue())
self.ratio = ratio
except ValueError:
pass
def onHitCutoff(self, e):
n_strong = self.n_strong_cutoff.n_strong.GetValue()
if n_strong.isdigit():
self.n_strong = int(n_strong)
def onIsigICutoff(self, e):
try:
isigi = float(self.i_sigi_cutoff.isigi.GetValue())
self.i_sigi = isigi
except ValueError:
pass
def onNDump(self, e):
n_dump = self.n_dump_cutoff.n_dump.GetValue()
if n_dump.isdigit():
self.n_dump = int(n_dump)
def dump_timestamps(self, params, ts_list, img_list):
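    # Assemble a cctbx.xfel.xtc_dump command for the selected timestamps, chain it with
    # dials.image_viewer on the dumped images, and run it in a background ImageDumpThread.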
if not os.path.isdir(params['output_dir']):
os.makedirs(params['output_dir'])
command = ('cctbx.xfel.xtc_dump input.experiment=%s '%params['experiment'])+\
('input.run_num=%s input.address=%s '%(str(params['run']), params['address']))+\
('format.file_format=%s '%params['format'])+\
('output.output_dir=%s '%params['output_dir'])
if params['format'] == 'cbf':
command += 'format.cbf.detz_offset=%f '%params['distance']
if params['energy'] is not None:
command += 'format.cbf.override_energy=%f '%params['energy']
if 'Rayonix' in params['address']:
command += 'format.cbf.mode=rayonix '
if params['beamx'] is not None:
command += 'format.cbf.rayonix.override_beam_x=%d '%params['beamx']
if params['beamy'] is not None:
command += 'format.cbf.rayonix.override_beam_y=%d '%params['beamy']
if params['bin_size'] is not None:
command += 'format.cbf.rayonix.bin_size=%d '%params['bin_size']
elif 'cspad' in params['address'].lower():
if params['gain_mask_level'] is not None:
command += 'format.cbf.cspad.gain_mask_value=%f '% params['gain_mask_level']
elif params['format'] == 'pickle':
if params['config'] is not None:
command += 'input.cfg=%s '%params['config']
command += 'dispatch.selected_events=True '
for timestamp_string in ts_list[:self.n_dump]:
command += 'input.timestamp=%s '%timestamp_string
command += '&& dials.image_viewer %s'%\
' '.join(map(str, img_list[:self.n_dump]))
thread = ImageDumpThread(command)
thread.start()
def onDumpImages(self, e):
for idx in range(len(self.should_have_indexed_timestamps)):
params, ts_list = self.should_have_indexed_timestamps[idx]
imgs = self.should_have_indexed_image_paths[idx]
self.dump_timestamps(params, ts_list, imgs)
def onOpenImages(self, e):
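    # Show the strongest indexed images and their reflection files in dials.image_viewer;
    # if the files are not on disk, fall back to dumping the corresponding shots first.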
for idx in range(len(self.strong_indexed_image_timestamps)):
params, ts_list = self.strong_indexed_image_timestamps[idx]
ext = '.' + params['format']
image_paths = self.strong_indexed_image_paths[idx][:self.n_dump]
indexed_paths = [path.split(ext)[0]+'_indexed.refl' for path in image_paths]
if all([os.path.exists(p) for p in (image_paths + indexed_paths)]):
command = str('dials.image_viewer ' + ' '.join(image_paths) + \
' ' + ' '.join(indexed_paths))
thread = ImageDumpThread(command)
thread.start()
else:
shot_paths = [p.split('out')[0] + 'all' + p.split('out')[1].replace('idx', 'shot') \
for p in image_paths]
self.dump_timestamps(params, ts_list, shot_paths)
class UnitCellTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Unit Cell'
self.main = main
self.all_trials = []
self.trial_no = None
self.trial = None
self.tags = []
self.tag_sets = []
self.reject_outliers = True
self.plot_clusters = False
self.auto_update = True
# self.tab_panel = wx.Panel(self, size=(300, 300))
self.tab_sizer = wx.BoxSizer(wx.HORIZONTAL)
# self.tab_panel.SetSizer(self.tab_sizer)
self.selection_columns_panel = wx.Panel(self, size=(230, 120))
self.selection_columns_box = wx.StaticBox(self.selection_columns_panel, label='Select tag sets')
self.selection_columns_sizer = wx.StaticBoxSizer(self.selection_columns_box, wx.VERTICAL)
self.selection_columns_panel.SetSizer(self.selection_columns_sizer)
# self.selection_columns_panel = wx.Panel(self, size=(100, 120))
# self.selection_columns_sizer = wx.BoxSizer(wx.HORIZONTAL)
# self.selection_columns_panel.SetSizer(self.selection_columns_sizer)
self.trial_number = gctr.ChoiceCtrl(self.selection_columns_panel,
label='Trial:',
label_size=(90, -1),
label_style='normal',
ctrl_size=(100, -1),
choices=[])
self.tag_checklist = gctr.CheckListCtrl(self.selection_columns_panel,
label='Available tags:',
label_size=(200, -1),
label_style='normal',
ctrl_size=(150, 100),
direction='vertical',
choices=[])
self.selection_type_radio = gctr.RadioCtrl(self.selection_columns_panel,
name='uc_selection_type',
label='',
label_style='normal',
label_size=(-1, -1),
direction='horizontal',
items={'union':'union',
'inter':'intersection'})
self.add_sele_button = wx.Button(self.selection_columns_panel,
label='Add selection',
size=(200, -1))
self.tag_set_checklist = gctr.CheckListCtrl(self.selection_columns_panel,
label='Tag sets to display:',
label_size=(200, -1),
label_style='normal',
ctrl_size=(150, 100),
direction='vertical',
choices=[])
self.remove_sele_button = wx.Button(self.selection_columns_panel,
label='Remove selection',
size=(200, -1))
self.reset_sele_button = wx.Button(self.selection_columns_panel,
label='Reset selections',
size=(200, -1))
self.chk_reject_outliers = wx.CheckBox(self.selection_columns_panel, label='Reject outliers')
self.chk_reject_outliers.SetValue(True)
self.chk_plot_clusters = wx.CheckBox(self.selection_columns_panel, label='Plot clusters')
self.chk_auto_update = wx.CheckBox(self.selection_columns_panel, label='Auto update')
self.chk_auto_update.SetValue(True)
self.plot_eps = gctr.OptionCtrl(self.selection_columns_panel,
name='uc_plot_eps',
label='Cluster epsilon',
sub_labels=[''],
label_size=(160, -1),
ctrl_size=(50, -1),
items=[('eps', 0.8)])
self.plot_eps.eps.Disable()
try:
import uc_metrics # import dependency
except ImportError:
self.chk_plot_clusters.Hide()
self.plot_eps.Hide()
self.add_sele_sizer = wx.GridBagSizer(4, 1)
self.add_sele_sizer.Add(self.trial_number, pos=(0, 0),
flag=wx.ALL, border=0)
self.add_sele_sizer.Add(self.tag_checklist, pos=(1, 0),
flag=wx.ALL | wx.EXPAND, border=0)
self.add_sele_sizer.Add(self.selection_type_radio, pos=(2, 0),
flag=wx.ALL | wx.ALIGN_CENTER, border=0)
self.add_sele_sizer.Add(self.add_sele_button, pos=(3, 0),
flag=wx.ALL)
self.selection_columns_sizer.Add(self.add_sele_sizer, flag=wx.ALL | wx.EXPAND, border=10)
self.remove_sele_sizer = wx.GridBagSizer(3, 1)
self.remove_sele_sizer.Add(self.tag_set_checklist, pos=(0, 0),
flag=wx.ALL | wx.EXPAND, border=0)
self.remove_sele_sizer.Add(self.remove_sele_button, pos=(1, 0),
flag=wx.ALL)
self.remove_sele_sizer.Add(self.reset_sele_button, pos=(2, 0),
flag=wx.ALL)
self.selection_columns_sizer.Add(self.remove_sele_sizer, flag=wx.ALL | wx.EXPAND, border=10)
self.selection_columns_sizer.Add(self.chk_reject_outliers, flag=wx.ALL | wx.EXPAND, border=10)
self.selection_columns_sizer.Add(self.chk_plot_clusters, flag=wx.ALL | wx.EXPAND, border=10)
self.selection_columns_sizer.Add(self.plot_eps, flag=wx.ALL | wx.EXPAND, border=10)
self.selection_columns_sizer.Add(self.chk_auto_update, flag=wx.ALL | wx.EXPAND, border=10)
self.unit_cell_panel = wx.Panel(self, size=(200, 120))
self.unit_cell_box = wx.StaticBox(self.unit_cell_panel, label='Unit cell analysis')
self.unit_cell_panelsize = self.unit_cell_box.GetSize()
self.unit_cell_sizer = wx.StaticBoxSizer(self.unit_cell_box, wx.VERTICAL)
self.unit_cell_panel.SetSizer(self.unit_cell_sizer)
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import (
FigureCanvasWxAgg as FigureCanvas,
NavigationToolbar2WxAgg as NavigationToolbar)
self.figure = mpl.figure.Figure()
self.canvas = FigureCanvas(self.unit_cell_box, -1, self.figure)
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.Realize()
self.unit_cell_sizer.Add(self.canvas, 1, wx.EXPAND)
self.unit_cell_sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# self.main_sizer.Add(self.selection_columns_panel, 1,
# flag=wx.EXPAND | wx.ALL, border=10)
# self.main_sizer.Add(self.unit_cell_panel, 1,
# flag=wx.EXPAND | wx.ALL, border=10)
self.tab_sizer.Add(self.selection_columns_panel, 0,
flag=wx.ALIGN_LEFT | wx.EXPAND, border=10)
self.tab_sizer.Add(self.unit_cell_panel, 1,
flag=wx.EXPAND | wx.ALL, border=0)
self.main_sizer.Add(self.tab_sizer, 1,
flag=wx.EXPAND | wx.ALL, border=10)
self.selection_columns_sizer.Layout()
self.Bind(wx.EVT_CHOICE, self.onTrialChoice, self.trial_number.ctr)
self.Bind(wx.EVT_BUTTON, self.onAddTagSet, self.add_sele_button)
self.Bind(wx.EVT_BUTTON, self.onRemoveTagSet, self.remove_sele_button)
self.Bind(wx.EVT_BUTTON, self.onResetTagSets, self.reset_sele_button)
self.Bind(wx.EVT_CHECKBOX, self.onChkRejectOutliers, self.chk_reject_outliers)
self.Bind(wx.EVT_CHECKBOX, self.onChkPlotClusters, self.chk_plot_clusters)
self.Bind(wx.EVT_CHECKBOX, self.onChkAutoUpdate, self.chk_auto_update)
self.Bind(EVT_UNITCELL_REFRESH, self.onRefresh)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnSize(self, e):
self.unit_cell_panelsize = self.unit_cell_box.GetSize()
e.Skip()
def find_trials(self):
all_db_trials = [str(i.trial) for i in self.main.db.get_all_trials()]
new_trials = [i for i in all_db_trials if i not in self.all_trials]
if len(new_trials) > 0:
self.trial_number.ctr.Clear()
self.all_trials = [None] + all_db_trials
for trial in self.all_trials:
if trial is not None:
entry = trial
self.trial_number.ctr.Append(entry)
item_idx = self.trial_number.ctr.FindString(entry)
self.trial_number.ctr.SetClientData(item_idx, trial)
else:
entry = 'None'
self.trial_number.ctr.Append(entry)
self.trial_number.ctr.SetClientData(0, None)
if self.trial_no is not None:
self.trial_number.ctr.SetSelection(self.trial_no)
else:
self.trial_number.ctr.SetSelection(0)
def onTrialChoice(self, e):
trial_idx = self.trial_number.ctr.GetSelection()
if trial_idx == 0:
self.trial_no = None
self.trial = None
elif self.trial_number.ctr.GetClientData(trial_idx) != self.trial_no:
self.trial_no = int(self.trial_number.ctr.GetClientData(trial_idx))
self.trial = self.main.db.get_trial(trial_number=self.trial_no)
self.find_tags()
def find_tags(self):
self.tag_checklist.ctr.Clear()
if self.trial is not None:
self.tags = self.trial.tags
tag_names = [t.name for t in self.tags]
if tag_names:
self.tag_checklist.ctr.InsertItems(items=tag_names, pos=0)
self.refresh_tag_sets()
def onAddTagSet(self, e):
checked_items = self.tag_checklist.ctr.GetCheckedStrings()
selected_tags = [i for i in self.main.db.get_all_tags()
if i.name in checked_items]
if self.selection_type_radio.union.GetValue() == 1:
mode = 'union'
else:
mode = 'intersection'
tag_set = TagSet(mode, selected_tags)
self.tag_sets.append(tag_set)
self.refresh_tag_sets()
def refresh_tag_sets(self):
self.tag_set_checklist.ctr.Clear()
tag_set_strings = [str(ts) for ts in self.tag_sets]
if tag_set_strings:
self.tag_set_checklist.ctr.InsertItems(items = tag_set_strings, pos=0)
def onRemoveTagSet(self, e):
all_items = self.tag_set_checklist.ctr.GetStrings()
checked_items = self.tag_set_checklist.ctr.GetCheckedStrings()
selected_tag_sets = [ts for ts in self.tag_sets if str(ts) in checked_items]
for ts in selected_tag_sets:
idx = all_items.index(str(ts))
self.tag_set_checklist.ctr.Delete(idx)
self.tag_sets.remove(ts)
def onResetTagSets(self, e):
self.tag_set_checklist.ctr.Clear()
self.tag_sets = []
self.selected_tag_sets = []
def onChkRejectOutliers(self, e):
self.reject_outliers = self.chk_reject_outliers.GetValue()
def onChkPlotClusters(self, e):
self.plot_clusters = self.chk_plot_clusters.GetValue()
if self.plot_clusters:
self.plot_eps.eps.Enable()
else:
self.plot_eps.eps.Disable()
def onChkAutoUpdate(self, e):
self.auto_update = self.chk_auto_update.GetValue()
if self.auto_update and (self.main.unitcell_sentinel is None or not self.main.unitcell_sentinel.active):
self.main.start_unitcell_sentinel()
else:
self.main.stop_unitcell_sentinel()
def onRefresh(self, e):
self.find_trials()
class DatasetTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Datasets'
self.main = main
self.show_active_only = False
self.dataset_panel = ScrolledPanel(self, size=(200, 200))
self.dataset_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.dataset_panel.SetSizer(self.dataset_sizer)
self.btn_sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.btn_sizer.AddGrowableCol(0)
self.btn_add_dataset = wx.Button(self, label='New Dataset', size=(120, -1))
self.btn_active_only = wx.ToggleButton(self,
label='Show Only Active Datasets',
size=(180, self.btn_add_dataset.GetSize()[1]))
self.btn_sizer.Add(self.btn_active_only, flag=wx.ALIGN_RIGHT)
self.btn_sizer.Add(self.btn_add_dataset)
self.main_sizer.Add(self.dataset_panel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.btn_sizer, flag=wx.EXPAND | wx.ALL, border=10)
# Bindings
self.Bind(wx.EVT_BUTTON, self.onAddDataset, self.btn_add_dataset)
self.Bind(wx.EVT_TOGGLEBUTTON, self.onActiveOnly, self.btn_active_only)
#self.refresh_datasets()
def refresh_datasets(self):
self.dataset_sizer.Clear(delete_windows=True)
self.all_datasets = self.main.db.get_all_datasets()
for dataset in self.all_datasets:
if self.show_active_only:
if dataset.active:
self.add_dataset(dataset=dataset)
else:
self.add_dataset(dataset=dataset)
self.dataset_panel.SetSizer(self.dataset_sizer)
self.dataset_sizer.Layout()
self.dataset_panel.SetupScrolling(scrollToTop=False)
def add_dataset(self, dataset):
new_dataset = DatasetPanel(self.dataset_panel,
db=self.main.db,
dataset=dataset)
new_dataset.chk_active.SetValue(dataset.active)
new_dataset.refresh_dataset()
self.dataset_sizer.Add(new_dataset, flag=wx.EXPAND | wx.ALL, border=10)
def onAddDataset(self, e):
new_dataset_dlg = dlg.DatasetDialog(self, db=self.main.db)
new_dataset_dlg.Fit()
if new_dataset_dlg.ShowModal() == wx.ID_OK:
self.refresh_datasets()
def onActiveOnly(self, e):
self.show_active_only = self.btn_active_only.GetValue()
self.refresh_datasets()
class MergingStatsTab(BaseTab):
def __init__(self, parent, main):
BaseTab.__init__(self, parent=parent)
self.name = 'Merging stats'
self.main = main
self.all_datasets = []
self.dataset_versions = []
self.png = None
self.static_bitmap = None
self.redraw_windows = True
self.tab_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.datasets_panel = wx.Panel(self, size=(240, 120))
self.datasets_box = wx.StaticBox(self.datasets_panel, label='Select dataset')
self.datasets_sizer = wx.StaticBoxSizer(self.datasets_box, wx.VERTICAL)
self.datasets_panel.SetSizer(self.datasets_sizer)
self.datasets = wx.ListBox(self.datasets_panel,
size=(220, 100))
self.datasets_sizer.Add(self.datasets, flag=wx.EXPAND | wx.ALL, border = 5)
self.chk_active = wx.CheckBox(self.datasets_panel, label='Active only')
self.chk_active.SetValue(True)
self.datasets_sizer.Add(self.chk_active, flag=wx.EXPAND | wx.ALL, border = 5)
self.dataset_version = gctr.ChoiceCtrl(self.datasets_panel,
label='Dataset version:',
label_size=(120, -1),
label_style='normal',
ctrl_size=(100, -1),
choices=[])
self.datasets_sizer.Add(self.dataset_version, flag=wx.EXPAND | wx.ALL, border = 5)
self.plots_panel = wx.Panel(self, size=(200, 120))
self.mergingstats_panelsize = self.plots_panel.GetSize()
self.plots_box = wx.StaticBox(self.plots_panel, label='Statistics')
self.plots_sizer = wx.StaticBoxSizer(self.plots_box, wx.VERTICAL)
self.plots_panel.SetSizer(self.plots_sizer)
self.tab_sizer.Add(self.datasets_panel, 0,
flag=wx.ALIGN_LEFT | wx.EXPAND, border=10)
self.tab_sizer.Add(self.plots_panel, 1,
flag=wx.EXPAND | wx.ALL, border=0)
self.main_sizer.Add(self.tab_sizer, 1,
flag=wx.EXPAND | wx.ALL, border=10)
self.Bind(wx.EVT_LISTBOX, self.onSelectDataset, self.datasets)
self.Bind(wx.EVT_CHOICE, self.onVersionChoice, self.dataset_version.ctr)
self.Bind(EVT_MERGINGSTATS_REFRESH, self.onRefresh)
self.chk_active.Bind(wx.EVT_CHECKBOX, self.onToggleActivity)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnSize(self, e):
self.mergingstats_panelsize = self.plots_panel.GetSize()
e.Skip()
def onToggleActivity(self, e):
self.refresh_datasets()
def refresh_datasets(self):
self.datasets.Clear()
self.all_datasets = self.main.db.get_all_datasets()
if self.chk_active.GetValue():
self.all_datasets = [d for d in self.all_datasets if d.active]
for dataset in self.all_datasets:
self.datasets.Append(dataset.name)
self.refresh_dataset()
def onVersionChoice(self, e):
self.refresh_stats()
def onSelectDataset(self, e):
self.refresh_dataset()
def refresh_dataset(self):
self.dataset_version.ctr.Clear()
sel = self.datasets.GetSelection()
if sel < 0: return
try:
dataset = self.all_datasets[sel]
except IndexError:
pass
else:
self.dataset_version.ctr.Append('All')
for version in dataset.versions:
self.dataset_version.ctr.Append(str(version.version))
self.dataset_version.ctr.SetSelection(0)
self.refresh_stats()
def refresh_stats(self):
    sel = self.datasets.GetSelection()
    if sel < 0: return
    dataset = self.all_datasets[sel]
self.dataset_name = dataset.name
if self.dataset_version.ctr.GetSelection() == 0:
self.dataset_versions = [version.output_path() for version in dataset.versions]
else:
version = dataset.versions[self.dataset_version.ctr.GetSelection()-1]
self.dataset_name += " v%03d"%version.version
self.dataset_versions = [version.output_path()]
def onRefresh(self, e):
self.plot_merging_stats()
def plot_merging_stats(self):
if self.png is not None:
if self.static_bitmap is not None:
try:
self.static_bitmap.Destroy()
except RuntimeError as e:
if "StaticBitmap has been deleted" not in str(e):
raise
img = wx.Image(self.png, wx.BITMAP_TYPE_ANY)
self.static_bitmap = wx.StaticBitmap(
self.plots_panel, wx.ID_ANY, wx.Bitmap(img))
self.plots_sizer.Add(self.static_bitmap, 0, wx.EXPAND | wx.ALL, 3)
self.plots_panel.SetSizer(self.plots_sizer)
self.plots_panel.Layout()
class MergeTab(BaseTab):
def __init__(self, parent, main, prefix='prime'):
BaseTab.__init__(self, parent=parent)
self.name = 'Merge'
self.main = main
self.prefix = prefix
self.prime_filename = '{}.phil'.format(self.prefix)
self.output = self.main.params.output_folder
self.run_paths = []
self.trial_no = None
self.all_trials = []
self.all_tags = []
self.selected_tags = []
self.run_paths = []
self.prime_panel = PRIMEInputWindow(self)
self.toolbar = wx.ToolBar(self, style=wx.TB_HORZ_TEXT | wx.TB_FLAT)
self.tb_btn_def = self.toolbar.AddTool(wx.ID_ANY, label=' Defaults',
bitmap=wx.Bitmap('{}/24x24/def.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Default Settings',
longHelp='Generate default PRIME settings')
self.tb_btn_load = self.toolbar.AddTool(wx.ID_OPEN, label=' Load PHIL',
bitmap=wx.Bitmap('{}/24x24/open.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Load PHIL file',
longHelp='Load PHIL file with PRIME settings')
self.tb_btn_save = self.toolbar.AddTool(wx.ID_SAVE, label=' Save PHIL',
bitmap=wx.Bitmap('{}/24x24/save.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Save PHIL file',
longHelp='Save PHIL file with PRIME settings')
self.tb_btn_cmd = self.toolbar.AddTool(wx.ID_ANY, label=' Command',
bitmap=wx.Bitmap('{}/24x24/term.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='PRIME Command',
longHelp='Output PRIME command to stdout')
self.toolbar.EnableTool(self.tb_btn_cmd.GetId(), False)
self.toolbar.AddSeparator()
self.tb_btn_run = self.toolbar.AddTool(wx.ID_ANY, label=' Run PRIME',
bitmap=wx.Bitmap('{}/24x24/run.png'.format(icons)),
bmpDisabled=wx.NullBitmap,
shortHelp='Run PRIME',
longHelp='Scale, merge and post-refine with PRIME')
self.toolbar.EnableTool(self.tb_btn_run.GetId(), False)
self.toolbar.Realize()
# Modify PRIME input window to hide input control
self.prime_panel.inp_box.Hide()
self.prime_panel.out_box.ctr.SetValue(self.output)
# Input box
self.input_panel = wx.Panel(self)
input_box = wx.StaticBox(self.input_panel, label='PRIME Input')
self.input_box_sizer = wx.StaticBoxSizer(input_box, wx.HORIZONTAL)
self.input_panel.SetSizer(self.input_box_sizer)
self.trial_number = gctr.ChoiceCtrl(self.input_panel,
label='Trial:',
label_size=(80, -1),
label_style='normal',
ctrl_size=(140, -1),
choices=[])
self.tag_title = wx.StaticText(self.input_panel, label='Tags:')
self.tag_list = gctr.CheckListCtrl(self.input_panel,
ctrl_size=(200, 100),
choices=[],
direction='vertical')
self.opt_prefix = gctr.OptionCtrl(self.input_panel,
label='List prefix:',
label_size=(80, -1),
ctrl_size=(140, -1),
items=[('prefix', 'prime')])
self.input_number = wx.StaticText(self.input_panel,
label='0 images in 0 folders:')
self.input_list = wx.TextCtrl(self.input_panel,
style=wx.TE_MULTILINE | wx.TE_READONLY)
self.trial_tag_sizer = wx.GridBagSizer(2, 3)
self.trial_tag_sizer.Add(self.opt_prefix, pos=(0, 0))
self.trial_tag_sizer.Add(self.trial_number, pos=(1, 0),
flag=wx.TOP, border=10)
self.trial_tag_sizer.Add(self.tag_title, pos=(0, 1),
flag=wx.LEFT | wx.EXPAND,
border=15)
self.trial_tag_sizer.Add(self.tag_list, pos=(1, 1),
flag=wx.LEFT | wx.EXPAND,
border=15)
self.trial_tag_sizer.Add(self.input_number, pos=(0, 2),
flag=wx.LEFT | wx.EXPAND | wx.ALIGN_RIGHT,
border=15)
self.trial_tag_sizer.Add(self.input_list, pos=(1, 2),
flag=wx.LEFT | wx.EXPAND | wx.ALIGN_RIGHT,
border=15)
self.input_box_sizer.Add(self.trial_tag_sizer, 1, flag=wx.ALL | wx.EXPAND,
border=10)
self.trial_tag_sizer.AddGrowableCol(2)
self.trial_tag_sizer.AddGrowableRow(1)
self.main_sizer.Add(self.toolbar, border=10,
flag=wx.EXPAND | wx.LEFT | wx.RIGHT)
self.main_sizer.Add(self.input_panel, proportion=1,
flag=wx.ALL | wx.EXPAND, border=10)
self.main_sizer.Add(self.prime_panel, border=10,
flag=wx.RIGHT | wx.LEFT | wx.BOTTOM | wx.EXPAND)
self.Bind(wx.EVT_TEXT, self.onInput, self.input_list)
#self.Bind(wx.EVT_BUTTON, self.onIsoRef, self.prime_panel.ref_box.btn_browse)
#self.Bind(wx.EVT_TEXT, self.onIsoRef, self.prime_panel.ref_box.ctr)
self.Bind(wx.EVT_CHOICE, self.onTrialChoice, self.trial_number.ctr)
self.Bind(wx.EVT_CHECKLISTBOX, self.onTagCheck, self.tag_list.ctr)
self.Bind(wx.EVT_TOOL, self.onRun, self.tb_btn_run)
self.Bind(wx.EVT_TOOL, self.onRun, self.tb_btn_cmd)
self.Bind(wx.EVT_TOOL, self.onLoad, self.tb_btn_load)
self.Bind(wx.EVT_TOOL, self.onSave, self.tb_btn_save)
def onTagCheck(self, e):
checked_items = self.tag_list.ctr.GetCheckedStrings()
self.selected_tags = [i for i in self.main.db.get_all_tags() if i.name
in checked_items]
self.find_integrated_pickles()
def onTrialChoice(self, e):
trial_idx = self.trial_number.ctr.GetSelection()
    if self.trial_number.ctr.GetClientData(trial_idx) is None:
self.toolbar.EnableTool(self.tb_btn_run.GetId(), False)
self.toolbar.EnableTool(self.tb_btn_cmd.GetId(), False)
self.tag_list.ctr.Clear()
self.input_list.SetValue('')
elif self.trial_number.ctr.GetClientData(trial_idx) != self.trial_no:
self.trial_no = self.trial_number.ctr.GetClientData(trial_idx)
self.trial = self.main.db.get_trial(trial_number=int(self.trial_no))
self.find_tags()
self.find_integrated_pickles()
def find_tags(self):
self.tag_list.ctr.Clear()
self.tags = []
self.tag_names = []
tag_ids = []
for run in self.trial.runs:
for tag in run.tags:
if tag.id not in tag_ids:
self.tags.append(tag)
tag_ids.append(tag.id)
self.tag_names.append(tag.name)
self.tag_title.SetLabel('Tags for trial {}:'.format(self.trial.trial))
if self.tag_names:
self.tag_list.ctr.InsertItems(items=self.tag_names, pos=0)
def find_trials(self):
all_db_trials = [str(i.trial) for i in self.main.db.get_all_trials()]
new_trials = [i for i in all_db_trials if i not in self.all_trials]
if len(new_trials) > 0:
self.trial_number.ctr.Clear()
self.all_trials = [None] + \
[str(i.trial) for i in self.main.db.get_all_trials()]
for trial in self.all_trials:
if trial is not None:
entry = 'Trial {}'.format(trial)
self.trial_number.ctr.Append(entry)
item_idx = self.trial_number.ctr.FindString(entry)
self.trial_number.ctr.SetClientData(item_idx, trial)
else:
entry = '-- select a trial --'
self.trial_number.ctr.Append(entry)
self.trial_number.ctr.SetClientData(0, None)
if self.trial_no is not None:
self.trial_number.ctr.SetSelection(self.trial_no)
else:
self.trial_number.ctr.SetSelection(0)
def find_integrated_pickles(self):
# Find runblock paths associated with the trial
run_numbers = []
run_ids = []
self.run_paths = []
if self.main.params.dispatcher == "cxi.xtc_process": #LABELIT backend
integration_dir = "integration"
else:
integration_dir = "out"
for rb in self.trial.rungroups:
for run in rb.runs:
if run.run not in run_numbers:
if len(self.selected_tags) == 0:
self.run_paths.append(os.path.join(
get_run_path(self.output, self.trial, rb, run), integration_dir))
run_numbers.append(run.run)
else:
for tag_id in [int(t.id) for t in self.selected_tags]:
if tag_id in [int(t.id) for t in run.tags]:
run_ids.append(int(run.id))
self.run_paths.append(os.path.join(
get_run_path(self.output, self.trial, rb, run), integration_dir))
break
# Display paths in input list text control
input_paths = '\n'.join(self.run_paths)
self.input_list.SetValue(input_paths)
# Find appropriate integration pickles in runblock paths
self.all_pickles = []
for path in self.run_paths:
try:
pickles = [os.path.join(path, i) for i in os.listdir(path) if
i.endswith('pickle') and 'int-' in i]
self.all_pickles = self.all_pickles + pickles
except OSError as error:
print('Folder not found: {}'.format(path))
continue
self.input_number.SetLabel('{} images in {} folders:'
''.format(len(self.all_pickles),
len(self.run_paths)))
def onInput(self, e):
self.toolbar.EnableTool(self.tb_btn_run.GetId(), True)
self.toolbar.EnableTool(self.tb_btn_cmd.GetId(), True)
def onLoad(self, e):
# Extract params from file
load_dlg = wx.FileDialog(self,
message="Load script file",
defaultDir=os.curdir,
defaultFile="*.phil",
wildcard="*.phil",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
)
if load_dlg.ShowModal() == wx.ID_OK:
script = load_dlg.GetPaths()[0]
out_dir = os.path.dirname(script)
self.prime_filename = os.path.basename(script)
self.load_script(out_dir=out_dir)
load_dlg.Destroy()
def load_script(self, out_dir):
''' Loads PRIME script '''
import iotbx.phil as ip
script = os.path.join(out_dir, self.prime_filename)
user_phil = ip.parse(open(script).read())
self.pparams = master_phil.fetch(sources=[user_phil]).extract()
self.prime_panel.pparams = self.pparams
if len(self.pparams.data) > 0:
self.prime_panel.inp_box.ctr.SetValue(str(self.pparams.data[0]))
current_dir = os.path.dirname(self.pparams.run_no)
self.prime_panel.out_box.ctr.SetValue(str(current_dir))
    if str(self.prime_panel.out_box.ctr.GetValue()).lower() == '':
self.prime_panel.out_box.ctr.SetValue(self.out_dir)
if str(self.pparams.title).lower() != 'none':
self.prime_panel.title_box.ctr.SetValue(str(self.pparams.title))
if str(self.pparams.hklisoin).lower() != 'none':
self.prime_panel.ref_box.ctr.SetValue(str(self.pparams.hklisoin))
elif str(self.pparams.hklrefin).lower() != 'none':
self.prime_panel.ref_box.ctr.SetValue(str(self.pparams.hklrefin))
self.prime_panel.opt_chk_useref.SetValue(True)
if str(self.pparams.n_residues).lower() == 'none':
self.prime_panel.opt_spc_nres.SetValue(500)
else:
self.prime_panel.opt_spc_nres.SetValue(int(self.pparams.n_residues))
self.prime_panel.opt_spc_nproc.SetValue(int(self.pparams.n_processors))
def onSave(self, e):
self.init_settings()
# Generate text of params
final_phil = master_phil.format(python_object=self.pparams)
with Capturing() as txt_output:
final_phil.show()
txt_out = ''
for one_output in txt_output:
txt_out += one_output + '\n'
# Save param file
save_dlg = wx.FileDialog(self,
message="Save PRIME Script",
defaultDir=os.curdir,
defaultFile="*.phil",
wildcard="*.phil",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
)
if save_dlg.ShowModal() == wx.ID_OK:
with open(save_dlg.GetPath(), 'w') as savefile:
savefile.write(txt_out)
def onIsoRef(self, e):
if self.prime_panel.ref_box.ctr.GetValue() != '':
self.prime_panel.opt_chk_useref.Enable()
else:
self.prime_panel.opt_chk_useref.Disable()
def init_settings(self):
# Determine where/what PRIME folders are
prime_dir = os.path.join(self.output, 'prime')
self.working_dir = os.path.join(prime_dir, 'trial_{}'.format(self.trial_no))
if not os.path.exists(prime_dir):
os.mkdir(prime_dir)
if not os.path.exists(self.working_dir):
os.mkdir(self.working_dir)
# Write list of pickles to file
list_prefix = self.opt_prefix.prefix.GetValue()
    if list_prefix is None or list_prefix == '':
list_prefix = 'prime'
self.pickle_path_file = os.path.join(self.working_dir,
'{}_trial_{}.lst'.format(list_prefix, self.trial_no))
print('Saving list of pickles to ', self.pickle_path_file)
with open(self.pickle_path_file, 'w') as lfile:
for pickle in self.all_pickles:
lfile.write('{}\n'.format(pickle))
self.pparams = self.prime_panel.pparams
self.pparams.data = [self.pickle_path_file]
self.pparams.run_no = set_base_dir(out_dir=self.working_dir)
self.out_dir = self.prime_panel.out_box.ctr.GetValue()
self.pparams.title = self.prime_panel.title_box.ctr.GetValue()
if str(self.prime_panel.ref_box.ctr.GetValue()).lower() != '':
self.pparams.hklisoin = self.prime_panel.ref_box.ctr.GetValue()
if self.prime_panel.opt_chk_useref.GetValue():
self.pparams.hklrefin = self.prime_panel.ref_box.ctr.GetValue()
self.pparams.n_residues = self.prime_panel.opt_spc_nres.GetValue()
self.pparams.n_processors = self.prime_panel.opt_spc_nproc.GetValue()
def onRun(self, e):
# Run full processing
from xfel.util.mp import get_lsf_submit_command
from xfel.ui import settings_dir
import datetime
import copy
params = copy.deepcopy(self.main.params)
params.mp.nproc = self.prime_panel.opt_spc_nproc.GetValue()
# Generate script filename (w/ timestamp)
ts = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())
script_filename = 'trial_{:03d}_{}.sh'.format(int(self.trial_no), ts)
self.init_settings()
prime_phil = master_phil.format(python_object=self.pparams)
with Capturing() as output:
prime_phil.show()
txt_out = ''
for one_output in output:
txt_out += one_output + '\n'
prime_file = os.path.join(settings_dir, self.prime_filename)
out_file = os.path.join(self.working_dir, 'stdout.log')
with open(prime_file, 'w') as pf:
pf.write(txt_out)
if params.mp.method == 'local':
command=None
else:
job_name = 'prime_t{}'.format(self.trial_no)
cmd = '-J {} prime.postrefine {}'.format(job_name, prime_file)
submit_path = os.path.join(settings_dir, script_filename)
command = str(get_lsf_submit_command(cmd, submit_path, self.working_dir,
params.mp)())
if e.GetId() == self.tb_btn_run.GetId():
self.prime_run_window = PRIMERunWindow(self, -1,
title='PRIME Output',
params=self.pparams,
prime_file=prime_file,
# out_file=out_file,
mp_method=params.mp.method,
command=command)
self.prime_run_window.prev_pids = easy_run.fully_buffered('pgrep -u {} {}'
''.format(user, 'python')).stdout_lines
self.prime_run_window.Show(True)
elif e.GetId() == self.tb_btn_cmd.GetId():
print('Submission command:')
print(command)
# Try and write files to created folder
# ------------------------------- UI Elements -------------------------------- #
class TrialPanel(wx.Panel):
''' A scrolled panel that contains run blocks and trial controls '''
def __init__(self, parent, db, trial, box_label=None):
wx.Panel.__init__(self, parent=parent, size=(270, 200))
self.db = db
self.trial = trial
self.parent = parent
trial_box = wx.StaticBox(self, label=box_label)
self.main_sizer = wx.StaticBoxSizer(trial_box, wx.VERTICAL)
self.block_panel = ScrolledPanel(self, size=(150, 180))
self.block_sizer = wx.BoxSizer(wx.VERTICAL)
self.block_panel.SetSizer(self.block_sizer)
self.one_block_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.add_panel = wx.Panel(self)
self.add_sizer = wx.BoxSizer(wx.VERTICAL)
self.add_panel.SetSizer(self.add_sizer)
# Add "New Block" button to a separate sizer (so it is always on bottom)
self.btn_add_block = wx.Button(self.add_panel, label='New Block',
size=(200, -1))
self.btn_select_blocks = wx.Button(self.add_panel, label='Select Blocks',
size=(200, -1))
self.btn_view_phil = gctr.BitmapButton(self.add_panel, name='btn_view_phil',
bitmap=wx.Bitmap('{}/16x16/viewmag.png'.format(icons)))
self.chk_active = wx.CheckBox(self.add_panel, label='Active Trial')
self.view_sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.view_sizer.Add(self.btn_view_phil)
self.view_sizer.Add(self.chk_active, flag=wx.EXPAND)
self.add_sizer.Add(self.btn_add_block,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=10)
self.add_sizer.Add(self.btn_select_blocks,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=10)
self.add_sizer.Add(self.view_sizer,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT,
border=10)
self.main_sizer.Add(self.block_panel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.add_panel, flag=wx.ALL, border=5)
# Bindings
self.Bind(wx.EVT_BUTTON, self.onAddBlock, self.btn_add_block)
self.Bind(wx.EVT_BUTTON, self.onSelectBlocks, self.btn_select_blocks)
self.Bind(wx.EVT_BUTTON, self.onViewPHIL, self.btn_view_phil)
self.chk_active.Bind(wx.EVT_CHECKBOX, self.onToggleActivity)
self.SetSizer(self.main_sizer)
def onViewPHIL(self, e):
view_dlg = dlg.TrialDialog(self, db=self.db, trial=self.trial, new=False)
view_dlg.Fit()
view_dlg.ShowModal()
view_dlg.Destroy()
def onToggleActivity(self, e):
if self.chk_active.GetValue():
self.trial.active = True
else:
self.trial.active = False
def onAddBlock(self, e):
rblock_dlg = dlg.RunBlockDialog(self, trial=self.trial,
db=self.db)
rblock_dlg.Fit()
if (rblock_dlg.ShowModal() == wx.ID_OK):
self.refresh_trial()
rblock_dlg.Destroy()
def onSelectBlocks(self, e):
rblocksel_dlg = dlg.SelectRunBlocksDialog(self, trial=self.trial,
db=self.db)
rblocksel_dlg.Fit()
if (rblocksel_dlg.ShowModal() == wx.ID_OK):
self.refresh_trial()
rblocksel_dlg.Destroy()
def refresh_trial(self):
self.block_sizer.Clear(delete_windows=True)
self.active_blocks = self.trial.rungroups
for block in self.active_blocks:
self.draw_block_button(block)
self.block_panel.Layout()
self.block_panel.SetupScrolling(scrollToTop=False)
def draw_block_button(self, block):
''' Add new run block button '''
new_block = gctr.RunBlock(self.block_panel, block=block)
self.Bind(wx.EVT_BUTTON, self.onRunBlockOptions, new_block.new_runblock)
self.block_sizer.Add(new_block,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=5)
def onRunBlockOptions(self, e):
''' Open dialog and change run_block options '''
run_block = e.GetEventObject().block
rblock_dlg = dlg.RunBlockDialog(self, block=run_block,
db=self.db)
rblock_dlg.Fit()
if (rblock_dlg.ShowModal() == wx.ID_OK):
wx.CallAfter(self.refresh_trial)
rblock_dlg.Destroy()
class DatasetPanel(wx.Panel):
''' A scrolled panel that contains dataset and task controls '''
def __init__(self, parent, db, dataset, box_label=""):
wx.Panel.__init__(self, parent=parent, size=(270, 200))
self.db = db
self.dataset = dataset
self.parent = parent
self.dataset_box = wx.StaticBox(self, label=box_label)
self.main_sizer = wx.StaticBoxSizer(self.dataset_box, wx.VERTICAL)
self.dataset_comment = wx.StaticText(self)
self.task_panel = ScrolledPanel(self, size=(150, 180))
self.task_sizer = wx.BoxSizer(wx.VERTICAL)
self.task_panel.SetSizer(self.task_sizer)
self.one_task_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.add_panel = wx.Panel(self)
self.add_sizer = wx.BoxSizer(wx.VERTICAL)
self.add_panel.SetSizer(self.add_sizer)
# Add "New task" button to a separate sizer (so it is always on bottom)
self.btn_add_task = wx.Button(self.add_panel, label='New Task',
size=(200, -1))
self.btn_select_tasks = wx.Button(self.add_panel, label='Select Tasks',
size=(200, -1))
self.btn_edit_dataset = wx.BitmapButton(self.add_panel,
bitmap=wx.Bitmap('{}/16x16/viewmag.png'.format(icons)))
self.chk_active = wx.CheckBox(self.add_panel, label='Active Dataset')
self.chk_sizer = wx.FlexGridSizer(1, 2, 0, 10)
self.chk_sizer.Add(self.btn_edit_dataset)
self.chk_sizer.Add(self.chk_active, flag=wx.EXPAND)
self.add_sizer.Add(self.btn_add_task,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=10)
self.add_sizer.Add(self.btn_select_tasks,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=10)
self.add_sizer.Add(self.chk_sizer,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_LEFT,
border=10)
self.main_sizer.Add(self.dataset_comment, 0, flag=wx.ALL, border=10)
self.main_sizer.Add(self.task_panel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.main_sizer.Add(self.add_panel, flag=wx.ALL, border=5)
# Bindings
self.Bind(wx.EVT_BUTTON, self.onAddTask, self.btn_add_task)
self.Bind(wx.EVT_BUTTON, self.onSelectTasks, self.btn_select_tasks)
self.Bind(wx.EVT_BUTTON, self.onEditDataset, self.btn_edit_dataset)
self.chk_active.Bind(wx.EVT_CHECKBOX, self.onToggleActivity)
self.SetSizer(self.main_sizer)
def onToggleActivity(self, e):
if self.chk_active.GetValue():
self.dataset.active = True
else:
self.dataset.active = False
def onAddTask(self, e):
task_dlg = dlg.TaskDialog(self, dataset=self.dataset,
db=self.db)
task_dlg.Fit()
if (task_dlg.ShowModal() == wx.ID_OK):
self.refresh_dataset()
task_dlg.Destroy()
def onSelectTasks(self, e):
tasksel_dlg = dlg.SelectTasksDialog(self, dataset=self.dataset,
db=self.db)
tasksel_dlg.Fit()
if (tasksel_dlg.ShowModal() == wx.ID_OK):
self.refresh_dataset()
tasksel_dlg.Destroy()
def refresh_dataset(self):
self.dataset_comment.SetLabel(self.dataset.comment if self.dataset.comment is not None else "")
self.dataset_box.SetLabel('Dataset {} {}'.format(self.dataset.dataset_id,
self.dataset.name[:min(len(self.dataset.name), 20)]
if self.dataset.name is not None else ""))
self.task_sizer.Clear(delete_windows=True)
tags = self.dataset.tags
if tags:
tags_text = "Tags: " + ",".join([t.name for t in tags])
else:
tags_text = "No tags selected"
label = wx.StaticText(self.task_panel, label = tags_text)
self.task_sizer.Add(label,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=5)
for task in self.dataset.tasks:
self.draw_task_button(task)
self.task_panel.Layout()
self.task_panel.SetupScrolling(scrollToTop=False)
def draw_task_button(self, task):
''' Add new run block button '''
new_task = gctr.TaskCtrl(self.task_panel, task=task)
self.Bind(wx.EVT_BUTTON, self.onTaskOptions, new_task.new_task)
self.task_sizer.Add(new_task,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER,
border=5)
def onTaskOptions(self, e):
''' Open dialog and change task options '''
task = e.GetEventObject().task
task_dlg = dlg.TaskDialog(self, task=task,
db=self.db)
task_dlg.Fit()
if (task_dlg.ShowModal() == wx.ID_OK):
wx.CallAfter(self.refresh_dataset)
task_dlg.Destroy()
def onEditDataset(self, e):
new_dataset_dlg = dlg.DatasetDialog(self, db=self.db, dataset=self.dataset, new=False)
new_dataset_dlg.Fit()
if new_dataset_dlg.ShowModal() == wx.ID_OK:
self.refresh_dataset()
class RunEntry(wx.Panel):
''' Adds run row to table, with average and view buttons'''
def __init__(self, parent, run, params, label_width = None):
self.run = run
self.params = params
wx.Panel.__init__(self, parent=parent)
if label_width is None: label_width = 60
self.sizer = wx.FlexGridSizer(1, 4, 0, 10)
run_no = wx.StaticText(self, label=str(run),
size=(label_width, -1))
self.tag_button = gctr.TagButton(self, run=run)
self.avg_button = wx.Button(self, label='Average')
self.view_button = wx.Button(self, label='View')
self.view_button.Hide()
self.sizer.Add(run_no, flag=wx.EXPAND)
self.sizer.Add(self.tag_button, flag=wx.EXPAND)
self.sizer.AddGrowableCol(1)
self.sizer.Add(self.avg_button)
self.sizer.Add(self.view_button, flag=wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
# Button Bindings
self.Bind(wx.EVT_BUTTON, self.onTagButton, self.tag_button)
self.Bind(wx.EVT_BUTTON, self.onAvgButton, self.avg_button)
self.Bind(wx.EVT_BUTTON, self.onViewButton, self.view_button)
self.SetSizer(self.sizer)
def onTagButton(self, e):
self.tag_button.change_tags()
def onAvgButton(self, e):
avg = dlg.AveragingDialog(self, self.run, self.params)
avg.Fit()
avg.Center()
if (avg.ShowModal() == wx.ID_OK):
e.GetEventObject().SetLabel('Running')
e.GetEventObject().Disable()
self.view_button.Show()
# TODO: hook up the calibration app
def onViewButton(self, e):
# TODO: hook up view function
pass
# === File: tfts/models/seq2seq.py (repo: LongxingTan/Time-series-prediction, license: MIT) ===
"""
`Sequence to Sequence Learning with Neural Networks
<https://arxiv.org/abs/1409.3215>`_
"""
from typing import Any, Callable, Dict, Optional, Tuple, Type
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import GRU, LSTM, RNN, Dense, Dropout, GRUCell, LSTMCell
from tfts.layers.attention_layer import FullAttention
params = {
"rnn_type": "gru",
"bi_direction": False,
"rnn_size": 64,
"dense_size": 64,
"num_stacked_layers": 1,
"scheduler_sampling": 0, # teacher forcing
"use_attention": False,
"attention_sizes": 64,
"attention_heads": 2,
"attention_dropout": 0,
"skip_connect_circle": False,
"skip_connect_mean": False,
}
class Seq2seq(object):
"""Seq2seq model"""
def __init__(
self,
predict_sequence_length: int = 1,
custom_model_params: Optional[Dict[str, Any]] = None,
custom_model_head: Optional[Callable] = None,
):
if custom_model_params:
params.update(custom_model_params)
self.params = params
self.predict_sequence_length = predict_sequence_length
self.encoder = Encoder(
rnn_type=params["rnn_type"], rnn_size=params["rnn_size"], dense_size=params["dense_size"]
)
self.decoder = Decoder1(
rnn_type=params["rnn_type"],
rnn_size=params["rnn_size"],
predict_sequence_length=predict_sequence_length,
use_attention=params["use_attention"],
attention_sizes=params["attention_sizes"],
attention_heads=params["attention_heads"],
attention_dropout=params["attention_dropout"],
)
def __call__(self, inputs, teacher=None):
"""An RNN seq2seq structure for time series
:param inputs: _description_
:type inputs: _type_
:param teacher: teacher forcing decoding, defaults to None
:type teacher: _type_, optional
:return: _description_
:type: _type_
"""
if isinstance(inputs, (list, tuple)):
x, encoder_feature, decoder_feature = inputs
encoder_feature = tf.concat([x, encoder_feature], axis=-1)
elif isinstance(inputs, dict):
x = inputs["x"]
encoder_feature = inputs["encoder_feature"]
decoder_feature = inputs["decoder_feature"]
encoder_feature = tf.concat([x, encoder_feature], axis=-1)
else:
encoder_feature = x = inputs
decoder_feature = tf.cast(
tf.tile(
tf.reshape(tf.range(self.predict_sequence_length), (1, self.predict_sequence_length, 1)),
(tf.shape(encoder_feature)[0], 1, 1),
),
tf.float32,
)
encoder_outputs, encoder_state = self.encoder(encoder_feature)
decoder_outputs = self.decoder(
decoder_feature,
decoder_init_input=x[:, -1, 0:1],
init_state=encoder_state,
teacher=teacher,
scheduler_sampling=self.params["scheduler_sampling"],
encoder_output=encoder_outputs,
)
if self.params["skip_connect_circle"]:
x_mean = x[:, -self.predict_sequence_length :, 0:1]
decoder_outputs = decoder_outputs + x_mean
if self.params["skip_connect_mean"]:
x_mean = tf.tile(tf.reduce_mean(x[..., 0:1], axis=1, keepdims=True), [1, self.predict_sequence_length, 1])
decoder_outputs = decoder_outputs + x_mean
return decoder_outputs
class Encoder(tf.keras.layers.Layer):
def __init__(self, rnn_type, rnn_size, rnn_dropout=0, dense_size=32, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.rnn_type = rnn_type
if rnn_type.lower() == "gru":
self.rnn = GRU(
units=rnn_size, activation="tanh", return_state=True, return_sequences=True, dropout=rnn_dropout
)
elif rnn_type.lower() == "lstm":
self.rnn = LSTM(
units=rnn_size,
activation="tanh",
return_state=True,
return_sequences=True,
dropout=rnn_dropout,
)
self.dense = Dense(units=dense_size, activation="tanh")
def call(self, inputs):
"""Seq2seq encoder
Parameters
----------
inputs : tf.Tensor
_description_
Returns
-------
tf.Tensor
batch_size * input_seq_length * rnn_size, state: batch_size * rnn_size
"""
if self.rnn_type.lower() == "gru":
outputs, state = self.rnn(inputs)
state = self.dense(state)
elif self.rnn_type.lower() == "lstm":
outputs, state1, state2 = self.rnn(inputs)
state = (state1, state2)
else:
raise ValueError("No supported rnn type of {}".format(self.rnn_type))
# encoder_hidden_state = tuple(self.dense(hidden_state) for _ in range(params['num_stacked_layers']))
# outputs = self.dense(outputs) # => batch_size * input_seq_length * dense_size
return outputs, state
class Decoder1(tf.keras.layers.Layer):
def __init__(
self,
rnn_type="gru",
rnn_size=32,
predict_sequence_length=3,
use_attention=False,
attention_sizes=32,
attention_heads=1,
attention_dropout=0.0,
):
super(Decoder1, self).__init__()
self.predict_sequence_length = predict_sequence_length
self.use_attention = use_attention
self.rnn_type = rnn_type
self.rnn_size = rnn_size
self.attention_sizes = attention_sizes
self.attention_heads = attention_heads
self.attention_dropout = attention_dropout
def build(self, input_shape):
if self.rnn_type.lower() == "gru":
self.rnn_cell = GRUCell(self.rnn_size)
elif self.rnn_type.lower() == "lstm":
self.rnn_cell = LSTMCell(units=self.rnn_size)
self.dense = Dense(units=1, activation=None)
if self.use_attention:
self.attention = FullAttention(
hidden_size=self.attention_sizes,
num_heads=self.attention_heads,
attention_dropout=self.attention_dropout,
)
super().build(input_shape)
def call(
self,
decoder_features,
decoder_init_input,
init_state,
teacher=None,
scheduler_sampling=0,
training=None,
**kwargs
):
"""Seq2seq decoder1: step by step
:param decoder_features: _description_
:type decoder_features: _type_
:param decoder_init_input: _description_
:type decoder_init_input: _type_
:param init_state: _description_
:type init_state: _type_
:param teacher: _description_, defaults to None
:type teacher: _type_, optional
:param scheduler_sampling: _description_, defaults to 0
:type scheduler_sampling: int, optional
:param training: _description_, defaults to None
:type training: _type_, optional
:return: _description_
:rtype: _type_
"""
decoder_outputs = []
prev_output = decoder_init_input
prev_state = init_state
if teacher is not None:
teacher = tf.squeeze(teacher, 2)
teachers = tf.split(teacher, self.predict_sequence_length, axis=1)
for i in range(self.predict_sequence_length):
if training:
p = np.random.uniform(low=0, high=1, size=1)[0]
if teacher is not None and p > scheduler_sampling:
this_input = teachers[i]
else:
this_input = prev_output
else:
this_input = prev_output
if decoder_features is not None:
this_input = tf.concat([this_input, decoder_features[:, i]], axis=-1)
if self.use_attention:
if self.rnn_type.lower() == "gru":
# q: (batch, 1, feature), att_output: (batch, 1, feature)
att = self.attention(
tf.expand_dims(prev_state, 1), k=kwargs["encoder_output"], v=kwargs["encoder_output"]
)
att = tf.squeeze(att, 1) # (batch, feature)
elif self.rnn_type.lower() == "lstm":
# q: (batch, 1, feature * 2), att_output: (batch, 1, feature)
att = self.attention(
tf.expand_dims(tf.concat(prev_state, 1), 1),
k=kwargs["encoder_output"],
v=kwargs["encoder_output"],
)
att = tf.squeeze(att, 1) # (batch, feature)
this_input = tf.concat([this_input, att], axis=-1)
this_output, this_state = self.rnn_cell(this_input, prev_state)
prev_state = this_state
prev_output = self.dense(this_output)
decoder_outputs.append(prev_output)
decoder_outputs = tf.concat(decoder_outputs, axis=-1)
return tf.expand_dims(decoder_outputs, -1)
class Decoder2(tf.keras.layers.Layer):
def __init__(
self,
rnn_type="gru",
rnn_size=32,
predict_sequence_length=3,
use_attention=False,
attention_sizes=32,
attention_heads=1,
attention_dropout=0.0,
):
super(Decoder2, self).__init__()
self.rnn_type = rnn_type
self.rnn_size = rnn_size
self.predict_sequence_length = predict_sequence_length
self.use_attention = use_attention
self.attention_sizes = attention_sizes
self.attention_heads = attention_heads
self.attention_dropout = attention_dropout
def build(self, input_shape):
if self.rnn_type.lower() == "gru":
self.rnn_cell = GRUCell(self.rnn_size)
elif self.rnn_type.lower() == "lstm":
            self.rnn_cell = LSTMCell(units=self.rnn_size)
self.dense = Dense(units=1)
if self.use_attention:
self.attention = FullAttention(
hidden_size=self.attention_sizes,
num_heads=self.attention_heads,
attention_dropout=self.attention_dropout,
)
super().build(input_shape)
def forward(
self,
decoder_feature,
decoder_init_value,
init_state,
teacher=None,
scheduler_sampling=0,
training=None,
**kwargs
):
def cond_fn(time, prev_output, prev_state, decoder_output_ta):
return time < self.predict_sequence_length
def body(time, prev_output, prev_state, decoder_output_ta):
if time == 0 or teacher is None:
this_input = prev_output
else:
this_input = teacher[:, time - 1, :]
if decoder_feature is not None:
this_feature = decoder_feature[:, time, :]
this_input = tf.concat([this_input, this_feature], axis=1)
if self.use_attention:
attention_feature = self.attention(
tf.expand_dims(prev_state[-1], 1), k=kwargs["encoder_output"], v=kwargs["encoder_output"]
)
attention_feature = tf.squeeze(attention_feature, 1)
this_input = tf.concat([this_input, attention_feature], axis=-1)
this_output, this_state = self.rnn_cell(this_input, prev_state)
project_output = self.dense(this_output)
decoder_output_ta = decoder_output_ta.write(time, project_output)
return time + 1, project_output, this_state, decoder_output_ta
loop_init = [
tf.constant(0, dtype=tf.int32), # steps
decoder_init_value, # decoder each step
init_state, # state
tf.TensorArray(dtype=tf.float32, size=self.predict_sequence_length),
]
_, _, _, decoder_outputs_ta = tf.while_loop(cond_fn, body, loop_init)
decoder_outputs = decoder_outputs_ta.stack()
decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2])
return decoder_outputs
def call(
self,
decoder_feature,
decoder_init_input,
init_state,
teacher=None,
scheduler_sampling=0,
training=None,
**kwargs
):
"""Decoder model2
Parameters
----------
decoder_feature : _type_
_description_
init_state : _type_
_description_
decoder_init_input : _type_
_description_
teacher : _type_, optional
_description_, by default None
Returns
-------
_type_
_description_
"""
        return self.forward(
            decoder_feature=decoder_feature,
            decoder_init_value=decoder_init_input,
            init_state=[init_state],  # for tf2
            teacher=teacher,
            scheduler_sampling=scheduler_sampling,
            training=training,
            **kwargs
        )
class Decoder3(tf.keras.layers.Layer):
# multi-steps static decoding
def __init__(self, rnn_type="gru", rnn_size=32, rnn_dropout=0, dense_size=1, **kwargs) -> None:
super(Decoder3, self).__init__()
if rnn_type.lower() == "gru":
self.rnn = GRU(
units=rnn_size, activation="tanh", return_state=False, return_sequences=True, dropout=rnn_dropout
)
elif rnn_type.lower() == "lstm":
self.rnn = LSTM(
units=rnn_size,
activation="tanh",
return_state=False,
return_sequences=True,
dropout=rnn_dropout,
)
self.dense = Dense(units=dense_size, activation=None)
self.drop = Dropout(0.1)
def call(
self,
decoder_features,
decoder_init_input,
init_state,
teacher=None,
scheduler_sampling=0,
training=None,
**kwargs
):
"""Decoder3: just simple
Parameters
----------
decoder_features : _type_
_description_
decoder_init_input : _type_
_description_
init_state : _type_
_description_
teacher : _type_, optional
_description_, by default None
scheduler_sampling : int, optional
_description_, by default 0
training : _type_, optional
_description_, by default None
Returns
-------
_type_
_description_
"""
x = self.rnn(decoder_features, initial_state=init_state)
# x = self.drop(x)
x = self.dense(x)
return x
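# ----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module): a
# minimal end-to-end run of the Seq2seq model above on random data. The batch
# size, sequence lengths and feature count below are arbitrary assumptions;
# everything else follows the definitions in this file.
if __name__ == "__main__":
    batch_size, train_length, predict_length, num_features = 8, 24, 6, 3
    # dummy encoder input of shape (batch_size, train_length, num_features)
    dummy_x = tf.random.normal([batch_size, train_length, num_features])
    model = Seq2seq(predict_sequence_length=predict_length)
    preds = model(dummy_x)  # decoder output of shape (batch_size, predict_length, 1)
    print(preds.shape)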
# === File: reactivex/disposable/scheduleddisposable.py (repo: ReactiveX/RxPY, license: MIT) ===
from threading import RLock
from typing import Any
from reactivex import abc
from .singleassignmentdisposable import SingleAssignmentDisposable
class ScheduledDisposable(abc.DisposableBase):
"""Represents a disposable resource whose disposal invocation will
be scheduled on the specified Scheduler"""
def __init__(
self, scheduler: abc.SchedulerBase, disposable: abc.DisposableBase
) -> None:
"""Initializes a new instance of the ScheduledDisposable class
that uses a Scheduler on which to dispose the disposable."""
self.scheduler = scheduler
self.disposable = SingleAssignmentDisposable()
self.disposable.disposable = disposable
self.lock = RLock()
super().__init__()
@property
def is_disposed(self) -> bool:
return self.disposable.is_disposed
def dispose(self) -> None:
"""Disposes the wrapped disposable on the provided scheduler."""
def action(scheduler: abc.SchedulerBase, state: Any) -> None:
"""Scheduled dispose action"""
self.disposable.dispose()
self.scheduler.schedule(action)
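# ----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module): the
# inline scheduler below is a hypothetical stand-in that runs scheduled
# actions immediately, so the example depends only on Disposable from this
# package. It shows that dispose() is forwarded through the scheduler.
if __name__ == "__main__":
    from reactivex.disposable import Disposable

    class _InlineScheduler:
        """Toy scheduler: runs every scheduled action right away."""

        def schedule(self, action, state=None):
            return action(self, state)

    resource = Disposable(lambda: print("resource released"))
    scheduled = ScheduledDisposable(_InlineScheduler(), resource)
    scheduled.dispose()           # "resource released" is printed here
    print(scheduled.is_disposed)  # True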
# === File: grr/server/grr_response_server/gui/webauth_test.py (repo: google/grr, license: Apache-2.0) ===
#!/usr/bin/env python
"""Tests for the web auth managers."""
import base64
from unittest import mock
from absl import app
import requests
from werkzeug import test as werkzeug_test
from google.oauth2 import id_token
from grr_response_server import data_store
from grr_response_server.gui import http_response
from grr_response_server.gui import validate_iap
from grr_response_server.gui import webauth
from grr_response_server.gui import wsgiapp
from grr.test_lib import test_lib
class RemoteUserWebAuthManagerTest(test_lib.GRRBaseTest):
def setUp(self):
super().setUp()
self.manager = webauth.RemoteUserWebAuthManager()
self.success_response = http_response.HttpResponse("foobar")
def HandlerStub(self, request, *args, **kwargs):
del request, args, kwargs # Unused.
return self.success_response
def testRejectsRequestWithoutRemoteUserHeader(self):
environ = werkzeug_test.EnvironBuilder(environ_base={
"REMOTE_ADDR": "127.0.0.1"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True), "No username header found.")
def testRejectsRequestFromUntrustedIp(self):
environ = werkzeug_test.EnvironBuilder(environ_base={
"REMOTE_ADDR": "127.0.0.2"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertRegex(
response.get_data(as_text=True),
"Request sent from an IP not in AdminUI.remote_user_trusted_ips. "
"Source was .+")
def testRejectsRequestWithEmptyUsername(self):
environ = werkzeug_test.EnvironBuilder(environ_base={
"REMOTE_ADDR": "127.0.0.1",
"HTTP_X_REMOTE_USER": ""
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True), "Empty username is not allowed.")
def testProcessesRequestWithUsernameFromTrustedIp(self):
environ = werkzeug_test.EnvironBuilder(environ_base={
"REMOTE_ADDR": "127.0.0.1",
"HTTP_X_REMOTE_USER": "foo"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(response, self.success_response)
def testProcessesRequestWithEmail_configDisabled(self):
environ = werkzeug_test.EnvironBuilder(
environ_base={
"REMOTE_ADDR": "127.0.0.1",
"HTTP_X_REMOTE_USER": "foo",
"HTTP_X_REMOTE_EXTRA_EMAIL": "foo@bar.org",
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertIsNone(request.email)
self.assertEqual(response, self.success_response)
def testProcessesRequestWithEmail_configEnabled(self):
environ = werkzeug_test.EnvironBuilder(
environ_base={
"REMOTE_ADDR": "127.0.0.1",
"HTTP_X_REMOTE_USER": "foo",
"HTTP_X_REMOTE_EXTRA_EMAIL": "foo@bar.org",
}).get_environ()
request = wsgiapp.HttpRequest(environ)
with test_lib.ConfigOverrider({"Email.enable_custom_email_address": True}):
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(request.email, "foo@bar.org")
self.assertEqual(response, self.success_response)
class FirebaseWebAuthManagerTest(test_lib.GRRBaseTest):
def setUp(self):
super().setUp()
config_overrider = test_lib.ConfigOverrider({
"AdminUI.firebase_auth_domain": "foo-bar.firebaseapp.com",
"API.DefaultRouter": "DisabledApiCallRouter"
})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
self.manager = webauth.FirebaseWebAuthManager()
self.success_response = http_response.HttpResponse("foobar")
self.checked_request = None
def HandlerStub(self, request, *args, **kwargs):
_ = args
_ = kwargs
self.checked_request = request
return self.success_response
def testPassesThroughHomepageWhenAuthorizationHeaderIsMissing(self):
environ = werkzeug_test.EnvironBuilder().get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(response, self.success_response)
def testReportsErrorOnNonHomepagesWhenAuthorizationHeaderIsMissing(self):
environ = werkzeug_test.EnvironBuilder(path="/foo").get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True),
"JWT token validation failed: JWT token is missing.")
def testReportsErrorWhenBearerPrefixIsMissing(self):
environ = werkzeug_test.EnvironBuilder(
path="/foo", headers={
"Authorization": "blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True),
"JWT token validation failed: JWT token is missing.")
@mock.patch.object(
id_token, "verify_firebase_token", side_effect=ValueError("foobar error"))
def testPassesThroughHomepageOnVerificationFailure(self, mock_method):
_ = mock_method
environ = werkzeug_test.EnvironBuilder(headers={
"Authorization": "Bearer blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(response, self.success_response)
@mock.patch.object(
id_token, "verify_firebase_token", side_effect=ValueError("foobar error"))
def testReportsErrorOnVerificationFailureOnNonHomepage(self, mock_method):
_ = mock_method
environ = werkzeug_test.EnvironBuilder(
path="/foo", headers={
"Authorization": "Bearer blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True),
"JWT token validation failed: foobar error")
@mock.patch.object(id_token, "verify_firebase_token")
def testVerifiesTokenWithProjectIdFromDomain(self, mock_method):
environ = werkzeug_test.EnvironBuilder(headers={
"Authorization": "Bearer blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(mock_method.call_count, 1)
self.assertEqual(mock_method.call_args_list[0][0], ("blah", request))
self.assertEqual(mock_method.call_args_list[0][1], dict(audience="foo-bar"))
@mock.patch.object(
id_token, "verify_firebase_token", return_value={"iss": "blah"})
def testReportsErrorIfIssuerIsWrong(self, mock_method):
_ = mock_method
environ = werkzeug_test.EnvironBuilder(
path="/foo", headers={
"Authorization": "Bearer blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
response = self.manager.SecurityCheck(self.HandlerStub, request)
self.assertEqual(
response.get_data(as_text=True),
"JWT token validation failed: Wrong issuer.")
@mock.patch.object(
id_token,
"verify_firebase_token",
return_value={
"iss": "https://securetoken.google.com/foo-bar",
"email": "foo@bar.com"
})
def testFillsRequestUserFromTokenEmailOnSuccess(self, mock_method):
_ = mock_method
environ = werkzeug_test.EnvironBuilder(headers={
"Authorization": "Bearer blah"
}).get_environ()
request = wsgiapp.HttpRequest(environ)
self.manager.SecurityCheck(self.HandlerStub, request)
self.assertTrue(self.checked_request)
self.assertEqual(self.checked_request.user, "foo@bar.com")
class IAPWebAuthManagerTest(test_lib.GRRBaseTest):
def testNoHeader(self):
"""Test requests sent to the Admin UI without an IAP Header."""
environ = werkzeug_test.EnvironBuilder(path="/").get_environ()
request = wsgiapp.HttpRequest(environ)
def Handler(request, *args, **kwargs):
del request, args, kwargs # Unused.
return http_response.HttpResponse("foobar", status=200)
manager = webauth.IAPWebAuthManager()
response = manager.SecurityCheck(Handler, request)
self.assertEqual(response.status_code, 401)
@mock.patch.object(requests, "get")
def testFailedSignatureKey(self, mock_get):
"""Test requests with an invalid JWT Token."""
mock_get.return_value.status_code = 200
mock_get.return_value.json.return_value = {
"6BEeoA": (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAElmi1hJdqtbvdX1INOf5B9dWvkydY\n"
"oowHUXiw8ELWzk/YHESNr8vXQoyOuLOEtLZeCQbFkeLUqxYp1sTArKNu/A==\n"
"-----END PUBLIC KEY-----\n"),
}
assertion_header = (
"eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCIsI"
"mtpZCI6IjZCRWVvQSJ9.eyJpc3MiOiJodHRwczovL2Nsb3VkLmdvb2dsZS5jb20"
"vaWFwIiwic3ViIjoiYWNjb3VudHMuZ29vZ2xlLaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaDciLCJlbWFpbCI6ImFaaaaaaaazaaaaaaaaaaaaaaaaaaaaaa8iLCJhd"
"WQiOiIvcHJvamVjdaaaaaaaaaaaaaaaaaayaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaayOegyMzkzOTQ2NCIsImV4cCI6MTU0Njk4MDUwNiwiaWF0Ijo"
"xNTQ2OTc5OTA2LCJaaCI6InNwb3apaaaaaaaaaaaaaaapayJ9.NZwDs0U_fubYS"
"OmYNJAI9ufgoC84zXOCzZkxclWBVXhb1dBVQHpO-VZW-lworDvKxX_BWqagKYTq"
"wc4ELBcKTQ")
environ = werkzeug_test.EnvironBuilder(
path="/",
headers={
"X-Goog-IAP-JWT-Assertion": assertion_header
},
).get_environ()
request = wsgiapp.HttpRequest(environ)
def Handler(request, *args, **kwargs):
del request, args, kwargs # Unused.
self.fail("Handler shouldn't have been executed.")
manager = webauth.IAPWebAuthManager()
response = manager.SecurityCheck(Handler, request)
mock_get.assert_called_once_with(
"https://www.gstatic.com/iap/verify/public_key")
self.assertEqual(response.status_code, 401)
@mock.patch.object(
validate_iap,
"ValidateIapJwtFromComputeEngine",
return_value=("temp", "temp"))
def testSuccessfulKey(self, mock_method):
"""Validate account creation upon successful JWT Authentication."""
environ = werkzeug_test.EnvironBuilder(
path="/", headers={
"X-Goog-IAP-JWT-Assertion": ("valid_key")
}).get_environ()
request = wsgiapp.HttpRequest(environ)
def Handler(request, *args, **kwargs):
del args, kwargs # Unused.
self.assertEqual(request.user, "temp")
return http_response.HttpResponse("success", status=200)
manager = webauth.IAPWebAuthManager()
response = manager.SecurityCheck(Handler, request)
self.assertEqual(response.status_code, 200)
class BasicWebAuthManagerTest(test_lib.GRRBaseTest):
def _SetupUser(self, user, password):
data_store.REL_DB.WriteGRRUser(user, password)
def testSecurityCheckUnicode(self):
user = "żymścimił"
# TODO(hanuszczak): Test password with unicode characters as well. Currently
# this will not work because `CryptedPassword` is broken and does not work
# with unicode objects.
password = "quux"
self._SetupUser(user, password)
authorization = "{user}:{password}".format(user=user, password=password)
token = base64.b64encode(authorization.encode("utf-8")).decode("ascii")
environ = werkzeug_test.EnvironBuilder(
path="/foo", headers={
"Authorization": "Basic %s" % token,
}).get_environ()
request = wsgiapp.HttpRequest(environ)
def Handler(request, *args, **kwargs):
del args, kwargs # Unused.
self.assertEqual(request.user, user)
return http_response.HttpResponse(b"foobar", status=200)
manager = webauth.BasicWebAuthManager()
response = manager.SecurityCheck(Handler, request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_data(), b"foobar")
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
# === File: tests/components/bond/test_init.py (repo: home-assistant/core, license: Apache-2.0) ===
"""Tests for the Bond module."""
import asyncio
from unittest.mock import MagicMock, Mock
from aiohttp import ClientConnectionError, ClientResponseError
from bond_async import DeviceType
import pytest
from homeassistant.components.bond.const import DOMAIN
from homeassistant.components.fan import DOMAIN as FAN_DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_ASSUMED_STATE, CONF_ACCESS_TOKEN, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.setup import async_setup_component
from .common import (
ceiling_fan,
patch_bond_bridge,
patch_bond_device,
patch_bond_device_ids,
patch_bond_device_properties,
patch_bond_device_state,
patch_bond_version,
patch_setup_entry,
patch_start_bpup,
remove_device,
setup_bond_entity,
setup_platform,
)
from tests.common import MockConfigEntry
from tests.typing import WebSocketGenerator
async def test_async_setup_no_domain_config(hass: HomeAssistant) -> None:
"""Test setup without configuration is noop."""
result = await async_setup_component(hass, DOMAIN, {})
assert result is True
@pytest.mark.parametrize(
"exc",
[
ClientConnectionError,
ClientResponseError(MagicMock(), MagicMock(), status=404),
asyncio.TimeoutError,
OSError,
],
)
async def test_async_setup_raises_entry_not_ready(
hass: HomeAssistant, exc: Exception
) -> None:
"""Test that it throws ConfigEntryNotReady when exception occurs during setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_version(side_effect=exc):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_async_setup_raises_fails_if_auth_fails(hass: HomeAssistant) -> None:
"""Test that setup fails if auth fails during setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_version(
side_effect=ClientResponseError(MagicMock(), MagicMock(), status=401)
):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_async_setup_entry_sets_up_hub_and_supported_domains(
hass: HomeAssistant,
) -> None:
"""Test that configuring entry sets up cover domain."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
with patch_bond_bridge(), patch_bond_version(
return_value={
"bondid": "ZXXX12345",
"target": "test-model",
"fw_ver": "test-version",
"mcu_ver": "test-hw-version",
}
), patch_setup_entry("cover") as mock_cover_async_setup_entry, patch_setup_entry(
"fan"
) as mock_fan_async_setup_entry, patch_setup_entry(
"light"
) as mock_light_async_setup_entry, patch_setup_entry(
"switch"
) as mock_switch_async_setup_entry:
result = await setup_bond_entity(hass, config_entry, patch_device_ids=True)
assert result is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state is ConfigEntryState.LOADED
assert config_entry.unique_id == "ZXXX12345"
# verify hub device is registered correctly
device_registry = dr.async_get(hass)
hub = device_registry.async_get_device(identifiers={(DOMAIN, "ZXXX12345")})
assert hub.name == "bond-name"
assert hub.manufacturer == "Olibra"
assert hub.model == "test-model"
assert hub.sw_version == "test-version"
assert hub.hw_version == "test-hw-version"
assert hub.configuration_url == "http://some host"
# verify supported domains are setup
assert len(mock_cover_async_setup_entry.mock_calls) == 1
assert len(mock_fan_async_setup_entry.mock_calls) == 1
assert len(mock_light_async_setup_entry.mock_calls) == 1
assert len(mock_switch_async_setup_entry.mock_calls) == 1
async def test_unload_config_entry(hass: HomeAssistant) -> None:
"""Test that configuration entry supports unloading."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
result = await setup_bond_entity(
hass,
config_entry,
patch_version=True,
patch_device_ids=True,
patch_platforms=True,
patch_bridge=True,
)
assert result is True
await hass.async_block_till_done()
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.entry_id not in hass.data[DOMAIN]
assert config_entry.state is ConfigEntryState.NOT_LOADED
async def test_old_identifiers_are_removed(hass: HomeAssistant) -> None:
"""Test we remove the old non-unique identifiers."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
old_identifiers = (DOMAIN, "device_id")
new_identifiers = (DOMAIN, "ZXXX12345", "device_id")
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={old_identifiers},
manufacturer="any",
name="old",
)
with patch_bond_bridge(), patch_bond_version(
return_value={
"bondid": "ZXXX12345",
"target": "test-model",
"fw_ver": "test-version",
}
), patch_start_bpup(), patch_bond_device_ids(
return_value=["bond-device-id", "device_id"]
), patch_bond_device(
return_value={
"name": "test1",
"type": DeviceType.GENERIC_DEVICE,
}
), patch_bond_device_properties(
return_value={}
), patch_bond_device_state(
return_value={}
):
assert await hass.config_entries.async_setup(config_entry.entry_id) is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state is ConfigEntryState.LOADED
assert config_entry.unique_id == "ZXXX12345"
# verify the device info is cleaned up
assert device_registry.async_get_device(identifiers={old_identifiers}) is None
assert device_registry.async_get_device(identifiers={new_identifiers}) is not None
async def test_smart_by_bond_device_suggested_area(hass: HomeAssistant) -> None:
"""Test we can setup a smart by bond device and get the suggested area."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_bridge(
side_effect=ClientResponseError(Mock(), Mock(), status=404)
), patch_bond_version(
return_value={
"bondid": "KXXX12345",
"target": "test-model",
"fw_ver": "test-version",
}
), patch_start_bpup(), patch_bond_device_ids(
return_value=["bond-device-id", "device_id"]
), patch_bond_device(
return_value={
"name": "test1",
"type": DeviceType.GENERIC_DEVICE,
"location": "Den",
}
), patch_bond_device_properties(
return_value={}
), patch_bond_device_state(
return_value={}
):
assert await hass.config_entries.async_setup(config_entry.entry_id) is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state is ConfigEntryState.LOADED
assert config_entry.unique_id == "KXXX12345"
device_registry = dr.async_get(hass)
device = device_registry.async_get_device(identifiers={(DOMAIN, "KXXX12345")})
assert device is not None
assert device.suggested_area == "Den"
async def test_bridge_device_suggested_area(hass: HomeAssistant) -> None:
"""Test we can setup a bridge bond device and get the suggested area."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
config_entry.add_to_hass(hass)
with patch_bond_bridge(
return_value={
"name": "Office Bridge",
"location": "Office",
}
), patch_bond_version(
return_value={
"bondid": "ZXXX12345",
"target": "test-model",
"fw_ver": "test-version",
}
), patch_start_bpup(), patch_bond_device_ids(
return_value=["bond-device-id", "device_id"]
), patch_bond_device(
return_value={
"name": "test1",
"type": DeviceType.GENERIC_DEVICE,
"location": "Bathroom",
}
), patch_bond_device_properties(
return_value={}
), patch_bond_device_state(
return_value={}
):
assert await hass.config_entries.async_setup(config_entry.entry_id) is True
await hass.async_block_till_done()
assert config_entry.entry_id in hass.data[DOMAIN]
assert config_entry.state is ConfigEntryState.LOADED
assert config_entry.unique_id == "ZXXX12345"
device_registry = dr.async_get(hass)
device = device_registry.async_get_device(identifiers={(DOMAIN, "ZXXX12345")})
assert device is not None
assert device.suggested_area == "Office"
async def test_device_remove_devices(
hass: HomeAssistant, hass_ws_client: WebSocketGenerator
) -> None:
"""Test we can only remove a device that no longer exists."""
assert await async_setup_component(hass, "config", {})
config_entry = await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = er.async_get(hass)
entity = registry.entities["fan.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get(entity.device_id)
assert (
await remove_device(
await hass_ws_client(hass), device_entry.id, config_entry.entry_id
)
is False
)
dead_device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "test-hub-id", "remove-device-id")},
)
assert (
await remove_device(
await hass_ws_client(hass), dead_device_entry.id, config_entry.entry_id
)
is True
)
dead_device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "wrong-hub-id", "test-device-id")},
)
assert (
await remove_device(
await hass_ws_client(hass), dead_device_entry.id, config_entry.entry_id
)
is True
)
hub_device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "test-hub-id")},
)
assert (
await remove_device(
await hass_ws_client(hass), hub_device_entry.id, config_entry.entry_id
)
is False
)
async def test_smart_by_bond_v3_firmware(hass: HomeAssistant) -> None:
"""Test we can detect smart by bond with the v3 firmware."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "KXXXX12345", "target": "breck-northstar"},
bond_device_id="test-device-id",
)
assert ATTR_ASSUMED_STATE not in hass.states.get("fan.name_1").attributes
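# --- Illustrative aside (not part of the original tests) ---------------------
# The setup tests above stack many nested `with patch_*()` context managers.
# A hedged sketch of how they could be flattened with contextlib.ExitStack,
# reusing the patch helpers imported from .common (this helper is hypothetical
# and is not used by the tests):
def _enter_bond_patches(stack, version: dict, device_ids: list) -> None:
    """Enter the usual set of Bond API patches on an already-open ExitStack."""
    stack.enter_context(patch_bond_bridge())
    stack.enter_context(patch_bond_version(return_value=version))
    stack.enter_context(patch_start_bpup())
    stack.enter_context(patch_bond_device_ids(return_value=device_ids))
    stack.enter_context(
        patch_bond_device(
            return_value={"name": "test1", "type": DeviceType.GENERIC_DEVICE}
        )
    )
    stack.enter_context(patch_bond_device_properties(return_value={}))
    stack.enter_context(patch_bond_device_state(return_value={}))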
blob_id: 579b7e2a95075229fed06c66fd6f49c2fd3e7c0f | directory_id: 03aef1b35a54fb2f1ee89523f086a5184a84f154 | content_id: 5ea8217ab775b1f4f158add737d92d596b857906
path: /gitlint-core/gitlint/contrib/rules/signedoff_by.py | filename: signedoff_by.py | extension: py | length_bytes: 620 | language: Python | src_encoding: UTF-8
repo_name: jorisroovers/gitlint | branch_name: refs/heads/main | snapshot_id: 955f693e30d1149ec84bde38d54617c6e3daa1f5 | revision_id: 4d9119760056492eabc201bfad5de2f9e660b85f
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-03T15:52:33.666513 | revision_date: 2023-09-02T08:25:48 | committer_date: 2023-09-02T08:25:48 | gha_created_at: 2015-09-02T20:06:16 | gha_event_created_at: 2023-09-13T01:42:56
github_id: 41,824,974 | star_events_count: 741 | fork_events_count: 113 | is_vendor: false | is_generated: false
from gitlint.rules import CommitRule, RuleViolation
class SignedOffBy(CommitRule):
"""This rule will enforce that each commit body contains a "Signed-off-by" line.
We keep things simple here and just check whether the commit body contains a line that starts with "Signed-off-by".
"""
name = "contrib-body-requires-signed-off-by"
id = "CC1"
def validate(self, commit):
for line in commit.message.body:
if line.lower().startswith("signed-off-by"):
return []
return [RuleViolation(self.id, "Body does not contain a 'Signed-off-by' line", line_nr=1)]
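# --- Illustrative aside (not part of the original rule) ----------------------
# The validation above reduces to a simple predicate over the body lines; a
# standalone sketch (the function name is made up, not gitlint API):
def _has_signed_off_by(body_lines):
    """Return True if any body line starts with 'Signed-off-by' (case-insensitive)."""
    return any(line.lower().startswith("signed-off-by") for line in body_lines)
# e.g. _has_signed_off_by(["Fix bug", "Signed-off-by: Jane Doe <jane@example.com>"]) -> True
#      _has_signed_off_by(["Fix bug"]) -> False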
blob_id: a5a7e9e04b11f17eff251be7bf1bf49f6f09c030 | directory_id: c530897cb72b6943c7226b25824444cad5f3503b | content_id: befe1b194abde5d2b95506953f77a11f840de2df
path: /usaspending_api/common/management/commands/clear_usaspending_cache.py | filename: clear_usaspending_cache.py | extension: py | length_bytes: 564 | language: Python | src_encoding: UTF-8
repo_name: fedspendingtransparency/usaspending-api | branch_name: refs/heads/master | snapshot_id: fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a | revision_id: 38f920438697930ae3ac57bbcaae9034877d8fb7
detected_licenses: ["CC0-1.0"] | license_type: permissive | gha_license_id: CC0-1.0 | gha_language: Python
visit_date: 2023-09-01T22:00:36.633612 | revision_date: 2023-08-29T18:39:18 | committer_date: 2023-08-29T18:39:18 | gha_created_at: 2016-08-10T15:39:45 | gha_event_created_at: 2023-09-14T20:33:15
github_id: 65,394,827 | star_events_count: 276 | fork_events_count: 118 | is_vendor: false | is_generated: false
import logging
from django.core.management.base import BaseCommand
from django.core.cache import caches
class Command(BaseCommand):
"""
This command will clear the usaspending-cache (useful after a load or a deletion
to ensure end users don't see stale data)
"""
help = "Clears the usaspending-cache"
logger = logging.getLogger("script")
def handle(self, *args, **options):
self.logger.info("Clearing usaspending-cache...")
cache = caches["usaspending-cache"]
cache.clear()
self.logger.info("Done.")
blob_id: 18d616e6afe7122d766423720526170fddadd9c5 | directory_id: 1ad268817e4f048815df6e7b7669c45257a37b0e | content_id: a3590628b750e500f8bb4f972a31b49d409f46e1
path: /kartothek/core/factory.py | filename: factory.py | extension: py | length_bytes: 6,845 | language: Python | src_encoding: UTF-8
repo_name: JDASoftwareGroup/kartothek | branch_name: refs/heads/master | snapshot_id: 07c7f2fceb3dcee5cf8d0a6a93f4c1060eb0bcf4 | revision_id: 1821ea5df60d4079d3911b3c2f17be11d8780e22
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-05-26T11:43:04.781173 | revision_date: 2021-12-10T09:15:19 | committer_date: 2021-12-10T09:15:19 | gha_created_at: 2019-05-02T15:45:42 | gha_event_created_at: 2023-05-15T21:56:50
github_id: 184,608,549 | star_events_count: 178 | fork_events_count: 59 | is_vendor: false | is_generated: false
# -*- coding: utf-8 -*-
import copy
from typing import TYPE_CHECKING, Any, Optional, TypeVar, cast
from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBase
from kartothek.core.typing import StoreInput
from kartothek.core.utils import lazy_store
from kartothek.utils.migration_helpers import (
deprecate_parameters_if_set,
get_deprecation_warning_remove_parameter_multi_table,
)
if TYPE_CHECKING:
from simplekv import KeyValueStore
__all__ = ("DatasetFactory",)
T = TypeVar("T", bound="DatasetFactory")
class DatasetFactory(DatasetMetadataBase):
"""
Container holding dataset metadata and caching storage access.
"""
_nullable_attributes = ["_cache_metadata", "_cache_store"]
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"load_dataset_metadata",
)
def __init__(
self,
dataset_uuid: str,
store_factory: StoreInput,
load_schema: bool = True,
load_all_indices: bool = False,
load_dataset_metadata: bool = True,
) -> None:
"""
A dataset factory object which can be used to cache dataset load operations. This class should be the primary user entry point when
reading datasets.
Example using the eager backend:
.. code::
from functools import partial
from storefact import get_store_from_url
from kartothek.io.eager import read_table
ds_factory = DatasetFactory(
dataset_uuid="my_test_dataset",
store=partial(get_store_from_url, store_url)
)
df = read_table(factory=ds_factory)
Parameters
----------
dataset_uuid
The unique identifier for the dataset.
store_factory
A callable which creates a KeyValueStore object
load_schema
Load the schema information immediately.
load_all_indices
Load all indices immediately.
load_dataset_metadata
Keep the user metadata in memory
"""
self._cache_metadata: Optional[DatasetMetadata] = None
self._cache_store = None
self.store_factory = lazy_store(store_factory)
self.dataset_uuid = dataset_uuid
self.load_schema = load_schema
self._ds_callable = None
self.is_loaded = False
self.load_dataset_metadata = load_dataset_metadata
self.load_all_indices_flag = load_all_indices
def __repr__(self):
return "<DatasetFactory: uuid={} is_loaded={}>".format(
self.dataset_uuid, self.is_loaded
)
@property
def store(self) -> "KeyValueStore":
if self._cache_store is None:
self._cache_store = self.store_factory()
return self._cache_store
def _instantiate_metadata_cache(self: T) -> T:
if self._cache_metadata is None:
if self._ds_callable:
# backwards compat
self._cache_metadata = self._ds_callable()
else:
self._cache_metadata = DatasetMetadata.load_from_store(
uuid=self.dataset_uuid,
store=self.store,
load_schema=self.load_schema,
load_all_indices=self.load_all_indices_flag,
)
if not self.load_dataset_metadata:
self._cache_metadata.metadata = {}
self.is_loaded = True
return self
@property
def dataset_metadata(self) -> DatasetMetadata:
self._instantiate_metadata_cache()
# The above line ensures non-None
return cast(DatasetMetadata, self._cache_metadata)
def invalidate(self) -> None:
self.is_loaded = False
self._cache_metadata = None
self._cache_store = None
def __getattr__(self, name):
# __getattr__ should only be called if the attribute cannot be found. if the
# attribute is None, it still falls back to this call
if name in self._nullable_attributes:
return object.__getattribute__(self, name)
self._instantiate_metadata_cache()
ds = getattr(self, "dataset_metadata")
return getattr(ds, name)
def __getstate__(self):
# remove cache
return {k: v for k, v in self.__dict__.items() if not k.startswith("_cache_")}
def __setstate__(self, state):
self.__init__(
dataset_uuid=state["dataset_uuid"],
store_factory=state["store_factory"],
load_schema=state["load_schema"],
load_all_indices=state["load_all_indices_flag"],
)
def __deepcopy__(self, memo) -> "DatasetFactory":
new_obj = DatasetFactory(
dataset_uuid=self.dataset_uuid,
store_factory=self.store_factory,
load_schema=self.load_schema,
load_all_indices=self.load_all_indices_flag,
)
if self._cache_metadata is not None:
new_obj._cache_metadata = copy.deepcopy(self._cache_metadata)
return new_obj
def load_index(self: T, column, store=None) -> T:
self._cache_metadata = self.dataset_metadata.load_index(column, self.store)
return self
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"load_partition_indices",
)
def load_all_indices(
self: T, store: Any = None, load_partition_indices: bool = True,
) -> T:
self._cache_metadata = self.dataset_metadata.load_all_indices(
self.store, load_partition_indices=load_partition_indices
)
return self
def load_partition_indices(self: T) -> T:
self._cache_metadata = self.dataset_metadata.load_partition_indices()
return self
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"load_dataset_metadata",
)
def _ensure_factory(
dataset_uuid: Optional[str],
store: Optional[StoreInput],
factory: Optional[DatasetFactory],
load_dataset_metadata: bool,
load_schema: bool = True,
) -> DatasetFactory:
if store is None and dataset_uuid is None and factory is not None:
return factory
elif store is not None and dataset_uuid is not None and factory is None:
return DatasetFactory(
dataset_uuid=dataset_uuid,
store_factory=lazy_store(store),
load_dataset_metadata=load_dataset_metadata,
load_schema=load_schema,
)
else:
raise ValueError(
"Need to supply either a `factory` or `dataset_uuid` and `store`"
)
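# --- Illustrative usage note (not part of the original module) ---------------
# _ensure_factory accepts either an existing DatasetFactory OR a
# (dataset_uuid, store) pair, but never both and never neither; names such as
# `my_store_factory` below are placeholders:
#
#     factory = DatasetFactory(dataset_uuid="my_ds", store_factory=my_store_factory)
#     _ensure_factory(dataset_uuid=None, store=None, factory=factory,
#                     load_dataset_metadata=False)   # returns factory unchanged
#     _ensure_factory(dataset_uuid="my_ds", store=my_store_factory, factory=None,
#                     load_dataset_metadata=False)   # builds a new DatasetFactory
#     _ensure_factory(dataset_uuid=None, store=None, factory=None,
#                     load_dataset_metadata=False)   # raises ValueError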
blob_id: c15f467d77be64eff8e63b6be6b483a1ad09ff8f | directory_id: 3f66c9877f0c8a394dbc1fa98dedb9273316b175 | content_id: 6b72cb26a0fd7231fb797ef3fa04c0631f7a1c9e
path: /flow.py | filename: flow.py | extension: py | length_bytes: 10,956 | language: Python | src_encoding: UTF-8
repo_name: onejgordon/flow-dashboard | branch_name: refs/heads/develop | snapshot_id: c06f6760d0087cebebe75102b543ac35d7aa8469 | revision_id: b8d85d9313e51cf386f6d2e5944fc958a7d96769
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-03T12:20:57.223724 | revision_date: 2023-07-02T15:55:02 | committer_date: 2023-07-02T15:55:02 | gha_created_at: 2017-03-11T14:46:24 | gha_event_created_at: 2023-04-01T02:06:25
github_id: 84,657,014 | star_events_count: 1,801 | fork_events_count: 250 | is_vendor: false | is_generated: false
import os
import webapp2
from constants import COOKIE_NAME
from actions import adminActions
from views import views
import imp
import api
import tasks
try:
imp.find_module('secrets', ['settings'])
except ImportError:
from settings import secrets_template as secrets
else:
from settings import secrets
SECS_PER_WEEK = 60 * 60 * 24 * 7
# Enable ctypes -> Jinja2 tracebacks
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
ROOT_DIRECTORY = os.path.dirname(__file__)
if not PRODUCTION_MODE:
# from google.appengine.tools.devappserver2.python import sandbox
# sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'src')
else:
TEMPLATE_DIRECTORY = os.path.join(ROOT_DIRECTORY, 'dist')
curr_path = os.path.abspath(os.path.dirname(__file__))
config = {
'webapp2_extras.sessions': {
'secret_key': secrets.COOKIE_KEY,
'session_max_age': SECS_PER_WEEK,
'cookie_args': {'max_age': SECS_PER_WEEK},
'cookie_name': COOKIE_NAME
},
'webapp2_extras.jinja2': {
'template_path': TEMPLATE_DIRECTORY
}
}
app = webapp2.WSGIApplication(
[
# Admin Actions
webapp2.Route('/admin/gauth/initialize', handler=adminActions.Init, name="aInit"),
webapp2.Route('/admin/gauth/hacks', handler=adminActions.Hacks),
# API
webapp2.Route('/api/user/me', handler=api.UserAPI, handler_method="update_self", methods=["POST"]),
webapp2.Route('/api/user', handler=api.UserAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/project/active', handler=api.ProjectAPI, handler_method="active", methods=["GET"]),
webapp2.Route('/api/project', handler=api.ProjectAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/project', handler=api.ProjectAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/project/delete', handler=api.ProjectAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/habit', handler=api.HabitAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/habit/recent', handler=api.HabitAPI, handler_method="recent", methods=["GET"]),
webapp2.Route('/api/habit/range', handler=api.HabitAPI, handler_method="range", methods=["GET"]),
webapp2.Route('/api/habit/toggle', handler=api.HabitAPI, handler_method="toggle", methods=["POST"]),
webapp2.Route('/api/habit/increment', handler=api.HabitAPI, handler_method="increment", methods=["POST"]),
webapp2.Route('/api/habit/commit', handler=api.HabitAPI, handler_method="commit", methods=["POST"]),
webapp2.Route('/api/habit/delete', handler=api.HabitAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/habit', handler=api.HabitAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/habit/delete', handler=api.HabitAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/habit/<id>', handler=api.HabitAPI, handler_method="detail", methods=["GET"]),
webapp2.Route('/api/goal', handler=api.GoalAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/goal/current', handler=api.GoalAPI, handler_method="current", methods=["GET"]),
webapp2.Route('/api/goal', handler=api.GoalAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/event', handler=api.EventAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/event', handler=api.EventAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/event/batch', handler=api.EventAPI, handler_method="batch_create", methods=["POST"]),
webapp2.Route('/api/event/delete', handler=api.EventAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/journal/today', handler=api.JournalAPI, handler_method="today", methods=["GET"]),
webapp2.Route('/api/journal/year', handler=api.JournalAPI, handler_method="year", methods=["GET"]),
webapp2.Route('/api/journal/submit', handler=api.JournalAPI, handler_method="submit", methods=["POST"]),
webapp2.Route('/api/journal', handler=api.JournalAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/journal', handler=api.JournalAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/snapshot', handler=api.SnapshotAPI, handler_method="submit", methods=["POST"]),
webapp2.Route('/api/snapshot', handler=api.SnapshotAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/tracking', handler=api.TrackingAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/tracking', handler=api.TrackingAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/task', handler=api.TaskAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/task', handler=api.TaskAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/task/delete', handler=api.TaskAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/task/action', handler=api.TaskAPI, handler_method="action", methods=["POST"]),
webapp2.Route('/api/readable', handler=api.ReadableAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/readable', handler=api.ReadableAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/readable/delete', handler=api.ReadableAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/readable/batch', handler=api.ReadableAPI, handler_method="batch_create", methods=["POST"]),
webapp2.Route('/api/readable/random', handler=api.ReadableAPI, handler_method="random_batch", methods=["GET"]),
webapp2.Route('/api/readable/search', handler=api.ReadableAPI, handler_method="search", methods=["GET"]),
webapp2.Route('/api/quote', handler=api.QuoteAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/quote', handler=api.QuoteAPI, handler_method="update", methods=["POST"]),
webapp2.Route('/api/quote/batch', handler=api.QuoteAPI, handler_method="batch_create", methods=["POST"]),
webapp2.Route('/api/quote/random', handler=api.QuoteAPI, handler_method="random_batch", methods=["GET"]),
webapp2.Route('/api/quote/search', handler=api.QuoteAPI, handler_method="search", methods=["GET"]),
webapp2.Route('/api/quote/action', handler=api.QuoteAPI, handler_method="action", methods=["POST"]),
webapp2.Route('/api/quote/delete', handler=api.QuoteAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/analysis', handler=api.AnalysisAPI, handler_method="get", methods=["GET"]),
webapp2.Route('/api/journaltag', handler=api.JournalTagAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/report', handler=api.ReportAPI, handler_method="list", methods=["GET"]),
webapp2.Route('/api/report/generate', handler=api.ReportAPI, handler_method="generate", methods=["POST"]),
webapp2.Route('/api/report/serve', handler=api.ReportAPI, handler_method="serve", methods=["GET"]),
webapp2.Route('/api/report/delete', handler=api.ReportAPI, handler_method="delete", methods=["POST"]),
webapp2.Route('/api/feedback', handler=api.FeedbackAPI, handler_method="submit", methods=["POST"]),
webapp2.Route('/api/auth/google_login', handler=api.AuthenticationAPI, handler_method="google_login"),
webapp2.Route('/api/auth/google_auth', handler=api.AuthenticationAPI, handler_method="google_auth"),
webapp2.Route('/api/auth/google/token', handler=api.AuthenticationAPI, handler_method="google_token", methods=["POST"]),
webapp2.Route('/api/auth/google/oauth2callback', handler=api.AuthenticationAPI, handler_method="google_oauth2_callback"),
webapp2.Route('/api/auth/google/<service_name>/authenticate', handler=api.AuthenticationAPI, handler_method="google_service_authenticate"),
webapp2.Route('/api/auth/fbook_auth', handler=api.AuthenticationAPI, handler_method="fbook_auth"),
webapp2.Route('/api/auth/logout', handler=api.AuthenticationAPI, handler_method="logout"),
# Integrations
webapp2.Route('/api/integrations/update_integration_settings', handler=api.IntegrationsAPI, handler_method="update_integration_settings", methods=["POST"]),
webapp2.Route('/api/integrations/goodreads', handler=api.IntegrationsAPI, handler_method="goodreads_shelf", methods=["GET"]),
webapp2.Route('/api/integrations/pocket', handler=api.IntegrationsAPI, handler_method="pocket_sync", methods=["GET"]),
webapp2.Route('/api/integrations/pocket/authenticate', handler=api.IntegrationsAPI, handler_method="pocket_authenticate", methods=["POST"]),
webapp2.Route('/api/integrations/pocket/authorize', handler=api.IntegrationsAPI, handler_method="pocket_authorize", methods=["POST"]),
webapp2.Route('/api/integrations/pocket/disconnect', handler=api.IntegrationsAPI, handler_method="pocket_disconnect", methods=["POST"]),
webapp2.Route('/api/integrations/evernote/authenticate', handler=api.IntegrationsAPI, handler_method="evernote_authenticate", methods=["POST"]),
webapp2.Route('/api/integrations/evernote/authorize', handler=api.IntegrationsAPI, handler_method="evernote_authorize", methods=["POST"]),
webapp2.Route('/api/integrations/evernote/disconnect', handler=api.IntegrationsAPI, handler_method="evernote_disconnect", methods=["POST"]),
webapp2.Route('/api/integrations/evernote/webhook', handler=api.IntegrationsAPI, handler_method="evernote_webhook", methods=["GET"]),
# Agent
webapp2.Route('/api/agent/apiai/request', handler=api.AgentAPI, handler_method="apiai_request", methods=["POST"]),
webapp2.Route('/api/agent/fbook/request', handler=api.AgentAPI, handler_method="fbook_request"),
webapp2.Route('/api/agent/flowapp/request', handler=api.AgentAPI, handler_method="flowapp_request"),
webapp2.Route('/api/agent/spoof', handler=api.AgentAPI, handler_method="spoof", methods=["POST"]),
# Reports
webapp2.Route('/api/report/serve', handler=api.ReportAPI, handler_method="serve", methods=["GET"]),
# Cron jobs (see cron.yaml)
webapp2.Route('/cron/readables/sync', handler=tasks.SyncReadables),
webapp2.Route('/cron/pull/github', handler=tasks.SyncGithub),
webapp2.Route('/cron/pull/google_fit', handler=tasks.SyncFromGoogleFit),
webapp2.Route('/cron/push/bigquery', handler=tasks.PushToBigQuery),
webapp2.Route('/cron/reports/delete_old', handler=tasks.DeleteOldReports),
webapp2.Route('/_ah/warmup', handler=tasks.WarmupHandler),
# Private app (react)
webapp2.Route(r'/<:.*>', handler=views.App, name="PrivateApp"),
], debug=True,
config=config)
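# --- Illustrative aside (not part of the original app) -----------------------
# The route table above relies on webapp2's handler_method dispatch: one
# RequestHandler class, one method per route. A minimal sketch with made-up
# names (webapp2 is already imported at the top of this file):
class _ExampleAPI(webapp2.RequestHandler):
    def list(self):
        self.response.write("listing")
# A matching route entry would look like:
#   webapp2.Route('/api/example', handler=_ExampleAPI, handler_method="list", methods=["GET"])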
blob_id: 56c39364a4aab99a9b126d3939f7d7bc87c9b207 | directory_id: 95fc101e34e54c35ea8ce3d572078951a3cf3a18 | content_id: 0a614173dc6e3f2ca334322a7287b1c9557af4b4
path: /data/fileList.py | filename: fileList.py | extension: py | length_bytes: 2,815 | language: Python | src_encoding: UTF-8
repo_name: SoftwareGift/FeatherNets_Face-Anti-spoofing-Attack-Detection-Challenge-CVPR2019 | branch_name: refs/heads/regression | snapshot_id: 61e157820ea7dfefac2104d94a542612212b0117 | revision_id: 220d5398a5da3147546fec1f2ca4b297f22cf39f
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: Python
visit_date: 2023-08-14T07:31:11.540484 | revision_date: 2019-09-25T13:08:36 | committer_date: 2019-09-25T13:08:36 | gha_created_at: 2019-03-02T02:29:10 | gha_event_created_at: 2020-10-06T18:32:41
github_id: 173,394,014 | star_events_count: 896 | fork_events_count: 288 | is_vendor: false | is_generated: false
# coding: utf-8
# # Use CASIA-SURF training dataset and our private dataset for training
# In[1]:
from pathlib import Path  # import Path from pathlib
import os
# data_dir = os.getcwd() + '/our_filelist'
# txt_dir=[i for i in list(Path(data_dir).glob("**/2*.txt")) ]#
# Use CASIA-SURF training data and our private data
# str1 = '/home/zp/disk1T/CASIASURF/data'
# str2 = os.getcwd()
# str3 = '/home/zp/disk1T/TSNet-LW/data'
# for i in range(len(txt_dir)):
# s = str(txt_dir[i]).replace('[','').replace(']','')  # strip the brackets; these two lines are optional depending on the data
# s2 = s.replace("'",'').replace('our_filelist','')
# fp = open(s2,'w')
# with open(s,'r') as f:
# lines = f.read().splitlines()
# for i in lines:
# i = i.replace(str1,str2)
# i = i.replace(str3,str2)
# fp.write( i + '\n')
# fp.close()
# # Use CASIA-SURF Val data for val
# Use CASIA-SURF training data for training
import fileinput
rgb = open('./rgb_train.txt','a')
depth = open('./depth_train.txt','a')
ir = open('./ir_train.txt','a')
label = open('./label_train.txt','a')
pwd = os.getcwd() +'/'  # the train data path
for line in fileinput.input("train_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
import fileinput
rgb = open('./rgb_val.txt','a')
depth = open('./depth_val.txt','a')
ir = open('./ir_val.txt','a')
label = open('./label_val.txt','a')
pwd = os.getcwd() +'/'# the val data path
for line in fileinput.input("val_private_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
# Use CASIA-SURF Test data for test
# To make it easier for you to test, prepare the label for the test set.
import fileinput
rgb = open('./rgb_test.txt','a')
depth = open('./depth_test.txt','a')
ir = open('./ir_test.txt','a')
label = open('./label_test.txt','a')
pwd = os.getcwd() +'/'  # the test data path
for line in fileinput.input("test_private_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
# In the test phase, we use the IR data for training
# replace '/home/zp/disk1T/libxcam-testset/'
f = open('ir_final_train.txt','w')
ir_file = 'ir_final_train_tmp.txt'
s = '/home/zp/disk1T/libxcam-testset/data'
import os
dir_pwd = os.getcwd()
with open(ir_file,'r') as fp:
lines = fp.read().splitlines()
for line in lines:
line = line.replace(s,dir_pwd)
f.write(line + '\n')
f.close()
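# --- Illustrative refactor sketch (not part of the original script) ----------
# The three blocks above repeat the same split-writing logic; a hedged helper
# that captures it once (uses os and fileinput imported above; not called here):
def write_split_lists(list_file, suffix):
    pwd = os.getcwd() + '/'
    rgb = open('./rgb_' + suffix + '.txt', 'a')
    depth = open('./depth_' + suffix + '.txt', 'a')
    ir = open('./ir_' + suffix + '.txt', 'a')
    label = open('./label_' + suffix + '.txt', 'a')
    for line in fileinput.input(list_file):
        parts = line.split(' ')
        rgb.write(pwd + parts[0] + '\n')
        depth.write(pwd + parts[1] + '\n')
        ir.write(pwd + parts[2] + '\n')
        label.write(parts[3])
    rgb.close()
    depth.close()
    ir.close()
    label.close()
# e.g. write_split_lists("train_list.txt", "train")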
blob_id: be925726af31738a735d744d9831da9008a897df | directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2 | content_id: 706da5a7ebf08193149604b15df21ba9976a04b1
path: /SimG4CMS/HcalTestBeam/test/python/run2006_37_cfg.py | filename: run2006_37_cfg.py | extension: py | length_bytes: 5,616 | language: Python | src_encoding: UTF-8
repo_name: cms-sw/cmssw | branch_name: refs/heads/master | snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58 | revision_id: 19c178740257eb48367778593da55dcad08b7a4f
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: C++
visit_date: 2023-08-23T21:57:42.491143 | revision_date: 2023-08-22T20:22:40 | committer_date: 2023-08-22T20:22:40 | gha_created_at: 2013-06-26T14:09:07 | gha_event_created_at: 2023-09-14T19:14:28
github_id: 10,969,551 | star_events_count: 1,006 | fork_events_count: 3,696 | is_vendor: false | is_generated: false
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_h2tb_cff import h2tb
process = cms.Process("PROD", h2tb)
process.load('SimG4CMS.HcalTestBeam.TB2006Geometry37XML_cfi')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Geometry.EcalCommonData.ecalSimulationParameters_cff")
process.load('Geometry.HcalTestBeamData.hcalDDDSimConstants_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
if hasattr(process,'MessageLogger'):
process.MessageLogger.HCalGeom=dict()
process.MessageLogger.HcalSim=dict()
process.TFileService = cms.Service("TFileService",
fileName = cms.string('hcaltb06_37.root')
)
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
beamPosition = -800.0
process.common_beam_direction_parameters = cms.PSet(
MinE = cms.double(50.0),
MaxE = cms.double(50.0),
PartID = cms.vint32(-211),
MinEta = cms.double(0.2175),
MaxEta = cms.double(0.2175),
MinPhi = cms.double(0.15708),
MaxPhi = cms.double(0.15708),
BeamPosition = cms.double(beamPosition)
)
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1)
)
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
process.common_beam_direction_parameters,
),
Verbosity = cms.untracked.int32(0),
AddAntiParticle = cms.bool(False)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.o1 = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('sim2006_37.root')
)
process.Timing = cms.Service("Timing")
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
process.VtxSmeared = cms.EDProducer("BeamProfileVtxGenerator",
process.common_beam_direction_parameters,
VtxSmearedCommon,
BeamMeanX = cms.double(0.0),
BeamMeanY = cms.double(0.0),
BeamSigmaX = cms.double(0.0001),
BeamSigmaY = cms.double(0.0001),
Psi = cms.double(999.9),
GaussianProfile = cms.bool(False),
BinX = cms.int32(50),
BinY = cms.int32(50),
File = cms.string('beam.profile'),
UseFile = cms.bool(False),
TimeOffset = cms.double(0.)
)
process.testbeam = cms.EDAnalyzer("HcalTB06Analysis",
process.common_beam_direction_parameters,
ECAL = cms.bool(True),
TestBeamAnalysis = cms.PSet(
EHCalMax = cms.untracked.double(400.0),
ETtotMax = cms.untracked.double(400.0),
beamEnergy = cms.untracked.double(50.),
TimeLimit = cms.double(180.0),
EcalWidth = cms.double(0.362),
HcalWidth = cms.double(0.640),
EcalFactor = cms.double(1.0),
HcalFactor = cms.double(100.0),
MIP = cms.double(0.8),
Verbose = cms.untracked.bool(True),
MakeTree = cms.untracked.bool(True)
)
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.testbeam)
process.outpath = cms.EndPath(process.o1)
process.g4SimHits.NonBeamEvent = True
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/FTFP_BERT_EMM'
process.g4SimHits.Physics.Region = 'HcalRegion'
process.g4SimHits.Physics.DefaultCutValue = 1.
process.g4SimHits.StackingAction.KillGamma = False
process.g4SimHits.CaloSD.BeamPosition = beamPosition
process.g4SimHits.ECalSD.UseBirkLaw = True
process.g4SimHits.ECalSD.BirkL3Parametrization = True
process.g4SimHits.ECalSD.BirkC1 = 0.033
process.g4SimHits.ECalSD.BirkC2 = 0.0
process.g4SimHits.ECalSD.SlopeLightYield = 0.02
process.g4SimHits.HCalSD.UseBirkLaw = True
process.g4SimHits.HCalSD.BirkC1 = 0.0052
process.g4SimHits.HCalSD.BirkC2 = 0.142
process.g4SimHits.HCalSD.BirkC3 = 1.75
process.g4SimHits.HCalSD.UseLayerWt = False
process.g4SimHits.HCalSD.WtFile = ' '
process.g4SimHits.HCalSD.UseShowerLibrary = False
process.g4SimHits.HCalSD.TestNumberingScheme = False
process.g4SimHits.HCalSD.UseHF = False
process.g4SimHits.HCalSD.ForTBH2 = True
blob_id: 7b510bb3a2fdbf2d361fdc3ce8bff714c351f526 | directory_id: 31d9c3ee15671d45d8fdb8533a35be74bad58c18 | content_id: 7d7a740356c48762080c220022b66691f5a0977b
path: /montreal_forced_aligner/command_line/anchor.py | filename: anchor.py | extension: py | length_bytes: 677 | language: Python | src_encoding: UTF-8
repo_name: MontrealCorpusTools/Montreal-Forced-Aligner | branch_name: refs/heads/main | snapshot_id: 6547a9b4176fe2663d1210c296e95c9073b4de52 | revision_id: 7fbccdd1df52c606704332c6a78731b4d51291eb
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-01T12:17:22.009722 | revision_date: 2023-08-30T08:27:16 | committer_date: 2023-08-30T08:27:16 | gha_created_at: 2015-10-26T17:02:06 | gha_event_created_at: 2023-09-12T03:57:04
github_id: 44,983,969 | star_events_count: 1,078 | fork_events_count: 248 | is_vendor: false | is_generated: false
"""Command line functions for launching anchor annotation"""
from __future__ import annotations
import logging
import sys
import rich_click as click
__all__ = ["anchor_cli"]
logger = logging.getLogger("mfa")
@click.command(name="anchor", short_help="Launch Anchor")
@click.help_option("-h", "--help")
def anchor_cli(*args, **kwargs) -> None: # pragma: no cover
"""
Launch Anchor Annotator (if installed)
"""
try:
from anchor.command_line import main
except ImportError:
logger.error(
"Anchor annotator utility is not installed, please install it via pip install anchor-annotator."
)
sys.exit(1)
main()
blob_id: c4344b4eea51867e7c8b56b27ffd7b658827b802 | directory_id: 39b021eabbb8e3be1734cf92fd641965a796b0eb | content_id: 046986b5fed107909c2aca2702832f49efda43fe
path: /contrib/nn/constraints.py | filename: constraints.py | extension: py | length_bytes: 2,584 | language: Python | src_encoding: UTF-8
repo_name: deepchem/deepchem | branch_name: refs/heads/master | snapshot_id: 066cbf42316b2f6bec0166727e0264a485d5266f | revision_id: ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-02T01:32:17.860111 | revision_date: 2023-08-31T18:49:00 | committer_date: 2023-08-31T18:49:00 | gha_created_at: 2015-09-24T23:20:28 | gha_event_created_at: 2023-09-14T19:10:44
github_id: 43,098,215 | star_events_count: 4,876 | fork_events_count: 1,905 | is_vendor: false | is_generated: false
"""Place constraints on models."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import tensorflow as tf  # needed for the tf.square / tf.cast calls below
from deepchem.nn import model_ops
from deepchem.nn.activations import get_from_module
class Constraint(object):
def __call__(self, p):
return p
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Parameters
----------
m: the maximum norm for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape (input_dim, output_dim),
set axis to 0 to constrain each weight vector
of length `(input_dim,)`.
# References
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
"""
def __init__(self, m=2, axis=0):
self.m = m
self.axis = axis
def __call__(self, p):
norms = model_ops.sqrt(model_ops.sum(
tf.square(p), axis=self.axis, keepdims=True))
desired = model_ops.clip(norms, 0, self.m)
p *= (desired / (model_ops.epsilon() + norms))
return p
class NonNeg(Constraint):
"""Constrains the weights to be non-negative.
"""
def __call__(self, p):
p *= tf.cast(p >= 0., tf.float32)
return p
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
# Arguments
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Convolution2D` layer with `dim_ordering="tf"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
def __call__(self, p):
return p / (model_ops.epsilon() + model_ops.sqrt(
model_ops.sum(tf.square(p), axis=self.axis, keepdims=True)))
# Aliases.
maxnorm = MaxNorm
nonneg = NonNeg
unitnorm = UnitNorm
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'constraint',
instantiate=True, kwargs=kwargs)
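# --- Illustrative aside (not part of the original module) --------------------
# The MaxNorm math written out with plain NumPy (standalone sketch; the real
# constraint operates on TensorFlow tensors via model_ops):
def _max_norm_numpy(w, m=2.0, axis=0, eps=1e-7):
  import numpy as np
  norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
  desired = np.clip(norms, 0.0, m)
  return w * (desired / (eps + norms))
# e.g. a column [3, 4] (norm 5) is rescaled to roughly [1.2, 1.6] (norm 2) when m=2.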
blob_id: f99ff9fa5530605f0db7af2fd1cf7c380f111fd2 | directory_id: 32809f6f425bf5665fc19de2bc929bacc3eeb469 | content_id: 44630671e9bec0570742303eb2e78a1026dd82e2
path: /src/1186-Maximum-Subarray-Sum-with-One-Deletion/1186.py | filename: 1186.py | extension: py | length_bytes: 379 | language: Python | src_encoding: UTF-8
repo_name: luliyucoordinate/Leetcode | branch_name: refs/heads/master | snapshot_id: 9f6bf01f79aa680e2dff11e73e4d10993467f113 | revision_id: bcc04d49969654cb44f79218a7ef2fd5c1e5449a
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: C++
visit_date: 2023-05-25T04:58:45.046772 | revision_date: 2023-05-24T11:57:20 | committer_date: 2023-05-24T11:57:20 | gha_created_at: 2018-05-09T12:30:59 | gha_event_created_at: 2023-05-24T11:57:22
github_id: 132,753,892 | star_events_count: 1,575 | fork_events_count: 569 | is_vendor: false | is_generated: false
from typing import List  # needed for the List[int] annotation below
class Solution:
def maximumSum(self, arr: List[int]) -> int:
deleted = notDeleted = 0
res = float('-inf')
for i, a in enumerate(arr):
deleted = max(deleted + a, a)
if i > 0:
deleted = max(deleted, notDeleted)
notDeleted = max(notDeleted + a, a)
res = max(res, deleted)
return res
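# --- Illustrative check (not part of the original solution) ------------------
# `deleted` is the best sum ending at i with one element already removed, and
# `notDeleted` the best sum ending at i with nothing removed. For
# arr = [1, -2, 0, 3] the optimum is the whole array with the -2 deleted:
if __name__ == "__main__":
    print(Solution().maximumSum([1, -2, 0, 3]))  # -> 4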
blob_id: 057621c3f109ef60745780f7de42c6ad23b63c22 | directory_id: e3eecdd6c4795d5119abf6dc8c7633aef354b23f | content_id: 4225970beaf279c2a451d086db8ba4303032d283
path: /fio_plot/fiolib/jsonparsing.py | filename: jsonparsing.py | extension: py | length_bytes: 3,275 | language: Python | src_encoding: UTF-8
repo_name: louwrentius/fio-plot | branch_name: refs/heads/master | snapshot_id: 1d182ea039ce0ca460a0a9eb2acbc6a019cbfb05 | revision_id: 0b6418489b7be19385d515987e30500ef4e08457
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
visit_date: 2023-08-19T11:37:07.280468 | revision_date: 2023-07-06T11:55:59 | committer_date: 2023-07-06T11:55:59 | gha_created_at: 2017-04-04T19:15:27 | gha_event_created_at: 2023-08-03T19:54:53
github_id: 87,225,610 | star_events_count: 273 | fork_events_count: 85 | is_vendor: false | is_generated: false
import sys
from . import (
jsonparsing_support as jsonsupport
)
def printkeys(data, depth=0, maxdepth=3):
"""
For debugging only
"""
if depth <= maxdepth:
if isinstance(data, dict):
for key,value in data.items():
print(f"{'-' * depth} {key}")
printkeys(value, depth+1)
elif isinstance(data, list):
for item in data:
printkeys(item, depth+1)
def get_json_root_path(record):
rootpath = None
keys = record.keys()
if "jobs" in keys:
rootpath = "jobs"
if "client_stats" in keys:
rootpath = "client_stats"
if rootpath is None:
print("\nNo valid JSON root path found, this should never happen.\n")
return rootpath
def get_json_global_options(record):
options = {}
if "global options" in record.keys():
options = record["global options"]
return options
def sort_list_of_dictionaries(data):
sortedlist = sorted(data, key=lambda k: (int(k["iodepth"]), int(k["numjobs"])))
return sortedlist
def process_json_record(settings, directory, record, jsonrootpath, globaloptions):
joboptions = None
hosts = {}
jobs = []
for job in record[jsonrootpath]:
# This section is just to deal with the "All clients" job included in
# client / server JSON output
#
if job["jobname"] != "All clients":
job["job options"] = {**job["job options"], **globaloptions}
if not joboptions:
joboptions = job["job options"]
else:
job["job options"] = joboptions
job["hostname"] = "All clients"
#
# End of section
#
if jsonsupport.check_for_valid_hostname(job):
hostname = job["hostname"]
if hostname not in hosts.keys():
hosts[hostname] = []
row = jsonsupport.return_data_row(settings, job)
row["fio_version"] = record["fio version"]
if hosts:
hosts[hostname].append(row)
else:
jobs.append(row)
directory["data"].extend(jsonsupport.merge_job_data_hosts_jobs(settings, hosts, jobs))
def parse_json_data(settings, dataset):
"""
This function traverses the relevant JSON structure to gather data
and store it in a flat dictionary. We do this for each imported json file.
"""
for directory in dataset: # for directory in list of directories
directory["data"] = []
for record in directory["rawdata"]: # each record is the raw JSON data of a file in a directory
jsonrootpath = get_json_root_path(record)
globaloptions = get_json_global_options(record)
#for item in record["client_stats"]:
# if "job options" in item.keys():
# print(item["job options"]["iodepth"])
process_json_record(settings, directory, record, jsonrootpath, globaloptions)
#print("================================")
#print(directory["data"])
#for directory in dataset:
# for item in directory["data"]:
# print(item["iodepth"])
directory["data"] = sort_list_of_dictionaries(directory["data"])
return dataset
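# --- Illustrative aside (not part of the original module) --------------------
# get_json_root_path() distinguishes plain fio output ("jobs") from
# client/server output ("client_stats"); toy records for illustration only:
if __name__ == "__main__":
    print(get_json_root_path({"jobs": [], "fio version": "fio-3.x"}))        # -> jobs
    print(get_json_root_path({"client_stats": [], "global options": {}}))    # -> client_stats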
blob_id: 96daef61bc6c43e9b84ec0fd202aabf219dad429 | directory_id: f91124a8bbd0b719f0fd0b4e021e766ca8f5dbb0 | content_id: 16a8be7b17f8cc1d0d80e9d5975716d97c0ffde7
path: /tf_coder/value_search/value_search_main.py | filename: value_search_main.py | extension: py | length_bytes: 7,470 | language: Python | src_encoding: UTF-8
repo_name: google-research/tensorflow-coder | branch_name: refs/heads/master | snapshot_id: 0922c5ae4b22a84f1fc0f9545f2a433937cbfa17 | revision_id: dc7ff6080a8b2159d6f14c4a82e5332dcbf2f8ed
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-08-31T16:14:19.401996 | revision_date: 2023-02-24T23:15:19 | committer_date: 2023-02-24T23:15:19 | gha_created_at: 2020-08-22T23:13:07 | gha_event_created_at: 2023-02-24T23:16:56
github_id: 289,581,495 | star_events_count: 288 | fork_events_count: 31 | is_vendor: false | is_generated: false
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Exhaustive value search (enumerating by weight of expression)."""
import gc
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Must happen before importing tf.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # CPU is faster than GPU.
import sys # pylint: disable=g-import-not-at-top
from absl import app
from absl import flags
import numpy as np
from scipy.stats import mstats
from tf_coder.benchmarks import all_benchmarks
from tf_coder.benchmarks import autopandas_benchmarks
from tf_coder.benchmarks import google_benchmarks
from tf_coder.benchmarks import stackoverflow_benchmarks
from tf_coder.datasets import collect_tensor_data
from tf_coder.models import tensor_features_model
from tf_coder.natural_language import description_handler_factory
from tf_coder.value_search import value_search
from tf_coder.value_search import value_search_settings as settings_module
FLAGS = flags.FLAGS
flags.DEFINE_string('benchmark_name', 'ALL',
'The name of a benchmark to run, or "ALL".')
flags.DEFINE_multi_string('settings',
['timeout=300'],
'Settings to override the defaults.')
flags.DEFINE_string(
'json_output',
'',
'Where the results json file should be written. It will only be written if '
'all benchmarks are run. Use an empty string to avoid writing this file.')
flags.DEFINE_string(
'notes',
'',
'Any notes to write into the results json file.')
def benchmark_name_validator(benchmark_name):
"""Checks that benchmark_name is "ALL", or refers to exactly one benchmark."""
return (benchmark_name == 'ALL' or
all_benchmarks.find_benchmark_with_name(benchmark_name) is not None)
flags.register_validator('benchmark_name', benchmark_name_validator,
message=('benchmark_name must be "ALL" or refer to '
'exactly one benchmark.'))
def run_on_all_benchmarks(settings, description_handler, json_output,
benchmark_name, notes):
"""Runs value search on all benchmarks, printing results to stdout."""
benchmark_count = 0
benchmark_success = 0
unsolved_benchmarks = []
solution_times = [] # Only including successful tasks.
results_json = {
'benchmark_name': benchmark_name,
'settings': settings.as_dict(),
'notes': notes,
'results': [],
}
if (settings.tensor_model.config_path and
settings.tensor_model.checkpoint_path):
tensor_config = tensor_features_model.load_config(
settings.tensor_model.config_path)
tensor_model = tensor_features_model.get_model(tensor_config)
checkpoint = tensor_features_model.create_checkpoint(tensor_model)
checkpoint.restore(settings.tensor_model.checkpoint_path).expect_partial()
# Warm up. Running the model for the first time takes an extra ~10 seconds.
print('Warming up the tensor features model...')
value_search.operation_multipliers_from_tensor_model(
all_benchmarks.find_benchmark_with_name('simple_cast'),
tensor_model, tensor_config, settings)
print('Finished warming up.')
else:
tensor_config = None
tensor_model = None
print('=' * 80)
if benchmark_name == 'ALL':
# Only run on benchmarks from these important modules.
modules = [google_benchmarks, stackoverflow_benchmarks,
autopandas_benchmarks]
else:
# Allow searching by name among even more benchmark modules.
modules = None
for benchmark in all_benchmarks.get_chosen_benchmarks(
benchmark_name, modules=modules):
gc.collect()
print('Performing value search for benchmark {}.\n'
.format(benchmark.name))
benchmark_count += 1
result = value_search.run_value_search(
benchmark=benchmark,
settings=settings,
description_handler=description_handler,
tensor_model=tensor_model,
tensor_config=tensor_config)
if settings.printing.statistics:
print('\nOperation statistics:\n{}'.format(
result.statistics.statistics_as_string(
num_unique_values=len(result.value_set),
elapsed_time=result.total_time,
sort_by_time=settings.printing.statistics_sort_by_time)))
solutions = result.solutions
if solutions:
first_solution = solutions[0]
print('\nBest solution of weight {} found in {:.2f} sec:\n{}'.format(
first_solution.weight, first_solution.time,
first_solution.expression))
benchmark_success += 1
solution_times.append(first_solution.time)
else:
unsolved_benchmarks.append(benchmark)
print('=' * 80)
sys.stdout.flush()
results_json['results'].append({
'name': benchmark.name,
'solved': bool(solutions),
'solution': solutions[0].expression if solutions else None,
'solution_weight': solutions[0].weight if solutions else None,
'time': solutions[0].time if solutions else None,
'operations': (
[op.name for op in collect_tensor_data.extract_operations(
solutions[0].value)]
if solutions else None),
})
solve_time_total = sum(solution_times)
solve_time_mean = np.mean(solution_times)
solve_time_geometric_mean = mstats.gmean(solution_times)
results_json['num_benchmarks'] = benchmark_count
results_json['num_solved'] = benchmark_success
results_json['solve_time_total'] = solve_time_total
results_json['solve_time_mean'] = solve_time_mean
results_json['solve_time_geometric_mean'] = solve_time_geometric_mean
print('Solved {} out of {} benchmarks in {:.2f} sec.'.format(
benchmark_success, benchmark_count, solve_time_total))
print('\n'
'Arithmetic mean of solve times: {:.2f} sec\n'
'Geometric mean of solve times: {:.2f} sec\n'.format(
solve_time_mean, solve_time_geometric_mean))
print('Unsolved benchmarks:')
for unsolved in unsolved_benchmarks:
print('Name: {}, target program: {}'.format(
unsolved.name, unsolved.target_program))
print()
if json_output and benchmark_name == 'ALL':
with open(json_output, 'w') as json_file:
json.dump(results_json, json_file,
indent=4, sort_keys=True, separators=(',', ': '))
json_file.write('\n')
print('Wrote JSON results to {}.'.format(json_output))
else:
print('Did not write JSON results file.')
def main(unused_argv):
settings = settings_module.from_list(FLAGS.settings)
description_handler = description_handler_factory.create_handler(
settings.description_handler_name)
print('Description handler: {!r}\n'.format(description_handler))
run_on_all_benchmarks(settings, description_handler, FLAGS.json_output,
FLAGS.benchmark_name, FLAGS.notes)
if __name__ == '__main__':
app.run(main)
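# --- Illustrative aside (not part of the original script) --------------------
# run_on_all_benchmarks reports both an arithmetic and a geometric mean of the
# per-task solve times; for example, for times [1.0, 4.0, 16.0]:
#   np.mean([1.0, 4.0, 16.0])                  -> 7.0
#   scipy.stats.mstats.gmean([1.0, 4.0, 16.0]) -> 4.0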
blob_id: e63ff354ef41479b097fb8e8ef8ed878ebf4e352 | directory_id: da1500e0d3040497614d5327d2461a22e934b4d8 | content_id: 9b26c5ee674ffa691d7e6bd76705fdf333c53124
path: /third_party/angle/scripts/generate_new_renderer.py | filename: generate_new_renderer.py | extension: py | length_bytes: 9,340 | language: Python | src_encoding: UTF-8
repo_name: youtube/cobalt | branch_name: refs/heads/main | snapshot_id: 34085fc93972ebe05b988b15410e99845efd1968 | revision_id: acefdaaadd3ef46f10f63d1acae2259e4024d383
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "LGPL-2.0-or-later", "Apache-2.0", "MIT"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: null
visit_date: 2023-09-01T13:09:47.225174 | revision_date: 2023-09-01T08:54:54 | committer_date: 2023-09-01T08:54:54 | gha_created_at: 2016-01-20T18:11:34 | gha_event_created_at: 2023-09-14T21:50:50
github_id: 50,049,789 | star_events_count: 169 | fork_events_count: 80 | is_vendor: false | is_generated: false
#!/usr/bin/python2
#
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_new_renderer.py:
# Utility script to generate stubs for a new Renderer class.
# Usage: generate_new_renderer.py <renderer name> <renderer suffix>
# Renderer name is the folder for the renderer subdirectory
# Renderer suffix is the abbreviation to append after the class names.
#
# The script is fairly robust but may not work for all new methods or
# other unexpected features. It expects that abstract methods are all
# grouped after the public destructor or after the private
# DISALLOW_COPY_AND_ASSIGN macro.
import os, sys, re, string, datetime
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' <renderer dir name> <renderer class suffix>')
sys.exit(1)
renderer_name = sys.argv[1]
renderer_suffix = sys.argv[2]
# change to the renderer directory
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), "..", "src", "libANGLE", "renderer"))
# ensure subdir exists
if not os.path.isdir(renderer_name):
os.mkdir(renderer_name)
impl_classes = [
'Buffer',
'Compiler',
'Context',
'Device',
'Display',
'FenceNV',
'FenceSync',
'Framebuffer',
'Image',
'Path',
'Program',
'Query',
'Renderbuffer',
'Sampler',
'Shader',
'Surface',
'Texture',
'TransformFeedback',
'VertexArray',
]
h_file_template = """//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// $TypedImpl.h:
// Defines the class interface for $TypedImpl, implementing $BaseImpl.
//
#ifndef LIBANGLE_RENDERER_${RendererNameCaps}_${TypedImplCaps}_H_
#define LIBANGLE_RENDERER_${RendererNameCaps}_${TypedImplCaps}_H_
#include "libANGLE/renderer/$BaseImpl.h"
namespace rx
{
class $TypedImpl : public $BaseImpl
{
public:
$TypedImpl($ConstructorParams);
~$TypedImpl() override;
$ImplMethodDeclarations$PrivateImplMethodDeclarations};
} // namespace rx
#endif // LIBANGLE_RENDERER_${RendererNameCaps}_${TypedImplCaps}_H_
"""
cpp_file_template = """//
// Copyright $Year The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// $TypedImpl.cpp:
// Implements the class methods for $TypedImpl.
//
#include "libANGLE/renderer/$RendererName/$TypedImpl.h"
#include "common/debug.h"
namespace rx
{
$TypedImpl::$TypedImpl($ConstructorParams) : $BaseImpl($BaseContructorArgs)
{
}
$TypedImpl::~$TypedImpl()
{
}
$ImplMethodDefinitions
} // namespace rx
"""
def generate_impl_declaration(impl_stub):
# ensure the wrapped lines are aligned vertically
temp = re.sub(r'\n ', '\n', impl_stub)
return temp + ' override;\n'
def generate_impl_definition(impl_stub, typed_impl):
function_signature = impl_stub.strip()
# strip comments
function_signature = re.sub(r'\/\/[^\n]*\n', '', function_signature).strip()
prog = re.compile(r'^(.+[ \*\&])([^ \(\*\&]+\()')
return_value = prog.match(function_signature).group(1)
# ensure the wrapped lines are aligned vertically
spaces = ' ' * len(typed_impl)
function_signature = re.sub(r'\n ', '\n' + spaces, function_signature)
# add class scoping
function_signature = prog.sub(r'\1' + typed_impl + r'::\2', function_signature)
function_signature += '\n'
return_statement = ''
return_type = return_value.strip()
if return_type != 'void':
# specialized return values for Errors, pointers, etc
if return_type == 'gl::Error':
return_statement = ' return gl::InvalidOperation();\n'
elif return_type == 'egl::Error':
return_statement = ' return egl::EglBadAccess();\n'
elif return_type == 'LinkResult':
return_statement = ' return gl::InvalidOperation();\n'
elif re.search(r'\*$', return_type):
return_statement = ' return static_cast<' + return_type + '>(0);\n'
elif re.search(r'const ([^ \&]+) \&$', return_type):
obj_type = re.search(r'const ([^ \&]+) \&$', return_type).group(1)
return_statement = ' static ' + obj_type + ' local;\n' + ' return local;\n'
else:
return_statement = ' return ' + return_type + '();\n'
body = '{\n' + ' UNIMPLEMENTED();\n' + return_statement + '}\n'
return '\n' + function_signature + body
def get_constructor_args(constructor):
params = re.search(r'\((.*)\)', constructor).group(1)
args = ', '.join(re.findall(r'[^\w]?(\w+)(?:\,|$)', params))
return params, args
def parse_impl_header(base_impl):
impl_h_file_path = base_impl + '.h'
impl_h_file = open(impl_h_file_path, 'r')
# extract impl stubs
copy = False
copy_private = False
impl_stubs = ''
private_impl_stubs = ''
constructor = base_impl + '() {}'
for line in impl_h_file:
clean_line = line.strip()
match = re.search(r'^(?:explicit )?(' + base_impl + r'\([^\)]*\))', clean_line)
if match:
constructor = match.group(1)
# begin capture when reading the destructor.
# begin capture also in the private scope (a few special cases)
# end capture when we reach a non-virtual function, or different scope.
if '~' + base_impl in clean_line:
copy = True
copy_private = False
elif 'private:' in clean_line:
copy = False
copy_private = True
elif ';' in clean_line and ' = 0' not in clean_line:
copy = False
copy_private = False
elif '}' in clean_line or 'protected:' in clean_line or 'private:' in clean_line:
copy = False
copy_private = False
elif copy:
impl_stubs += line
elif copy_private:
private_impl_stubs += line
impl_h_file.close()
return impl_stubs, private_impl_stubs, constructor
def get_base_class(base_impl):
impl_h_file_path = base_impl + '.h'
with open(impl_h_file_path, 'r') as impl_h_file:
for line in impl_h_file:
match = re.search(r'^class ' + base_impl + r' : public (\w+)', line)
if match:
return match.group(1)
return False
for impl_class in impl_classes:
base_impl = impl_class + 'Impl'
typed_impl = impl_class + renderer_suffix
h_file_path = os.path.join(renderer_name, typed_impl + '.h')
cpp_file_path = os.path.join(renderer_name, typed_impl + '.cpp')
h_file = open(h_file_path, 'w')
cpp_file = open(cpp_file_path, 'w')
# extract impl stubs
impl_stubs, private_impl_stubs, constructor = parse_impl_header(base_impl)
# Handle base classes, skipping angle::NonCopyable.
base_class = get_base_class(base_impl)
if base_class and base_class != 'angle':
base_impl_stubs, base_private_impl_stubs, base_constructor = parse_impl_header(base_class)
impl_stubs += base_impl_stubs
private_impl_stubs += base_private_impl_stubs
impl_method_declarations = ''
impl_method_definitions = ''
private_impl_method_declarations = ''
for impl_stub in impl_stubs.split(' = 0;\n'):
# use 'virtual' to identify the strings with functions
if 'virtual' in impl_stub:
temp = re.sub(r'virtual ', '', impl_stub)
impl_method_declarations += generate_impl_declaration(temp)
impl_method_definitions += generate_impl_definition(temp, typed_impl)
for impl_stub in private_impl_stubs.split(' = 0;\n'):
# use 'virtual' to identify the strings with functions
if 'virtual' in impl_stub:
temp = re.sub(r'virtual ', '', impl_stub)
private_impl_method_declarations += generate_impl_declaration(temp)
impl_method_definitions += generate_impl_definition(temp, typed_impl)
constructor_params, base_constructor_args = get_constructor_args(constructor)
if private_impl_method_declarations:
private_impl_method_declarations = "\n private:\n" + private_impl_method_declarations
substitutions = {
'BaseImpl': base_impl,
'TypedImpl': typed_impl,
'TypedImplCaps': typed_impl.upper(),
'RendererName': renderer_name,
'RendererNameCaps': renderer_name.upper(),
'ImplMethodDeclarations': impl_method_declarations,
'ImplMethodDefinitions': impl_method_definitions,
'ConstructorParams': constructor_params,
'BaseContructorArgs': base_constructor_args,
'PrivateImplMethodDeclarations': private_impl_method_declarations,
'Year': datetime.datetime.now().year,
}
h_file.write(string.Template(h_file_template).substitute(substitutions))
cpp_file.write(string.Template(cpp_file_template).substitute(substitutions))
h_file.close()
cpp_file.close()
# Print a block of source files to add to the build
print("Generated files:")
for impl_class in impl_classes:
path = "libANGLE/renderer/" + renderer_name + "/" + impl_class + renderer_suffix
print('\'' + path + ".cpp\',")
print('\'' + path + ".h\',")
==== /omniduct/caches/filesystem.py (repo: airbnb/omniduct, license: MIT) ====
import six
import yaml
from interface_meta import override
from omniduct.filesystems.base import FileSystemClient
from omniduct.filesystems.local import LocalFsClient
from .base import Cache
class FileSystemCache(Cache):
"""
An implementation of `Cache` that wraps around a `FilesystemClient`.
"""
PROTOCOLS = ['filesystem_cache']
@override
def _init(self, path, fs=None):
"""
path (str): The top-level path of the cache in the filesystem.
fs (FileSystemClient, str): The filesystem client to use as the
datastore of this cache. If not specified, this will default to the
local filesystem using `LocalFsClient`. If specified as a string,
and connected to a `DuctRegistry`, upon first use an attempt will be
made to look up a `FileSystemClient` instance in the registry by
this name.
"""
self.fs = fs or LocalFsClient()
self.path = path
# Currently config is not used, but will be in future versions
self._config = None
self.connection_fields += ('fs',)
@override
def _prepare(self):
Cache._prepare(self)
if self.registry is not None:
if isinstance(self.fs, six.string_types):
self.fs = self.registry.lookup(self.fs, kind=FileSystemCache.Type.FILESYSTEM)
assert isinstance(self.fs, FileSystemClient), "Provided filesystem is not an instance of `omniduct.filesystems.base.FileSystemClient`."
self._prepare_cache()
def _prepare_cache(self):
config_path = self.fs.path_join(self.path, 'config')
if self.fs.exists(config_path):
with self.fs.open(config_path) as fh:
try:
return yaml.safe_load(fh)
except yaml.error.YAMLError:
raise RuntimeError(
"Path nominated for cache ('{}') has a corrupt "
"configuration. Please manually empty or delete this "
"path cache, and try again.".format(self.path)
)
# Cache needs initialising
if self.fs.exists(self.path):
if not self.fs.isdir(self.path):
raise RuntimeError(
"Path nominated for cache ('{}') is not a directory.".format(self.path)
)
elif self.fs.listdir(self.path):
raise RuntimeError(
"Cache directory ({}) needs to be initialised, and is not "
"empty. Please manually delete and/or empty this path, and "
"try again.".format(self.path)
)
else: # Create cache directory
self.fs.mkdir(self.path, recursive=True, exist_ok=True)
# Write config file to mark cache as initialised
with self.fs.open(config_path, 'w') as fh:
yaml.safe_dump({'version': 1}, fh, default_flow_style=False)
return {'version': 1}
@override
def _connect(self):
self.fs.connect()
@override
def _is_connected(self):
return self.fs.is_connected()
@override
def _disconnect(self):
return self.fs.disconnect()
# Implementations for abstract methods in Cache
@override
def _namespace(self, namespace):
if namespace is None:
return '__default__'
assert isinstance(namespace, str) and namespace != 'config'
return namespace
@override
def _get_namespaces(self):
return [d for d in self.fs.listdir(self.path) if d != 'config']
@override
def _has_namespace(self, namespace):
return self.fs.exists(self.fs.path_join(self.path, namespace))
@override
def _remove_namespace(self, namespace):
return self.fs.remove(self.fs.path_join(self.path, namespace), recursive=True)
@override
def _get_keys(self, namespace):
return self.fs.listdir(self.fs.path_join(self.path, namespace))
@override
def _has_key(self, namespace, key):
return self.fs.exists(self.fs.path_join(self.path, namespace, key))
@override
def _remove_key(self, namespace, key):
return self.fs.remove(self.fs.path_join(self.path, namespace, key), recursive=True)
@override
def _get_bytecount_for_key(self, namespace, key):
path = self.fs.path_join(self.path, namespace, key)
return sum([
f.bytes
for f in self.fs.dir(path)
])
@override
def _get_stream_for_key(self, namespace, key, stream_name, mode, create):
path = self.fs.path_join(self.path, namespace, key)
if create:
self.fs.mkdir(path, recursive=True, exist_ok=True)
return self.fs.open(self.fs.path_join(path, stream_name), mode=mode)
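# On-disk layout produced by the methods above (illustrative; '<root>' is
# whatever `path` was passed to _init):
#
#   <root>/config                      # yaml marker written by _prepare_cache ({version: 1})
#   <root>/<namespace>/                # one directory per namespace ('__default__' by default)
#   <root>/<namespace>/<key>/          # one directory per cached key
#   <root>/<namespace>/<key>/<stream>  # streams opened via _get_stream_for_key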
==== /research/cv/SemanticHumanMatting/eval.py (repo: mindspore-ai/models, license: Apache-2.0 and others) ====
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation for Semantic Human Matting"""
import os
import time
import cv2
import numpy as np
from mindspore import dtype as mstype
from mindspore import Tensor, context, load_checkpoint, load_param_into_net
import src.model.network as network
def safe_makedirs(path_dir):
if not os.path.exists(path_dir):
os.makedirs(path_dir)
def safe_modify_file_name(file_name):
if not os.path.exists(file_name):
if "jpg" in file_name:
return file_name.replace("jpg", "png")
return file_name.replace("png", "jpg")
return file_name
def seg_process(cfg, image, image_gt, net):
"""Perform inference and calculate metric"""
origin_h, origin_w, _ = image.shape
# resize and normalize
image_resize = cv2.resize(image, (cfg["size"], cfg["size"]), interpolation=cv2.INTER_CUBIC)
image_resize = (
image_resize
- (
104.0,
112.0,
121.0,
)
) / 255.0
# construct input tensor
x = np.expand_dims(image_resize, axis=3)
inputs = np.transpose(x, (3, 2, 0, 1))
# inference
trimap, alpha = net(Tensor(inputs, dtype=mstype.float32))
# generate mask
trimap_np = trimap[0, 0, :, :].asnumpy()
trimap_np = cv2.resize(trimap_np, (origin_w, origin_h), interpolation=cv2.INTER_CUBIC)
mask_result = np.multiply(trimap_np[..., np.newaxis], image)
trimap_1 = mask_result.copy()
mask_result[trimap_1 < 10] = 255
mask_result[trimap_1 >= 10] = 0
# generate foreground image
alpha_np = alpha[0, 0, :, :].asnumpy()
alpha_fg = cv2.resize(alpha_np, (origin_w, origin_h), interpolation=cv2.INTER_CUBIC)
fg = np.multiply(alpha_fg[..., np.newaxis], image)
# generate metric Sad (original image size)
image_gt = image_gt[:, :, 0]
image_gt = image_gt.astype(np.float64) / 255
sad = np.abs(alpha_fg - image_gt).sum() / 1000
return mask_result, fg, sad
def camera_seg(cfg, net):
"""Perform inference, save result and calculate metric"""
test_pic_path = cfg["test_pic_path"]
output_path = cfg["output_path"]
safe_makedirs(output_path)
f_log = open(os.path.join(output_path, "log.txt"), "w")
time_0 = time.time()
file_test_list = os.path.join(test_pic_path, "test", "test.txt")
list_sad = list()
with open(file_test_list) as f:
lines = f.readlines()
for line in lines:
line = line.strip()
img_clip = os.path.join(test_pic_path, "test", "clip_img", line.replace("matting", "clip"))
img_alpha = os.path.join(test_pic_path, "test", "alpha", line)
img_clip = safe_modify_file_name(img_clip)
img_alpha = safe_modify_file_name(img_alpha)
path_save = os.path.join(output_path, "clip_img_rst", os.path.split(line)[0].replace("matting", "clip"))
safe_makedirs(path_save)
img_src = cv2.imread(img_clip)
img_gt = cv2.imread(img_alpha)
mask_result, fg, sad = seg_process(cfg, img_src, img_gt, net)
file_name = os.path.split(line)[1]
cv2.imwrite(os.path.join(path_save, file_name), mask_result)
cv2.imwrite(os.path.join(path_save, file_name.split(".")[0] + "_fg.jpg"), fg)
log = "{} sad: {}".format(os.path.join(path_save, file_name), sad)
print(log)
f_log.write(log + "\n")
list_sad.append(sad)
log = "Total time: {}, ave_sad: {}".format(time.time() - time_0, np.mean(list_sad))
print(log)
f_log.write(log)
def run_test(cfg):
device_id = int(os.getenv("DEVICE_ID", "0"))
print("device_id: {}".format(device_id))
context.set_context(
mode=context.GRAPH_MODE,
device_id=device_id,
device_target=cfg["device_target"],
reserve_class_name_in_scope=False,
)
net = network.net()
print(cfg["model"])
param_dict = load_checkpoint(cfg["model"])
load_param_into_net(net, param_dict)
net.set_train(False)
camera_seg(cfg, net)
if __name__ == "__main__":
from src.config import get_args, get_config_from_yaml
run_test(get_config_from_yaml(get_args())["test"])
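# Shape walk-through of the preprocessing in seg_process (illustrative; the
# 320x320 size and the dummy frame are assumptions, cfg["size"] sets the real value):
#
#   frame = np.zeros((480, 640, 3), np.uint8)                         # any H x W x 3 image
#   r = cv2.resize(frame, (320, 320), interpolation=cv2.INTER_CUBIC)  # -> (320, 320, 3)
#   r = (r - (104.0, 112.0, 121.0)) / 255.0                           # per-channel shift + scale
#   x = np.transpose(np.expand_dims(r, axis=3), (3, 2, 0, 1))         # -> (1, 3, 320, 320) NCHW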
==== /clients/recreation_client.py (repo: banool/recreation-gov-campsite-checker, license: Apache-2.0) ====
import logging
import requests
import user_agent
from utils import formatter
LOG = logging.getLogger(__name__)
class RecreationClient:
BASE_URL = "https://www.recreation.gov"
AVAILABILITY_ENDPOINT = (
BASE_URL + "/api/camps/availability/campground/{park_id}/month"
)
MAIN_PAGE_ENDPOINT = BASE_URL + "/api/camps/campgrounds/{park_id}"
headers = {"User-Agent": user_agent.generate_user_agent() }
@classmethod
def get_availability(cls, park_id, month_date):
params = {"start_date": formatter.format_date(month_date)}
LOG.debug(
"Querying for {} with these params: {}".format(park_id, params)
)
url = cls.AVAILABILITY_ENDPOINT.format(park_id=park_id)
resp = cls._send_request(url, params)
return resp
@classmethod
def get_park_name(cls, park_id):
resp = cls._send_request(
cls.MAIN_PAGE_ENDPOINT.format(park_id=park_id), {}
)
return resp["campground"]["facility_name"]
@classmethod
def _send_request(cls, url, params):
resp = requests.get(url, params=params, headers=cls.headers)
if resp.status_code != 200:
raise RuntimeError(
"failedRequest",
"ERROR, {status_code} code received from {url}: {resp_text}".format(
status_code=resp.status_code, url=url, resp_text=resp.text
),
)
return resp.json()
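# Minimal usage sketch (illustrative; the campground id is made up and
# formatter.format_date() from utils is assumed to accept a datetime):
#
#   from datetime import datetime
#   month_start = datetime(2024, 6, 1)
#   name = RecreationClient.get_park_name(232447)
#   availability = RecreationClient.get_availability(232447, month_start)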
==== /owtf/plugins/base.py (repo: owtf/owtf, license: BSD-3-Clause) ====
from abc import ABCMeta, abstractmethod
class Plugin(object):
"""Abstract base class definition for plugins.
Plugins must be a subclass of Plugin and
must define the following members.
"""
__metaclass__ = ABCMeta
name = None
description = None
author = None
# Type is a tuple of tags.
# For example, ('web', 'grep')
type = None
@abstractmethod
def run(self):
pass
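# Illustrative sketch of a concrete plugin (not part of OWTF itself), showing
# how the abstract members above are expected to be filled in:
#
#   class ExamplePlugin(Plugin):
#       name = "example"
#       description = "A do-nothing demonstration plugin"
#       author = "nobody"
#       type = ("web", "grep")
#
#       def run(self):
#           return "output of the plugin"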
==== /chapter9/ness6rest.py (repo: PacktPublishing/Mastering-Python-for-Networking-and-Security-Second-Edition, license: MIT) ====
# Copyright (c) 2014-2015, Tenable Network Security, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Tenable Network Security, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, TITLE,
# NON-INFRINGEMENT, INTEGRATION, PERFORMANCE, AND ACCURACY AND ANY IMPLIED
# WARRANTIES ARISING FROM STATUTE, COURSE OF DEALING, COURSE OF PERFORMANCE, OR
# USAGE OF TRADE, ARE DISCLAIMED. IN NO EVENT SHALL TENABLE NETWORK SECURITY,
# INC., OR ANY SUCCESSOR-IN-INTEREST, BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
'''
Module for interacting with Nessus REST interface
'''
import os
import sys
import atexit
import time
import requests
import json
import collections
class SSLException(Exception):
pass
class Scanner(object):
'''
Scanner object
'''
def __init__(self, url, login='', password='', api_akey='', api_skey='',
insecure=False, ca_bundle='', auto_logout=True):
self.api_akey = None
self.api_skey = None
self.use_api = False
self.name = ''
self.policy_name = ''
self.debug = False
self.format = ''
self.format_start = ''
self.format_end = ''
self.http_response = ''
self.plugins = {}
self.names = {}
self.files = {}
self.cisco_offline_configs = ''
self.permissions = ''
self.policy_id = ''
self.policy_object = ''
self.pref_cgi = ''
self.pref_paranoid = ''
self.pref_supplied = ''
self.pref_thorough = ''
self.pref_max_checks = ''
self.pref_receive_timeout = ''
self.set_safe_checks = ''
self.pref_verbose = ''
self.pref_silent_dependencies = ''
self.res = {}
self.scan_id = ''
self.scan_name = ''
self.scan_template_uuid = ''
self.scan_uuid = ''
self.tag_id = ''
self.tag_name = ''
self.targets = ''
self.policy_template_uuid = ''
self.token = ''
self.url = url
self.ver_feed = ''
self.ver_gui = ''
self.ver_plugins = ''
self.ver_svr = ''
self.ver_web = ''
self.ca_bundle = ca_bundle
self.insecure = insecure
self.auth = []
self.host_vulns = {}
self.plugin_output = {}
self.host_details = {}
self.host_ids = {}
if insecure and hasattr(requests, 'packages'):
requests.packages.urllib3.disable_warnings()
if (api_akey and api_skey):
self.api_akey = api_akey
self.api_skey = api_skey
self.use_api = True
else:
# Initial login to get our token for all subsequent transactions
self._login(login, password)
# Register a call to the logout action automatically
if auto_logout:
atexit.register(self.action, action="session",
method="delete", retry=False)
self._get_permissions()
self._get_scanner_id()
################################################################################
def _login(self, login="", password=""):
if login and password:
self.auth = [login,password]
self.action(action="session",
method="POST",
extra={"username": self.auth[0], "password": self.auth[1]},
private=True,
retry=False)
try:
self.token = self.res["token"]
except KeyError:
if self.res["error"]:
print("It looks like you're trying to login into a Nessus 5")
print("instance. Exiting.")
sys.exit(0)
################################################################################
def logout(self):
self.action(action="session", method="delete", retry=False)
################################################################################
def _get_permissions(self):
'''
All development has been conducted using an administrator account which
had the permissions '128'
'''
self.action(action="session", method="GET")
self.permissions = self.res['permissions']
################################################################################
def _get_scanner_id(self):
'''
Pull in information about scanner. The ID is necessary, everything else
is "nice to have" for debugging.
'''
self.action(action="scanners", method="GET")
try:
for scanner in self.res["scanners"]:
if scanner["type"] == "local":
self.scanner_id = scanner['id']
self.ver_plugins = scanner['loaded_plugin_set']
self.ver_gui = scanner['ui_version']
self.ver_svr = scanner['engine_version']
self.ver_feed = scanner['license']['type']
except:
pass
################################################################################
def action(self, action, method, extra={}, files={}, json_req=True, download=False, private=False, retry=True):
'''
Generic actions for REST interface. The json_req may be unneeded, but
the plugin searching functionality does not use a JSON-esque request.
This is a backup setting to be able to change content types on the fly.
'''
payload = {}
payload.update(extra)
if self.use_api:
headers = {'X-ApiKeys': 'accessKey=' + self.api_akey +
'; secretKey=' + self.api_skey}
else:
headers = {'X-Cookie': 'token=' + str(self.token)}
if json_req:
headers.update({'Content-type': 'application/json',
'Accept': 'text/plain'})
payload = json.dumps(payload)
url = "%s/%s" % (self.url, action)
if self.debug:
if private:
print("JSON : **JSON request hidden**")
else:
print("JSON :")
print(payload)
print("HEADERS :")
print(headers)
print("URL : %s " % url)
print("METHOD : %s" % method)
print("\n")
# Figure out if we should verify SSL connection (possibly with a user
# supplied CA bundle). Default to true.
if self.insecure:
verify = False
elif self.ca_bundle:
verify = self.ca_bundle
else:
verify = True
try:
req = requests.request(method, url, data=payload, files=files,
verify=verify, headers=headers)
if not download and req.text:
self.res = req.json()
elif not req.text:
self.res = {}
if req.status_code != 200:
print("*****************START ERROR*****************")
if private:
print("JSON : **JSON request hidden**")
else:
print("JSON :")
print(payload)
print(files)
print("HEADERS :")
print(headers)
print("URL : %s " % url)
print("METHOD : %s" % method)
print("RESPONSE: %d" % req.status_code)
print("\n")
self.pretty_print()
print("******************END ERROR******************")
if self.debug:
# This could also contain "pretty_print()" but it makes a lot of
# noise if enabled for the entire scan.
print("RESPONSE CODE: %d" % req.status_code)
if download:
return req.content
except requests.exceptions.SSLError as ssl_error:
raise SSLException('%s for %s.' % (ssl_error, url))
except requests.exceptions.ConnectionError:
raise Exception("Could not connect to %s.\nExiting!\n" % url)
if self.res and "error" in self.res and retry:
if self.res["error"] == "You need to log in to perform this request" or self.res["error"] == "Invalid Credentials":
self._login()
self.action(action=action, method=method, extra=extra, files=files,
json_req=json_req, download=download, private=private,
retry=False)
################################################################################
def _policy_template_uuid(self, name):
'''
Get the template ID. This provides the default settings for the policy.
'''
self.action(action="editor/policy/templates", method="GET")
for template in self.res["templates"]:
if template["name"] == name:
self.policy_template_uuid = template["uuid"]
break
################################################################################
def _scan_template_uuid(self, name):
'''
Get the template ID. This provides the default settings for the scan.
'''
self.action(action="editor/scan/templates", method="GET")
for template in self.res["templates"]:
if template["name"] == name:
self.scan_template_uuid = template["uuid"]
break
################################################################################
def policy_add(self, name, plugins=None, credentials=[], template="advanced"):
'''
Add a policy and store the returned ID. The template defaults to
"advanced" to remain compatible with the calls that occur in Nessus
5.2.x.
'''
self.policy_name = name
self._policy_template_uuid(template)
self._policy_edit_template(uuid=self.policy_template_uuid)
try:
self.policy_id = self.res["policy_id"]
# prevent duplicate names when we build the scan
self.policy_name = self.res["policy_name"]
except KeyError:
print("policy_id was not returned. Exiting")
sys.exit(1)
self.policy_add_creds(credentials=credentials)
self._policy_set_settings()
if plugins != None:
self.plugins_info(plugins=plugins)
self._enable_plugins()
################################################################################
def policy_copy(self, existing_policy_name, new_policy_name):
'''
Create a copy of an existing policy and set it to be used for a scan
'''
self.action(action="policies", method="GET")
for policy in self.res["policies"]:
if policy["name"] == existing_policy_name:
self.action(action="policies/" + str(policy["id"]) + "/copy", method="POST")
self.policy_id = self.res["id"]
'''
If there is a name conflict the rename appends a
number to the requested name.
'''
self.policy_name = new_policy_name
self.action(action="policies/" + str(self.policy_id), method="PUT",
extra={"settings":{"name": self.policy_name}})
return True
return False
################################################################################
def policy_delete(self, name):
'''
Delete a policy.
'''
self.action(action="policies", method="GET")
for policy in self.res["policies"]:
if policy["name"] == name:
self.action(action="policies/" + str(policy["id"]), method="DELETE")
return True
return False
################################################################################
def policy_exists(self, name):
'''
Set existing policy to use for a scan.
'''
self.policy_name = name
self.action(action="policies", method="GET")
if not self.res["policies"]:
return False
else:
for policy in self.res["policies"]:
if policy["name"] == name:
self.policy_id = policy["id"]
return True
return False
################################################################################
def policy_set(self, name):
'''
Set existing policy to use for a scan.
'''
self.policy_name = name
self.action(action="policies", method="GET")
for policy in self.res["policies"]:
if policy["name"] == name:
self.policy_id = policy["id"]
break
if not self.policy_id:
print("no policy with name %s found. Exiting" % name)
sys.exit(1)
################################################################################
def policy_details(self, policy_id):
'''
Retrieves details of an existing policy.
'''
self.policy_id = policy_id
self.action(action="policies/" + str(self.policy_id), method="GET")
return self.res
################################################################################
def _policy_edit_template(self, uuid):
'''
Using the UUID, create the base policy, which will then be manipulated.
This is easier than attempting to design an entire policy in one call.
'''
extra = {"settings": {"name": self.policy_name}, "uuid": uuid}
self.action(action="policies", method="POST", extra=extra)
################################################################################
def policy_add_ports(self, ports):
'''
Read current ports and append needed ports. The current value could have
been gathered when disabling the plugin families, but for the sake of an
extra call, it is much more clear what is occurring.
'''
discovery = {}
default_ports = ""
self.action(action="editor/policy/" + str(self.policy_id), method="GET")
for inputs in self.res["settings"]["discovery"]["groups"]:
if inputs["name"] == "network_discovery":
discovery = inputs["sections"]
for item in discovery:
for nested in item["inputs"]:
if nested["id"] == "portscan_range":
default_ports = nested["default"]
new_ports = str(default_ports) + "," + str(ports)
extra = {"settings": {"portscan_range": new_ports}}
self.action(action="policies/" + str(self.policy_id), method="PUT",
extra=extra)
###############################################################################
def policy_limit_ports(self, ports):
'''
Limit the ports scanned to the given list.
'''
extra = {"settings": {"portscan_range": str(ports)}}
self.action(action="policies/" + str(self.policy_id), method="PUT",
extra=extra)
################################################################################
def policy_add_creds(self, credentials, policy_id=""):
'''
Add a list of credentials, defined using the objects in the credential
module.
'''
if not policy_id:
policy_id = self.policy_id
creds = collections.defaultdict(lambda: collections.defaultdict(list))
for credential in credentials:
creds[credential.category][credential.name].append(credential.__dict__)
creds = {"credentials": {"add": creds}}
self.action(action="policies/" + str(policy_id),
method="PUT", extra=creds)
################################################################################
def _policy_set_settings(self):
'''
Current settings include: safe_checks, scan_webapps, report_paranoia,
provided_creds_only, thorough_tests, report_verbosity,
silent_dependencies
'''
settings = {"settings": {}}
# Default to safe checks
# Values: yes, no
if not self.set_safe_checks:
self.set_safe_checks = "yes"
# Default to not scanning webapps
# Values: yes, no
if not self.pref_cgi:
self.pref_cgi = "no"
# Default to normal paranoia levels
# Values: Avoid false alarms, Normal, Paranoid
if not self.pref_paranoid:
self.pref_paranoid = "Normal"
# Default to allow scans to check for default credentials
# Values: yes, no
if not self.pref_supplied:
self.pref_supplied = "no"
# Default to not use thorough tests
# Values: yes, no
if not self.pref_thorough:
self.pref_thorough = "no"
# Default to normal verbosity.
# Values: Quiet, Normal, Verbose
if not self.pref_verbose:
self.pref_verbose = "Normal"
# Default to normal reporting of dependencies
# Values: yes, no
if not self.pref_silent_dependencies:
self.pref_silent_dependencies = "yes"
# Plugin receive timeout limit
# Values: positive integers, passed as strings
# Nessus default: 5
if not self.pref_receive_timeout:
self.pref_receive_timeout = "5"
# Maximum concurrent checks
# Values: positive integers, passed as strings
# Nessus default: 5
if not self.pref_max_checks:
self.pref_max_checks = "5"
settings["settings"].update({"safe_checks": self.set_safe_checks})
settings["settings"].update({"scan_webapps": self.pref_cgi})
settings["settings"].update({"report_paranoia": self.pref_paranoid})
settings["settings"].update({"provided_creds_only": self.pref_supplied})
settings["settings"].update({"thorough_tests": self.pref_thorough})
settings["settings"].update({"report_verbosity": self.pref_verbose})
settings["settings"].update({"silent_dependencies":
self.pref_silent_dependencies})
settings["settings"].update({"cisco_offline_configs":
self.cisco_offline_configs})
settings["settings"].update({"network_receive_timeout":
self.pref_receive_timeout})
settings["settings"].update({"max_checks_per_host":
self.pref_max_checks})
self.action(action="policies/" + str(self.policy_id), method="PUT",
extra=settings)
################################################################################
def _policy_remove_audits(self, category, type='custom'):
'''
Removes all audit files from the policy.
'''
delete_ids = []
self.action(action="editor/policy/" + str(self.policy_id),
method="GET")
for record in self.res['compliance']['data']:
if record['name'] == category:
for audit in record['audits']:
if audit['type'] == type and 'id' in audit:
delete_ids.append(str(audit['id']))
audit = {"audits": {"custom": {"delete": []}}}
if len(delete_ids) > 0:
audit["audits"]["custom"]["delete"] = delete_ids
self.action(action="policies/" + str(self.policy_id),
method="PUT", extra=audit)
################################################################################
def _policy_add_audit(self, category, filename):
'''
Adds an audit file to the policy.
'''
audit = {"audits": {"custom": {"add": []}}}
audit["audits"]["custom"]["add"].append(
{"file": filename,
"category": category})
self.action(action="policies/" + str(self.policy_id),
method="PUT", extra=audit)
################################################################################
def plugins_info(self, plugins):
'''
Gather information on plugins for reporting. This also ensures that the
plugin exists, and exits if it does not.
'''
for plugin in plugins.split(','):
self.action(action="plugins/plugin/" + str(plugin), method="GET")
if "attributes" in self.res:
for attrib in self.res["attributes"]:
if attrib["attribute_name"] == "fname":
self.plugins.update({str(plugin):
{"fname":
attrib["attribute_value"],
"name": self.res["name"]}})
else:
# We don't want to scan with plugins that don't exist.
print ("Plugin with ID %s is not found. Exiting." % plugin)
sys.exit(1)
################################################################################
def _enable_plugins(self, plugins=[]):
'''
Disable all of the families, and then enable plugins that you need. This
builds the entire "plugins" object, and can be very large for some
families, such as "AIX", as it needs to make an entry for each plugin in
the family to set the status.
'''
families = {"plugins": {}}
updates = {}
family_id = {}
self.action(action="editor/policy/" + str(self.policy_id), method="GET")
# Build an object to disable all plugins at the family level.
for item in self.res["plugins"]["families"]:
families["plugins"].update({item: {"status": "disabled"}})
# print(json.dumps(families, sort_keys=False, indent=4))
self.action(action="policies/" + str(self.policy_id),
method="PUT", extra=families)
# Query the search interface to get the family information for the
# plugin
for plugin in self.plugins.keys():
self.action(action="editor/policy/" + str(self.policy_id) +
"/families?filter.search_type=and&" +
"filter.0.filter=plugin_id&filter.0.quality=eq&" +
"filter.0.value=" + str(plugin), method="GET")
for family in self.res["families"]:
# if family not in updates:
if family not in updates:
# Add the key if it isn't in the dict
updates.update({family: []})
# Add the plugin to the list of the family
updates[family].append(plugin)
# Track the family ID so we can request the list of plugins
family_id.update({family:
str(self.res["families"][family]["id"])})
# Build the stub for a family that has individual plugins enabled
for fam, fam_id in family_id.items():
families["plugins"][fam].update({"status": "mixed"})
families["plugins"][fam].update({"individual": {}})
self.action(action="editor/policy/" + str(self.policy_id) +
"/families/" + str(fam_id), method="GET")
# Disable every plugin in the family
all_disabled = {}
for pid in self.res["plugins"]:
all_disabled.update({str(pid["id"]): "disabled"})
# Update the "plugins" object to have all individual plugins
# disabled
families["plugins"][fam]["individual"].update(all_disabled)
# Update each of the plugins that we have selected to enable
for fam, pids in updates.items():
for pid in pids:
families["plugins"][fam]["individual"].update({str(pid):
"enabled"})
self.action(action="policies/" + str(self.policy_id),
method="PUT", extra=families)
################################################################################
def scan_add(self, targets, template="custom", name="", start=""):
'''
After building the policy, create a scan.
'''
self._scan_template_uuid(name=template)
self._scan_tag()
# This makes the targets much more readable in the GUI, as it splits
# them out to "one per line"
text_targets = targets.replace(",", "\n")
self.targets = targets.replace(",", " ")
# Figure out scan name
if name:
self.scan_name = name
else:
self.scan_name = self.policy_name
scan = {"uuid": self.scan_template_uuid}
settings = {}
# Static items- some could be dynamic, but it's overkill
settings.update({"launch": "ON_DEMAND"})
settings.update({"description": "Created with REST API"})
settings.update({"file_targets": ""})
settings.update({"filters": []})
settings.update({"emails": ""})
settings.update({"filter_type": ""})
# Dynamic items
settings.update({"scanner_id": str(self.scanner_id)})
settings.update({"name": self.scan_name})
if self.policy_id:
settings.update({"policy_id": self.policy_id})
settings.update({"folder_id": self.tag_id})
settings.update({"text_targets": text_targets})
# Start a scan at a scheduled time
if start:
settings.update({"starttime": start})
settings.update({"rrules": "FREQ=ONETIME"})
scan.update({"settings": settings})
self.action(action="scans", method="POST", extra=scan)
# This is the scan template UUID, this will be overwritten when we run
# the actual scan. Storing this value is mainly for debugging. If
# something was to go wrong, and we called "objdump", seeing
# "template-..." would be an obvious indicator of our location in
# creating the scan.
self.scan_uuid = self.res["scan"]["uuid"]
# We use the id for building the "launch" URL
self.scan_id = self.res["scan"]["id"]
################################################################################
def scan_delete(self, name):
'''
Delete a scan.
'''
# Find the scan id based on the name
self.action(action="scans", method="GET")
for scan in self.res["scans"]:
if scan["name"] == name:
self.action(action="scans/" + str(scan["id"]), method="DELETE")
return True
return False
################################################################################
def scan_exists(self, name):
'''
Set existing scan.
'''
self.scan_name = name
self.action(action="scans", method="GET")
if "scans" in self.res and self.res["scans"]:
for scan in self.res["scans"]:
if scan["name"] == name:
self.scan_id = scan["id"]
return True
return False
################################################################################
def scan_update_targets(self, targets):
'''
Update the targets of an existing scan.
'''
# This makes the targets much more readable in the GUI, as it splits
# them out to "one per line"
text_targets = targets.replace(",", "\n")
self.targets = targets.replace(",", " ")
self.action(action="scans/" + str(self.scan_id), method="GET")
#scan = {"uuid": self.scan_uuid}
scan = {}
settings = {}
# Static items- some could be dynamic, but it's overkill
# Dynamic items
settings.update({"name": self.scan_name})
settings.update({"policy_id": self.policy_id})
settings.update({"folder_id": self.tag_id})
settings.update({"text_targets": text_targets})
scan.update({"settings": settings})
self.action(action="scans/" + str(self.scan_id), method="PUT", extra=scan)
################################################################################
def scan_run(self):
'''
Start the scan and save the UUID to query the status
'''
self.action(action="scans/" + str(self.scan_id) + "/launch",
method="POST")
self.scan_uuid = self.res["scan_uuid"]
print("Scan name : %s" % self.scan_name)
print("Scan UUID : %s" % self.scan_uuid)
################################################################################
def _scan_status(self):
'''
Check on the scan every 2 seconds.
'''
running = True
counter = 0
while running:
self.action(action="scans?folder_id=" + str(self.tag_id),
method="GET")
for scan in self.res["scans"]:
if (scan["uuid"] == self.scan_uuid
and (scan['status'] == "running" or scan['status'] == "pending")):
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(2)
counter += 2
if counter % 60 == 0:
print("")
if (scan["uuid"] == self.scan_uuid
and scan['status'] != "running" and scan['status'] != "pending"):
running = False
# Yes, there are timestamps that we can use to compute the
# actual running time, however this is just a rough metric
# that's more to get a feel of how long something is taking,
# it's not meant for precision.
print("\nComplete! Run time: %d seconds." % counter)
################################################################################
def _scan_tag(self, name="CLI"):
'''
Set the 'tag' for the scan to CLI. If the tag doesn't exist, create it
and use the resulting ID.
'''
# Default to "CLI"
if not self.tag_name:
self.tag_name = name
self.action(action="folders", method="GET")
# Get the numeric ID of the tag. This is used to tag where the scan will
# live in the GUI, as well as help filter the "scan_status" queries and
# limit traffic/results processing.
for tag in self.res["folders"]:
if tag["name"] == self.tag_name:
self.tag_id = tag["id"]
break
# Create the new tag if it doesn't exist
if not self.tag_id:
self.action("folders", method="POST", extra={"name": self.tag_name})
self.tag_id = self.res["id"]
################################################################################
def scan_details(self, name):
'''
Fetch the details of the requested scan
'''
# Find the scan id based on the name
self.action(action="scans", method="GET")
for scan in self.res["scans"]:
if scan["name"] == name:
self.scan_id = scan["id"]
break
if not self.scan_id:
print("no scan with name %s found. Exiting" % name)
sys.exit(1)
# Get the details of the scan
self.action(action="scans/" + str(self.scan_id), method="GET")
return self.res
################################################################################
def scan_list(self):
'''
Fetch a list of scans
'''
self.action(action="scans", method="GET")
return self.res
################################################################################
def scan_list_from_folder(self, folder_id):
'''
Fetch a list of scans from a specified folder
'''
# Find the scan id based on the name
self.action(action="scans/?folder_id=" + str(folder_id), method="GET")
return self.res
################################################################################
def get_host_vulns(self, name):
'''
Fill in host_vulns dict with the host vulnerabilities found in a
scan
'''
# Get details of requested scan
self.scan_details(name)
for host in self.res["hosts"]:
self.action(action="scans/" + str(self.scan_id) + "/hosts/" + str(host["host_id"]), method="GET")
#print("scans/" + str(self.scan_id)+ "/hosts/" +str(host["host_id"]))
if self.scan_id not in self.host_vulns:
self.host_vulns[self.scan_id] = {}
self.host_vulns[self.scan_id][host["host_id"]]=self.res
################################################################################
def get_host_ids(self, name):
'''
List host_ids in given scan
'''
# Get details of requested scan
self.scan_details(name)
for host in self.res["hosts"]:
#print("%s" % host["host_id"])
self.host_ids[host["host_id"]]=1
################################################################################
def get_host_details(self, scan_id, host_id):
'''
Fill in host_details dict with the host vulnerabilities found in a
scan
'''
# Get details of requested scan
self.action(action="scans/" + str(scan_id) + "/hosts/" + str(host_id), method="GET")
if scan_id not in self.host_details:
self.host_details[scan_id] = {}
self.host_details[scan_id][host_id]=self.res
################################################################################
def get_plugin_output(self, scan, plugin_id):
'''
Fill in plugin_output dict with the output from a given plugin
in a given scan
'''
# Make sure the supplied plugin_id is of type int
plugin_id = int(plugin_id)
# Get list of host vulns
self.get_host_vulns(scan)
for scan_id in self.host_vulns:
for host_id in self.host_vulns[scan_id]:
for vulnerability in self.host_vulns[scan_id][host_id]["vulnerabilities"]:
if vulnerability["plugin_id"] == plugin_id:
self.action(action="scans/" + str(scan_id) + "/hosts/" + str(host_id) + "/plugins/" + str(plugin_id), method="GET")
if scan_id not in self.plugin_output:
self.plugin_output[scan_id] = {}
self.plugin_output[scan_id][host_id]=self.res
################################################################################
def _deduplicate_hosts(self, hosts):
return list({v["hostname"]: v for v in hosts}.values())
################################################################################
def download_kbs(self):
self.action("scans/" + str(self.scan_id), method="GET")
# Merge vulnerability and compliance hosts into a list, unique by
# hostname.
merged_hosts = self.res.get("hosts", []) + self.res.get("comphosts", [])
hosts = self._deduplicate_hosts(hosts=merged_hosts)
kbs = {}
for host in hosts:
kbs[host["hostname"]] = self.action("scans/" + str(self.scan_id) +
"/hosts/" + str(host["host_id"]) +
"/kb?token=" + str(self.token),
method="GET",
download=True)
return kbs
################################################################################
def download_scan(self, export_format="", chapters="", dbpasswd=""):
running = True
counter = 0
self.action("scans/" + str(self.scan_id), method="GET")
if (export_format=="db"):
data = {"format":"db","password":dbpasswd}
elif (export_format=="html"):
data = {"format":export_format,"chapters":chapters}
else:
data = {'format': export_format}
self.action("scans/" + str(self.scan_id) + "/export",
method="POST",
extra=data)
file_id = self.res['file']
print('Download for file id '+str(self.res['file'])+'.')
while running:
time.sleep(2)
counter += 2
self.action("scans/" + str(self.scan_id) + "/export/"
+ str(file_id) + "/status",
method="GET")
running = self.res['status'] != 'ready'
sys.stdout.write(".")
sys.stdout.flush()
if counter % 60 == 0:
print("")
print("")
content = self.action("scans/" + str(self.scan_id) + "/export/"
+ str(file_id) + "/download",
method="GET",
download=True)
return content
################################################################################
def scan_results(self):
'''
Get the list of hosts, then iterate over them and extract results
'''
# Check the status, we will be in a "wait" until the scan completes
self._scan_status()
# Query the completed scan and parse results
self.action("scans/" + str(self.scan_id), method="GET")
for host in self.res["hosts"]:
if self.format_start:
print(self.format_start)
print("----------------------------------------")
print("Target : %s" % host["hostname"])
print("----------------------------------------\n")
for plugin in self.plugins.keys():
self.action("scans/" + str(self.scan_id) + "/hosts/" +
str(host["host_id"]) + "/plugins/" + str(plugin),
method="GET")
# If not defined, the plugin did not fire for the host
if self.res["outputs"]:
print("Plugin Name : " + self.plugins[plugin]["name"])
print("Plugin File : " + self.plugins[plugin]["fname"])
print("Plugin ID : %s" % plugin)
print("Plugin Output :")
for output in self.res["outputs"]:
if 'plugin_output' in output:
print(output["plugin_output"])
else:
print("Success")
print()
# The 6.x Audit Trail has less information than previous
# versions (no plugin name). This information could be captured
# during the call to "_enable_plugins", and stored, but is
# somewhat limited in utility.
self.action("scans/" + str(self.scan_id) +
"/trails/?plugin_id=" + str(plugin) + "&hostname=" +
host["hostname"], method="GET")
# New syntax for 6.4
try:
if self.res["trails"]:
for output in self.res["trails"]:
print("Plugin Name : " + self.plugins[plugin]["name"])
print("Plugin File : " + self.plugins[plugin]["fname"])
print("Plugin ID : %s" % plugin)
print("Audit trail : " + output["output"])
print()
except:
pass
if self.format_end:
print(self.format_end)
try:
if self.res is not None:
for host in self.res["comphosts"]:
print("----------------------------------------")
print("Target : %s" % host["hostname"])
print("----------------------------------------\n")
for plugin in self.res["compliance"]:
self.action("scans/" + str(self.scan_id) + "/hosts/" +
str(host["host_id"]) + "/compliance/" +
str(plugin['plugin_id']), method="GET")
self.pretty_print()
except:
pass
################################################################################
def upload(self, upload_file, file_contents=""):
'''
Upload a file that can be used to import a policy or add an audit file
to a policy. If file_contents are not provided then upload_file is
treated as a full path to a file and opened.
'''
if not file_contents:
file_contents = open(upload_file, 'rb')
upload_file = os.path.basename(upload_file)
files = {'Filename': upload_file,
'Filedata': file_contents}
self.action(action="file/upload",
method="POST",
files=files,
json_req=False)
################################################################################
def policy_import(self, filename):
'''
Import a previously uploaded .nessus file as a policy.
'''
data = {'file': filename}
self.action(action="policies/import",
method="POST",
extra=data)
print("Imported policy named '%s', id %s" % (self.res['name'],
self.res['id']))
return self.res['id']
################################################################################
def pretty_print(self):
'''
Used for debugging and error conditions to easily see the returned
structure.
'''
print(json.dumps(self.res, sort_keys=False, indent=2))
print("\n")
################################################################################
def objdump(self):
'''
debugging function to dump all of the set values
'''
for attr in dir(self):
print("obj.%s = %s" % (attr, getattr(self, attr)))
if __name__ == "__main__":
print("Import the module, do not call directly.")
==== /mars/tensor/linalg/solve.py (repo: mars-project/mars, license: Apache-2.0 and others) ====
solve.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..datasource import tensor as astensor
from .cholesky import cholesky
from .lu import lu
from .solve_triangular import solve_triangular
def solve(a, b, sym_pos=False, sparse=None):
"""
Solve the equation ``a x = b`` for ``x``.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool
Assume `a` is symmetric and positive definite. If ``True``, use Cholesky
decomposition.
sparse: bool, optional
Return sparse value or not.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
Raises
------
LinAlgError
If `a` is singular.
Examples
--------
Given `a` and `b`, solve for `x`:
>>> import mars.tensor as mt
>>> a = mt.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = mt.array([2, 4, -1])
>>> x = mt.linalg.solve(a, b)
>>> x.execute()
array([ 2., -2., 9.])
>>> mt.dot(a, x).execute() # Check the result
array([ 2., 4., -1.])
"""
a = astensor(a)
b = astensor(b)
if sym_pos:
l_ = cholesky(a, lower=True)
u = l_.T
else:
p, l_, u = lu(a)
b = p.T.dot(b)
sparse = sparse if sparse is not None else a.issparse()
uy = solve_triangular(l_, b, lower=True, sparse=sparse)
return solve_triangular(u, uy, sparse=sparse)
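# Illustrative sketch of the symmetric positive definite path (the matrix below
# is made up; sym_pos=True makes solve() use the Cholesky branch above):
#
#   >>> a = mt.array([[4., 2.], [2., 3.]])
#   >>> b = mt.array([1., 2.])
#   >>> mt.linalg.solve(a, b, sym_pos=True).execute()
#   array([-0.125,  0.75 ])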
==== /python/examples-qt6/headers/main.py (repo: KDAB/KDChart, license: MIT) ====
|
#!/usr/bin/env python
# Contact info@kdab.com if any conditions of this licensing are not clear to you.
# This file is part of the KD Chart library.
#
# SPDX-FileCopyrightText: 2019-2023 Klarälvdalens Datakonsult AB, a KDAB Group company <info@kdab.com>
#
# SPDX-License-Identifier: MIT
#
''' Example custom formatter that displays tabs for every 10 minutes '''
# pylint: disable=missing-function-docstring,missing-class-docstring
import sys
from PySide6.QtCore import Qt, QDateTime, QTime
from PySide6.QtGui import QStandardItemModel
from PySide6.QtWidgets import QApplication, QHeaderView
from PyKDChartQt6.KDGantt import DateTimeScaleFormatter, View, DateTimeGrid
class MyDateTimeScaleFormatter(DateTimeScaleFormatter):
def __init__(self):
super().__init__(DateTimeScaleFormatter.Hour, "hh")
def nextRangeBegin(self, datetime):
return self.currentRangeBegin(datetime).addSecs(60 * 10)
def currentRangeBegin(self, datetime): # pylint: disable=no-self-use
dtInstance = QDateTime(datetime)
dtInstance.setTime(QTime(dtInstance.time().hour(),
(dtInstance.time().minute() // 10) * 10, 0, 0))  # integer division so QTime receives ints
return dtInstance
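# Worked example of the rounding above (illustrative): a datetime at 12:37 is
# snapped back to 12:30 by currentRangeBegin(), and nextRangeBegin() then
# returns 12:40, so every tab covers exactly ten minutes.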
def text(self, datetime): # pylint: disable=no-self-use
return (":%s\nXX" % datetime.time().toString("mm"))
# A custom headerview that is taller than standard
# so we can fit more lines into it on the graphicsview
# side.
# pylint: disable=too-few-public-methods
class MyHeaderView(QHeaderView):
def __init__(self, parent=None):
super().__init__(Qt.Horizontal, parent)
def sizeHint(self):
s = super().sizeHint()
s.setHeight(s.height() * 3)
return s
if __name__ == '__main__':
app = QApplication(sys.argv)
model = QStandardItemModel(1, 1)
model.setHeaderData(0, Qt.Horizontal, "Task")
# A view with some alternative header labels
view1 = View()
grid1 = DateTimeGrid()
# grid1.setUserDefinedUpperScale(DateTimeScaleFormatter(DateTimeScaleFormatter.Year,
# "yyyy", "In the year %1.", Qt.AlignLeft))
# grid1.setUserDefinedLowerScale(DateTimeScaleFormatter(DateTimeScaleFormatter.Month,
# "MMMM", "In the month %1.", Qt.AlignRight))
grid1.setScale(DateTimeGrid.ScaleUserDefined)
grid1.setDayWidth(6.0)
view1.setGrid(grid1)
view1.setModel(model)
view1.show()
# A view with header and vertical grid lines for every 10 minutes
view2 = View()
tw = view2.leftView()
h = MyHeaderView()
tw.setHeader(h)
grid2 = DateTimeGrid()
grid2.setDayWidth(5000)
grid2.setUserDefinedUpperScale(
DateTimeScaleFormatter(DateTimeScaleFormatter.Hour, "hh"))
grid2.setUserDefinedLowerScale(MyDateTimeScaleFormatter())
grid2.setScale(DateTimeGrid.ScaleUserDefined)
view2.setGrid(grid2)
view2.setModel(model)
view2.show()
sys.exit(app.exec())
==== /test/IECoreScene/SmoothSkinningDataTest.py (repo: ImageEngine/cortex, license: BSD-2-Clause/BSD-3-Clause) ====
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"""Unit test for SmoothSkinningData binding"""
import imath
import IECore
import IECoreScene
import os
import unittest
class TestSmoothSkinningData( unittest.TestCase ) :
def testData( self ) :
# test the object
s = IECoreScene.SmoothSkinningData()
self.assertEqual( s.influenceNames(), IECore.StringVectorData() )
self.assertEqual( s.influencePose(), IECore.M44fVectorData() )
self.assertEqual( s.pointIndexOffsets(), IECore.IntVectorData() )
self.assertEqual( s.pointInfluenceCounts(), IECore.IntVectorData() )
self.assertEqual( s.pointInfluenceIndices(), IECore.IntVectorData() )
self.assertEqual( s.pointInfluenceWeights(), IECore.FloatVectorData() )
self.assertEqual( s, s )
self.assertEqual( s, s.copy() )
self.assertEqual( s, IECoreScene.SmoothSkinningData() )
def testIO( self ) :
# test fileIndexedIO, read and write
ok_jn = IECore.StringVectorData( [ 'jointA', 'jointB' ] )
ok_ip = IECore.M44fVectorData( [imath.M44f(),imath.M44f()] )
ok_pio = IECore.IntVectorData( [0, 2, 4] )
ok_pic = IECore.IntVectorData( [2, 2, 1] )
ok_pii = IECore.IntVectorData( [0, 1, 0, 1, 1] )
ok_piw = IECore.FloatVectorData( [0.5, 0.5, 0.2, 0.8, 1.0] )
s = IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, ok_pii, ok_piw)
iface = IECore.IndexedIO.create( os.path.join( "test", "IECore", "ssd.fio" ), IECore.IndexedIO.OpenMode.Write )
s.save( iface, "test" )
ss = IECore.Object.load( iface, "test" )
self.assertEqual( s, ss )
	def testDataStorage( self ) :
		# test the object can store data
ok_jn = IECore.StringVectorData( [ 'jointA', 'jointB' ] )
ok_ip = IECore.M44fVectorData( [imath.M44f(),imath.M44f()] )
ok_pio = IECore.IntVectorData( [0, 2, 4] )
ok_pic = IECore.IntVectorData( [2, 2, 1] )
ok_pii = IECore.IntVectorData( [0, 1, 0, 1, 1] )
ok_piw = IECore.FloatVectorData( [0.5, 0.5, 0.2, 0.8, 1.0] )
s = IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, ok_pii, ok_piw)
self.assertEqual( s.influenceNames() , ok_jn )
self.assertEqual( s.influencePose() , ok_ip )
self.assertEqual( s.pointIndexOffsets() , ok_pio )
self.assertEqual( s.pointInfluenceCounts() , ok_pic )
self.assertEqual( s.pointInfluenceIndices() , ok_pii )
self.assertEqual( s.pointInfluenceWeights() , ok_piw )
self.assertEqual( s, s )
self.assertEqual( s, s.copy() )
iface = IECore.IndexedIO.create( os.path.join( "test", "IECore", "ssd.fio" ), IECore.IndexedIO.OpenMode.Write )
s.save( iface, "test" )
ss = IECore.Object.load( iface, "test" )
self.assertEqual( s, ss )
def testValidate(self):
# good data
ok_jn = IECore.StringVectorData( [ 'jointA', 'jointB' ] )
ok_ip = IECore.M44fVectorData( [imath.M44f(),imath.M44f()] )
ok_pio = IECore.IntVectorData( [0, 2, 4] )
ok_pic = IECore.IntVectorData( [2, 2, 1] )
ok_pii = IECore.IntVectorData( [0, 1, 0, 1, 1] )
ok_piw = IECore.FloatVectorData( [0.5, 0.5, 0.2, 0.8, 1.0] )
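		# Illustration of what the "good" arrays above encode (not part of the test itself):
		# point 0 is influenced by joints 0 and 1 with weights 0.5/0.5 (offset 0, count 2),
		# point 1 by joints 0 and 1 with weights 0.2/0.8 (offset 2, count 2), and
		# point 2 by joint 1 alone with weight 1.0 (offset 4, count 1).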
# data with invalid nr of elements
iv_jn = IECore.StringVectorData( [ 'jointA', 'jointB', 'jointC' ] )
iv_ip = IECore.M44fVectorData( [imath.M44f()] )
iv_pio1 = IECore.IntVectorData( [0, 2, 4, 666] )
iv_pic1 = IECore.IntVectorData( [2, 2 ] )
iv_pii1 = IECore.IntVectorData( [0, 1, 0, 1, 1, 666] )
iv_piw = IECore.FloatVectorData( [0.5, 0.5, 0.2] )
# data with invalid ids
iv_pio2 = IECore.IntVectorData( [0, 2, 666] )
iv_pii2 = IECore.IntVectorData( [0, 1, 666, 1, 1] )
# data with invalid counts
iv_pic2 = IECore.IntVectorData( [2, 0, 1 ] )
# data with count / index mismatch
iv_pio3 = IECore.IntVectorData( [0, 3, 4] )
iv_pic3 = IECore.IntVectorData( [3, 1, 1] )
# test all is ok
IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, ok_pii, ok_piw).validate()
# test wrong nr of influenceNames, influencePose
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(iv_jn, ok_ip, ok_pio, ok_pic, ok_pii, ok_piw).validate )
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, iv_ip, ok_pio, ok_pic, ok_pii, ok_piw).validate )
# test wrong nr of pointIndexOffsets, pointInfluenceCounts
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, iv_pio1, ok_pic, ok_pii, ok_piw).validate )
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, iv_pic1, ok_pii, ok_piw).validate )
# test wrong nr of pointInfluenceIndices, pointInfluenceWeights
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, iv_pii1, ok_piw).validate )
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, ok_pii, iv_piw).validate )
# test invalid ids
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, iv_pio2, ok_pic, ok_pii, ok_piw).validate )
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, ok_pic, iv_pii2, ok_piw).validate )
# test wrong counts
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, iv_pic2, ok_pii, ok_piw).validate )
# test count id mismatching
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, iv_pio3, ok_pic, ok_pii, ok_piw).validate )
self.assertRaises( Exception, IECoreScene.SmoothSkinningData(ok_jn, ok_ip, ok_pio, iv_pic3, ok_pii, ok_piw).validate )
# todo: add reference test data we are happy with
# def testRef(self):
# load reference data we are sure is cool
# ss = Reader.create( "test/IECore/data/cobFiles/smoothSkinningData.cob" ).read()
# self.assertTrue( ss.isValid() );
def tearDown( self ) :
if os.path.isfile(os.path.join( "test", "IECore", "ssd.fio" )):
os.remove(os.path.join( "test", "IECore", "ssd.fio" ))
if __name__ == "__main__":
unittest.main()
|
5f2e9cafe3b6a4a3d9599311995135c95d0fdb9e
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Calibration/HcalAlCaRecoProducers/test/AlCaHcalIsoTrk_cfg.py
|
7300056e51131275849bd82d29c63533856967c7
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,247
|
py
|
AlCaHcalIsoTrk_cfg.py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
process = cms.Process("ALCAISOTRACK",Run2_2018)
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.StandardSequences.AlCaRecoStreams_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag=autoCond['run2_mc']
process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1000)
if hasattr(process,'MessageLogger'):
process.MessageLogger.HcalIsoTrackX=dict()
process.MessageLogger.HcalIsoTrack=dict()
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:/afs/cern.ch/user/h/huwang/work/public/for_Sunanda/RECO_data.root',
)
)
process.load('RecoLocalCalo.CaloTowersCreator.calotowermaker_cfi')
process.towerMakerAll = process.calotowermaker.clone()
process.towerMakerAll.hbheInput = cms.InputTag("hbhereco")
process.towerMakerAll.hoInput = cms.InputTag("none")
process.towerMakerAll.hfInput = cms.InputTag("none")
process.towerMakerAll.ecalInputs = cms.VInputTag(cms.InputTag("ecalRecHit","EcalRecHitsEB"), cms.InputTag("ecalRecHit","EcalRecHitsEE"))
process.towerMakerAll.AllowMissingInputs = True
process.ALCARECOStreamHcalCalIsoTrkProducerFilter = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOHcalCalIsoTrkProducerFilter')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('ALCARECO'),
filterName = cms.untracked.string('ALCARECOHcalCalIsoTrkProducerFilter')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.OutALCARECOHcalCalIsoTrkProducerFilter.outputCommands,
fileName = cms.untracked.string('newPoolOutput.root'),
)
process.alcaHcalIsotrkProducer.ignoreTriggers = True
process.alcaHcalIsotrkProducer.debugEvents = [640818633, 640797426, 641251898]
process.alcaHcalIsotrkFilter.debugEvents = [640818633, 640797426, 641251898]
#process.alcaHcalIsotrkProducer.debugEvents = [641031809, 641092744, 640862532,
# 640874735, 641845581, 641144982,
# 641124886, 641240201, 640856725,
# 641709599, 641406943, 640794164,
# 641820644, 641053143, 641458751,
# 641554667, 641621481]
#process.alcaHcalIsotrkFilter.debugEvents = [641031809, 641092744, 640862532,
# 640874735, 641845581, 641144982,
# 641124886, 641240201, 640856725,
# 641709599, 641406943, 640794164,
# 641820644, 641053143, 641458751,
# 641554667, 641621481]
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.ALCARECOStreamHcalCalIsoTrkProducerFilterOutPath = cms.EndPath(process.ALCARECOStreamHcalCalIsoTrkProducerFilter)
# Schedule definition
process.schedule = cms.Schedule(process.pathALCARECOHcalCalIsoTrkProducerFilter,process.endjob_step,process.ALCARECOStreamHcalCalIsoTrkProducerFilterOutPath)
|
dc5488bb02651bc463635a437924236db7f8ccef
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-rocketmq/huaweicloudsdkrocketmq/v2/model/list_rocket_instance_topics_response.py
|
7e1c3d4f11e70d9dc8c230858d758a3e356834a4
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,691
|
py
|
list_rocket_instance_topics_response.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListRocketInstanceTopicsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total': 'int',
'max': 'int',
'remaining': 'int',
'next_offset': 'int',
'previous_offset': 'int',
'topics': 'list[Topic]'
}
attribute_map = {
'total': 'total',
'max': 'max',
'remaining': 'remaining',
'next_offset': 'next_offset',
'previous_offset': 'previous_offset',
'topics': 'topics'
}
def __init__(self, total=None, max=None, remaining=None, next_offset=None, previous_offset=None, topics=None):
"""ListRocketInstanceTopicsResponse
The model defined in huaweicloud sdk
        :param total: Total number of topics.
        :type total: int
        :param max: Maximum number of topics that can be created.
        :type max: int
        :param remaining: Number of topics that can still be created.
        :type remaining: int
        :param next_offset: Offset of the next page.
        :type next_offset: int
        :param previous_offset: Offset of the previous page.
        :type previous_offset: int
        :param topics: List of topics.
        :type topics: list[:class:`huaweicloudsdkrocketmq.v2.Topic`]
"""
super(ListRocketInstanceTopicsResponse, self).__init__()
self._total = None
self._max = None
self._remaining = None
self._next_offset = None
self._previous_offset = None
self._topics = None
self.discriminator = None
if total is not None:
self.total = total
if max is not None:
self.max = max
if remaining is not None:
self.remaining = remaining
if next_offset is not None:
self.next_offset = next_offset
if previous_offset is not None:
self.previous_offset = previous_offset
if topics is not None:
self.topics = topics
@property
def total(self):
"""Gets the total of this ListRocketInstanceTopicsResponse.
        Total number of topics.
:return: The total of this ListRocketInstanceTopicsResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListRocketInstanceTopicsResponse.
        Total number of topics.
:param total: The total of this ListRocketInstanceTopicsResponse.
:type total: int
"""
self._total = total
@property
def max(self):
"""Gets the max of this ListRocketInstanceTopicsResponse.
        Maximum number of topics that can be created.
:return: The max of this ListRocketInstanceTopicsResponse.
:rtype: int
"""
return self._max
@max.setter
def max(self, max):
"""Sets the max of this ListRocketInstanceTopicsResponse.
        Maximum number of topics that can be created.
:param max: The max of this ListRocketInstanceTopicsResponse.
:type max: int
"""
self._max = max
@property
def remaining(self):
"""Gets the remaining of this ListRocketInstanceTopicsResponse.
        Number of topics that can still be created.
:return: The remaining of this ListRocketInstanceTopicsResponse.
:rtype: int
"""
return self._remaining
@remaining.setter
def remaining(self, remaining):
"""Sets the remaining of this ListRocketInstanceTopicsResponse.
        Number of topics that can still be created.
:param remaining: The remaining of this ListRocketInstanceTopicsResponse.
:type remaining: int
"""
self._remaining = remaining
@property
def next_offset(self):
"""Gets the next_offset of this ListRocketInstanceTopicsResponse.
        Offset of the next page.
:return: The next_offset of this ListRocketInstanceTopicsResponse.
:rtype: int
"""
return self._next_offset
@next_offset.setter
def next_offset(self, next_offset):
"""Sets the next_offset of this ListRocketInstanceTopicsResponse.
        Offset of the next page.
:param next_offset: The next_offset of this ListRocketInstanceTopicsResponse.
:type next_offset: int
"""
self._next_offset = next_offset
@property
def previous_offset(self):
"""Gets the previous_offset of this ListRocketInstanceTopicsResponse.
        Offset of the previous page.
:return: The previous_offset of this ListRocketInstanceTopicsResponse.
:rtype: int
"""
return self._previous_offset
@previous_offset.setter
def previous_offset(self, previous_offset):
"""Sets the previous_offset of this ListRocketInstanceTopicsResponse.
        Offset of the previous page.
:param previous_offset: The previous_offset of this ListRocketInstanceTopicsResponse.
:type previous_offset: int
"""
self._previous_offset = previous_offset
@property
def topics(self):
"""Gets the topics of this ListRocketInstanceTopicsResponse.
        List of topics.
:return: The topics of this ListRocketInstanceTopicsResponse.
:rtype: list[:class:`huaweicloudsdkrocketmq.v2.Topic`]
"""
return self._topics
@topics.setter
def topics(self, topics):
"""Sets the topics of this ListRocketInstanceTopicsResponse.
        List of topics.
:param topics: The topics of this ListRocketInstanceTopicsResponse.
:type topics: list[:class:`huaweicloudsdkrocketmq.v2.Topic`]
"""
self._topics = topics
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
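    # Usage sketch (hypothetical values, not part of the generated SDK):
    #   resp = ListRocketInstanceTopicsResponse(total=1, topics=[])
    #   resp.to_dict()  # -> {'total': 1, 'max': None, 'remaining': None,
    #                   #     'next_offset': None, 'previous_offset': None, 'topics': []}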
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListRocketInstanceTopicsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
fe611cdb804675326e70a31f421ed7eb04055025
|
67025b57439c3a99d6a393f74529164ea67d7501
|
/tests/color_tests/test_asgray.py
|
63a9895bbf5788a7d9b6c2c1c4b7bc605df37d87
|
[
"MIT"
] |
permissive
|
wkentaro/imgviz
|
cf9861f97ef5f0406e44ed4c1f53b0238a131d27
|
7a3cdb1aef46a50d5025e0a9c366c5b238266907
|
refs/heads/main
| 2023-08-10T03:59:31.193626
| 2023-07-27T01:59:47
| 2023-07-27T01:59:47
| 163,167,512
| 237
| 30
|
MIT
| 2023-08-20T01:56:16
| 2018-12-26T10:27:22
|
Python
|
UTF-8
|
Python
| false
| false
| 187
|
py
|
test_asgray.py
|
import numpy as np
import imgviz
def test_asgray():
data = imgviz.data.arc2017()
gray = imgviz.asgray(data["rgb"])
assert gray.ndim == 2
assert gray.dtype == np.uint8
|
a760678d1a3a69a3f2691b6b9b79953167b215bb
|
1f20484efc357aae4b7e2f98a191e7a9256f3a58
|
/irc3/__init__.py
|
2ed8256b802a208ad8adeaf66b54f2bd16ed0227
|
[
"CC-BY-3.0",
"MIT",
"LicenseRef-scancode-ietf"
] |
permissive
|
gawel/irc3
|
55b2e4d01ca95f45077f8bad231394551584d7bd
|
76d6849d5e7a531d649aca766f623f9f30a55545
|
refs/heads/master
| 2023-07-15T20:49:40.188267
| 2023-04-17T09:02:31
| 2023-04-17T09:02:31
| 14,820,406
| 187
| 58
|
MIT
| 2023-02-27T10:18:41
| 2013-11-30T12:09:48
|
Python
|
UTF-8
|
Python
| false
| false
| 14,524
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
from urllib.request import urlopen
from ipaddress import ip_address
from collections import deque
from .dcc import DCCManager
from .dcc import DCCChat
from .dec import dcc_event
from .dec import event
from .dec import extend
from .dec import plugin
from . import config
from . import utils
from . import rfc
from . import base
from .compat import asyncio
from .compat import Queue
import venusian
import time
class IrcConnection(asyncio.Protocol):
"""asyncio protocol to handle an irc connection"""
def connection_made(self, transport):
self.transport = transport
self.closed = False
self.queue = deque()
def decode(self, data):
"""Decode data with bot's encoding"""
encoding = getattr(self, 'encoding', 'ascii')
return data.decode(encoding, 'ignore')
def data_received(self, data):
data = self.decode(data)
if self.queue:
data = self.queue.popleft() + data
lines = data.split('\r\n')
self.queue.append(lines.pop(-1))
for line in lines:
self.factory.dispatch(line)
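    # Buffering sketch (hypothetical traffic): if a line such as "PING :server.example\r\n"
    # arrives split across two TCP reads, the incomplete tail is kept in self.queue and
    # prepended to the next chunk, so only complete lines are dispatched.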
def write(self, data):
if data is not None:
data = data.encode(self.encoding)
if not data.endswith(b'\r\n'):
data = data + b'\r\n'
self.transport.write(data)
def connection_lost(self, exc):
self.factory.log.critical('connection lost (%s): %r',
id(self.transport),
exc)
self.factory.notify('connection_lost')
if not self.closed:
self.close()
# wait a few before reconnect
self.factory.loop.call_later(
2, self.factory.create_connection)
def close(self):
if not self.closed:
self.factory.log.critical('closing old transport (%r)',
id(self.transport))
try:
self.transport.close()
finally:
self.closed = True
class IrcBot(base.IrcObject):
"""An IRC bot"""
_pep8 = [dcc_event, event, extend, plugin, rfc, config]
venusian = venusian
venusian_categories = [
'irc3',
'irc3.dcc',
'irc3.extend',
'irc3.rfc1459',
'irc3.plugins.cron',
'irc3.plugins.command',
]
logging_config = config.LOGGING
defaults = dict(
base.IrcObject.defaults,
nick='irc3',
username='irc3',
realname='Irc bot based on irc3 http://irc3.readthedocs.io',
host='localhost',
mode=0,
url='https://irc3.readthedocs.io/',
passwords={},
flood_burst=4,
flood_rate=1,
flood_rate_delay=1,
ctcp=dict(
version='irc3 {version} - {url}',
userinfo='{realname}',
time='{now:%c}',
),
# freenode config as default for testing
server_config=dict(
STATUSMSG='+@',
PREFIX='(ov)@+',
CHANTYPES='#',
CHANMODES='eIbq,k,flj,CFLMPQScgimnprstz',
),
connection=IrcConnection,
)
def __init__(self, *ini, **config):
update_config_needed = False
if 'userinfo' in config or \
('realname' in config and 'username' not in config):
update_config_needed = True # pragma: no cover
super(IrcBot, self).__init__(*ini, **config)
if update_config_needed: # pragma: no cover
# Backward compat. Remove me in 2017
self.log.fatal('realname has been renamed to username.')
self.log.fatal('userinfo has been renamed to realname.')
self.log.fatal('Please update your config with something like:.')
if 'realname' in self.config:
self.log.fatal('username = %(realname)s', self.config)
if 'userinfo' in self.config:
self.log.fatal('realname = %(userinfo)s', self.config)
import sys
sys.exit(-1)
self.queue = None
if self.config.asynchronous:
self.queue = Queue(loop=self.loop)
self.awaiting_queue = self.create_task(self.process_queue())
self._ip = self._dcc = None
# auto include the sasl plugin if needed
if 'sasl_username' in self.config and \
'irc3.plugins.sasl' not in self.registry.includes:
self.include('irc3.plugins.sasl')
# auto include the autojoins plugin if needed (for backward compat)
if 'autojoins' in self.config and \
'irc3.plugins.autojoins' not in self.registry.includes:
self.include('irc3.plugins.autojoins')
@property
def server_config(self):
"""return server configuration (rfc rpl 005)::
>>> bot = IrcBot()
>>> print(bot.server_config['STATUSMSG'])
+@
The real values are only available after the server sent them.
"""
return self.config.server_config
def connection_made(self, f): # pragma: no cover
if getattr(self, 'protocol', None):
self.protocol.close()
try:
transport, protocol = f.result()
except Exception as e:
self.log.exception(e)
self.loop.call_later(3, self.create_connection)
else:
self.log.debug('Connected')
self.protocol = protocol
self.protocol.queue = deque()
self.protocol.factory = self
self.protocol.encoding = self.encoding
if self.config.get('password'):
self._send('PASS {password}'.format(**self.config))
self.notify('connection_ready')
self.send((
'USER {username} {mode} * :{realname}\r\n'
'NICK {nick}\r\n'
).format(**self.config))
self.notify('connection_made')
def send_line(self, data, nowait=False):
"""send a line to the server. replace CR by spaces"""
data = data.replace('\n', ' ').replace('\r', ' ')
f = self.loop.create_future()
if self.queue is not None and nowait is False:
self.queue.put_nowait((f, data))
else:
            self.send(data)
f.set_result(True)
return f
async def process_queue(self):
flood_burst = self.config.flood_burst
delay = float(self.config.flood_rate_delay)
flood_rate = delay / float(self.config.flood_rate)
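        # Worked example with the defaults above (flood_burst=4, flood_rate=1,
        # flood_rate_delay=1): up to 4 queued lines go out in a single burst, then the
        # loop waits flood_rate seconds (1.0s here) between any further queued lines.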
while True:
if flood_burst == 0:
future, data = await self.queue.get()
future.set_result(True)
self.send(data)
await asyncio.sleep(.001, loop=self.loop)
else:
lines = []
for i in range(flood_burst):
future, data = await self.queue.get()
future.set_result(True)
lines.append(data)
if self.queue.empty():
break
if lines:
self.send('\r\n'.join(lines))
while not self.queue.empty():
await asyncio.sleep(flood_rate, loop=self.loop)
future, data = await self.queue.get()
future.set_result(True)
self.send(data)
def send(self, data):
"""send data to the server"""
self._send(data)
def _send(self, data):
self.protocol.write(data)
self.dispatch(data, iotype='out')
def privmsg(self, target, message, nowait=False):
"""send a privmsg to target"""
if message:
is_dcc = isinstance(target, DCCChat)
prefix = '' if is_dcc else 'PRIVMSG %s :' % target
messages = utils.split_message(
message,
self.config.max_length,
prefix=prefix,
)
if is_dcc:
for message in messages:
target.send_line(message)
elif target:
f = None
for message in messages:
f = self.send_line(prefix + message,
nowait=nowait)
return f
def action(self, target, message, nowait=False):
return self.privmsg(target, '\x01ACTION %s\x01' % message,
nowait=nowait)
def notice(self, target, message, nowait=False):
"""send a notice to target"""
if message:
is_dcc = isinstance(target, DCCChat)
prefix = '' if is_dcc else 'NOTICE %s :' % target
messages = utils.split_message(
message,
self.config.max_length,
prefix=prefix,
)
if is_dcc:
for message in messages:
target.send_line(message)
elif target:
f = None
for message in messages:
f = self.send_line(prefix + message,
nowait=nowait)
return f
def ctcp(self, target, message, nowait=False):
"""send a ctcp to target"""
if target and message:
messages = utils.split_message(message, self.config.max_length)
f = None
for message in messages:
f = self.send_line('PRIVMSG %s :\x01%s\x01' % (target,
message),
nowait=nowait)
return f
def ctcp_reply(self, target, message, nowait=False):
"""send a ctcp reply to target"""
if target and message:
messages = utils.split_message(message, self.config.max_length)
f = None
for message in messages:
f = self.send_line('NOTICE %s :\x01%s\x01' % (target, message),
nowait=nowait)
return f
def mode(self, target, *data):
"""set user or channel mode"""
self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)
def join(self, target):
"""join a channel"""
password = self.config.passwords.get(
target.strip(self.server_config['CHANTYPES']))
if password:
target += ' ' + password
self.send_line('JOIN %s' % target)
def part(self, target, reason=None):
"""quit a channel"""
if reason:
target += ' :' + reason
self.send_line('PART %s' % target)
def kick(self, channel, target, reason=None):
"""kick target from channel"""
if reason:
target += ' :' + reason
self.send_line('KICK %s %s' % (channel, target), nowait=True)
def invite(self, target, channel):
"""invite target to a channel"""
self.send_line('INVITE %s %s' % (target, channel))
def topic(self, channel, topic=None):
"""change or request the topic of a channel"""
if topic:
channel += ' :' + topic
self.send_line('TOPIC %s' % channel)
def away(self, message=None):
"""mark ourself as away"""
cmd = 'AWAY'
if message:
cmd += ' :' + message
self.send_line(cmd)
def unaway(self):
"""mask ourself as no longer away"""
self.away()
    def quit(self, reason=None):
        """disconnect"""
        if not reason:
            reason = 'bye'
        self.send_line('QUIT :%s' % reason)
def get_nick(self):
return self.config.nick
def set_nick(self, nick):
self.send_line('NICK ' + nick, nowait=True)
nick = property(get_nick, set_nick, doc='nickname get/set')
@property
def ip(self):
"""return bot's ip as an ``ip_address`` object"""
if not self._ip:
if 'ip' in self.config:
ip = self.config['ip']
else:
ip = self.protocol.transport.get_extra_info('sockname')[0]
ip = ip_address(ip)
if ip.version == 4:
self._ip = ip
else: # pragma: no cover
response = urlopen('http://ipv4.icanhazip.com/')
ip = response.read().strip().decode()
ip = ip_address(ip)
self._ip = ip
return self._ip
@property
def dcc(self):
"""return the :class:`~irc3.dcc.DCCManager`"""
if self._dcc is None:
self._dcc = DCCManager(self)
return self._dcc
async def dcc_chat(self, mask, host=None, port=None):
"""Open a DCC CHAT whith mask. If host/port are specified then connect
to a server. Else create a server"""
conn = self.dcc.create(
'chat', mask, host=host, port=port)
await conn.ready
return conn
async def dcc_get(self, mask, host, port, filepath, filesize=None):
"""DCC GET a file from mask. filepath must be an absolute path with an
existing directory. filesize is the expected file size."""
conn = self.dcc.create(
'get', mask, filepath=filepath, filesize=filesize,
host=host, port=port)
await conn.ready
return conn
async def dcc_send(self, mask, filepath):
"""DCC SEND a file to mask. filepath must be an absolute path to
existing file"""
conn = self.dcc.create('send', mask, filepath=filepath)
await conn.ready
return conn
async def dcc_accept(self, mask, filepath, port, pos):
"""accept a DCC RESUME for an axisting DCC SEND. filepath is the
filename to sent. port is the port opened on the server.
pos is the expected offset"""
return self.dcc.resume(mask, filepath, port, pos)
def SIGHUP(self):
self.reload()
def SIGINT(self):
self.notify('SIGINT')
if getattr(self, 'protocol', None):
self.quit('INT')
time.sleep(1)
self.loop.stop()
def run(argv=None):
bots = {}
bot = IrcBot.from_argv(argv, botnet=bots)
bots['bot'] = bot
for section in list(bot.config):
if section.startswith('bot_'):
config = bot.config.pop(section)
bots[section] = IrcBot.from_argv(argv, botnet=bots, **config)
bot.loop.run_forever()
return bots
|
b980f106f0d6344d9cfd594b441756142be00015
|
f9a03c7f8a02913a6b04d4a6535d1974baa7dd22
|
/build.py
|
e6b46f8442d4e85466026e7bda90bd14ec31d347
|
[
"Apache-2.0"
] |
permissive
|
openshift/openshift-docs
|
e769b35bf84fb43860ed97f669cfca19e8c8f439
|
6d225638fc4ec4a50090f2a1ccfe32205983dd4f
|
refs/heads/main
| 2023-09-01T02:00:24.922059
| 2023-08-31T18:24:51
| 2023-08-31T18:24:51
| 21,740,844
| 739
| 1,990
|
Apache-2.0
| 2023-09-14T21:37:35
| 2014-07-11T15:14:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 39,400
|
py
|
build.py
|
#!/usr/bin/python
# This file builds content from asciidoc to a ccutil-ready format, BUT it is only
# used for validating content rather than for the actual conversion. For the
# actual conversion, the file build_for_portal.py is used (on the portal).
# The only difference between this file and build_for_portal.py is in the
# _fix_links section. This file redirects link anchors within the same file
# to the top of the file so that the Travis build passes. Travis builds don't
# know about books external to them, and this helps the builds pass.
import argparse
import configparser
import filecmp
import fnmatch
import logging
import os
import re
import shutil
import subprocess
import sys
import time
import yaml
import requests
import tempfile
from aura import cli
cli.init_logging(False, True)
has_errors = False
CLONE_DIR = "."
BASE_PORTAL_URL = "https://access.redhat.com/documentation/en-us/"
# ID_RE = re.compile("^\[(?:\[|id=\'|#)(.*?)(\'?,.*?)?(?:\]|\')?\]", re.M | re.DOTALL)
ID_RE = re.compile("^\[(?:\[|id=\'|#|id=\")(.*?)(\'?,.*?)?(?:\]|\'|\")?\]", re.M | re.DOTALL)
LINKS_RE = re.compile("(?:xref|link):([\./\w_-]*/?[\w_.-]*\.(?:html|adoc))?(#[\w_-]*)?(\[.*?\])", re.M | re.DOTALL)
EXTERNAL_LINK_RE = re.compile("[\./]*([\w_-]+)/[\w_/-]*?([\w_.-]*\.(?:html|adoc))", re.DOTALL)
INCLUDE_RE = re.compile("include::(.*?)\[(.*?)\]", re.M)
IFDEF_RE = re.compile(r"^if(n?)def::(.*?)\[\]", re.M)
ENDIF_RE = re.compile(r"^endif::(.*?)\[\]\r?\n", re.M)
COMMENT_CONTENT_RE = re.compile(r"^^////$.*?^////$", re.M | re.DOTALL)
TAG_CONTENT_RE = re.compile(r"//\s+tag::(.*?)\[\].*?// end::(.*?)\[\]", re.M | re.DOTALL)
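# Illustrative matches for the patterns above (hypothetical source lines, not taken
# from any real book):
#   ID_RE      : "[[architecture-overview]]" or "[id='architecture-overview']"
#   LINKS_RE   : "xref:../dev_guide/templates.adoc#using-templates[Templates]"
#   INCLUDE_RE : "include::modules/common-attributes.adoc[leveloffset=+1]"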
CMP_IGNORE_FILES = [".git", ".gitignore", "README.md", "build.cfg"]
DEVNULL = open(os.devnull, 'wb')
MASTER_FILE_BASE = "= {title}\n\
:product-author: {product-author}\n\
:product-title: {product}\n\
:product-version: {product-version}\n\
:{distro}:\n\
:imagesdir: images\n\
:idseparator: -\n\
{preface-title}\n"
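# Hypothetical rendering of MASTER_FILE_BASE (the values are examples only):
#   = Getting Started
#   :product-author: Red Hat OpenShift Documentation Team
#   :product-title: OpenShift Enterprise
#   :product-version: 3.0
#   :openshift-enterprise:
#   :imagesdir: images
#   :idseparator: -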
DOCINFO_BASE = "<title>{title}</title>\n\
<productname>{{product-title}}</productname>\n\
<productnumber>{{product-version}}</productnumber>\n\
<subtitle>Enter a short description here.</subtitle>\n\
<abstract>\n\
<para>A short overview and summary of the book's subject and purpose, traditionally no more than one paragraph long.</para>\n\
</abstract>\n\
<authorgroup>\n\
<orgname>{product-author}</orgname>\n\
</authorgroup>\n\
<xi:include href=\"Common_Content/Legal_Notice.xml\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />\n"
# A list of book titles that still use the old drupal url format (i.e. the product/version is included in the book title part)
# eg. openshift-enterprise/version-3.0/openshift-enterprise-30-getting-started vs openshift-enterprise/version-3.0/getting-started
DRUPAL_OLD_URL_TITLES = [
"Administrator Guide",
"Architecture",
"CLI Reference",
"Creating Images",
"Developer Guide",
"Getting Started",
"REST API Reference",
"Using Images",
"What's New?"
]
# A mapping of upstream book/category names to CP book names
BOOK_NAME_OVERRIDES = {
"Administration": "Administrator Guide"
}
# Lines that should be stripped out/ignored when cleaning the content
IGNORE_LINES = [
"{product-author}\n",
"{product-version}\n",
"{product-version]\n",
"{Lucas Costi}\n",
"toc::[]\n"
]
# Each MACRO in this list is omitted from the output
# if the input appears as ':MACRO:' (colon, MACRO, colon).
IGNORE_MACROS = [
"description",
"keywords",
"icons",
"data-uri",
"toc",
"toc-title"
]
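# For example (hypothetical input), a topic line such as ":toc:" or ":icons: font"
# would be filtered out by include_line() further below.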
# Files where the title should be removed when building the all-in-one
ALL_IN_ONE_SCRAP_TITLE = [
"welcome/index.adoc"
]
# Files that should be commented out in the toc structure
COMMENT_FILES = [
"admin_guide/overview.adoc",
"creating_images/overview.adoc",
"dev_guide/overview.adoc",
"using_images/overview.adoc",
"rest_api/overview.adoc"
]
# Map FILENAME to a map of TITLE to ID. In most of the cases the
# ID is the TITLE downcased, with "strange" chars replaced by hyphen.
# A notable exception is 'any' TITLE.
TITLE_IDS = {}
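# Hypothetical example of the expected shape (not real data):
#   TITLE_IDS = {
#       "dev_guide/templates.adoc": {"Using Templates": "using-templates"},
#   }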
# A dictionary of existing dup ids to new unique ids
DUPLICATE_IDS = {}
# Map FILENAME to a map of BAD to GOOD. Most of the time, BAD and GOOD
# are in link syntax, i.e., beginning with "link:", but not always.
INCORRECT_LINKS = {}
log = logging.getLogger("build")
def setup_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--distro", help="The distribution to build for", default="openshift-enterprise")
parser.add_argument("--all-in-one", help=argparse.SUPPRESS, action="store_true")
parser.add_argument("--title", help=argparse.SUPPRESS, default="Documentation")
parser.add_argument("--product", default="OpenShift Enterprise")
parser.add_argument("--version", default="3.0")
parser.add_argument("--author", default="Red Hat OpenShift Documentation Team")
parser.add_argument("--upstream-url", help="The upstream source url", default="https://github.com/openshift/openshift-docs.git")
parser.add_argument("--upstream-branch", help="The upstream source branch", default="enterprise-3.0")
parser.add_argument("--branch", help="The GitLab branch to commit changes into", default="GA")
parser.add_argument("-p", "--push", help="Commit and push the changes into GitLab", action="store_true")
parser.add_argument("--no-clean", help="Don't clean the drupal-build directory before building", action="store_true")
parser.add_argument("--no-upstream-fetch", help="Don't fetch the upstream sources", action="store_true")
return parser
def find_build_config_file():
"""
Finds the build config file to use, as it might be _topic_map.yml or _build_cfg.yml
"""
# updated 23rd Nov to support files in _topic_maps folder
# load everything from the _topic_maps folder
file_list = os.listdir(os.path.join(CLONE_DIR, "_topic_maps"))
# create a temp file combining all values from that folder
# don't delete it immediately, and give it a suffix of swp which makes it ignored by git
with tempfile.NamedTemporaryFile(dir=CLONE_DIR, delete=False, suffix=".swp") as tmp:
for f in file_list:
with open(os.path.join(CLONE_DIR, "_topic_maps", f), "rb") as infile:
tmp.write(infile.read())
config = os.path.abspath(tmp.name)
log.info(config)
# backup look for a single _topic_map in the cloned directory
if not os.path.isfile(config):
config = os.path.abspath(os.path.join(CLONE_DIR, "_topic_map.yml"))
return config
def parse_build_config(config):
"""
Parses the build config and returns a tree based structure for the config.
"""
config = os.path.expanduser(config)
with open(config, "r") as f:
data = list(yaml.load_all(f,Loader=yaml.FullLoader))
for book in data:
book_name = book['Name']
if book_name in BOOK_NAME_OVERRIDES:
book['Name'] = BOOK_NAME_OVERRIDES[book_name]
return data
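# Hypothetical shape of a node returned by parse_build_config() and walked by
# iter_tree() below (not taken from any real topic map):
#   Name: Developer Guide
#   Dir: dev_guide
#   Distros: openshift-*
#   Topics:
#     - Name: Templates
#       File: templates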
def iter_tree(node, distro, dir_callback=None, topic_callback=None, include_path=True, parent_dir="", depth=0):
"""
    Iterates over a build config tree starting from a specific node, skipping content where the distro doesn't match. Additionally, calls are
    made to the dir_callback or topic_callback functions when a directory or topic is found.
"""
if "Topics" in node:
if check_node_distro_matches(node, distro):
if include_path:
topics_dir = os.path.join(parent_dir, node["Dir"])
else:
topics_dir = ""
if dir_callback is not None:
dir_callback(node, parent_dir, depth)
for topic in node["Topics"]:
iter_tree(topic, distro, dir_callback, topic_callback, True, topics_dir, depth + 1)
elif check_node_distro_matches(node, distro):
if topic_callback is not None:
topic_callback(node, parent_dir, depth)
def check_node_distro_matches(node, distro):
"""
    Checks to see if the specified distro matches a distro in the node's distros list. If there is no distros list specified on the
    node then all distros are allowed, so return true.
"""
if "Distros" not in node:
return True
else:
node_distros = [x.strip() for x in node['Distros'].split(",")]
for node_distro in node_distros:
# Check for an exact match, or a glob match
if node_distro == distro or fnmatch.fnmatchcase(distro, node_distro):
return True
return False
def ensure_directory(directory):
"""
Creates DIRECTORY if it does not exist.
"""
if not os.path.exists(directory):
os.mkdir(directory)
def build_master_files(info):
"""
Builds the master.adoc and docinfo.xml files for each guide specified in the config.
"""
dest_dir = info['dest_dir']
all_in_one = info['all_in_one']
all_in_one_text = ""
for book in info['book_nodes']:
book_dest_dir = os.path.join(dest_dir, book['Dir'])
ensure_directory(book_dest_dir)
book_info = dict(info)
book_info['title'] = book['Name']
master = generate_master_entry(book, book['Dir'], info['distro'], all_in_one, all_in_one=all_in_one)
# Save the content
if not all_in_one:
master_file = os.path.join(book_dest_dir, 'master.adoc')
docinfo_file = os.path.join(book_dest_dir, 'docinfo.xml')
master_base = MASTER_FILE_BASE.format(**book_info)
log.debug("Writing " + master_file)
with open(master_file, "w") as f:
f.write(master_base + master)
log.debug("Writing " + docinfo_file)
with open(docinfo_file, "w") as f:
f.write(DOCINFO_BASE.format(**book_info))
else:
if all_in_one_text == "":
# Remove the title for the first file in the book
master = master.replace("= " + book['Name'] + "\n", "")
# Set the preface title from the first file in the book
first_file = os.path.join(info['src_dir'], book['Dir'], book['Topics'][0]['File'] + ".adoc")
preface_title = None
with open(first_file, "r") as f:
line = f.readline()
while line:
if include_line(line):
preface_title = re.sub("^=+ ", "", line)
break
line = f.readline()
if preface_title is not None:
info['preface-title'] = ":preface-title: " + preface_title
all_in_one_text += master
if all_in_one:
master_file = os.path.join(dest_dir, 'master.adoc')
docinfo_file = os.path.join(dest_dir, 'docinfo.xml')
master_base = MASTER_FILE_BASE.format(**info)
log.debug("Writing " + master_file)
with open(master_file, "w") as f:
f.write(master_base + all_in_one_text)
log.debug("Writing " + docinfo_file)
with open(docinfo_file, "w") as f:
f.write(DOCINFO_BASE.format(**info))
def generate_master_entry(node, book_dir, distro, include_name=True, all_in_one=False):
"""
Generates the master.adoc core content for a specific book/node.
"""
master_entries = []
def dir_callback(dir_node, parent_dir, depth):
if include_name or depth > 0:
master_entries.append("=" * (depth + 1) + " " + dir_node["Name"].replace("\\", ""))
def topic_callback(topic_node, parent_dir, depth):
book_file_path = os.path.join(parent_dir, topic_node["File"] + ".adoc")
file_path = os.path.join(book_dir, book_file_path)
include = "include::" + book_file_path + "[leveloffset=+" + str(depth) + "]"
if not all_in_one and file_path in COMMENT_FILES:
master_entries.append("////")
master_entries.append(include)
master_entries.append("////")
else:
master_entries.append(include)
# Add a blank line
master_entries.append("")
# Iterate over the tree and build the master.adoc content
iter_tree(node, distro, dir_callback, topic_callback, include_name)
return "\n".join(master_entries)
def reformat_for_drupal(info):
"""
Reformats the source content for use in the Customer Portal. This function does the following:
- Copies images over and flattens them into a single dir
- Copies source asciidoc over
    - Filters the AsciiDoc source to remove duplicate macro definitions that should only be in the main file.
- Adds id's for each file, so the files can be properly cross referenced.
- Adds id's to sections that are cross referenced, but have no id.
- Fixes duplicate id's in the source content.
- Fixes links that have been done incorrectly and should be cross references instead.
"""
books = info['book_nodes']
src_dir = info['src_dir']
dest_dir = info['dest_dir']
distro = info['distro']
# Build a mapping of files to ids
# Note: For all-in-one we have to collect ids from all books first
file_to_id_map = {}
if info['all_in_one']:
book_ids = []
for book in books:
book_ids.extend(collect_existing_ids(book, distro, src_dir))
for book in books:
file_to_id_map.update(build_file_to_id_map(book, distro, book_ids, src_dir))
else:
for book in books:
book_ids = collect_existing_ids(book, distro, src_dir)
file_to_id_map.update(build_file_to_id_map(book, distro, book_ids, src_dir))
info['file_to_id_map'] = file_to_id_map
# Reformat the data
for book in books:
log.info("Processing %s", book['Dir'])
book_src_dir = os.path.join(src_dir, book['Dir'])
if info['all_in_one']:
images_dir = os.path.join(dest_dir, "images")
else:
book_dest_dir = os.path.join(dest_dir, book['Dir'])
images_dir = os.path.join(book_dest_dir, "images")
ensure_directory(images_dir)
log.debug("Copying source files for " + book['Name'])
copy_files(book, book_src_dir, src_dir, dest_dir, info)
log.debug("Copying images for " + book['Name'])
copy_images(book, src_dir, images_dir, distro)
def copy_images(node, src_path, dest_dir, distro):
"""
Copy images over to the destination directory and flatten all image directories into the one top level dir.
"""
def dir_callback(dir_node, parent_dir, depth):
node_dir = os.path.join(parent_dir, dir_node['Dir'])
src = os.path.join(node_dir, "images")
if os.path.exists(src):
src_files = os.listdir(src)
for src_file in src_files:
shutil.copy(os.path.join(src, src_file), dest_dir)
iter_tree(node, distro, dir_callback, parent_dir=src_path)
def copy_files(node, book_src_dir, src_dir, dest_dir, info):
"""
Recursively copy files from the source directory to the destination directory, making sure to scrub the content, add id's where the
content is referenced elsewhere and fix any links that should be cross references.
"""
def dir_callback(dir_node, parent_dir, depth):
node_dest_dir = os.path.join(dest_dir, parent_dir, dir_node['Dir'])
ensure_directory(node_dest_dir)
def topic_callback(topic_node, parent_dir, depth):
node_src_dir = os.path.join(src_dir, parent_dir)
node_dest_dir = os.path.join(dest_dir, parent_dir)
src_file = os.path.join(node_src_dir, topic_node["File"] + ".adoc")
dest_file = os.path.join(node_dest_dir, topic_node["File"] + ".adoc")
# Copy the file
copy_file(info, book_src_dir, src_file, dest_dir, dest_file)
iter_tree(node, info['distro'], dir_callback, topic_callback)
def copy_file(info, book_src_dir, src_file, dest_dir, dest_file, include_check=True, tag=None, cwd=None):
"""
Copies a source file to destination, making sure to scrub the content, add id's where the content is referenced elsewhere and fix any
links that should be cross references. Also copies any includes that are referenced, since they aren't included in _build_cfg.yml.
"""
# It's possible that the file might have been created by another include, if so then just return
if os.path.isfile(dest_file):
return
# Touch the dest file, so we can handle circular includes
parent_dir = os.path.dirname(dest_file)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
#os.mknod(dest_file)
open(dest_file, 'w').close()
# Scrub/fix the content
content = scrub_file(info, book_src_dir, src_file, tag=tag, cwd=cwd)
# Check for any includes
if include_check:
cleaned_content = remove_conditional_content(content, info)
include_iter = INCLUDE_RE.finditer(cleaned_content)
for include in include_iter:
include_text = include.group(0)
include_path = include.group(1)
include_unparsed_vars = include.group(2)
# Determine the include vars
include_vars = {}
if include_unparsed_vars is not None and len(include_unparsed_vars) > 0:
for meta in re.split(r"\s*,\s*", include_unparsed_vars):
key, value = re.split("\s*=\s*", meta, 2)
include_vars[key] = value
# Determine the include src/dest paths
include_file = os.path.join(os.path.dirname(book_src_dir), include_path)
relative_path = os.path.relpath(include_file, os.path.dirname(src_file))
# If the path is in another book, copy it into this one
relative_book_path = os.path.relpath(include_file, book_src_dir)
if relative_book_path.startswith("../"):
path, src_book_name = os.path.split(book_src_dir)
dest_include_dir = os.path.join(dest_dir, src_book_name, "includes")
relative_path = os.path.join(os.path.relpath(dest_include_dir, parent_dir), os.path.basename(include_file))
else:
dest_include_dir = os.path.abspath(os.path.join(os.path.dirname(dest_file), os.path.dirname(relative_path)))
dest_include_file = os.path.join(dest_include_dir, os.path.basename(include_file))
# Make sure we have a reference to the current working dir
current_dir = cwd or os.path.dirname(src_file)
include_tag = include_vars.get("tag", None)
# Copy the file and fix the content
if not os.path.isfile(dest_include_file):
copy_file(info, book_src_dir, include_file, dest_dir, dest_include_file, tag=include_tag, cwd=current_dir)
else:
# The file has already been copied, so just fix the links for this tag
with open(dest_include_file, 'r') as f:
include_content = f.read()
# Fix any links
include_content = fix_links(include_content, info, book_src_dir, include_file, tag=include_tag, cwd=cwd)
with open(dest_include_file, "w") as f:
f.write(include_content)
content = content.replace(include_text, include.expand("include::" + relative_path + "[\\2]"))
with open(dest_file, "w") as f:
f.write(content)
def scrub_file(info, book_src_dir, src_file, tag=None, cwd=None):
"""
Scrubs a file and returns the cleaned file contents.
"""
base_src_file = src_file.replace(info['src_dir'] + "/", "")
# added 1/Sep/2020
    # To allow loading files like json and yaml from external sources, this
    # procedure loads the file after recognizing that it starts with http.
    # It then checks whether the file exists, and if it does, returns the raw
    # data that it finds.
if(base_src_file.startswith("https://raw.githubusercontent.com/openshift/")):
try:
response = requests.get(base_src_file)
if(response):
return response.text
else:
raise ConnectionError("Malformed URL")
except Exception as exception:
log.error("An include file wasn't found: %s", base_src_file)
has_errors = True
sys.exit(-1)
# Get a list of predefined custom title ids for the file
title_ids = TITLE_IDS.get(base_src_file, {})
# Read in the source content
with open(src_file, 'r') as f:
src_file_content = f.readlines()
# Scrub the content
content = ""
header_found = content_found = False
current_id = None
for line in src_file_content:
# Ignore any leading blank lines, before any meaningful content is found
if line.strip() == "" and not content_found:
continue
# Check if the line should be included in the output
if include_line(line):
content_found = True
# Setup the document header content/id
if not header_found and line.strip() != "" and line.startswith("="):
header_found = True
if info['all_in_one'] and base_src_file in ALL_IN_ONE_SCRAP_TITLE and line.startswith("= "):
continue
# Add a section id if one doesn't exist, so we have something to link to
elif current_id is None and src_file in info['file_to_id_map']:
file_id = info['file_to_id_map'][src_file]
content += "[[" + file_id + "]]\n"
# Add a custom title id, if one is needed
elif line.startswith("=") and current_id is None:
for title in title_ids:
title_re = r"^=+ " + title.replace(".", "\\.").replace("?", "\\?") + "( (anchor|\[).*?)?(\n)?$"
if re.match(title_re, line):
content += "[[" + title_ids[title] + "]]\n"
# Set the current id based on the line content
if current_id is None and ID_RE.match(line.strip()):
current_id = line.strip()
elif current_id is not None and line.strip != "":
current_id = None
# Add the line to the processed content
content += line
# Fix up any duplicate ids
if base_src_file in DUPLICATE_IDS:
for duplicate_id, new_id in list(DUPLICATE_IDS[base_src_file].items()):
content = content.replace("[[" + duplicate_id + "]]", "[[" + new_id + "]]")
# Replace incorrect links with correct ones
if base_src_file in INCORRECT_LINKS:
for incorrect_link, fixed_link in list(INCORRECT_LINKS[base_src_file].items()):
content = content.replace(incorrect_link, fixed_link)
# Fix up the links
content = fix_links(content, info, book_src_dir, src_file, tag=tag, cwd=cwd)
return content
def include_line(line):
"""
Determines if a line should be included in the filtered output.
"""
if line in IGNORE_LINES:
return False
for macro in IGNORE_MACROS:
if line.startswith(":" + macro + ":"):
return False
return True
def fix_links(content, info, book_src_dir, src_file, tag=None, cwd=None):
"""
Fix any links that were done incorrectly and reference the output instead of the source content.
"""
if info['all_in_one']:
        content = _fix_links(content, info['src_dir'], src_file, info)
else:
# Determine if the tag should be passed when fixing the links. If it's in the same book, then process the entire file. If it's
# outside the book then don't process it.
if book_src_dir in src_file:
content = _fix_links(content, book_src_dir, src_file, info, cwd=cwd)
else:
content = _fix_links(content, book_src_dir, src_file, info, tag=tag, cwd=cwd)
return content
def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
"""
Fix any links that were done incorrectly and reference the output instead of the source content.
"""
# TODO Deal with xref so that they keep the proper path. Atm it'll just strip the path and leave only the id
file_to_id_map = info['file_to_id_map']
current_dir = cwd or os.path.dirname(src_file)
cleaned_content = remove_conditional_content(content, info, tag=tag)
links = LINKS_RE.finditer(cleaned_content)
for link in links:
link_text = link.group(0)
link_file = link.group(1)
link_anchor = link.group(2)
link_title = link.group(3)
if link_file is not None:
fixed_link_file = link_file.replace(".html", ".adoc")
fixed_link_file_abs = os.path.abspath(os.path.join(current_dir, fixed_link_file))
if fixed_link_file_abs in file_to_id_map:
if fixed_link_file_abs.startswith(book_dir + os.sep) or fixed_link_file_abs == src_file:
# We are dealing with a cross reference within the same book here
if link_anchor is None:
# Cross reference to the top of a topic, without an id being specified
link_anchor = "#" + file_to_id_map[fixed_link_file_abs]
fixed_link = "xref:" + link_anchor.replace("#", "") + link_title
else:
# We are dealing with a cross reference to another book here
external_link = EXTERNAL_LINK_RE.search(link_file)
book_dir_name = external_link.group(1)
# Find the book name
book_name = book_dir_name
for book in info['data']:
if check_node_distro_matches(book, info['distro']) and book['Dir'] == book_dir_name:
book_name = book['Name']
break
fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)
if link_anchor is None:
fixed_link = "link:" + fixed_link_file + "#" + file_to_id_map[fixed_link_file_abs] + link_title
else:
fixed_link = "link:" + fixed_link_file + link_anchor + link_title
else:
# Cross reference or link that isn't in the docs suite
fixed_link = link_text
if EXTERNAL_LINK_RE.search(link_file) is not None:
rel_src_file = src_file.replace(os.path.dirname(book_dir) + "/", "")
has_errors = True
log.error("ERROR (%s): \"%s\" appears to try to reference a file not included in the \"%s\" distro", rel_src_file, link_text.replace("\n", ""), info['distro'])
sys.exit(-1)
else:
fixed_link = "xref:" + link_anchor.replace("#", "") + link_title
content = content.replace(link_text, fixed_link)
return content
def remove_conditional_content(content, info, tag=None):
"""
Removes any conditional content that doesn't match for the specified distro
"""
# Remove any ifdef content
ifdef = IFDEF_RE.search(content)
while ifdef is not None:
is_not_def = ifdef.group(1) == "n"
ifdef_distros = ifdef.group(2).split(",")
pos = ifdef.start()
end = ifdef.end()
# Determine if we should strip the conditional content, based on the distro
strip_content = False
if is_not_def and info['distro'] in ifdef_distros:
strip_content = True
elif not is_not_def and info['distro'] not in ifdef_distros:
strip_content = True
# Remove the conditional content
if strip_content:
# Find the correct endif for the current ifdef
search_pos = end
endpos = len(content)
while True:
next_ifdef = IFDEF_RE.search(content, search_pos)
endif = ENDIF_RE.search(content, search_pos)
if not endif:
break
elif not next_ifdef or next_ifdef.start() > endif.start():
endpos = endif.end()
break
else:
search_pos = endif.end()
# Replace the content and move the end pos to be the same as the start since the content was removed
ifdef_text = content[pos:endpos]
content = content.replace(ifdef_text, "")
end = pos
# Move onto the next ifdef
ifdef = IFDEF_RE.search(content, end)
# Remove commented out content
for comment in COMMENT_CONTENT_RE.finditer(content):
content = content.replace(comment.group(0), "")
# Remove content outside of tags
if tag is not None:
for tag_match in TAG_CONTENT_RE.finditer(content):
tag_text = tag_match.group(0)
tag_label = tag_match.group(1)
if tag_label == tag:
# Tag matches, so only use the content in the tag
content = tag_text
return content
def collect_existing_ids(node, distro, path):
"""
Examines all nodes asciidoc file contents and returns any existing ids.
"""
book_ids = []
def topic_callback(topic_node, parent_dir, depth):
src_file = os.path.join(parent_dir, topic_node["File"] + ".adoc")
file_ids = extract_file_ids(src_file)
book_ids.extend(file_ids)
iter_tree(node, distro, topic_callback=topic_callback, parent_dir=path)
return book_ids
def build_file_to_id_map(node, distro, existing_ids, path=""):
"""
Builds a mapping of file names/paths to the root id for the file. This is used to fix the links that are done incorrectly.
"""
file_to_id_map = {}
def topic_callback(topic_node, parent_dir, depth):
src_file = os.path.join(parent_dir, topic_node["File"] + ".adoc")
file_to_id_map[src_file] = build_file_id(topic_node["Name"], file_to_id_map, existing_ids)
iter_tree(node, distro, topic_callback=topic_callback, parent_dir=path)
return file_to_id_map
def extract_file_ids(file_path):
"""
Extracts all the ids used in the specified file.
"""
with open(file_path, "r") as f:
content = f.read()
ids = ID_RE.finditer(content)
return [id.group(1) for id in ids]
def build_file_id(file_title, file_to_id_map, existing_ids):
"""
Generates a unique id for a file, based on it's title.
"""
file_id = base_id = re.sub(r"[\[\]\(\)#]", "", file_title.lower().replace("_", "-").replace(" ", "-"))
count = 1
while file_id in existing_ids or file_id in list(file_to_id_map.values()):
file_id = base_id + "-" + str(count)
count += 1
return file_id
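# Worked example (hypothetical titles): build_file_id("Getting Started", {}, [])
# returns "getting-started"; if "getting-started" were already taken, the next
# call would return "getting-started-1".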
def build_portal_url(info, book_name):
"""
Builds a portal url path by escaping the content in the same way drupal does.
"""
product = info['product']
version = info['product-version']
return generate_url_from_name(product) + "/" + generate_url_from_name(version) + "/html-single/" + generate_url_from_name(book_name) + "/"
def replace_nbsp(val):
"""Replaces non breaking spaces with a regular space"""
if val is not None:
# Check if the string is unicode
if isinstance(val, str):
return val.replace('\xa0', ' ')
else:
return val.replace('\xc2\xa0', ' ')
else:
return None
def generate_url_from_name(name, delimiter='_'):
"""
Generates a url fragment from a product, version or titles name.
"""
# Remove characters that aren't allowed in urls
url = re.sub("^\.+|[^0-9a-zA-Z _\-.]+", "", replace_nbsp(name))
# Replace spaces with the delimiter
url = re.sub("\s+", delimiter, url)
# Replace multiple underscores with a single underscore
url = re.sub(delimiter + "+", delimiter, url)
return url.lower()
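# Worked example (hypothetical values): generate_url_from_name("OpenShift Enterprise")
# returns "openshift_enterprise", so build_portal_url() above would produce something
# like "openshift_enterprise/3.0/html-single/getting_started/" for a "Getting Started"
# book at version "3.0".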
def call_git_command(*args, **kwargs):
"""
Calls a git command and retries the command if it is unable to connect to the remote repo
"""
retries = kwargs.pop("retries", 3)
try:
output = subprocess.check_output(*args, **kwargs)
if output is not None:
sys.stdout.write(output)
return output
except subprocess.CalledProcessError as e:
retries -= 1
if retries > 0 and "fatal: Could not read from remote repository" in e.output:
# Connection failed, so wait a couple of secs and try again
time.sleep(2)
call_git_command(*args, retries=retries, **kwargs)
else:
raise
def fetch_sources(url, branch, dir=None, clone_dirname=None):
"""
    Fetches sources from a git repository. If the repository doesn't exist it'll be cloned into `clone_dirname`; if it has already been
    cloned, the repo will just be updated.
"""
# Setup the defaults
if dir is None:
dir = os.getcwd()
if clone_dirname is None:
clone_dirname = url.split('/')[-1].replace(".git", "")
# If the dir already exists update the content, otherwise clone it
clone_dir = os.path.abspath(os.path.join(dir, clone_dirname))
if os.path.exists(os.path.join(clone_dir, ".git")):
cmd = ["git", "pull", "-f"]
cmd_dir = clone_dir
# Do a checkout to make sure we are on the right branch
checkout_cmd = ["git", "checkout", branch]
subprocess.check_output(checkout_cmd, cwd=cmd_dir, stderr=subprocess.STDOUT)
else:
cmd = ["git", "clone", "-b", branch, url, clone_dirname]
cmd_dir = os.path.abspath(dir)
# Execute the command
call_git_command(cmd, cwd=cmd_dir, stderr=subprocess.STDOUT)
def sync_directories(src_dir, dest_dir, ignore=None):
"""
    Syncs two directories so that both contain the same content, with the exception of ignored files.
"""
if ignore is None:
ignore = []
ignore.extend(CMP_IGNORE_FILES)
dcmp = filecmp.dircmp(src_dir, dest_dir, ignore)
_sync_directories_dircmp(dcmp)
def _sync_directories_dircmp(dcmp):
# Remove files that only exist in the dest directory
for filename in dcmp.right_only:
right = os.path.join(dcmp.right, filename)
if os.path.isfile(right):
os.remove(right)
else:
shutil.rmtree(right)
# Copy files that only exist in the source directory or files that have changed
for filename in dcmp.left_only+dcmp.common_files:
left = os.path.join(dcmp.left, filename)
right = os.path.join(dcmp.right, filename)
if os.path.isfile(left):
shutil.copy2(left, right)
else:
shutil.copytree(left, right)
# Sync sub directories
for subdcmp in list(dcmp.subdirs.values()):
_sync_directories_dircmp(subdcmp)
def commit_and_push_changes(git_dir, git_branch, git_upstream_branch):
"""
Adds, commits and pushes any changes to a local git repository.
"""
# Add all the changes
add_cmd = ["git", "add", "--all"]
subprocess.check_call(add_cmd, cwd=git_dir)
try:
# Commit the changes
commit_cmd = ["git", "commit", "-m", "Merge branch 'upstream/" + git_upstream_branch + "' into " + git_branch,
"--author", "CCS OSE Build Script <no-reply@redhat.com>"]
call_git_command(commit_cmd, cwd=git_dir, stderr=subprocess.STDOUT)
# Push the changes
push_cmd = ["git", "push"]
call_git_command(push_cmd, cwd=git_dir, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.output is None or "nothing to commit" not in e.output:
raise
def parse_repo_config(config_file, distro, version):
# Make sure the repo config file exists
if not os.path.isfile(config_file):
log.error("Failed loading the repo configuration from %s", config_file)
sys.exit(-1)
    parser = configparser.ConfigParser()
parser.read(config_file)
repo_urls = dict()
section_name = distro + "-" + version
if parser.has_section(section_name):
for (key, value) in parser.items(section_name):
repo_urls[key] = value
return repo_urls
def main():
parser = setup_parser()
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stdout)
# Copy down the latest files
if not args.no_upstream_fetch:
log.info("Fetching the upstream sources")
fetch_sources(args.upstream_url, args.upstream_branch, clone_dirname=CLONE_DIR)
config = find_build_config_file()
src_dir = os.path.dirname(config)
# Parse the build config
data = parse_build_config(config)
# Filter the list of books that should be built
book_nodes = [node for node in data if check_node_distro_matches(node, args.distro)]
# Make the new source tree
dest_dir = os.path.join(os.getcwd(), "drupal-build", args.distro)
if not args.no_clean:
log.info("Cleaning the drupal-build directory")
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
elif not os.path.exists(dest_dir):
os.makedirs(dest_dir)
info = {
'title': args.title,
'product-author': args.author,
'product-version': args.version,
'product': args.product,
'distro': args.distro,
'src_dir': src_dir,
'dest_dir': dest_dir,
'data': data,
'book_nodes': book_nodes,
'all_in_one': args.all_in_one,
'preface-title': "",
"upstream_branch": args.upstream_branch
}
# Build the master files
log.info("Building the drupal files")
build_master_files(info)
# Copy the original data and reformat for drupal
reformat_for_drupal(info)
if has_errors:
sys.exit(1)
if args.push:
# Parse the repo urls
config_file = os.path.join(os.path.dirname(__file__), 'repos.ini')
repo_urls = parse_repo_config(config_file, args.distro, args.version)
        # Make sure the base git dir exists
base_git_dir = os.path.join(os.getcwd(), "gitlab-repos")
ensure_directory(base_git_dir)
# Checkout the gitlab repo, copy the changes and push them back up
for book_dir, gitlab_repo_url in list(repo_urls.items()):
build_book_dir = os.path.join(dest_dir, book_dir)
git_dirname = gitlab_repo_url.split('/')[-1].replace(".git", "")
git_dir = os.path.join(base_git_dir, git_dirname)
try:
log.info("Fetching " + book_dir + " sources from GitLab")
fetch_sources(gitlab_repo_url, args.branch, base_git_dir, git_dirname)
log.info("Syncing " + book_dir)
sync_directories(build_book_dir, git_dir, ["docinfo.xml"])
log.info("Pushing " + book_dir + " changes back to GitLab")
commit_and_push_changes(git_dir, args.branch, args.upstream_branch)
except subprocess.CalledProcessError as e:
if e.output:
sys.stdout.write(e.output)
raise
if __name__ == "__main__":
main()
|
64582aa7b5004df3183a6520294eb60408dd11c7
|
2270e0fb290591a21fd13a3980dccf4ff47d83fa
|
/tf2_gnn/layers/message_passing/rgat.py
|
946bb62c244f147700838ff25a6b6b1f2b547470
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/tf2-gnn
|
d0251e48c0d7cfa67fbd4d1cd8579cbf6845059b
|
fa608555c68c41027a44e1ec68c160e92b570632
|
refs/heads/master
| 2023-08-30T01:09:37.653689
| 2023-07-13T13:47:00
| 2023-07-13T13:47:00
| 242,964,823
| 411
| 80
|
MIT
| 2023-07-12T18:33:59
| 2020-02-25T09:49:36
|
Python
|
UTF-8
|
Python
| false
| false
| 7,247
|
py
|
rgat.py
|
"""Relation graph attention network layer."""
from typing import Dict, List, Tuple, Any
import tensorflow as tf
from dpu_utils.tf2utils import unsorted_segment_log_softmax
from .message_passing import MessagePassing, MessagePassingInput, register_message_passing_implementation
@register_message_passing_implementation
class RGAT(MessagePassing):
"""Compute new graph states by neural message passing using attention. This generalises
the original GAT model (Velickovic et al., https://arxiv.org/pdf/1710.10903.pdf)
to multiple edge types by using different weights for different edge types.
For this, we assume existing node states h^t_v and a list of per-edge-type adjacency
matrices A_\ell.
In the setting for a single attention head, we compute new states as follows:
h^t_{v, \ell} := W_\ell h^t_v
e_{u, \ell, v} := LeakyReLU(\alpha_\ell^T * concat(h^t_{u, \ell}, h^t_{v, \ell}))
a_v := softmax_{\ell, u with (u, v) \in A_\ell}(e_{u, \ell, v})
        h^{t+1}_v := \sigma(\sum_{\ell, (u, v) \in A_\ell}
                            a_v_{u, \ell} * h^t_{u, \ell})
The learnable parameters of this are the W_\ell \in R^{D, D} and \alpha_\ell \in R^{2*D}.
In practice, we use K attention heads, computing separate, partial new states h^{t+1}_{v,k}
    and compute h^{t+1}_v as the concatenation of the partial states.
For this, we reduce the shape of W_\ell to R^{D, D/K} and \alpha_\ell to R^{2*D/K}.
We use the following abbreviations in shape descriptions:
* V: number of nodes
* K: number of attention heads
* L: number of different edge types
* E: number of edges of a given edge type
* D: input node representation dimension
* H: output node representation dimension (set as hidden_dim)
>>> node_embeddings = tf.random.normal(shape=(5, 3))
>>> adjacency_lists = (
... tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),
... tf.constant([[2, 3], [2, 4]], dtype=tf.int32),
... tf.constant([[3, 1]], dtype=tf.int32),
... )
...
>>> params = RGAT.get_default_hyperparameters()
>>> params["hidden_dim"] = 12
>>> layer = RGAT(params)
>>> output = layer(MessagePassingInput(node_embeddings, adjacency_lists))
>>> print(output)
tf.Tensor(..., shape=(5, 12), dtype=float32)
"""
@classmethod
def get_default_hyperparameters(cls):
these_hypers = {
"num_heads": 3,
}
mp_hypers = super().get_default_hyperparameters()
mp_hypers.update(these_hypers)
return mp_hypers
def __init__(self, params: Dict[str, Any], **kwargs):
super().__init__(params, **kwargs)
self._num_heads: int = params["num_heads"]
self._edge_type_to_message_computation_layer: List[tf.keras.layers.Layer] = []
self._edge_type_to_attention_parameters: List[tf.Variable] = []
def build(self, input_shapes: MessagePassingInput):
node_embedding_shapes = input_shapes.node_embeddings
adjacency_list_shapes = input_shapes.adjacency_lists
num_edge_types = len(adjacency_list_shapes)
per_head_dim = self._hidden_dim // self._num_heads
for i in range(num_edge_types):
with tf.name_scope(f"edge_type_{i}"):
mp_layer = tf.keras.layers.Dense(
self._hidden_dim, use_bias=False, name="Edge_weight_{}".format(i)
)
mp_layer.build(tf.TensorShape((None, node_embedding_shapes[-1])))
self._edge_type_to_message_computation_layer.append(mp_layer)
attention_weights = self.add_weight(
name="Edge_attention_parameters_{}".format(i),
shape=(self._num_heads, 2 * per_head_dim),
trainable=True,
)
self._edge_type_to_attention_parameters.append(attention_weights)
super().build(input_shapes)
def _message_function(
self,
edge_source_states: tf.Tensor,
edge_target_states: tf.Tensor,
num_incoming_to_node_per_message: tf.Tensor,
edge_type_idx: int,
training: bool,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
per_head_dim = self._hidden_dim // self._num_heads
# Actually do the message calculation:
per_head_transformed_source_states = tf.reshape(
self._edge_type_to_message_computation_layer[edge_type_idx](edge_source_states),
shape=(-1, self._num_heads, per_head_dim),
) # Shape [E, K, H/K]
per_head_transformed_target_states = tf.reshape(
self._edge_type_to_message_computation_layer[edge_type_idx](edge_target_states),
shape=(-1, self._num_heads, per_head_dim),
) # Shape [E, K, H/K]
per_head_transformed_states = tf.concat(
[per_head_transformed_source_states, per_head_transformed_target_states], axis=-1
) # Shape [E, K, 2*H/K]
per_head_attention_scores = tf.nn.leaky_relu(
tf.einsum(
"vki,ki->vk",
per_head_transformed_states,
self._edge_type_to_attention_parameters[edge_type_idx],
)
) # Shape [E, K]
return (per_head_transformed_source_states, per_head_attention_scores)
def _compute_new_node_embeddings(
self,
cur_node_embeddings: tf.Tensor,
messages_per_type: List[Tuple[tf.Tensor, tf.Tensor]],
edge_type_to_message_targets: List[tf.Tensor],
num_nodes: tf.Tensor,
training: bool,
):
per_head_messages_per_type, per_head_attention_scores_per_type = zip(*messages_per_type)
per_head_messages = tf.concat(per_head_messages_per_type, axis=0) # Shape [M, K, H/K]
per_head_attention_scores = tf.concat(
per_head_attention_scores_per_type, axis=0
) # Shape [M, K]
message_targets = tf.concat(edge_type_to_message_targets, axis=0) # Shape [M]
head_to_aggregated_messages = [] # list of tensors of shape [V, H/K]
for head_idx in range(self._num_heads):
# Compute the softmax over all the attention scores for all messages going to this state:
attention_scores = tf.concat(
per_head_attention_scores[:, head_idx], axis=0
) # Shape [M]
attention_values = tf.exp(
unsorted_segment_log_softmax(
logits=attention_scores, segment_ids=message_targets, num_segments=num_nodes
)
) # Shape [M]
messages = per_head_messages[:, head_idx, :] # Shape [M, H/K]
# Compute weighted sum per target node for this head:
head_to_aggregated_messages.append(
tf.math.unsorted_segment_sum(
data=tf.expand_dims(attention_values, -1) * messages,
segment_ids=message_targets,
num_segments=num_nodes,
)
)
aggregated_messages = tf.concat(head_to_aggregated_messages, axis=-1) # Shape [V, H]
return self._activation_fn(aggregated_messages)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
abcd27de3dc1480ace72d9c4af2774cac04d2e22
|
afbeee6a3a83946449e5fccf7c74457461ed921f
|
/docs/source/gallery/showcase/plots/point_cloud_plot.py
|
5374fd9c9b51a7002aa5b9a233a52388f664dc73
|
[
"MIT"
] |
permissive
|
K3D-tools/K3D-jupyter
|
d69e541de90835415be5516d3e6758b1fcd530d2
|
5973d30947f6bc80b2a50ba260f198bec57ddfc1
|
refs/heads/main
| 2023-09-01T20:41:01.159202
| 2023-08-26T20:45:56
| 2023-08-26T20:45:56
| 44,377,817
| 859
| 134
|
MIT
| 2023-08-26T20:33:59
| 2015-10-16T10:14:20
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
point_cloud_plot.py
|
import numpy as np
import os
import k3d
def generate():
filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'../assets/point_cloud.npz')
data = np.load(filepath)['arr_0']
plt_points = k3d.points(data[:, 0:3],
data[:, 4].astype(np.uint32),
point_size=0.15,
shader="flat")
plot = k3d.plot(grid_visible=False,
camera_auto_fit=False,
background_color=0x87ceeb)
plot += plt_points
plot.camera = [20.84, -3.06, 6.96,
0.67, 0.84, 3.79,
0.0, 0.0, 1.0]
plot.snapshot_type = 'inline'
return plot.get_snapshot()
|
dace283102d1b3ed5f1304969136ebfcfdfc0fe4
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/understanding_spanner/dataflow/csv-to-spanner.py
|
38d499a7afa4564226685fd667d8c1871766072b
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,412
|
py
|
csv-to-spanner.py
|
import argparse
import logging
import re, os
from typing import NamedTuple, List
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.io.gcp.spanner import SpannerInsert
from apache_beam.dataframe.io import read_csv
from apache_beam.dataframe import convert
class PetRow(NamedTuple):
PetID: int
OwnerID: int
PetName: str
PetType: str
Breed: str
beam.coders.registry.register_coder(PetRow, beam.coders.RowCoder)
def reverse_bits(num, bitSize = 32):
binary = bin(num)
reverse = binary[-1:1:-1]
reverse = reverse + (bitSize - len(reverse))*'0'
return int(reverse,2)
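# Illustrative sketch (added comment, not executed): reverse_bits(1) == 2147483648 (0b1 followed
# by 31 zeros) and reverse_bits(2) == 1073741824. Bit-reversing sequential ids spreads otherwise
# monotonically increasing primary keys across the key space, a common way to avoid write
# hotspots in Spanner.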
def main(argv=None, save_main_session=True):
"""Main entry point."""
projectid = os.environ.get('GOOGLE_CLOUD_PROJECT')
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
default='pets.csv',
help='Input filename.')
parser.add_argument(
'--instance',
dest='instance',
default='test-spanner-instance',
help='Spanner instance ID.')
parser.add_argument(
'--database',
dest='database',
default = 'pets-db',
help='Spanner database.')
parser.add_argument(
'--table',
dest='table',
default = 'pets',
help='Spanner table.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(options=pipeline_options) as p:
pets = p | 'Read CSV to dataframe' >> read_csv(known_args.input)
pets = ( convert.to_pcollection(pets)
| 'Convert to PetRow class' >> beam.Map(lambda x : PetRow(**(x._asdict())))
| 'Reverse bits in PetID' >> beam.Map(lambda x : PetRow(reverse_bits(x.PetID), reverse_bits(x.OwnerID), x.PetName, x.PetType, x.Breed))
)
pets | 'Write to Spanner' >> SpannerInsert(
project_id=projectid,
instance_id=known_args.instance,
database_id=known_args.database,
table=known_args.table)
pets | beam.Map(print)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main()
|
dc431f81cdda1f0279477776204023854f06798f
|
4a7f8042d9d0591baf9a776320229b255d95562d
|
/shopify/utils/shop_url.py
|
76e088a73b71b97abf65c102d00e22e8b79cc3c5
|
[
"MIT"
] |
permissive
|
Shopify/shopify_python_api
|
56a175187ee22ede2bc1d26eb5b101989ae73410
|
5f295932bebbdde1835d35c4865093ff83564cdc
|
refs/heads/master
| 2023-09-04T14:44:28.214779
| 2023-04-12T16:10:00
| 2023-04-12T16:10:00
| 2,249,127
| 1,029
| 332
|
MIT
| 2023-09-14T20:51:42
| 2011-08-22T14:49:21
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
shop_url.py
|
import re
import sys
if sys.version_info[0] < 3: # Backwards compatibility for python < v3.0.0
from urlparse import urlparse
else:
from urllib.parse import urlparse
HOSTNAME_PATTERN = r"[a-z0-9][a-z0-9-]*[a-z0-9]"
def sanitize_shop_domain(shop_domain, myshopify_domain="myshopify.com"):
name = str(shop_domain or "").lower().strip()
if myshopify_domain not in name and "." not in name:
name += ".{domain}".format(domain=myshopify_domain)
name = re.sub(r"https?://", "", name)
uri = urlparse("http://{hostname}".format(hostname=name))
if re.match(r"{h}\.{d}$".format(h=HOSTNAME_PATTERN, d=re.escape(myshopify_domain)), uri.netloc):
return uri.netloc
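# Illustrative sketch (added comment, not executed):
#   sanitize_shop_domain("my-shop")                        -> "my-shop.myshopify.com"
#   sanitize_shop_domain("https://my-shop.myshopify.com/") -> "my-shop.myshopify.com"
#   sanitize_shop_domain("not a valid shop")               -> None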
|
25d408e8ab5e13d4758f04ea31486071d5bdfcf8
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/hardware/tests/opentrons_hardware/drivers/can_bus/test_settings.py
|
1833509a4d67577d52a09f8df6b7174fcaa1a008
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,655
|
py
|
test_settings.py
|
"""Tests for pcan bit timing calculations."""
import pytest
from typing import Optional
from opentrons_shared_data.errors.exceptions import CANBusConfigurationError
from opentrons_hardware.drivers.can_bus import settings
@pytest.mark.parametrize(
argnames=["fcan_clock", "bitrate", "sample_rate", "jump_width", "expected"],
argvalues=[
[
None,
None,
None,
None,
settings.PCANParameters(
f_clock_mhz=20, nom_brp=2, nom_tseg1=15, nom_tseg2=4, nom_sjw=1
),
],
[
40,
250000,
None,
None,
settings.PCANParameters(
f_clock_mhz=40, nom_brp=4, nom_tseg1=31, nom_tseg2=8, nom_sjw=1
),
],
[
None,
None,
87.5,
None,
settings.PCANParameters(
f_clock_mhz=20, nom_brp=2, nom_tseg1=16, nom_tseg2=2, nom_sjw=1
),
],
[
60,
None,
None,
4,
settings.PCANParameters(
f_clock_mhz=60, nom_brp=6, nom_tseg1=15, nom_tseg2=4, nom_sjw=4
),
],
],
)
def test_valid_calculate_bit_timings(
fcan_clock: Optional[int],
bitrate: Optional[int],
sample_rate: Optional[float],
jump_width: Optional[int],
expected: settings.PCANParameters,
) -> None:
"""Test valid bit timing calculations."""
result = settings.calculate_fdcan_parameters(
fcan_clock, bitrate, sample_rate, jump_width
)
assert result == expected
@pytest.mark.parametrize(
argnames=["fcan_clock", "bitrate", "sample_rate", "jump_width", "match_str"],
argvalues=[
[
90,
None,
None,
None,
"Clock value 90 exceeds max value of 80",
],
[
None,
None,
None,
130,
"Jump width value 130 exceeds max value of 128",
],
[
80,
10000,
None,
None,
"Calculated TSEG1 799.0 exceeds max value 256",
],
],
)
def test_invalid_calculate_bit_timings(
fcan_clock: Optional[int],
bitrate: Optional[int],
sample_rate: Optional[float],
jump_width: Optional[int],
match_str: str,
) -> None:
"""Test invalid bit timing calculations."""
with pytest.raises(CANBusConfigurationError, match=match_str):
settings.calculate_fdcan_parameters(
fcan_clock, bitrate, sample_rate, jump_width
)
|
959ed7c8210d2f3bf4f4995afa8cf67cb609e707
|
42748690b8eb7a79a94a7431e19f7adfec88ffdc
|
/charcoal/charcoal.py
|
50dcb2de7bea18ed91dc3e2c6668e13b142f5f32
|
[
"BSD-3-Clause"
] |
permissive
|
sky-shiny/smolder
|
4f5a59f0d66b9cf7898aa0165dde27306e89b6d0
|
385df80e83c8370bfe285948242f39e20e99db24
|
refs/heads/master
| 2021-01-18T21:32:11.172441
| 2017-11-19T22:25:22
| 2017-11-19T22:25:22
| 32,863,450
| 103
| 12
| null | 2017-11-19T23:21:18
| 2015-03-25T12:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 14,257
|
py
|
charcoal.py
|
#! /usr/bin/env python
import os
import time
import logging
import warnings
from copy import deepcopy
from ldap3 import Connection, Server, ANONYMOUS, SIMPLE, SYNC, ASYNC
import jsonpickle
import requests
import validictory
from yapsy.PluginManager import PluginManager
from . import COLOURS, get_verify, get_host_overrides, tcptest
from .output import Output
FORMAT = '%(asctime)-15s %(name)s [%(levelname)s]: %(message)s'
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(format=FORMAT, level=logging.ERROR, datefmt="%Y-%m-%d %H:%M:%S")
LOG = logging.getLogger('smolder')
REQUESTS_LOG = logging.getLogger('requests')
REQUESTS_LOG.setLevel(logging.ERROR)
logging.getLogger('yapsy').setLevel(logging.INFO)
manager = PluginManager()
manager.setPluginPlaces([THIS_DIR, "~/.smolder_plugins"])
manager.collectPlugins()
OUTPUT_WIDTH = 108
SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"uri": {
"type": "string",
"required": False
},
"port": {
"type": "integer",
"required": False
},
"inputs": {
"type": "object",
"required": False,
"properties": {
"headers": {"type": "any", "required": False},
"username": {"type": "any", "required": False},
"password": {"type": "any", "required": False},
"cookie": {"type": "any", "required": False},
"data": {"type": "any", "required": False},
"file": {"type": "any", "required": False},
"verify": {"type": "any", "required": False},
"allow_redirects": {"type": "any", "required": False},
"timeout": {"type": "any", "required": False},
"proxies": {"type": "any", "required": False},
"bind_dn": {"type": "any","required": False},
"bind_pw": {"type": "any","required": False},
"dn": {"type": "any","required": False},
"attr_key": {"type": "any", "required": False}
}
},
"outcomes": {
"type": "object",
"required": False
},
"protocol": {
"type": "string",
"required": False,
"enum": ["tcp", "http", "https","ldap","ldaps"]
},
"method": {
"type": "string",
"required": False,
"enum": ["GET", "get", "post", "POST", "put", "PUT", "delete", "DELETE", "option", "OPTION", "PURGE", "purge"]
},
"request_headers": {
"type": "None",
"required": False
},
"url": {"type": "None","required": False}
}
}
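# Illustrative sketch (added comment, not executed): a minimal test dict that validates against
# SCHEMA could look like
#   {"name": "homepage up", "protocol": "https", "port": 443, "uri": "/",
#    "outcomes": {"expect_status_code": 200}}
# Only "name" is required above; "expect_status_code" matches the default outcome set further down.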
def deepupdate(original, update):
"""
Recursively update a dict.
    Sub-dicts won't be overwritten but updated as well.
    :param update: dict whose existing values take precedence; it is updated in place and returned.
    :param original: dict providing default values for keys missing from `update`.
"""
for key, value in original.items():
if key not in update:
update[key] = value
elif isinstance(value, dict):
deepupdate(value, update[key])
return update
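# Illustrative sketch (added comment, not executed):
#   deepupdate({"inputs": {"timeout": 30}, "method": "get"}, {"inputs": {"timeout": 5}})
#   -> {"inputs": {"timeout": 5}, "method": "get"}
# i.e. values already present in `update` win and missing keys are filled in from `original`.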
class Charcoal(object):
def __init__(self, test, host, output_format=None):
"""
:rtype : object
"""
self.passed = 0
self.failed = 0
self.duration_ms = 0
try:
validictory.validate(test, SCHEMA)
LOG.debug("Valid schema")
except ValueError as error:
LOG.error("Error, invalid test format: {0}. Tests now use v0.2 format. v0.1 branch is still available."
.format(error))
raise
try:
self.port = test["port"]
except (AttributeError, KeyError):
print("Warning: No port definition found in the first test, using port 80 as default.")
try:
if test["protocol"] == "https":
self.port = 443
else:
self.port = 80
except (AttributeError, KeyError):
self.port = 80
self.output = Output(output_format=output_format)
LOG.debug("Test: {0}".format(test))
test_defaults = dict(inputs=dict(allow_redirects=False, timeout=30),
method="get",
outcomes=dict(colour_output=True))
if 'protocol' not in test or ('protocol' in test and test['protocol'] in ['http','https']):
test_defaults['outcomes']['expect_status_code'] = 200
host_overrides = get_host_overrides.get_host_overrides(host, self.port)
if host_overrides['hostname'] is not None:
self.host = host_overrides['hostname']
else:
self.host = host
intermediate_dict = deepupdate(test_defaults, host_overrides)
final_dict = deepupdate(intermediate_dict, test)
try:
verify = final_dict["inputs"]["verify"]
except (AttributeError, KeyError):
verify = None
try:
proto = final_dict['protocol']
except (AttributeError, KeyError):
proto = None
(self.verify, self.verify_specified) = get_verify.get_verify(verify, proto)
self.test = deepcopy(final_dict)
if "tcp_test" in test and test["tcp_test"]:
try:
tcptest.tcp_test(self.host, self.port)
self.pass_test("Connecting to {0} on port {1}".format(self.host, self.port))
except Exception:
self.fail_test("Connecting to {0} on port {1}".format(self.host, self.port))
LOG.debug("Test with defaults: {0}".format(self.test))
if "verify" in self.test["inputs"]:
del self.test["inputs"]["verify"]
self.inputs = deepcopy(self.test['inputs'])
request_url_format = '{protocol}://{host}:{port}{uri}'
try:
self.inputs['url'] = request_url_format.format(protocol=self.test['protocol'], host=self.host,
port=self.test['port'],
uri=self.test['uri'])
except KeyError:
self.inputs['url'] = request_url_format.format(protocol=self.test['protocol'], host=self.host,
port=self.test['port'],
uri='')
LOG.debug("Testing {0}".format(self.inputs['url']))
self.output.append("-" * OUTPUT_WIDTH)
this_name = ("{0:^" + str(OUTPUT_WIDTH) + "}").format(self.test['name'][:OUTPUT_WIDTH])
self.output.append(this_name)
this_url = ("{0:^" + str(OUTPUT_WIDTH) + "}").format(self.inputs["url"][:OUTPUT_WIDTH])
self.output.append(this_url)
self.output.append(self.__repr__())
self.output.append("-" * OUTPUT_WIDTH)
self.output.append(self.inputs, sec='request_inputs')
self.run()
def run(self):
with warnings.catch_warnings():
try:
LOG.debug("Verify is a: {0}, with value: {1}".format(type(self.verify), self.verify))
if self.verify:
warnings.simplefilter("error", requests.exceptions.SSLError)
start = int(round(time.time() * 1000))
if self.test["protocol"] in ['http','https']:
self.req = requests.request(self.test['method'].upper(), verify=self.verify, **self.inputs)
elif self.test["protocol"] in ['ldap','ldaps']:
try:
port = int(self.test['port'])
except ValueError:
if self.test["protocol"] == 'ldap':
port = 389
else:
port = 636
# We're going to try hitting the LDAP server at an IP: Ignore the fact
                    # that this doesn't match the LDAP server's cert
# define the server
s = Server(self.host, port=port, use_ssl=self.test['protocol']=='ldaps')
# define the connection
c = Connection(s, user=self.inputs['bind_dn'], password=self.inputs['bind_pw'], read_only=True)
# perform the Bind operation
if not c.bind():
message, status = self.fail_test("Couldn't bind using dn=%s and password %s" % (self.inputs['bind_dn'], '*' * len(self.inputs['bind_pw'])), success=False)
return
self.req = c
elif self.test["protocol"] == 'tcp':
try:
tcptest.tcp_test(self.host, self.port)
self.pass_test("Connecting to {0} on port {1}".format(self.host, self.port))
except Exception as error:
self.fail_test("Connecting to {0} on port {1}".format(self.host, self.port))
end = int(round(time.time() * 1000))
self.duration_ms = end - start
except (RuntimeWarning, requests.exceptions.SSLError):
if self.test["protocol"] in ['http','https']:
warnings.simplefilter("default", requests.exceptions.SSLError)
start = int(round(time.time() * 1000))
try:
self.req = requests.request(self.test['method'].upper(), verify=self.verify, **self.inputs)
except (requests.exceptions.SSLError) as e:
message, status = self.fail_test("Certificate verify failed and not ignored by inputs['verify']: %s" % (str(e)))
self.add_output("SSLVerify", message, status)
return
end = int(round(time.time() * 1000))
self.duration_ms = end - start
if not self.verify_specified:
message, status = self.fail_test("Insecure request not ignored by inputs['verify']")
self.add_output("SecureRequest", message, status)
else:
if self.verify:
message, status = self.fail_test("Insecure request made")
self.add_output("SecureRequest", message, status)
else:
message, status = self.warn_test("Insecure request made and ignored")
self.add_output("SecureRequest", message, status)
# HTTP(s) TESTS
if self.test["protocol"] in ['http','https']:
self.output.append(dict(self.req.headers), sec='response_headers')
self.output.append(self.req.status_code, sec='response_status_code')
if 'show_body' in self.test:
try:
req_content = self.req.content.decode()
except UnicodeDecodeError:
req_content = self.req.content
self.output.append(req_content)
        # Run plugins for everything except TCP tests
if self.test["protocol"] != 'tcp':
for plugin_info in manager.getAllPlugins():
for outcome in self.test['outcomes']:
if plugin_info.name == outcome:
manager.activatePluginByName(plugin_info.name)
message, status = plugin_info.plugin_object.run(self)
self.add_output(plugin_info.name, message, status)
manager.deactivatePluginByName(plugin_info.name)
def __str__(self):
return str(self.output)
def __repr__(self):
LOG.debug(self.inputs)
if "headers" in self.inputs:
if self.inputs["headers"] != '':
header_list = ['"{0}: {1}"'.format(k, v) for k, v in list(self.inputs["headers"].items())]
header = "-H " + (" -H ".join(header_list))
else:
header = self.inputs["headers"]
else:
header = ''
if "data" in self.inputs:
data = '-d' + str(self.inputs["data"])
else:
data = ''
output = ""
# Adding curl output to allow simple debugging of the requests
if self.test['protocol'] in ['http','https']:
if not self.verify and self.test["protocol"] == "https":
curl_insecure = '--insecure'
else:
curl_insecure = ''
command = 'curl {curl_insecure} -v -s -o /dev/null {headers} {data} -X {method} "{uri}"'
output = command.format(method=str(self.test['method']).upper(), headers=header, data=data, uri=self.inputs['url'], curl_insecure=curl_insecure)
elif self.test['protocol'] in ['ldap','ldaps']:
command = 'ldapsearch -LLL -D "{bind_dn}" -w "{bind_pw}" -H {protocol}://{hostname} -xb "{dn}" {attr}'
output = command.format(bind_dn=self.test['inputs']['bind_dn'], bind_pw=self.test['inputs']['bind_pw'], protocol=self.test['protocol'], hostname=self.host, dn=self.test['inputs']['dn'], attr=self.test['inputs']['attr_key'])
return output
def pass_test(self, message):
self.passed += 1
status = "[PASS]"
if self.test["outcomes"]["colour_output"]:
status = COLOURS.to_green(status)
return message, status
def fail_test(self, message):
self.failed += 1
status = "[FAIL]"
if self.test["outcomes"]["colour_output"]:
status = COLOURS.to_red(status)
return message, status
def warn_test(self, message):
self.passed += 1
status = "[WARN]"
if self.test["outcomes"]["colour_output"]:
status = COLOURS.to_yellow(status)
return message, status
def add_output(self, name, message, status):
test_out = name + ": " + message + "." * (OUTPUT_WIDTH - len(name) - len(message) - 8) + status.rjust(8)
self.output.append(test_out)
def _to_json(self):
return jsonpickle.encode(self)
|
12b30ad0e91fb1a3a88d8dc3a6970251cdcee58b
|
84ecf73288879a396d1ff67e5779e07fadde80fb
|
/tools/analysis_tools/visualize/run.py
|
73b521922eaa186c94008fe887f4c8cea1caae28
|
[
"Apache-2.0"
] |
permissive
|
OpenDriveLab/UniAD
|
1ef9d2203ad35bbf8e1eb1a305d409817d90b888
|
2f38ff1357d3956af11c5609d5275db56c559c20
|
refs/heads/main
| 2023-08-09T05:28:26.497452
| 2023-08-07T07:27:30
| 2023-08-07T07:27:30
| 575,306,900
| 2,156
| 233
|
Apache-2.0
| 2023-08-29T02:44:05
| 2022-12-07T08:05:49
|
Python
|
UTF-8
|
Python
| false
| false
| 14,016
|
py
|
run.py
|
import cv2
import torch
import argparse
import os
import glob
import numpy as np
import mmcv
import matplotlib
import matplotlib.pyplot as plt
from nuscenes import NuScenes
from nuscenes.prediction import PredictHelper, convert_local_coords_to_global
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
from nuscenes.utils.data_classes import LidarPointCloud, Box
from nuscenes.utils import splits
from pyquaternion import Quaternion
from projects.mmdet3d_plugin.datasets.nuscenes_e2e_dataset import obtain_map_info
from projects.mmdet3d_plugin.datasets.eval_utils.map_api import NuScenesMap
from PIL import Image
from tools.analysis_tools.visualize.utils import color_mapping, AgentPredictionData
from tools.analysis_tools.visualize.render.bev_render import BEVRender
from tools.analysis_tools.visualize.render.cam_render import CameraRender
class Visualizer:
"""
    Visualizer for UniAD prediction results, rendering BEV and camera views.
"""
def __init__(
self,
dataroot='/mnt/petrelfs/yangjiazhi/e2e_proj/data/nus_mini',
version='v1.0-mini',
predroot=None,
with_occ_map=False,
with_map=False,
with_planning=False,
with_pred_box=True,
with_pred_traj=False,
show_gt_boxes=False,
show_lidar=False,
show_command=False,
show_hd_map=False,
show_sdc_car=False,
show_sdc_traj=False,
show_legend=False):
self.nusc = NuScenes(version=version, dataroot=dataroot, verbose=True)
self.predict_helper = PredictHelper(self.nusc)
self.with_occ_map = with_occ_map
self.with_map = with_map
self.with_planning = with_planning
self.show_lidar = show_lidar
self.show_command = show_command
self.show_hd_map = show_hd_map
self.show_sdc_car = show_sdc_car
self.show_sdc_traj = show_sdc_traj
self.show_legend = show_legend
self.with_pred_traj = with_pred_traj
self.with_pred_box = with_pred_box
self.veh_id_list = [0, 1, 2, 3, 4, 6, 7]
self.use_json = '.json' in predroot
self.token_set = set()
self.predictions = self._parse_predictions_multitask_pkl(predroot)
self.bev_render = BEVRender(show_gt_boxes=show_gt_boxes)
self.cam_render = CameraRender(show_gt_boxes=show_gt_boxes)
if self.show_hd_map:
self.nusc_maps = {
'boston-seaport': NuScenesMap(dataroot=dataroot, map_name='boston-seaport'),
'singapore-hollandvillage': NuScenesMap(dataroot=dataroot, map_name='singapore-hollandvillage'),
'singapore-onenorth': NuScenesMap(dataroot=dataroot, map_name='singapore-onenorth'),
'singapore-queenstown': NuScenesMap(dataroot=dataroot, map_name='singapore-queenstown'),
}
def _parse_predictions_multitask_pkl(self, predroot):
outputs = mmcv.load(predroot)
outputs = outputs['bbox_results']
prediction_dict = dict()
for k in range(len(outputs)):
token = outputs[k]['token']
self.token_set.add(token)
if self.show_sdc_traj:
outputs[k]['boxes_3d'].tensor = torch.cat(
[outputs[k]['boxes_3d'].tensor, outputs[k]['sdc_boxes_3d'].tensor], dim=0)
outputs[k]['scores_3d'] = torch.cat(
[outputs[k]['scores_3d'], outputs[k]['sdc_scores_3d']], dim=0)
outputs[k]['labels_3d'] = torch.cat([outputs[k]['labels_3d'], torch.zeros(
(1,), device=outputs[k]['labels_3d'].device)], dim=0)
# detection
bboxes = outputs[k]['boxes_3d']
scores = outputs[k]['scores_3d']
labels = outputs[k]['labels_3d']
track_scores = scores.cpu().detach().numpy()
track_labels = labels.cpu().detach().numpy()
track_boxes = bboxes.tensor.cpu().detach().numpy()
track_centers = bboxes.gravity_center.cpu().detach().numpy()
track_dims = bboxes.dims.cpu().detach().numpy()
track_yaw = bboxes.yaw.cpu().detach().numpy()
if 'track_ids' in outputs[k]:
track_ids = outputs[k]['track_ids'].cpu().detach().numpy()
else:
track_ids = None
# speed
track_velocity = bboxes.tensor.cpu().detach().numpy()[:, -2:]
# trajectories
            trajs = outputs[k]['traj'].numpy()
            traj_scores = outputs[k]['traj_scores'].numpy()
predicted_agent_list = []
# occflow
if self.with_occ_map:
if 'topk_query_ins_segs' in outputs[k]['occ']:
occ_map = outputs[k]['occ']['topk_query_ins_segs'][0].cpu(
).numpy()
else:
occ_map = np.zeros((1, 5, 200, 200))
else:
occ_map = None
occ_idx = 0
for i in range(track_scores.shape[0]):
if track_scores[i] < 0.25:
continue
if occ_map is not None and track_labels[i] in self.veh_id_list:
occ_map_cur = occ_map[occ_idx, :, ::-1]
occ_idx += 1
else:
occ_map_cur = None
if track_ids is not None:
if i < len(track_ids):
track_id = track_ids[i]
else:
track_id = 0
else:
track_id = None
# if track_labels[i] not in [0, 1, 2, 3, 4, 6, 7]:
# continue
predicted_agent_list.append(
AgentPredictionData(
track_scores[i],
track_labels[i],
track_centers[i],
track_dims[i],
track_yaw[i],
track_velocity[i],
trajs[i],
traj_scores[i],
pred_track_id=track_id,
pred_occ_map=occ_map_cur,
past_pred_traj=None
)
)
if self.with_map:
map_thres = 0.7
score_list = outputs[k]['pts_bbox']['score_list'].cpu().numpy().transpose([
1, 2, 0])
predicted_map_seg = outputs[k]['pts_bbox']['lane_score'].cpu().numpy().transpose([
1, 2, 0]) # H, W, C
predicted_map_seg[..., -1] = score_list[..., -1]
predicted_map_seg = (predicted_map_seg > map_thres) * 1.0
predicted_map_seg = predicted_map_seg[::-1, :, :]
else:
predicted_map_seg = None
if self.with_planning:
# detection
bboxes = outputs[k]['sdc_boxes_3d']
scores = outputs[k]['sdc_scores_3d']
labels = 0
track_scores = scores.cpu().detach().numpy()
track_labels = labels
track_boxes = bboxes.tensor.cpu().detach().numpy()
track_centers = bboxes.gravity_center.cpu().detach().numpy()
track_dims = bboxes.dims.cpu().detach().numpy()
track_yaw = bboxes.yaw.cpu().detach().numpy()
track_velocity = bboxes.tensor.cpu().detach().numpy()[:, -2:]
if self.show_command:
command = outputs[k]['command'][0].cpu().detach().numpy()
else:
command = None
planning_agent = AgentPredictionData(
track_scores[0],
track_labels,
track_centers[0],
track_dims[0],
track_yaw[0],
track_velocity[0],
outputs[k]['planning_traj'][0].cpu().detach().numpy(),
1,
pred_track_id=-1,
pred_occ_map=None,
past_pred_traj=None,
is_sdc=True,
command=command,
)
predicted_agent_list.append(planning_agent)
else:
planning_agent = None
prediction_dict[token] = dict(predicted_agent_list=predicted_agent_list,
predicted_map_seg=predicted_map_seg,
predicted_planning=planning_agent)
return prediction_dict
def visualize_bev(self, sample_token, out_filename, t=None):
self.bev_render.reset_canvas(dx=1, dy=1)
self.bev_render.set_plot_cfg()
if self.show_lidar:
self.bev_render.show_lidar_data(sample_token, self.nusc)
if self.bev_render.show_gt_boxes:
self.bev_render.render_anno_data(
sample_token, self.nusc, self.predict_helper)
if self.with_pred_box:
self.bev_render.render_pred_box_data(
self.predictions[sample_token]['predicted_agent_list'])
if self.with_pred_traj:
self.bev_render.render_pred_traj(
self.predictions[sample_token]['predicted_agent_list'])
if self.with_map:
self.bev_render.render_pred_map_data(
self.predictions[sample_token]['predicted_map_seg'])
if self.with_occ_map:
self.bev_render.render_occ_map_data(
self.predictions[sample_token]['predicted_agent_list'])
if self.with_planning:
self.bev_render.render_pred_box_data(
[self.predictions[sample_token]['predicted_planning']])
self.bev_render.render_planning_data(
self.predictions[sample_token]['predicted_planning'], show_command=self.show_command)
if self.show_hd_map:
self.bev_render.render_hd_map(
self.nusc, self.nusc_maps, sample_token)
if self.show_sdc_car:
self.bev_render.render_sdc_car()
if self.show_legend:
self.bev_render.render_legend()
self.bev_render.save_fig(out_filename + '.jpg')
def visualize_cam(self, sample_token, out_filename):
self.cam_render.reset_canvas(dx=2, dy=3, tight_layout=True)
self.cam_render.render_image_data(sample_token, self.nusc)
self.cam_render.render_pred_track_bbox(
self.predictions[sample_token]['predicted_agent_list'], sample_token, self.nusc)
self.cam_render.render_pred_traj(
self.predictions[sample_token]['predicted_agent_list'], sample_token, self.nusc, render_sdc=self.with_planning)
self.cam_render.save_fig(out_filename + '_cam.jpg')
def combine(self, out_filename):
bev_image = cv2.imread(out_filename + '.jpg')
cam_image = cv2.imread(out_filename + '_cam.jpg')
merge_image = cv2.hconcat([cam_image, bev_image])
cv2.imwrite(out_filename + '.jpg', merge_image)
os.remove(out_filename + '_cam.jpg')
def to_video(self, folder_path, out_path, fps=4, downsample=1):
imgs_path = glob.glob(os.path.join(folder_path, '*.jpg'))
imgs_path = sorted(imgs_path)
img_array = []
for img_path in imgs_path:
img = cv2.imread(img_path)
height, width, channel = img.shape
img = cv2.resize(img, (width//downsample, height //
downsample), interpolation=cv2.INTER_AREA)
height, width, channel = img.shape
size = (width, height)
img_array.append(img)
out = cv2.VideoWriter(
out_path, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
def main(args):
render_cfg = dict(
with_occ_map=False,
with_map=False,
with_planning=True,
with_pred_box=True,
with_pred_traj=True,
show_gt_boxes=False,
show_lidar=False,
show_command=True,
show_hd_map=False,
show_sdc_car=True,
show_legend=True,
show_sdc_traj=False
)
viser = Visualizer(version='v1.0-mini', predroot=args.predroot, dataroot='data/nuscenes', **render_cfg)
if not os.path.exists(args.out_folder):
os.makedirs(args.out_folder)
val_splits = splits.val
scene_token_to_name = dict()
for i in range(len(viser.nusc.scene)):
scene_token_to_name[viser.nusc.scene[i]['token']] = viser.nusc.scene[i]['name']
for i in range(len(viser.nusc.sample)):
sample_token = viser.nusc.sample[i]['token']
scene_token = viser.nusc.sample[i]['scene_token']
if scene_token_to_name[scene_token] not in val_splits:
continue
if sample_token not in viser.token_set:
print(i, sample_token, 'not in prediction pkl!')
continue
viser.visualize_bev(sample_token, os.path.join(args.out_folder, str(i).zfill(3)))
if args.project_to_cam:
viser.visualize_cam(sample_token, os.path.join(args.out_folder, str(i).zfill(3)))
viser.combine(os.path.join(args.out_folder, str(i).zfill(3)))
viser.to_video(args.out_folder, args.demo_video, fps=4, downsample=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--predroot', default='/mnt/nas20/yihan01.hu/tmp/results.pkl', help='Path to results.pkl')
parser.add_argument('--out_folder', default='/mnt/nas20/yihan01.hu/tmp/viz/demo_test/', help='Output folder path')
parser.add_argument('--demo_video', default='mini_val_final.avi', help='Demo video name')
parser.add_argument('--project_to_cam', default=True, help='Project to cam (default: True)')
args = parser.parse_args()
main(args)
|
9c3b2b4f946f242b3f7a89455d325e8ce264ca6d
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/git/migrations/0003_auto_20200427_1211.py
|
4ecf4f11dd5c3b376aa625bc2bb5acbb404b14ce
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
0003_auto_20200427_1211.py
|
# Generated by Django 2.2.12 on 2020-04-27 12:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('git', '0002_auto_20200420_1525'),
]
operations = [
migrations.AlterModelOptions(
name='gitextractionentry',
options={'base_manager_name': 'objects', 'get_latest_by': 'created', 'verbose_name_plural': 'Git extraction entries'},
),
]
|
ff42b2b06294d0d059f46b4ad251e2e9eaa27a3b
|
b13c8608893085fab623cd827b68162e2baebf23
|
/bert_seq2seq/model/bart_model.py
|
555cf694cb08f0aacc74ff968c378b10d1cc2eff
|
[
"Apache-2.0"
] |
permissive
|
920232796/bert_seq2seq
|
0c21cb2ce2ddda803264c1f276588e98777a9b3c
|
c7988b01e3e69d66a061b28974ff9cc8fc4a36de
|
refs/heads/master
| 2022-06-23T23:50:32.625851
| 2022-06-18T12:34:11
| 2022-06-18T12:34:11
| 246,974,265
| 1,195
| 202
|
Apache-2.0
| 2022-06-18T12:34:12
| 2020-03-13T02:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 53,515
|
py
|
bart_model.py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BART model. """
import os
import math
import random
import warnings
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in
torch.nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
gelu = F.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
def linear_act(x):
return x
ACT2FN = {
"relu": F.relu,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"mish": mish,
"linear": linear_act,
"sigmoid": torch.sigmoid,
}
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
class BartConfig():
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BartModel`. It is used to
instantiate a BART model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the BART `facebook/bart-large
<https://huggingface.co/facebook/bart-large>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 50265):
Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.BartModel` or
:class:`~transformers.TFBartModel`.
d_model (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of encoder layers.
decoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of decoder layers.
encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
dropout (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), only
:obj:`True` for `bart-large-cnn`.
encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the encoder. See the `LayerDrop paper <see
https://arxiv.org/abs/1909.11556>`__ for more details.
decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the decoder. See the `LayerDrop paper <see
https://arxiv.org/abs/1909.11556>`__ for more details.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
num_labels: (:obj:`int`, `optional`, defaults to 3):
The number of labels to use in :class:`~transformers.BartForSequenceClassification`.
Example::
>>> from transformers import BartModel, BartConfig
>>> # Initializing a BART facebook/bart-large style configuration
>>> configuration = BartConfig()
>>> # Initializing a model from the facebook/bart-large style configuration
>>> model = BartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "bart"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=21128,
max_position_embeddings=512,
encoder_layers=6,
encoder_ffn_dim=3072,
encoder_attention_heads=12,
decoder_layers=6,
decoder_ffn_dim=3072,
decoder_attention_heads=12,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=768,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
force_bos_token_to_be_generated=False,
use_cache=True,
num_labels=3,
pad_token_id=0,
bos_token_id=101,
eos_token_id=102,
is_encoder_decoder=True,
decoder_start_token_id=102,
):
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.is_encoder_decoder = is_encoder_decoder
self.decoder_start_token_id = decoder_start_token_id
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.force_bos_token_to_be_generated = force_bos_token_to_be_generated # only relevant for CNN
self.output_attentions = False
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
# t1 = torch.tensor([[1, 2, 3, 4]])
# out = shift_tokens_right(t1, 0, 101)
# print(out)
# os._exit(0)
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Can be used for cross attention; returns a mask of shape (tgt_len, tgt_len + past_key_values_length), where rows index the output and columns index the input.
    Generates a lower-triangular mask, used by the LM model.
    Make causal mask used for uni-directional (causal) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# import os
# input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.long)
# out = _make_causal_mask(input_ids.shape, torch.long, 0)
# print(out)
# print(out.shape)
# os._exit(0)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
# import os
# # input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 0]], dtype=torch.long)
# # out = _make_causal_mask(input_ids.shape, torch.long, 0)
# # print(out)
# # print(out.shape)
# mask = torch.tensor([[1, 1, 1, 0]])
# out = _expand_mask(mask, torch.float, tgt_len=6)
# print(out)
# os._exit(0)
class BartLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
assert padding_idx is not None, "`padding_idx` should not be None, but of type int"
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack.
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim, padding_idx=padding_idx)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset)
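# Illustrative sketch (added comment, not executed): for input_ids of shape (2, 5) and
# past_key_values_length=0, `positions` is [0, 1, 2, 3, 4] and the embedding lookup uses rows
# 2..6 of the table because of self.offset, mirroring the padding-offset convention mentioned
# in __init__ above.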
class BartAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
# print("encoder attention mask is " + str(attention_mask))
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
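# Shape sketch (illustrative values, not part of the original code): with bsz = 2,
# tgt_len = 5, embed_dim = 16 and num_heads = 4, the q/k/v projections are reshaped to
# (2, 4, 5, 4), flattened to (8, 5, 4) for the batched matmul, so attn_weights has shape
# (8, 5, src_len) and the output is reshaped back to (2, 5, 16) before out_proj.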
class BartEncoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
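# fp16 guard: activations can overflow to inf/nan after the residual adds, so values
# are clipped just below the maximum representable number of the dtype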
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class BartDecoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = BartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
encoder_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=encoder_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class BartEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`BartEncoderLayer`.
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__()
# keep a reference to the config so forward() can fall back to its defaults when
# output_attentions / output_hidden_states / return_dict are passed as None
self.config = config
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = BartLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is None:
# default mask built from the input ids: treat token id 0 as padding
attention_mask = (input_ids > 0).float()
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# print("encoder is " + str(attention_mask))
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return hidden_states
class BartDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer`
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__()
# keep a reference to the config so forward() can fall back to its defaults when
# optional arguments such as use_cache or output_attentions are passed as None
self.config = config
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = BartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
device = input_ids.device if input_ids is not None else inputs_embeds.device
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
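# e.g. during incremental generation with a cache of length L, only the newest token is
# passed in and it receives position L (plus the learned-embedding offset) below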
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
# this is the lower-triangular (causal) mask; it does not yet exclude the padded positions,
# the expanded attention mask added below is what masks out the pad tokens
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
)
combined_attention_mask = combined_attention_mask.to(device)
if attention_mask is not None and combined_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = combined_attention_mask + _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
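# schematic example (not taken from the original): for a 3-token target whose last token
# is padding, the combined additive mask per query row looks like
# [[0, m, m], [0, 0, m], [0, 0, m]] where m is a very large negative value, i.e. the
# causal blocking of future positions plus the blocking of the padded key column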
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# LayerDrop (https://arxiv.org/abs/1909.11556) is not applied in this simplified decoder
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
return hidden_states
class BartModel(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.config = config
self.encoder = BartEncoder(config, self.shared)
self.decoder = BartDecoder(config, self.shared)
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# different to other models, Bart automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
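# illustrative effect: the decoder inputs are the encoder inputs shifted right by one
# position with the decoder start token prepended, e.g. [0, 31, 42, 2, 1] becomes
# [decoder_start, 0, 31, 42, 2]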
output_attentions = False
output_hidden_states = False
use_cache = False
return_dict = False
if attention_mask is None:
# build encoder mask
attention_mask = (input_ids > 0).float()
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if decoder_attention_mask is None:
decoder_attention_mask = (decoder_input_ids > 0).float()
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
encoder_head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return decoder_outputs, encoder_outputs
class BartForConditionalGeneration(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.model = BartModel(config)
self.config = config
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def get_output_embeddings(self):
return self.lm_head
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
return_dict = True
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
decoder_out, encoder_out = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(decoder_out)
masked_lm_loss = None
if labels is not None:
# per-token loss, masked so that padding positions do not contribute to the average
target_mask = (decoder_input_ids > 0).float().view(-1)
loss_fct = CrossEntropyLoss(reduction="none")
token_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
masked_lm_loss = (token_loss * target_mask).sum() / target_mask.sum()
output = (lm_logits,)
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
if __name__ == "__main__":
config = BartConfig(encoder_layers=2, decoder_layers=2)
model = BartModel(config)
t1 = torch.tensor([[1, 2, 3, 4, 0]], dtype=torch.long)
t2 = torch.tensor([[2, 3, 4, 5, 0, 0]], dtype=torch.long)
print(model)
decoder_out, encoder_out = model(input_ids=t1, decoder_input_ids=t2)
print(decoder_out[0].shape)
print(encoder_out[0].shape)
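# hedged extra demo (added for illustration, not part of the original script): run a
# single BartAttention block on random features to check the expected output shape
attn = BartAttention(embed_dim=config.d_model, num_heads=config.encoder_attention_heads)
dummy = torch.randn(1, 4, config.d_model)
attn_out, _, _ = attn(dummy)
print(attn_out.shape)  # expected: torch.Size([1, 4, config.d_model])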
# model2 = BartForConditionalGeneration(config)
# out = model2(input_ids=t1, labels=t2)
# loss = out[0]
# print(loss)
# out_lm = out[1]
# print(out_lm.shape)
names = []
|
329bbceb911c527659432d90161a9312228e6f4b
|
e3cb932d22853fe684c1b490adeabb55b810bb4b
|
/t5/models/gin/sequence_lengths/cnn_dailymail_v002.gin
|
5802062c67b4c15dc1fc42df8002f9fadbb2b2b1
|
[
"Apache-2.0"
] |
permissive
|
google-research/text-to-text-transfer-transformer
|
b2b621ec9b8b28e96819533d9f52cf86e2a8be06
|
24d9d3b89b129e586bbfe35cffbc5926d88adc5e
|
refs/heads/main
| 2023-08-25T05:02:00.519242
| 2023-08-15T16:55:47
| 2023-08-15T16:56:21
| 215,890,886
| 5,702
| 807
|
Apache-2.0
| 2023-08-26T09:00:52
| 2019-10-17T21:45:14
|
Python
|
UTF-8
|
Python
| false
| false
| 230
|
gin
|
cnn_dailymail_v002.gin
|
# -*-Python-*-
# The 99th %ile input sequence length for CNN/DM is actually longer than this,
# but AFAICT no one actually trains CNN/DM with super long input sequences.
utils.run.sequence_length = {'inputs': 512, 'targets': 512}
|
9c084e8f802b6811a1eaae72626de579503423a7
|
767dae79df18f9868855774464d08864a1d8629b
|
/protonfixes/gamefixes/638970.py
|
7d948d8f477f27cd1e4f67286462c0f4e50e8c67
|
[
"BSD-2-Clause"
] |
permissive
|
simons-public/protonfixes
|
05cd9c2c37c35ce56ec4c3cdcdba375c6eadf530
|
681411ba8ceb5d2d790e674eb7a5b98951d426e6
|
refs/heads/master
| 2022-11-16T04:16:32.764931
| 2022-11-15T00:24:24
| 2022-11-15T00:24:24
| 150,211,569
| 245
| 75
|
NOASSERTION
| 2022-11-15T00:24:25
| 2018-09-25T05:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
638970.py
|
""" Game fix for Yakuza 0
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Disable FSYNC
"""
# Disable fsync to fix saving issues
util.disable_fsync()
|
5c407b6abc3c9d8b0c520b708f428d24c6034154
|
1658f312f3aee37c6c4e2714ac081e081e73a7b8
|
/examples/llama2/chat.py
|
90577571df4a3620364723bba04ddc8eadbd2ec7
|
[
"MIT"
] |
permissive
|
OpenNMT/CTranslate2
|
c96ac260e5a910ba8c7bec1f2ad7945599d90ec4
|
61d34502325bfa3c5ef8a11cd2e391d0efed1bf9
|
refs/heads/master
| 2023-08-16T03:02:30.003902
| 2023-08-04T13:33:06
| 2023-08-04T13:33:06
| 210,299,376
| 1,744
| 185
|
MIT
| 2023-09-13T07:58:59
| 2019-09-23T08:10:42
|
C++
|
UTF-8
|
Python
| false
| false
| 4,661
|
py
|
chat.py
|
import os
import sys
import ctranslate2
import sentencepiece as spm
def main():
model_dir = sys.argv[1]
print("Loading the model...")
generator = ctranslate2.Generator(model_dir, device="cuda")
sp = spm.SentencePieceProcessor(os.path.join(model_dir, "tokenizer.model"))
context_length = 4096
max_generation_length = 512
max_prompt_length = context_length - max_generation_length
dialog = []
while True:
print("")
user_prompt = input("You: ")
dialog.append({"role": "user", "content": user_prompt})
while True:
prompt_tokens = build_prompt(sp, dialog)
if len(prompt_tokens) <= max_prompt_length:
break
# Remove old conversations to reduce the prompt size.
dialog = dialog[2:]
system_prompt_tokens, prompt_tokens = extract_system_prompt(sp, prompt_tokens)
step_results = generator.generate_tokens(
prompt_tokens,
static_prompt=system_prompt_tokens,
max_length=max_generation_length,
sampling_temperature=0.6,
sampling_topk=20,
sampling_topp=1,
)
print("")
print("Llama2: ", end="", flush=True)
text_output = ""
for word in generate_words(sp, step_results):
if text_output:
word = " " + word
print(word, end="", flush=True)
text_output += word
print("")
dialog.append({"role": "assistant", "content": text_output.strip()})
def generate_words(sp, step_results):
tokens_buffer = []
for step_result in step_results:
is_new_word = step_result.token.startswith("▁")
if is_new_word and tokens_buffer:
word = sp.decode(tokens_buffer)
if word:
yield word
tokens_buffer = []
tokens_buffer.append(step_result.token_id)
if tokens_buffer:
word = sp.decode(tokens_buffer)
if word:
yield word
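# illustrative note (not in the original file): sentencepiece marks word starts with the
# "▁" prefix, so a stream of pieces such as ▁Hel, lo, ▁world is grouped and decoded into
# the words "Hello" and "world" by the generator above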
def extract_system_prompt(sp, tokens):
end_tokens = sp.encode_as_pieces(E_SYS)[1:]
end_position = None
for start in range(len(tokens)):
end = start + len(end_tokens)
if tokens[start:end] == end_tokens:
end_position = end
break
if end_position is None:
system_tokens = None
else:
system_tokens = tokens[:end_position]
tokens = tokens[end_position:]
return system_tokens, tokens
# The code below is adapted from
# https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L225-L268
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
def build_prompt(sp, dialog):
if dialog[0]["role"] != "system":
dialog = [
{
"role": "system",
"content": DEFAULT_SYSTEM_PROMPT,
}
] + dialog
dialog = [
{
"role": dialog[1]["role"],
"content": B_SYS + dialog[0]["content"] + E_SYS + dialog[1]["content"],
}
] + dialog[2:]
assert all([msg["role"] == "user" for msg in dialog[::2]]) and all(
[msg["role"] == "assistant" for msg in dialog[1::2]]
), (
"model only supports 'system', 'user' and 'assistant' roles, "
"starting with 'system', then 'user' and alternating (u/a/u/a/u...)"
)
dialog_tokens = sum(
[
["<s>"]
+ sp.encode_as_pieces(
f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()}"
)
+ ["</s>"]
for prompt, answer in zip(
dialog[::2],
dialog[1::2],
)
],
[],
)
assert (
dialog[-1]["role"] == "user"
), f"Last message must be from user, got {dialog[-1]['role']}"
dialog_tokens += ["<s>"] + sp.encode_as_pieces(
f"{B_INST} {(dialog[-1]['content']).strip()} {E_INST}"
)
return dialog_tokens
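# illustrative sketch (not in the original file): for a single-turn dialog the rendered
# prompt corresponds to
#   <s> [INST] <<SYS>>\n{system prompt}\n<</SYS>>\n\n{user message} [/INST]
# and every completed (user, assistant) pair is wrapped in its own <s> ... </s> span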
if __name__ == "__main__":
main()
|
3b3b82414fd66359bf8c81ddabcb67d4c1ef64f0
|
4daab5ba90185bae65169ebb8183c635385ab3f8
|
/autode/wrappers/ORCA.py
|
0d2cf1cf7ee7c9b5c3c385149190ef40f2c5cf0a
|
[
"MIT"
] |
permissive
|
duartegroup/autodE
|
bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
|
4d6667592f083dfcf38de6b75c4222c0a0e7b60b
|
refs/heads/master
| 2023-09-01T15:08:16.028378
| 2023-07-25T08:09:05
| 2023-07-25T08:09:05
| 196,085,570
| 132
| 42
|
MIT
| 2023-09-12T15:20:54
| 2019-07-09T21:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 20,665
|
py
|
ORCA.py
|
import numpy as np
import os
import autode.wrappers.keywords as kws
import autode.wrappers.methods
from typing import List, TYPE_CHECKING
from autode.utils import run_external
from autode.hessians import Hessian
from autode.opt.optimisers.base import ExternalOptimiser
from autode.values import PotentialEnergy, Gradient, Coordinates
from autode.input_output import xyz_file_to_atoms
from autode.config import Config
from autode.utils import work_in_tmp_dir
from autode.log import logger
from autode.exceptions import (
UnsupportedCalculationInput,
CouldNotGetProperty,
NoCalculationOutput,
XYZfileWrongFormat,
AtomsNotFound,
)
if TYPE_CHECKING:
from autode.calculations.executors import CalculationExecutor
from autode.opt.optimisers.base import BaseOptimiser
vdw_gaussian_solvent_dict = {
"water": "Water",
"acetone": "Acetone",
"acetonitrile": "Acetonitrile",
"benzene": "Benzene",
"carbon tetrachloride": "CCl4",
"dichloromethane": "CH2Cl2",
"chloroform": "Chloroform",
"cyclohexane": "Cyclohexane",
"n,n-dimethylformamide": "DMF",
"dimethylsulfoxide": "DMSO",
"ethanol": "Ethanol",
"n-hexane": "Hexane",
"methanol": "Methanol",
"1-octanol": "Octanol",
"pyridine": "Pyridine",
"tetrahydrofuran": "THF",
"toluene": "Toluene",
}
def print_added_internals(inp_file, calc_input):
"""Print the added internal coordinates"""
if calc_input.added_internals is None:
return
for (i, j) in calc_input.added_internals:
print(
"%geom\n" "modify_internal\n" "{ B",
i,
j,
"A } end\n" "end",
file=inp_file,
)
return
def print_distance_constraints(inp_file, molecule):
"""Print the distance constraints to the input file"""
if molecule.constraints.distance is None:
return
print("%geom Constraints", file=inp_file)
for (i, j), dist in molecule.constraints.distance.items():
print("{ B", i, j, dist, "C }", file=inp_file)
print(" end\nend", file=inp_file)
return
def print_cartesian_constraints(inp_file, molecule):
"""Print the Cartesian constraints to the input file"""
if molecule.constraints.cartesian is None:
return
print("%geom Constraints", file=inp_file)
for i in molecule.constraints.cartesian:
print("{ C", i, "C }", file=inp_file)
print(" end\nend", file=inp_file)
return
def print_num_optimisation_steps(inp_file, molecule, calc_input):
"""If there are relatively few atoms increase the number of opt steps"""
if not isinstance(calc_input.keywords, kws.OptKeywords):
return # Not an optimisation so no need to increase steps
if calc_input.keywords.max_opt_cycles is not None:
print(
f"%geom MaxIter {int(calc_input.keywords.max_opt_cycles)} end",
file=inp_file,
)
return
if molecule.n_atoms > 33:
return # Use default behaviour
return
def print_point_charges(inp_file, calc_input):
"""Print a point charge file and add the name to the input file"""
if calc_input.point_charges is None:
return
filename = calc_input.filename.replace(".inp", ".pc")
with open(filename, "w") as pc_file:
print(len(calc_input.point_charges), file=pc_file)
for pc in calc_input.point_charges:
x, y, z = pc.coord
print(
f"{pc.charge:^12.8f} {x:^12.8f} {y:^12.8f} {z:^12.8f}",
file=pc_file,
)
calc_input.additional_filenames.append(filename)
print(f'% pointcharges "{filename}"', file=inp_file)
return
def print_default_params(inp_file):
"""Print some useful default parameters to the input file"""
print(
"%output \nxyzfile=True \nend ",
"%scf \nmaxiter 250 \nend",
"%output\nPrint[P_Hirshfeld] = 1\nend",
"% maxcore",
int(Config.max_core.to("MB")),
sep="\n",
file=inp_file,
)
return
def print_coordinates(inp_file, molecule):
"""Print the coordinates to the input file in the correct format"""
print("*xyz", molecule.charge, molecule.mult, file=inp_file)
for atom in molecule.atoms:
x, y, z = atom.coord
print(
f"{atom.label:<3} {x:^12.8f} {y:^12.8f} {z:^12.8f}", file=inp_file
)
print("*", file=inp_file)
return
class ORCA(autode.wrappers.methods.ExternalMethodOEGH):
def __init__(self):
super().__init__(
executable_name="orca",
path=Config.ORCA.path,
keywords_set=Config.ORCA.keywords,
implicit_solvation_type=Config.ORCA.implicit_solvation_type,
doi_list=["10.1002/wcms.81", "10.1002/wcms.1327"],
)
def __repr__(self):
return f"ORCA(available = {self.is_available})"
def generate_input_for(self, calc: "CalculationExecutor") -> None:
assert calc.input.filename is not None
keywords = self.get_keywords(calc.input, calc.molecule)
assert len(keywords) > 0
with open(calc.input.filename, "w") as inp_file:
print("!", *keywords, file=inp_file)
self.print_solvent(inp_file, calc.molecule, keywords)
print_added_internals(inp_file, calc.input)
print_distance_constraints(inp_file, calc.molecule)
print_cartesian_constraints(inp_file, calc.molecule)
print_num_optimisation_steps(inp_file, calc.molecule, calc.input)
print_point_charges(inp_file, calc.input)
print_default_params(inp_file)
if calc.n_cores > 1:
print(f"%pal nprocs {calc.n_cores}\nend", file=inp_file)
print_coordinates(inp_file, calc.molecule)
return None
@staticmethod
def input_filename_for(calc: "CalculationExecutor") -> str:
return f"{calc.name}.inp"
@staticmethod
def output_filename_for(calc: "CalculationExecutor") -> str:
return f"{calc.name}.out"
def version_in(self, calc: "CalculationExecutor") -> str:
"""Get the version of ORCA used to execute this calculation"""
if not calc.output.exists:
return self._get_version_no_output()
for line in calc.output.file_lines:
if "Program Version" in line and len(line.split()) >= 3:
return line.split()[2]
logger.warning("Could not find the ORCA version number")
return "???"
def execute(self, calc):
@work_in_tmp_dir(
filenames_to_copy=calc.input.filenames,
kept_file_exts=(".out", ".hess", ".xyz", ".inp", ".pc"),
)
def execute_orca():
run_external(
params=[calc.method.path, calc.input.filename],
output_filename=calc.output.filename,
)
execute_orca()
return None
def optimiser_from(self, calc: "CalculationExecutor") -> "BaseOptimiser":
return ORCAOptimiser(output_lines=calc.output.file_lines)
def terminated_normally_in(self, calc: "CalculationExecutor") -> bool:
termination_strings = [
"$end", # at the end of a .hess file
"ORCA TERMINATED NORMALLY",
"The optimization did not converge",
]
for n_line, line in enumerate(reversed(calc.output.file_lines)):
if any(substring in line for substring in termination_strings):
logger.info("orca terminated normally")
return True
if n_line > 30:
# The above lines are pretty close to the end of the file –
# so skip parsing it all
return False
return False
def _energy_from(self, calc: "CalculationExecutor") -> PotentialEnergy:
assert calc.output.filename is not None, "Must have a set output"
if calc.output.filename.endswith(".hess"):
logger.warning("Failed to set the potential energy")
return PotentialEnergy(0.0)
for line in reversed(calc.output.file_lines):
if "FINAL SINGLE POINT ENERGY" in line:
return PotentialEnergy(line.split()[4], units="Ha")
raise CouldNotGetProperty(name="energy")
def coordinates_from(self, calc: "CalculationExecutor") -> Coordinates:
assert calc.output.filename is not None, "Must have a set output"
fn_ext = ".hess" if calc.output.filename.endswith(".hess") else ".out"
# First try the .xyz file generated
xyz_file_name = calc.output.filename.replace(fn_ext, ".xyz")
if os.path.exists(xyz_file_name):
try:
return xyz_file_to_atoms(xyz_file_name).coordinates
except XYZfileWrongFormat:
raise AtomsNotFound(f"Failed to parse {xyz_file_name}")
# Then the Hessian file
hess_file_name = calc.output.filename.replace(fn_ext, ".hess")
if os.path.exists(hess_file_name):
hess_file_lines = open(hess_file_name, "r").readlines()
coords = []
for i, line in enumerate(hess_file_lines):
if "$atoms" not in line:
continue
for aline in hess_file_lines[
i + 2 : i + 2 + calc.molecule.n_atoms
]:
_, _, x, y, z = aline.split()
coords.append([float(x), float(y), float(z)])
return Coordinates(coords, units="a0").to("Å")
# and finally the potentially long .out file
if os.path.exists(calc.output.filename) and fn_ext == ".out":
coords = []
# There could be many sets in the file, so take the last
for i, line in enumerate(calc.output.file_lines):
if "CARTESIAN COORDINATES (ANGSTROEM)" not in line:
continue
coords, n_atoms = [], calc.molecule.n_atoms
for oline in calc.output.file_lines[i + 2 : i + 2 + n_atoms]:
_, x, y, z = oline.split()
coords.append([float(x), float(y), float(z)])
return Coordinates(coords, units="Å")
raise NoCalculationOutput("Failed to find any ORCA output files")
def partial_charges_from(self, calc: "CalculationExecutor") -> List[float]:
"""
e.g.
.HIRSHFELD ANALYSIS
------------------
Total integrated alpha density = 12.997461186
Total integrated beta density = 12.997461186
ATOM CHARGE SPIN
0 C -0.006954 0.000000
. . . .
"""
charges: List[float] = []
for i, line in enumerate(calc.output.file_lines):
if "HIRSHFELD ANALYSIS" in line:
charges = []
first, last = i + 7, i + 7 + calc.molecule.n_atoms
for charge_line in calc.output.file_lines[first:last]:
charges.append(float(charge_line.split()[-2]))
return charges
def gradient_from(self, calc: "CalculationExecutor") -> Gradient:
"""
e.g.
#------------------
CARTESIAN GRADIENT <- i
#------------------
1 C : -0.011390275 -0.000447412 0.000552736 <- j
"""
gradients: List[List[float]] = []
for i, line in enumerate(calc.output.file_lines):
if (
"CARTESIAN GRADIENT" in line
or "The final MP2 gradient" in line
):
gradients = []
if "CARTESIAN GRADIENT" in line:
first, last = i + 3, i + 3 + calc.molecule.n_atoms
if "The final MP2 gradient" in line:
first, last = i + 1, i + 1 + calc.molecule.n_atoms
if "CARTESIAN GRADIENT (NUMERICAL)" in line:
first, last = i + 2, i + 2 + calc.molecule.n_atoms
for grad_line in calc.output.file_lines[first:last]:
if len(grad_line.split()) <= 3:
continue
dadx, dady, dadz = grad_line.split()[-3:]
gradients.append([float(dadx), float(dady), float(dadz)])
return Gradient(gradients, units="Ha a0^-1").to("Ha Å^-1")
@staticmethod
def _start_line_hessian(calc, file_lines):
"""
Find the line where the Hessian starts in an ORCA Hessian file
e.g. H2O.hess
Arguments:
calc (autode.calculation.Calculation):
file_lines (list(str)):
Returns:
(int):
Raises:
(autode.exceptions.CouldNotGetProperty | AssertionError):
"""
for i, line in enumerate(file_lines):
if "$hessian" not in line:
continue
# Ensure the number of atoms is present, and is the number expected
n_atoms = int(file_lines[i + 1].split()[0]) // 3
assert n_atoms == calc.molecule.n_atoms
return i + 3
raise CouldNotGetProperty("No Hessian found in the Hessian file")
def hessian_from(
self, calc: "autode.calculations.executors.CalculationExecutor"
) -> Hessian:
"""Grab the Hessian from the output .hess file
e.g.::
$hessian
9
0 1
2 3 4
0 6.48E-01 4.376E-03 2.411E-09 -3.266E-01 -2.5184E-01
. . . . . .
"""
assert calc.input.keywords is not None, "Must have keywords"
assert calc.output.filename is not None, "Output filename must be set"
hess_filename = calc.output.filename
if calc.output.filename.endswith(".out"):
hess_filename = calc.output.filename.replace(".out", ".hess")
if not os.path.exists(hess_filename):
raise CouldNotGetProperty("Could not find Hessian file")
file_lines = open(hess_filename, "r", encoding="utf-8").readlines()
hessian_blocks = []
start_line = self._start_line_hessian(calc, file_lines)
for j, h_line in enumerate(file_lines[start_line:]):
if len(h_line.split()) == 0:
# Assume we're at the end of the Hessian
break
# Skip blank lines in the file, marked by one or more fewer items
# than the previous
if len(h_line.split()) < len(
file_lines[start_line + j - 1].split()
):
continue
# First item is the coordinate number, thus append all others
hessian_blocks.append([float(v) for v in h_line.split()[1:]])
n_atoms = calc.molecule.n_atoms
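# the .hess file prints the (3N x 3N) Hessian in blocks of columns; the first 3N parsed
# rows form the leading columns and each later block of 3N rows is appended onto the
# matching row (i % 3N) to rebuild the full matrix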
hessian = [block for block in hessian_blocks[: 3 * n_atoms]]
for i, block in enumerate(hessian_blocks[3 * n_atoms :]):
hessian[i % (3 * n_atoms)] += block
return Hessian(
np.array(hessian),
atoms=calc.molecule.atoms,
functional=calc.input.keywords.functional,
units="Ha a0^-2",
).to("Ha Å^-2")
@work_in_tmp_dir(filenames_to_copy=[], kept_file_exts=[])
def _get_version_no_output(self) -> str:
"""
Get the version of ORCA without an existing output file
"""
try:
run_external(
params=[self.path, "-h"],
output_filename="tmp",
stderr_to_log=False,
)
line = next(l for l in open("tmp", "r") if "Program Version" in l)
return line.split()[2]
except (OSError, IOError, StopIteration):
return "???"
def get_keywords(self, calc_input, molecule):
"""Modify the keywords for this calculation with the solvent + fix for
single atom optimisation calls"""
kwds_cls = calc_input.keywords.__class__
new_keywords = kwds_cls()
for keyword in calc_input.keywords:
if "scalfreq" in keyword.lower():
raise UnsupportedCalculationInput(
"Frequency scaling within ORCA will not alter the "
"calculated frequencies. Use ade.Config.freq_scale_factor"
)
if "opt" in keyword.lower() and molecule.n_atoms == 1:
logger.warning("Can't optimise a single atom")
continue
if isinstance(keyword, kws.ECP) and keyword.orca is None:
# Use the default specification for applying ECPs
continue
if isinstance(keyword, kws.MaxOptCycles):
continue # Set in print_num_optimisation_steps
if isinstance(keyword, kws.Keyword):
new_keywords.append(keyword.orca)
else:
new_keywords.append(str(keyword))
if molecule.solvent is not None:
self.add_solvent_keyword(molecule, new_keywords)
# Sort the keywords with all the items with newlines at the end, so
# the first keyword line is a single contiguous line
return kwds_cls(
sorted(new_keywords, key=lambda kw: 1 if "\n" in kw else 0)
)
def use_vdw_gaussian_solvent(self, keywords) -> bool:
"""
Determine if the calculation should use the gaussian charge scheme which
generally affords better convergence for optimisations in implicit solvent
Arguments:
keywords (autode.wrappers.keywords.Keywords):
Returns:
(bool):
"""
assert self.implicit_solvation_type is not None, "Must have a solvent"
if self.implicit_solvation_type.lower() != "cpcm":
return False
if keywords.contain_any_of("freq", "optts") and not self.is_v5:
logger.warning(
"Cannot do analytical frequencies with gaussian "
"charge scheme - switching off"
)
return False
return True
def add_solvent_keyword(self, molecule, keywords):
"""Add a keyword to the input file based on the solvent"""
if self.implicit_solvation_type.lower() not in ["smd", "cpcm"]:
raise UnsupportedCalculationInput(
"Implicit solvent type must be " "either SMD or CPCM"
)
if (
self.use_vdw_gaussian_solvent(keywords)
and molecule.solvent.orca not in vdw_gaussian_solvent_dict
):
err = (
f"CPCM solvent with gaussian charge not available for "
f"{molecule.solvent.name}. Available solvents are "
f"{vdw_gaussian_solvent_dict.keys()}"
)
raise UnsupportedCalculationInput(message=err)
solv_name = vdw_gaussian_solvent_dict[molecule.solvent.orca]
keywords.append(f"CPCM({solv_name})")
return
def print_solvent(self, inp_file, molecule, keywords):
"""Add the solvent block to the input file"""
if molecule.solvent is None:
return
if self.implicit_solvation_type.lower() == "smd":
print(
f"%cpcm\n"
f"smd true\n"
f'SMDsolvent "{molecule.solvent.orca}"\n'
f"end",
file=inp_file,
)
if self.use_vdw_gaussian_solvent(keywords):
print("%cpcm\n" "surfacetype vdw_gaussian\n" "end", file=inp_file)
return
@property
def is_v5(self):
"""Is this ORCA version at least 5.0.0?"""
return self._get_version_no_output()[0] == "5"
class ORCAOptimiser(ExternalOptimiser):
def __init__(self, output_lines: List[str]):
self._lines = output_lines
@property
def converged(self) -> bool:
"""Has the optimisation converged?"""
for line in reversed(self._lines):
if "THE OPTIMIZATION HAS CONVERGED" in line:
return True
return False
@property
def last_energy_change(self) -> "PotentialEnergy":
"""Find the last energy change in the file"""
energies = []
for line in self._lines:
if "FINAL SINGLE POINT ENERGY" in line:
energies.append(PotentialEnergy(line.split()[4], units="Ha"))
if len(energies) < 2:
return PotentialEnergy(np.inf)
return energies[-1] - energies[-2]
orca = ORCA()
|
c08527614094f288f807d71dbec783188cf383db
|
85c668af40853f5ee48fbe8c4045df1a5dd4104e
|
/tests/common/test_shapes.py
|
80da49add72fb459b08706fc77517c4deb6855ce
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"OFL-1.1"
] |
permissive
|
marcomusy/vedo
|
771db91bca05cda864fc7d1776d9140726676704
|
9a9f7c5e9ebf135e5c745c521c898866e3ede0ef
|
refs/heads/master
| 2023-08-21T12:56:35.545713
| 2023-08-14T14:39:37
| 2023-08-14T14:39:37
| 110,261,047
| 1,419
| 206
|
MIT
| 2023-09-02T18:38:22
| 2017-11-10T15:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
test_shapes.py
|
from vedo import Arc, vtk_version
import numpy as np
print('-----------------------------------------------------')
print('VTK Version', vtk_version, "test_shapes.py")
print('-----------------------------------------------------')
#####################################
arc = Arc(center=None, point1=(1, 1, 1), point2=None, normal=(0, 0, 1), angle=np.pi)
assert isinstance(arc, Arc)
|
ee53a3632eab74bbddf47a8ce4206acee28dabd8
|
532ad1aedff8528b2e8af4e4e752f32d58b92b0d
|
/tests/link/c/test_type.py
|
603dfb28d39fe5aa7db2187b68e7ccead5ef23b4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
aesara-devs/aesara
|
ebaa204159d4ddb94ede10580c5b8e39d114713f
|
b5a3cf45f0f6762bb4bb0e2c657f7d3822c74595
|
refs/heads/main
| 2023-08-09T10:56:56.528283
| 2023-07-15T06:15:49
| 2023-07-15T13:28:29
| 221,231,590
| 861
| 142
|
NOASSERTION
| 2023-09-05T03:16:16
| 2019-11-12T14:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 8,730
|
py
|
test_type.py
|
import os
import numpy as np
import pytest
import aesara
from aesara import scalar as aes
from aesara.graph.basic import Apply
from aesara.link.c.op import COp
from aesara.link.c.type import CDataType, CEnumType, EnumList, EnumType
from aesara.tensor.type import TensorType, continuous_dtypes
class ProdOp(COp):
__props__ = ()
def make_node(self, i):
return Apply(self, [i], [CDataType("void *", "py_decref")()])
def c_support_code(self, **kwargs):
return """
void py_decref(void *p) {
Py_XDECREF((PyObject *)p);
}
"""
def c_code(self, node, name, inps, outs, sub):
return """
Py_XDECREF(%(out)s);
%(out)s = (void *)%(inp)s;
Py_INCREF(%(inp)s);
""" % dict(
out=outs[0], inp=inps[0]
)
def c_code_cache_version(self):
return (0,)
def perform(self, *args, **kwargs):
raise NotImplementedError()
class GetOp(COp):
__props__ = ()
def make_node(self, c):
return Apply(self, [c], [TensorType("float32", shape=(None,))()])
def c_support_code(self, **kwargs):
return """
void py_decref(void *p) {
Py_XDECREF((PyObject *)p);
}
"""
def c_code(self, node, name, inps, outs, sub):
return """
Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject *)%(inp)s;
Py_INCREF(%(out)s);
""" % dict(
out=outs[0], inp=inps[0]
)
def c_code_cache_version(self):
return (0,)
def perform(self, *args, **kwargs):
raise NotImplementedError()
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_cdata():
i = TensorType("float32", shape=(None,))()
c = ProdOp()(i)
i2 = GetOp()(c)
mode = None
if aesara.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
# This should be a passthrough function for vectors
f = aesara.function([i], i2, mode=mode)
v = np.random.standard_normal((9,)).astype("float32")
v2 = f(v)
assert (v2 == v).all()
class MyOpEnumList(COp):
__props__ = ("op_chosen",)
params_type = EnumList(
("ADD", "+"),
("SUB", "-"),
("MULTIPLY", "*"),
("DIVIDE", "/"),
ctype="unsigned long long",
)
def __init__(self, choose_op):
assert self.params_type.ADD == 0
assert self.params_type.SUB == 1
assert self.params_type.MULTIPLY == 2
assert self.params_type.DIVIDE == 3
assert self.params_type.fromalias("+") == self.params_type.ADD
assert self.params_type.fromalias("-") == self.params_type.SUB
assert self.params_type.fromalias("*") == self.params_type.MULTIPLY
assert self.params_type.fromalias("/") == self.params_type.DIVIDE
assert self.params_type.has_alias(choose_op)
self.op_chosen = choose_op
def get_params(self, node):
return self.op_chosen
def make_node(self, a, b):
return Apply(self, [aes.as_scalar(a), aes.as_scalar(b)], [aes.float64()])
def perform(self, node, inputs, outputs, op):
a, b = inputs
(o,) = outputs
if op == self.params_type.ADD:
o[0] = a + b
elif op == self.params_type.SUB:
o[0] = a - b
elif op == self.params_type.MULTIPLY:
o[0] = a * b
elif op == self.params_type.DIVIDE:
if any(dtype in continuous_dtypes for dtype in (a.dtype, b.dtype)):
o[0] = a / b
else:
o[0] = a // b
else:
raise NotImplementedError("Unknown op id " + str(op))
o[0] = np.float64(o[0])
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inputs, outputs, sub):
return """
switch(%(op)s) {
case ADD:
%(o)s = %(a)s + %(b)s;
break;
case SUB:
%(o)s = %(a)s - %(b)s;
break;
case MULTIPLY:
%(o)s = %(a)s * %(b)s;
break;
case DIVIDE:
%(o)s = %(a)s / %(b)s;
break;
default:
{%(fail)s}
break;
}
""" % dict(
op=sub["params"], o=outputs[0], a=inputs[0], b=inputs[1], fail=sub["fail"]
)
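# An Op whose single output is a C-side constant selected through a CEnumType;
# the Python-level enum values (0, 1, 2) are only placeholders, while the real
# values come from the C constants declared in c_code/test_cenum.h.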
class MyOpCEnumType(COp):
__props__ = ("python_value",)
params_type = CEnumType(
("MILLION", "million"),
("BILLION", "billion"),
("TWO_BILLIONS", "two_billions"),
ctype="size_t",
)
def c_header_dirs(self, **kwargs):
return [os.path.join(os.path.dirname(__file__), "c_code")]
def c_headers(self, **kwargs):
return ["test_cenum.h"]
def __init__(self, value_name):
# As we see, Python values of constants are not related to real C values.
assert self.params_type.MILLION == 0
assert self.params_type.BILLION == 1
assert self.params_type.TWO_BILLIONS == 2
assert self.params_type.has_alias(value_name)
self.python_value = self.params_type.fromalias(value_name)
def get_params(self, node):
return self.python_value
def make_node(self):
return Apply(self, [], [aes.uint32()])
def perform(self, *args, **kwargs):
raise NotImplementedError()
def c_code_cache_version(self):
return (3,)
def c_code(self, node, name, inputs, outputs, sub):
return """
%(o)s = %(val)s;
""" % dict(
o=outputs[0],
            # params in C code will already contain the expected C constant value.
val=sub["params"],
)
class TestEnumTypes:
def test_enum_class(self):
# Check that invalid enum name raises exception.
for invalid_name in ("a", "_A", "0"):
try:
EnumList(invalid_name)
except AttributeError:
pass
else:
raise Exception("EnumList with invalid name should fail.")
try:
EnumType(**{invalid_name: 0})
except AttributeError:
pass
else:
raise Exception("EnumType with invalid name should fail.")
# Check that invalid enum value raises exception.
try:
EnumType(INVALID_VALUE="string is not allowed.")
except TypeError:
pass
else:
raise Exception("EnumType with invalid value should fail.")
# Check EnumType.
e1 = EnumType(C1=True, C2=12, C3=True, C4=-1, C5=False, C6=0.0)
e2 = EnumType(C1=1, C2=12, C3=1, C4=-1.0, C5=0.0, C6=0)
assert e1 == e2
assert not (e1 != e2)
assert hash(e1) == hash(e2)
# Check access to attributes.
assert len((e1.ctype, e1.C1, e1.C2, e1.C3, e1.C4, e1.C5, e1.C6)) == 7
# Check enum with aliases.
e1 = EnumType(A=("alpha", 0), B=("beta", 1), C=2)
e2 = EnumType(A=("alpha", 0), B=("beta", 1), C=2)
e3 = EnumType(A=("a", 0), B=("beta", 1), C=2)
assert e1 == e2
assert e1 != e3
assert e1.filter("beta") == e1.fromalias("beta") == e1.B == 1
assert e1.filter("C") == e1.fromalias("C") == e1.C == 2
# Check that invalid alias (same as a constant) raises exception.
try:
EnumList(("A", "a"), ("B", "B"))
except TypeError:
EnumList(("A", "a"), ("B", "b"))
else:
raise Exception(
"Enum with an alias name equal to a constant name should fail."
)
def test_op_with_enumlist(self):
a = aes.int32()
b = aes.int32()
c_add = MyOpEnumList("+")(a, b)
c_sub = MyOpEnumList("-")(a, b)
c_multiply = MyOpEnumList("*")(a, b)
c_divide = MyOpEnumList("/")(a, b)
f = aesara.function([a, b], [c_add, c_sub, c_multiply, c_divide])
va = 12
vb = 15
ref = [va + vb, va - vb, va * vb, va // vb]
out = f(va, vb)
assert ref == out, (ref, out)
@pytest.mark.skipif(
not aesara.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_op_with_cenumtype(self):
million = MyOpCEnumType("million")()
billion = MyOpCEnumType("billion")()
two_billions = MyOpCEnumType("two_billions")()
f = aesara.function([], [million, billion, two_billions])
val_million, val_billion, val_two_billions = f()
assert val_million == 1000000
assert val_billion == val_million * 1000
assert val_two_billions == val_billion * 2
@aesara.config.change_flags(**{"cmodule__debug": True})
def test_op_with_cenumtype_debug(self):
self.test_op_with_cenumtype()
----------------------------------------------------------------------
File record: abs_tokenizer.py
  repo: espnet/espnet | path: /espnet2/text/abs_tokenizer.py
  blob id: 858b0567e8d5fc89eafc5d2394c99badd11441f6 | directory id: 279f415dd1e06c594c6c87deda57e201c73c4542
  content id: 21d727d6153eac1db3fb9f880c5013c1002dbcb5
  detected licenses: [Apache-2.0] | license type: permissive
  snapshot id: f7ba47271c1a6b1ed606dbbfb04a7f14220bb585 | revision id: bcd20948db7846ee523443ef9fd78c7a1248c95e | branch: refs/heads/master
  visit date: 2023-08-28T23:43:34.238336 | revision date: 2023-08-23T02:51:39 | committer date: 2023-08-23T02:51:39
  github id: 114,054,873 | stars: 7,242 | forks: 2,244
  gha license: Apache-2.0 | gha event: 2023-09-14T08:01:11 | gha created: 2017-12-13T00:45:11 | gha language: Python
  src encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length: 314 bytes | extension: py | filename: abs_tokenizer.py
----------------------------------------------------------------------
from abc import ABC, abstractmethod
from typing import Iterable, List
class AbsTokenizer(ABC):
@abstractmethod
def text2tokens(self, line: str) -> List[str]:
raise NotImplementedError
@abstractmethod
def tokens2text(self, tokens: Iterable[str]) -> str:
raise NotImplementedError
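# --- Added illustration (not part of espnet): a minimal hypothetical subclass
# showing how the AbsTokenizer interface is meant to be filled in. The
# WhitespaceTokenizer name and its split/join behaviour are assumptions made
# purely for this sketch.
class WhitespaceTokenizer(AbsTokenizer):
    """Toy tokenizer: whitespace-delimited tokens."""

    def text2tokens(self, line: str) -> List[str]:
        return line.split()

    def tokens2text(self, tokens: Iterable[str]) -> str:
        return " ".join(tokens)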
----------------------------------------------------------------------
File record: setup.py
  repo: wdecoster/NanoPlot | path: /setup.py
  blob id: bac52f0d8d6781be18628527e9eae4d107f813c1 | directory id: 3a7fcd9f16304b258b7db0c78ce59116ad4e206e
  content id: 1e2f005e358c7f64d6530ce3f0f039f6175d150c
  detected licenses: [MIT] | license type: permissive
  snapshot id: 09c7fa1ac37bed5b03d1a88f56171a675cc69559 | revision id: 3c45efbf7d2d06b911e63ceca820a5dd92ef4234 | branch: refs/heads/master
  visit date: 2023-07-07T19:49:49.333839 | revision date: 2023-07-01T10:56:21 | committer date: 2023-07-01T10:56:21
  github id: 90,007,962 | stars: 329 | forks: 54
  gha license: MIT | gha event: 2023-05-25T09:46:32 | gha created: 2017-05-02T08:11:23 | gha language: Python
  src encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length: 1,788 bytes | extension: py | filename: setup.py
----------------------------------------------------------------------
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
exec(open("nanoplot/version.py").read())
setup(
name="NanoPlot",
version=__version__, # noqa: F821
description="Plotting suite for Oxford Nanopore sequencing data and alignments",
long_description=open(path.join(here, "README.md"), encoding="utf-8").read(),
long_description_content_type="text/markdown",
url="https://github.com/wdecoster/NanoPlot",
author="Wouter De Coster",
author_email="decosterwouter@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
keywords="nanopore sequencing plotting quality control",
packages=find_packages() + ["extra", "scripts"],
python_requires=">=3",
install_requires=[
"biopython",
"pysam>0.10.0.0",
"pandas>=1.1.0",
"numpy>=1.16.5",
"scipy",
"python-dateutil",
"nanoget>=1.19.1",
"nanomath>=1.0.0",
"plotly>=5.4.0",
"pyarrow",
"kaleido",
],
package_data={"NanoPlot": []},
package_dir={"nanoplot": "nanoplot"},
include_package_data=True,
entry_points={
"console_scripts": [
"NanoPlot=nanoplot.NanoPlot:main",
],
},
data_files=[("", ["LICENSE"])],
)
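# --- Added illustration (not part of the original setup.py): the
# console_scripts entry point above binds the `NanoPlot` command to
# `nanoplot.NanoPlot:main`, so the same entry point can also be reached
# programmatically once the package is installed. Sketch only; the helper
# below is defined but intentionally never called here.
def _example_programmatic_invocation():
    from nanoplot.NanoPlot import main  # same target as the console script
    main()  # entry point invoked by the installed `NanoPlot` command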
----------------------------------------------------------------------
File record: __init__.py
  repo: SySeVR/SySeVR | path: /SySeVR_docker/docker_build/home/SySeVR/softdir/py2neo-py2neo-2.0/py2neo/ext/neobox/__init__.py
  blob id: 0b965c88682632d0c9a71512f9d5d908caf8392b | directory id: 0e5291307525916f95faecaaa175fd7839eddbf8
  content id: 67eb679194daba135a269fc0363024f072c97076
  detected licenses: [Apache-2.0] | license type: permissive
  snapshot id: 9d7df721ac4964c4746e18938b4383e4a8142cc8 | revision id: 5e195d0bc63a76a298b65c9c3460fed0ee3082c7 | branch: refs/heads/master
  visit date: 2022-06-16T22:17:21.117906 | revision date: 2021-10-29T08:59:23 | committer date: 2021-10-29T08:59:23
  github id: 141,377,750 | stars: 255 | forks: 125
  gha license: null | gha event: 2022-06-05T03:02:06 | gha created: 2018-07-18T03:45:49 | gha language: HTML
  src encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
  length: 1,407 bytes | extension: py | filename: __init__.py
----------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. note::
Neobox has been written to work on Linux and may not operate
correctly - or at all - on other platforms.
Neobox is a command line tool and API for managing multiple Neo4j server
installations (boxes), each running on its own unique port. The command
line tool is installed as part of the py2neo setup script and can be used
as follows::
$ neobox install my-box community 2.1.5
Created server instance 'my-box' configured on ports 47470 and 47471
$ neobox list
my-box
$ neobox start my-box
http://localhost:47476/
$ neobox stop my-box
$ neobox drop my-box
$ neobox remove my-box
All command line features are also available as API calls.
"""
from py2neo.ext.neobox.core import *
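# Only Warehouse and Box are re-exported, so `from py2neo.ext.neobox import *`
# exposes exactly those two names.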
__all__ = ["Warehouse", "Box"]