Dataset schema (28 columns; ranges are min to max as reported):

blob_id: string, length 40
directory_id: string, length 40
path: string, length 4 to 721
content_id: string, length 40
detected_licenses: list, length 0 to 57
license_type: string, 2 classes
repo_name: string, length 5 to 91
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable)
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable)
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3 to 113
content: string, length 6 to 10.2M
---
blob_id: 2662607a8e5a3747180c96a39d01a3886cbf6a0d | directory_id: c530897cb72b6943c7226b25824444cad5f3503b
path: /usaspending_api/download/tests/integration/test_populate_monthly_delta_files.py | content_id: 336062ac25c17ebecc54432a012773837190e9b6
detected_licenses: ["CC0-1.0"] | license_type: permissive | repo_name: fedspendingtransparency/usaspending-api
snapshot_id: fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a | revision_id: 38f920438697930ae3ac57bbcaae9034877d8fb7 | branch_name: refs/heads/master
visit_date: 2023-09-01T22:00:36.633612 | revision_date: 2023-08-29T18:39:18 | committer_date: 2023-08-29T18:39:18
github_id: 65,394,827 | star_events_count: 276 | fork_events_count: 118
gha_license_id: CC0-1.0 | gha_event_created_at: 2023-09-14T20:33:15 | gha_created_at: 2016-08-10T15:39:45 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 11,272 | extension: py | filename: test_populate_monthly_delta_files.py
content:
import zipfile
import datetime
import pytest
import os
from django.core.management import call_command
from os import listdir
from model_bakery import baker
from csv import reader
from usaspending_api.settings import HOST
from usaspending_api.awards.models import TransactionDelta
from usaspending_api.common.helpers.sql_helpers import get_database_dsn_string
from usaspending_api.download.v2.download_column_historical_lookups import query_paths
@pytest.fixture
@pytest.mark.django_db(transaction=True)
def monthly_download_delta_data(db, monkeypatch):
baker.make("references.ToptierAgency", toptier_agency_id=1, toptier_code="001", name="Test_Agency")
baker.make("references.Agency", pk=1, toptier_agency_id=1)
baker.make("references.ToptierAgency", toptier_agency_id=2, toptier_code="002", name="Test_Agency 2")
baker.make("references.Agency", pk=2, toptier_agency_id=2)
i = 1
fiscal_year = 2020
baker.make(
"search.AwardSearch",
award_id=i,
generated_unique_award_id="CONT_AWD_1_0_0",
is_fpds=True,
type="B",
type_description="Purchase Order",
piid=f"piid{i}",
awarding_agency_id=1,
funding_agency_id=1,
fiscal_year=fiscal_year,
)
baker.make("awards.FinancialAccountsByAwards", award_id=i)
baker.make(
"search.TransactionSearch",
award_id=i,
transaction_id=i,
is_fpds=True,
transaction_unique_id=i,
usaspending_unique_transaction_id="",
type="B",
type_description="Purchase Order",
period_of_performance_start_date=datetime.datetime(fiscal_year, 5, 7),
period_of_performance_current_end_date=datetime.datetime(fiscal_year, 5, 7),
action_date=datetime.datetime(fiscal_year, 5, 7),
federal_action_obligation=100,
modification_number="1",
transaction_description="a description",
last_modified_date=datetime.datetime(fiscal_year, 5, 7),
award_certified_date=datetime.datetime(fiscal_year, 5, 7),
etl_update_date=datetime.date.today(),
create_date=datetime.datetime(fiscal_year, 5, 7),
update_date=datetime.datetime(fiscal_year, 5, 7),
fiscal_year=fiscal_year,
awarding_agency_id=1,
funding_agency_id=1,
original_loan_subsidy_cost=100.0,
face_value_loan_guarantee=100.0,
funding_amount=100.0,
non_federal_funding_amount=100.0,
generated_unique_award_id="CONT_AWD_1_0_0",
business_categories=[],
detached_award_procurement_id=i,
detached_award_proc_unique=f"test{i}",
piid=f"piid{i}",
agency_id=1,
awarding_sub_tier_agency_c="001",
awarding_subtier_agency_name="Test_Agency",
awarding_agency_code="001",
awarding_toptier_agency_name="Test_Agency",
parent_award_id=f"000{i}",
contract_award_type="B",
contract_award_type_desc="Contract",
)
TransactionDelta.objects.update_or_create_transaction(i)
monkeypatch.setenv("DOWNLOAD_DATABASE_URL", get_database_dsn_string())
@pytest.mark.django_db(transaction=True)
def test_all_agencies(monthly_download_delta_data, monkeypatch):
call_command("populate_monthly_delta_files", "--debugging_skip_deleted", "--last_date=2020-12-31")
file_list = listdir("csv_downloads")
formatted_date = datetime.datetime.strftime(datetime.date.today(), "%Y%m%d")
assert f"FY(All)_All_Contracts_Delta_{formatted_date}.zip" in file_list
os.remove(os.path.normpath(f"csv_downloads/FY(All)_All_Contracts_Delta_{formatted_date}.zip"))
@pytest.mark.django_db(transaction=True)
def test_specific_agency(monthly_download_delta_data, monkeypatch):
contract_data = [
"",
"1",
"test1",
"CONT_AWD_1_0_0",
"piid1",
"1",
"",
"",
"",
"0001",
"",
"100.00",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"2020-05-07",
"2020",
"2020-05-07",
"2020-05-07",
"",
"",
"",
"001",
"Test_Agency",
"001",
"Test_Agency",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"B",
"Contract",
"",
"",
"",
"",
"",
"",
"",
"",
"a description",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
f"{HOST}/award/CONT_AWD_1_0_0/" if "localhost" in HOST else f"https://{HOST}/award/CONT_AWD_1_0_0/",
"2020-05-07",
]
call_command("populate_monthly_delta_files", "--agencies=1", "--debugging_skip_deleted", "--last_date=2020-12-31")
file_list = listdir("csv_downloads")
formatted_date = datetime.datetime.strftime(datetime.date.today(), "%Y%m%d")
assert f"FY(All)_001_Contracts_Delta_{formatted_date}.zip" in file_list
with zipfile.ZipFile(
os.path.normpath(f"csv_downloads/FY(All)_001_Contracts_Delta_{formatted_date}.zip"), "r"
) as zip_ref:
zip_ref.extractall("csv_downloads")
assert f"FY(All)_001_Contracts_Delta_{formatted_date}_1.csv" in listdir("csv_downloads")
with open(
os.path.normpath(f"csv_downloads/FY(All)_001_Contracts_Delta_{formatted_date}_1.csv"), "r"
) as contract_file:
csv_reader = reader(contract_file)
row_count = 0
for row in csv_reader:
if row_count == 0:
# 63 is the character limit for column names
assert row == [s[:63] for s in query_paths["transaction_search"]["d1"].keys()]
else:
assert row == contract_data
row_count += 1
assert row_count == 2
os.remove(os.path.normpath(f"csv_downloads/FY(All)_001_Contracts_Delta_{formatted_date}.zip"))
os.remove(os.path.normpath(f"csv_downloads/FY(All)_001_Contracts_Delta_{formatted_date}_1.csv"))
@pytest.mark.django_db(transaction=True)
def test_award_types(client, monthly_download_delta_data, monkeypatch):
call_command(
"populate_monthly_delta_files",
"--agencies=1",
"--award_types=assistance",
"--debugging_skip_deleted",
"--last_date=2020-12-31",
)
file_list = listdir("csv_downloads")
formatted_date = datetime.datetime.strftime(datetime.date.today(), "%Y%m%d")
assert f"FY(All)_001_Assistance_Delta_{formatted_date}.zip" not in file_list
baker.make(
"search.AwardSearch",
award_id=2,
is_fpds=False,
type="02",
type_description="Block Grant",
fain="fain2",
awarding_agency_id=2,
funding_agency_id=2,
fiscal_year=2020,
)
baker.make(
"search.TransactionSearch",
award_id=2,
transaction_id=2,
is_fpds=False,
transaction_unique_id=2,
type="02",
type_description="Block Grant",
period_of_performance_start_date=datetime.datetime(2020, 5, 7),
period_of_performance_current_end_date=datetime.datetime(2020, 5, 7),
action_date=datetime.datetime(2020, 5, 7),
last_modified_date=datetime.datetime(2020, 5, 7),
award_certified_date=datetime.datetime(2020, 5, 7),
etl_update_date=datetime.date.today(),
create_date=datetime.datetime(2020, 5, 7),
update_date=datetime.datetime(2020, 5, 7),
fiscal_year=2020,
awarding_agency_id=1,
funding_agency_id=1,
generated_unique_award_id=2,
fain="fain2",
awarding_agency_code="001",
awarding_sub_tier_agency_c=1,
awarding_toptier_agency_name="Test_Agency",
awarding_subtier_agency_name="Test_Agency",
)
baker.make("awards.TransactionDelta", transaction_id=2, created_at=datetime.datetime.now())
call_command(
"populate_monthly_delta_files",
"--agencies=1",
"--award_types=assistance",
"--debugging_skip_deleted",
"--last_date=2020-12-31",
)
file_list = listdir("csv_downloads")
formatted_date = datetime.datetime.strftime(datetime.date.today(), "%Y%m%d")
assert f"FY(All)_001_Assistance_Delta_{formatted_date}.zip" in file_list
os.remove(os.path.normpath(f"csv_downloads/FY(All)_001_Assistance_Delta_{formatted_date}.zip"))
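# Illustrative sketch (added for clarity; helper name is hypothetical, not from
# the repo). The tests drive the command through Django's call_command; the
# equivalent shell invocation, assuming the project's usual manage.py entry
# point, would be:
#
#   python manage.py populate_monthly_delta_files --agencies=1 \
#       --award_types=assistance --debugging_skip_deleted --last_date=2020-12-31
#
# The archives land in csv_downloads/ under the naming convention the
# assertions above check, which can be reconstructed like so:
def expected_delta_zip_name(agency: str = "001", award_type: str = "Contracts") -> str:
    formatted_date = datetime.date.today().strftime("%Y%m%d")
    return f"FY(All)_{agency}_{award_type}_Delta_{formatted_date}.zip"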
---
blob_id: efe40a0d750b905df951c3c352c34f9839f5a636 | directory_id: 2853845c003d03db22f67c3303fa1ec333180ae7
path: /web_console_v2/api/fedlearner_webconsole/utils/k8s_cache.py | content_id: 44783205b0460f289e21ad3bd6c0aa98c66cc864
detected_licenses: ["LicenseRef-scancode-proprietary-license", "Apache-2.0"] | license_type: permissive | repo_name: bytedance/fedlearner
snapshot_id: fc1dd2ba2ec88092e83a32732eccea52451ce552 | revision_id: 436e4959952c970917ee8f47b920f0a76cd4dd05 | branch_name: refs/heads/master
visit_date: 2023-08-14T23:01:02.875453 | revision_date: 2023-05-23T03:44:03 | committer_date: 2023-05-23T03:44:03
github_id: 235,348,659 | star_events_count: 893 | fork_events_count: 243
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-06-08T07:37:18 | gha_created_at: 2020-01-21T13:26:35 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4,008 | extension: py | filename: k8s_cache.py
content:
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import threading
from enum import Enum
class EventType(Enum):
ADDED = 'ADDED'
MODIFIED = 'MODIFIED'
DELETED = 'DELETED'
class ObjectType(Enum):
POD = 'POD'
FLAPP = 'FLAPP'
class Event(object):
def __init__(self, flapp_name, event_type, obj_type, obj_dict):
self.flapp_name = flapp_name
self.event_type = event_type
self.obj_type = obj_type
# {'status': {}, 'metadata': {}}
self.obj_dict = obj_dict
@staticmethod
def from_json(event, obj_type):
# TODO(xiangyuxuan): move this to k8s/models.py
event_type = event['type']
obj = event['object']
if obj_type == ObjectType.POD:
obj = obj.to_dict()
metadata = obj.get('metadata')
status = obj.get('status')
flapp_name = metadata['labels']['app-name']
return Event(flapp_name,
EventType(event_type),
obj_type,
obj_dict={'status': status,
'metadata': metadata})
metadata = obj.get('metadata')
status = obj.get('status')
            # the caller puts this event on a queue
return Event(metadata['name'],
EventType(event_type),
obj_type,
obj_dict={'status': status})
class K8sCache(object):
def __init__(self):
self._lock = threading.Lock()
# key: flapp_name, value: a dict
# {'flapp': flapp cache, 'pods': pods cache,
# 'deleted': is flapp deleted}
self._cache = {}
# TODO(xiangyuxuan): use class instead of json to manage cache and queue
def update_cache(self, event: Event):
with self._lock:
flapp_name = event.flapp_name
if flapp_name not in self._cache:
self._cache[flapp_name] = {'pods': {'items': []},
'deleted': False}
            # if it is not a FLApp event, it must be a Pod event
if event.obj_type == ObjectType.FLAPP:
if event.event_type == EventType.DELETED:
self._cache[flapp_name] = {'pods': {'items': []},
'deleted': True}
else:
self._cache[flapp_name]['deleted'] = False
self._cache[flapp_name]['flapp'] = event.obj_dict
else:
if self._cache[flapp_name]['deleted']:
return
existed = False
for index, pod in enumerate(
self._cache[flapp_name]['pods']['items']):
if pod['metadata']['name'] == \
event.obj_dict['metadata']['name']:
existed = True
self._cache[flapp_name]['pods']['items'][index] \
= event.obj_dict
break
if not existed:
self._cache[flapp_name]['pods'][
'items'].append(event.obj_dict)
def get_cache(self, flapp_name):
        # TODO: a read-write lock would make concurrent reads faster
with self._lock:
if flapp_name in self._cache:
return self._cache[flapp_name]
return {'flapp': None, 'pods': {'items': []}}
k8s_cache = K8sCache()
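# Usage sketch (added for illustration; the event payload is hypothetical, not
# from the repo). Feed the module-level cache a FLApp event and read it back;
# the obj_dict shape mirrors the {'status': ..., 'metadata': ...} convention
# documented above.
if __name__ == '__main__':
    demo_event = Event(flapp_name='demo-flapp',
                       event_type=EventType.ADDED,
                       obj_type=ObjectType.FLAPP,
                       obj_dict={'status': {'appState': 'FLStateRunning'},
                                 'metadata': {'name': 'demo-flapp'}})
    k8s_cache.update_cache(demo_event)
    # -> {'pods': {'items': []}, 'deleted': False, 'flapp': {...}}
    print(k8s_cache.get_cache('demo-flapp'))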
---
blob_id: ab7ee3d96b674ec2de4e29f0197737add1a264c2 | directory_id: a5dc4afbe35abe3377f0cf586d6b53207a9e63a4
path: /sewar/no_ref.py | content_id: 84d253ba36b4941dc95c3041b52197bea8db022c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: andrewekhalel/sewar
snapshot_id: a53cfe539eb33b714c0301f1dc6b4ae712af475b | revision_id: c1776bd58a98b1f6d61be6cac8952227a5d98527 | branch_name: refs/heads/master
visit_date: 2023-08-16T22:57:36.498482 | revision_date: 2023-08-15T15:47:35 | committer_date: 2023-08-15T15:47:35
github_id: 145,829,544 | star_events_count: 544 | fork_events_count: 76
gha_license_id: MIT | gha_event_created_at: 2022-12-13T22:46:23 | gha_created_at: 2018-08-23T09:11:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,424 | extension: py | filename: no_ref.py
content:
from __future__ import absolute_import, division, print_function
import numpy as np
from .full_ref import uqi
from scipy.ndimage.filters import uniform_filter
from .utils import imresize
def d_lambda (ms,fused,p=1):
"""calculates Spectral Distortion Index (D_lambda).
:param ms: low resolution multispectral image.
:param fused: high resolution fused image.
:param p: parameter to emphasize large spectral differences (default = 1).
:returns: float -- D_lambda.
"""
L = ms.shape[2]
M1 = np.ones((L,L))
M2 = np.ones((L,L))
for l in range(L):
for r in range(l+1,L):
M1[l,r] = M1[r,l] = uqi(fused[:,:,l],fused[:,:,r])
M2[l,r] = M2[r,l] = uqi(ms[:,:,l],ms[:,:,r])
diff = np.abs(M1 - M2)**p
return (1./(L*(L-1)) * np.sum(diff))**(1./p)
def d_s (pan,ms,fused,q=1,r=4,ws=7):
"""calculates Spatial Distortion Index (D_S).
:param pan: high resolution panchromatic image.
:param ms: low resolution multispectral image.
:param fused: high resolution fused image.
:param q: parameter to emphasize large spatial differences (default = 1).
:param r: ratio of high resolution to low resolution (default=4).
:param ws: sliding window size (default = 7).
:returns: float -- D_S.
"""
pan = pan.astype(np.float64)
fused = fused.astype(np.float64)
pan_degraded = uniform_filter(pan.astype(np.float64), size=ws)/(ws**2)
pan_degraded = imresize(pan_degraded,(pan.shape[0]//r,pan.shape[1]//r))
L = ms.shape[2]
M1 = np.zeros(L)
M2 = np.zeros(L)
for l in range(L):
M1[l] = uqi(fused[:,:,l],pan)
M2[l] = uqi(ms[:,:,l],pan_degraded)
diff = np.abs(M1 - M2)**q
return ((1./L)*(np.sum(diff)))**(1./q)
def qnr (pan,ms,fused,alpha=1,beta=1,p=1,q=1,r=4,ws=7):
"""calculates Quality with No Reference (QNR).
:param pan: high resolution panchromatic image.
:param ms: low resolution multispectral image.
:param fused: high resolution fused image.
:param alpha: emphasizes relevance of spectral distortions to the overall.
:param beta: emphasizes relevance of spatial distortions to the overall.
:param p: parameter to emphasize large spectral differences (default = 1).
:param q: parameter to emphasize large spatial differences (default = 1).
:param r: ratio of high resolution to low resolution (default=4).
:param ws: sliding window size (default = 7).
:returns: float -- QNR.
"""
a = (1-d_lambda(ms,fused,p=p))**alpha
b = (1-d_s(pan,ms,fused,q=q,ws=ws,r=r))**beta
return a*b
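# Usage sketch (added for illustration; shapes are hypothetical). QNR combines
# the two indices above as (1 - D_lambda)**alpha * (1 - D_s)**beta, so a perfect
# fusion scores 1 and heavy distortion pushes the value toward 0. With the
# default ratio r=4 the pan/fused grids are 4x the ms grid on each axis.
# Note: scipy.ndimage.filters is deprecated in recent SciPy releases; modern
# code imports uniform_filter from scipy.ndimage directly.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    pan = rng.random((256, 256))        # high-resolution panchromatic band
    ms = rng.random((64, 64, 4))        # low-resolution multispectral cube
    fused = rng.random((256, 256, 4))   # fused product on the pan grid
    print('D_lambda:', d_lambda(ms, fused))
    print('D_S:     ', d_s(pan, ms, fused))
    print('QNR:     ', qnr(pan, ms, fused))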
---
blob_id: 5f00a2f77678f99cb5eff21c58b5c0a2368e77e5 | directory_id: c69fc4000c675f4405bd2b00e749fadaf629d3b3
path: /tests/test_max_score.py | content_id: 9d5a4c8d9f6569b89631dbc9a7b0ae9cab8c1818
detected_licenses: ["MIT"] | license_type: permissive | repo_name: SimonBlanke/Hyperactive
snapshot_id: dedf75e77bbd0c3020ce7b8f3d9382b7474f8590 | revision_id: 23cc6adf36a13a9cac2b544117a41edd889563f0 | branch_name: refs/heads/master
visit_date: 2023-08-25T01:00:10.666237 | revision_date: 2023-07-23T15:23:19 | committer_date: 2023-07-23T15:23:19
github_id: 155,687,643 | star_events_count: 473 | fork_events_count: 45
gha_license_id: MIT | gha_event_created_at: 2023-06-28T15:34:12 | gha_created_at: 2018-11-01T08:53:30 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,092 | extension: py | filename: test_max_score.py
content:
import time
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from hyperactive import Hyperactive
from hyperactive.optimizers import (
RandomSearchOptimizer,
HillClimbingOptimizer,
)
def objective_function(para):
score = -para["x1"] * para["x1"]
return score
search_space = {
"x1": list(np.arange(0, 100000, 0.1)),
}
def test_max_score_0():
def objective_function(para):
score = -para["x1"] * para["x1"]
return score
search_space = {
"x1": list(np.arange(0, 100, 0.1)),
}
max_score = -9999
opt = HillClimbingOptimizer(
epsilon=0.01,
rand_rest_p=0,
)
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
optimizer=opt,
n_iter=100000,
initialize={"warm_start": [{"x1": 99}]},
max_score=max_score,
)
hyper.run()
print("\n Results head \n", hyper.search_data(objective_function).head())
print("\n Results tail \n", hyper.search_data(objective_function).tail())
print("\nN iter:", len(hyper.search_data(objective_function)))
assert -100 > hyper.best_score(objective_function) > max_score
def test_max_score_1():
def objective_function(para):
score = -para["x1"] * para["x1"]
time.sleep(0.01)
return score
search_space = {
"x1": list(np.arange(0, 100, 0.1)),
}
max_score = -9999
c_time = time.perf_counter()
hyper = Hyperactive()
hyper.add_search(
objective_function,
search_space,
n_iter=100000,
initialize={"warm_start": [{"x1": 99}]},
max_score=max_score,
)
hyper.run()
diff_time = time.perf_counter() - c_time
print("\n Results head \n", hyper.search_data(objective_function).head())
print("\n Results tail \n", hyper.search_data(objective_function).tail())
print("\nN iter:", len(hyper.search_data(objective_function)))
assert diff_time < 1
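# Minimal sketch (added for illustration; mirrors the API usage in the tests
# above, the function name is hypothetical). max_score turns the search into an
# early-stopping run: Hyperactive halts as soon as the objective reaches the
# threshold, which is what the two tests verify via the iteration count and
# the wall-clock time respectively.
def example_max_score_run():
    def objective(para):
        return -para["x1"] * para["x1"]

    hyper = Hyperactive()
    hyper.add_search(
        objective,
        {"x1": list(np.arange(0, 100, 0.1))},
        n_iter=100000,
        max_score=-9999,  # stop once a score above this value is found
    )
    hyper.run()
    return hyper.best_score(objective)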
---
blob_id: b4ac52b6279877e22c01e9a3eef5f69ed2b18d02 | directory_id: 952dc66c61966f099756cdb6c2d13b40352f63cc
path: /zerver/webhooks/bitbucket2/view.py | content_id: 1eca871e9e7a5ef59f9e962e33c4e006b8addd9c
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-free-unknown"] | license_type: permissive | repo_name: zulip/zulip
snapshot_id: 5ae6aad35fd9f72996c0a2a9cdd674400966ebf6 | revision_id: 965a25d91b6ee2db54038f5df855215fa25146b0 | branch_name: refs/heads/main
visit_date: 2023-08-28T23:43:00.971110 | revision_date: 2023-08-28T16:47:09 | committer_date: 2023-08-28T19:33:02
github_id: 43,160,685 | star_events_count: 20,239 | fork_events_count: 8,996
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T20:57:47 | gha_created_at: 2015-09-25T16:37:25 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 20,088 | extension: py | filename: view.py
content:
# Webhooks for external integrations.
import re
import string
from functools import partial
from typing import Dict, List, Optional, Protocol
from django.http import HttpRequest, HttpResponse
from zerver.decorator import log_unsupported_webhook_event, webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_bool, check_int, check_string, to_wild_value
from zerver.lib.webhooks.common import (
check_send_webhook_message,
validate_extract_webhook_http_header,
)
from zerver.lib.webhooks.git import (
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_commits_comment_action_message,
get_force_push_commits_event_message,
get_issue_event_message,
get_pull_request_event_message,
get_push_commits_event_message,
get_push_tag_event_message,
get_remove_branch_event_message,
get_short_sha,
)
from zerver.models import UserProfile
BITBUCKET_TOPIC_TEMPLATE = "{repository_name}"
BITBUCKET_FORK_BODY = "{actor} forked the repository into [{fork_name}]({fork_url})."
BITBUCKET_COMMIT_STATUS_CHANGED_BODY = (
"[System {key}]({system_url}) changed status of {commit_info} to {status}."
)
BITBUCKET_REPO_UPDATED_CHANGED = (
"{actor} changed the {change} of the **{repo_name}** repo from **{old}** to **{new}**"
)
BITBUCKET_REPO_UPDATED_ADDED = (
"{actor} changed the {change} of the **{repo_name}** repo to **{new}**"
)
PULL_REQUEST_SUPPORTED_ACTIONS = [
"approved",
"unapproved",
"created",
"updated",
"rejected",
"fulfilled",
"comment_created",
"comment_updated",
"comment_deleted",
]
ALL_EVENT_TYPES = [
"change_commit_status",
"pull_request_comment_created",
"pull_request_updated",
"pull_request_unapproved",
"push",
"pull_request_approved",
"pull_request_fulfilled",
"issue_created",
"issue_commented",
"fork",
"pull_request_comment_updated",
"pull_request_created",
"pull_request_rejected",
"repo:updated",
"issue_updated",
"commit_comment",
"pull_request_comment_deleted",
]
@webhook_view("Bitbucket2", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_bitbucket2_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
branches: Optional[str] = REQ(default=None),
user_specified_topic: Optional[str] = REQ("topic", default=None),
) -> HttpResponse:
type = get_type(request, payload)
if type == "push":
# ignore push events with no changes
if not payload["push"]["changes"]:
return json_success(request)
branch = get_branch_name_for_push_event(payload)
if branch and branches and branches.find(branch) == -1:
return json_success(request)
topics = get_push_topics(payload)
bodies = get_push_bodies(request, payload)
for b, t in zip(bodies, topics):
check_send_webhook_message(
request, user_profile, t, b, type, unquote_url_parameters=True
)
else:
topic = get_topic_based_on_type(payload, type)
body_function = get_body_based_on_type(type)
body = body_function(
request,
payload,
include_title=user_specified_topic is not None,
)
check_send_webhook_message(
request, user_profile, topic, body, type, unquote_url_parameters=True
)
return json_success(request)
def get_topic_for_branch_specified_events(
payload: WildValue, branch_name: Optional[str] = None
) -> str:
return TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=get_repository_name(payload["repository"]),
branch=get_branch_name_for_push_event(payload) if branch_name is None else branch_name,
)
def get_push_topics(payload: WildValue) -> List[str]:
topics_list = []
for change in payload["push"]["changes"]:
potential_tag = (change["new"] or change["old"])["type"].tame(check_string)
if potential_tag == "tag":
topics_list.append(get_topic(payload))
else:
if change.get("new"):
branch_name = change["new"]["name"].tame(check_string)
else:
branch_name = change["old"]["name"].tame(check_string)
topics_list.append(get_topic_for_branch_specified_events(payload, branch_name))
return topics_list
def get_topic(payload: WildValue) -> str:
return BITBUCKET_TOPIC_TEMPLATE.format(
repository_name=get_repository_name(payload["repository"])
)
def get_topic_based_on_type(payload: WildValue, type: str) -> str:
if type.startswith("pull_request"):
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload["repository"]),
type="PR",
id=payload["pullrequest"]["id"].tame(check_int),
title=payload["pullrequest"]["title"].tame(check_string),
)
if type.startswith("issue"):
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload["repository"]),
type="issue",
id=payload["issue"]["id"].tame(check_int),
title=payload["issue"]["title"].tame(check_string),
)
assert type != "push"
return get_topic(payload)
def get_type(request: HttpRequest, payload: WildValue) -> str:
if "push" in payload:
return "push"
elif "fork" in payload:
return "fork"
elif "comment" in payload and "commit" in payload:
return "commit_comment"
elif "commit_status" in payload:
return "change_commit_status"
elif "issue" in payload:
if "changes" in payload:
return "issue_updated"
if "comment" in payload:
return "issue_commented"
return "issue_created"
elif "pullrequest" in payload:
pull_request_template = "pull_request_{}"
# Note that we only need the HTTP header to determine pullrequest events.
# We rely on the payload itself to determine the other ones.
event_key = validate_extract_webhook_http_header(request, "X-Event-Key", "BitBucket")
assert event_key is not None
action = re.match("pullrequest:(?P<action>.*)$", event_key)
if action:
action_group = action.group("action")
if action_group in PULL_REQUEST_SUPPORTED_ACTIONS:
return pull_request_template.format(action_group)
else:
event_key = validate_extract_webhook_http_header(request, "X-Event-Key", "BitBucket")
if event_key == "repo:updated":
return event_key
raise UnsupportedWebhookEventTypeError(event_key)
class BodyGetter(Protocol):
def __call__(self, request: HttpRequest, payload: WildValue, include_title: bool) -> str:
...
def get_body_based_on_type(
type: str,
) -> BodyGetter:
return GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER[type]
def get_push_bodies(request: HttpRequest, payload: WildValue) -> List[str]:
messages_list = []
for change in payload["push"]["changes"]:
potential_tag = (change["new"] or change["old"])["type"].tame(check_string)
if potential_tag == "tag":
messages_list.append(get_push_tag_body(request, payload, change))
# if change['new'] is None, that means a branch was deleted
elif change["new"].value is None:
messages_list.append(get_remove_branch_push_body(request, payload, change))
elif change["forced"].tame(check_bool):
messages_list.append(get_force_push_body(request, payload, change))
else:
messages_list.append(get_normal_push_body(request, payload, change))
return messages_list
def get_remove_branch_push_body(request: HttpRequest, payload: WildValue, change: WildValue) -> str:
return get_remove_branch_event_message(
get_actor_info(request, payload),
change["old"]["name"].tame(check_string),
)
def get_force_push_body(request: HttpRequest, payload: WildValue, change: WildValue) -> str:
return get_force_push_commits_event_message(
get_actor_info(request, payload),
change["links"]["html"]["href"].tame(check_string),
change["new"]["name"].tame(check_string),
change["new"]["target"]["hash"].tame(check_string),
)
def get_commit_author_name(request: HttpRequest, commit: WildValue) -> str:
if "user" in commit["author"]:
return get_user_info(request, commit["author"]["user"])
return commit["author"]["raw"].tame(check_string).split()[0]
def get_normal_push_body(request: HttpRequest, payload: WildValue, change: WildValue) -> str:
commits_data = [
{
"name": get_commit_author_name(request, commit),
"sha": commit["hash"].tame(check_string),
"url": commit["links"]["html"]["href"].tame(check_string),
"message": commit["message"].tame(check_string),
}
for commit in change["commits"]
]
return get_push_commits_event_message(
get_actor_info(request, payload),
change["links"]["html"]["href"].tame(check_string),
change["new"]["name"].tame(check_string),
commits_data,
is_truncated=change["truncated"].tame(check_bool),
)
def get_fork_body(request: HttpRequest, payload: WildValue, include_title: bool) -> str:
return BITBUCKET_FORK_BODY.format(
actor=get_user_info(request, payload["actor"]),
fork_name=get_repository_full_name(payload["fork"]),
fork_url=get_repository_url(payload["fork"]),
)
def get_commit_comment_body(request: HttpRequest, payload: WildValue, include_title: bool) -> str:
comment = payload["comment"]
action = "[commented]({})".format(comment["links"]["html"]["href"].tame(check_string))
return get_commits_comment_action_message(
get_actor_info(request, payload),
action,
comment["commit"]["links"]["html"]["href"].tame(check_string),
comment["commit"]["hash"].tame(check_string),
comment["content"]["raw"].tame(check_string),
)
def get_commit_status_changed_body(
request: HttpRequest, payload: WildValue, include_title: bool
) -> str:
commit_api_url = payload["commit_status"]["links"]["commit"]["href"].tame(check_string)
commit_id = commit_api_url.split("/")[-1]
commit_info = "[{short_commit_id}]({repo_url}/commits/{commit_id})".format(
repo_url=get_repository_url(payload["repository"]),
short_commit_id=get_short_sha(commit_id),
commit_id=commit_id,
)
return BITBUCKET_COMMIT_STATUS_CHANGED_BODY.format(
key=payload["commit_status"]["key"].tame(check_string),
system_url=payload["commit_status"]["url"].tame(check_string),
commit_info=commit_info,
status=payload["commit_status"]["state"].tame(check_string),
)
def get_issue_commented_body(request: HttpRequest, payload: WildValue, include_title: bool) -> str:
action = "[commented]({}) on".format(
payload["comment"]["links"]["html"]["href"].tame(check_string)
)
return get_issue_action_body(request, payload, action, include_title)
def get_issue_action_body(
request: HttpRequest, payload: WildValue, action: str, include_title: bool
) -> str:
issue = payload["issue"]
assignee = None
message = None
if action == "created":
if issue["assignee"]:
assignee = get_user_info(request, issue["assignee"])
message = issue["content"]["raw"].tame(check_string)
return get_issue_event_message(
user_name=get_actor_info(request, payload),
action=action,
url=issue["links"]["html"]["href"].tame(check_string),
number=issue["id"].tame(check_int),
message=message,
assignee=assignee,
title=issue["title"].tame(check_string) if include_title else None,
)
def get_pull_request_action_body(
request: HttpRequest, payload: WildValue, action: str, include_title: bool
) -> str:
pull_request = payload["pullrequest"]
target_branch = None
base_branch = None
if action == "merged":
target_branch = pull_request["source"]["branch"]["name"].tame(check_string)
base_branch = pull_request["destination"]["branch"]["name"].tame(check_string)
return get_pull_request_event_message(
user_name=get_actor_info(request, payload),
action=action,
url=get_pull_request_url(pull_request),
number=pull_request["id"].tame(check_int),
target_branch=target_branch,
base_branch=base_branch,
title=pull_request["title"].tame(check_string) if include_title else None,
)
def get_pull_request_created_or_updated_body(
request: HttpRequest, payload: WildValue, action: str, include_title: bool
) -> str:
pull_request = payload["pullrequest"]
assignee = None
if pull_request["reviewers"]:
assignee = get_user_info(request, pull_request["reviewers"][0])
return get_pull_request_event_message(
user_name=get_actor_info(request, payload),
action=action,
url=get_pull_request_url(pull_request),
number=pull_request["id"].tame(check_int),
target_branch=pull_request["source"]["branch"]["name"].tame(check_string)
if action == "created"
else None,
base_branch=pull_request["destination"]["branch"]["name"].tame(check_string)
if action == "created"
else None,
message=pull_request["description"].tame(check_string),
assignee=assignee,
title=pull_request["title"].tame(check_string) if include_title else None,
)
def get_pull_request_comment_created_action_body(
request: HttpRequest,
payload: WildValue,
include_title: bool,
) -> str:
action = "[commented]({})".format(
payload["comment"]["links"]["html"]["href"].tame(check_string)
)
return get_pull_request_comment_action_body(request, payload, action, include_title)
def get_pull_request_deleted_or_updated_comment_action_body(
request: HttpRequest,
payload: WildValue,
action: str,
include_title: bool,
) -> str:
action = "{} a [comment]({})".format(
action, payload["comment"]["links"]["html"]["href"].tame(check_string)
)
return get_pull_request_comment_action_body(request, payload, action, include_title)
def get_pull_request_comment_action_body(
request: HttpRequest,
payload: WildValue,
action: str,
include_title: bool,
) -> str:
action += " on"
return get_pull_request_event_message(
user_name=get_actor_info(request, payload),
action=action,
url=payload["pullrequest"]["links"]["html"]["href"].tame(check_string),
number=payload["pullrequest"]["id"].tame(check_int),
message=payload["comment"]["content"]["raw"].tame(check_string),
title=payload["pullrequest"]["title"].tame(check_string) if include_title else None,
)
def get_push_tag_body(request: HttpRequest, payload: WildValue, change: WildValue) -> str:
if change.get("new"):
tag = change["new"]
action = "pushed"
elif change.get("old"):
tag = change["old"]
action = "removed"
return get_push_tag_event_message(
get_actor_info(request, payload),
tag["name"].tame(check_string),
tag_url=tag["links"]["html"]["href"].tame(check_string),
action=action,
)
def append_punctuation(title: str, message: str) -> str:
if title[-1] not in string.punctuation:
message = f"{message}."
return message
def get_repo_updated_body(request: HttpRequest, payload: WildValue, include_title: bool) -> str:
changes = ["website", "name", "links", "language", "full_name", "description"]
body = ""
repo_name = payload["repository"]["name"].tame(check_string)
actor = get_actor_info(request, payload)
for change in changes:
new = payload["changes"][change]["new"]
old = payload["changes"][change]["old"]
if change == "full_name":
change = "full name"
if new and old:
message = BITBUCKET_REPO_UPDATED_CHANGED.format(
actor=actor,
change=change,
repo_name=repo_name,
old=str(old.value),
new=str(new.value),
)
message = append_punctuation(str(new.value), message) + "\n"
body += message
elif new and not old:
message = BITBUCKET_REPO_UPDATED_ADDED.format(
actor=actor,
change=change,
repo_name=repo_name,
new=str(new.value),
)
message = append_punctuation(str(new.value), message) + "\n"
body += message
return body
def get_pull_request_url(pullrequest_payload: WildValue) -> str:
return pullrequest_payload["links"]["html"]["href"].tame(check_string)
def get_repository_url(repository_payload: WildValue) -> str:
return repository_payload["links"]["html"]["href"].tame(check_string)
def get_repository_name(repository_payload: WildValue) -> str:
return repository_payload["name"].tame(check_string)
def get_repository_full_name(repository_payload: WildValue) -> str:
return repository_payload["full_name"].tame(check_string)
def get_user_info(request: HttpRequest, dct: WildValue) -> str:
# See https://developer.atlassian.com/cloud/bitbucket/bitbucket-api-changes-gdpr/
# Since GDPR, we don't get username; instead, we either get display_name
# or nickname.
if "display_name" in dct:
return dct["display_name"].tame(check_string)
if "nickname" in dct:
return dct["nickname"].tame(check_string)
# We call this an unsupported_event, even though we
# are technically still sending a message.
log_unsupported_webhook_event(
request=request,
summary="Could not find display_name/nickname field",
)
return "Unknown user"
def get_actor_info(request: HttpRequest, payload: WildValue) -> str:
actor = payload["actor"]
return get_user_info(request, actor)
def get_branch_name_for_push_event(payload: WildValue) -> Optional[str]:
change = payload["push"]["changes"][-1]
potential_tag = (change["new"] or change["old"])["type"].tame(check_string)
if potential_tag == "tag":
return None
else:
return (change["new"] or change["old"])["name"].tame(check_string)
GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER: Dict[str, BodyGetter] = {
"fork": get_fork_body,
"commit_comment": get_commit_comment_body,
"change_commit_status": get_commit_status_changed_body,
"issue_updated": partial(get_issue_action_body, action="updated"),
"issue_created": partial(get_issue_action_body, action="created"),
"issue_commented": get_issue_commented_body,
"pull_request_created": partial(get_pull_request_created_or_updated_body, action="created"),
"pull_request_updated": partial(get_pull_request_created_or_updated_body, action="updated"),
"pull_request_approved": partial(get_pull_request_action_body, action="approved"),
"pull_request_unapproved": partial(get_pull_request_action_body, action="unapproved"),
"pull_request_fulfilled": partial(get_pull_request_action_body, action="merged"),
"pull_request_rejected": partial(get_pull_request_action_body, action="rejected"),
"pull_request_comment_created": get_pull_request_comment_created_action_body,
"pull_request_comment_updated": partial(
get_pull_request_deleted_or_updated_comment_action_body, action="updated"
),
"pull_request_comment_deleted": partial(
get_pull_request_deleted_or_updated_comment_action_body, action="deleted"
),
"repo:updated": get_repo_updated_body,
}
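# Illustrative sketch (not Zulip code): a dependency-free rendition of the
# payload-keyed dispatch that get_type() performs above, using plain dicts
# instead of WildValue and omitting the X-Event-Key header branch that
# handles pull request and repo:updated events.
def detect_bitbucket_event(payload: dict) -> str:
    if "push" in payload:
        return "push"
    if "fork" in payload:
        return "fork"
    if "comment" in payload and "commit" in payload:
        return "commit_comment"
    if "commit_status" in payload:
        return "change_commit_status"
    if "issue" in payload:
        if "changes" in payload:
            return "issue_updated"
        if "comment" in payload:
            return "issue_commented"
        return "issue_created"
    raise ValueError("unsupported payload")


assert detect_bitbucket_event({"issue": {}, "comment": {}}) == "issue_commented"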
---
blob_id: b721a3b0619028eb0e7996bb14b9c116200d2825 | directory_id: 48fee8cd0d6f1e83cd13969e437ee94472d3f113
path: /examples/simple_nested.py | content_id: e47c33dd1f25e76158696b9351ee414a4038f78f
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: plumdog/flask_table
snapshot_id: 15f97bee65fac1634aaf03acc11ae30fd8e9e1cb | revision_id: 0ef1e6f195f83af9df45e76de77dd3207f30f679 | branch_name: refs/heads/master
visit_date: 2023-01-05T21:15:21.652887 | revision_date: 2021-12-04T07:16:42 | committer_date: 2021-12-04T07:16:42
github_id: 19,407,941 | star_events_count: 230 | fork_events_count: 49
gha_license_id: BSD-3-Clause | gha_event_created_at: 2022-12-14T13:00:30 | gha_created_at: 2014-05-03T17:10:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,266 | extension: py | filename: simple_nested.py
content:
from flask_table import Table, Col, NestedTableCol
"""Lets suppose that we have a class that we get an iterable of from
somewhere, such as a database. We can declare a table that pulls out
the relevant entries, escapes them and displays them. Additionally,
we show here how to used a NestedTableCol, by first defining a
sub-table.
"""
class SubItem(object):
def __init__(self, col1, col2):
self.col1 = col1
self.col2 = col2
class Item(object):
def __init__(self, name, description, subtable):
self.name = name
self.description = description
self.subtable = subtable
class SubItemTable(Table):
col1 = Col('Sub-column 1')
col2 = Col('Sub-column 2')
class ItemTable(Table):
name = Col('Name')
description = Col('Description')
subtable = NestedTableCol('Subtable', SubItemTable)
def main():
items = [Item('Name1', 'Description1', [SubItem('r1sr1c1', 'r1sr1c2'),
SubItem('r1sr2c1', 'r1sr2c2')]),
Item('Name2', 'Description2', [SubItem('r2sr1c1', 'r2sr1c2'),
SubItem('r2sr2c1', 'r2sr2c2')]),
]
table = ItemTable(items)
# or {{ table }} in jinja
print(table.__html__())
"""Outputs:
<table>
<thead>
<tr><th>Name</th><th>Description</th><th>Subtable</th></tr>
</thead>
<tbody>
<tr><td>Name1</td><td>Description1</td><td><table>
<thead>
<tr><th>Sub-column 1</th><th>Sub-column 2</th></tr>
</thead>
<tbody>
<tr><td>r1sr1c1</td><td>r1sr1c2</td></tr>
<tr><td>r1sr2c1</td><td>r1sr2c2</td></tr>
</tbody>
</table></td></tr>
<tr><td>Name2</td><td>Description2</td><td><table>
<thead>
<tr><th>Sub-column 1</th><th>Sub-column 2</th></tr>
</thead>
<tbody>
<tr><td>r2sr1c1</td><td>r2sr1c2</td></tr>
<tr><td>r2sr2c1</td><td>r2sr2c2</td></tr>
</tbody>
</table></td></tr>
</tbody>
</table>
Except it doesn't bother to prettify the output.
"""
if __name__ == '__main__':
main()
---
blob_id: c4f45471193bfefb2fafe7f798440f506b85df6d | directory_id: 1faeaab0e95faf58d4e656efb07f064c70873c07
path: /demo/demo/urls.py | content_id: 87631f4e9ab3e480a1fc6b38c9ba7c6c87164565
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | repo_name: idlesign/django-sitetree
snapshot_id: ee094759f85c4c37e97e5dadfb2ea62bfc2031c6 | revision_id: 0fa4c95903d63e1d68430fd0108fc8938fa038eb | branch_name: refs/heads/master
visit_date: 2023-07-18T08:28:41.521241 | revision_date: 2022-11-11T01:14:39 | committer_date: 2022-11-11T01:14:39
github_id: 687,157 | star_events_count: 272 | fork_events_count: 132
gha_license_id: BSD-3-Clause | gha_event_created_at: 2022-11-11T01:14:40 | gha_created_at: 2010-05-26T12:38:55 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 286 | extension: py | filename: urls.py
content:
from django.urls import re_path
from .views import index, listing, detailed
urlpatterns = [
re_path(r'^$', index, name='index'),
re_path(r'^articles/$', listing, name='articles-listing'),
re_path(r'^articles/(?P<article_id>\d+)/$', detailed, name='articles-detailed'),
]
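# Illustrative note (hypothetical usage, not part of the demo project): the
# route names above can be resolved back to URLs with Django's reverse(),
# assuming this urlconf is mounted at the site root, e.g.
#
#   from django.urls import reverse
#   reverse('articles-detailed', kwargs={'article_id': 1})  # -> '/articles/1/'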
---
blob_id: 4897fb14b5a67f0edb2ba4e1b44c2ab46a2ffc50 | directory_id: d91d19da3589c3f69a834bbb9834386e80f100e0
path: /datashader/tests/benchmarks/test_canvas.py | content_id: 5853d3974aea503785f658464c56142f2df1b8bf
detected_licenses: [] | license_type: permissive | repo_name: holoviz/datashader
snapshot_id: 11d518371e974c02ba3843871e3e0905e0c83956 | revision_id: b510594eb771d14cff3b69efca8ddd37ca3a1046 | branch_name: refs/heads/main
visit_date: 2023-08-18T13:55:24.214980 | revision_date: 2023-08-17T08:45:48 | committer_date: 2023-08-17T08:45:48
github_id: 48,504,165 | star_events_count: 1,040 | fork_events_count: 133
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-11T09:51:30 | gha_created_at: 2015-12-23T18:02:20 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,486 | extension: py | filename: test_canvas.py
content:
import pytest
import os
import numpy as np
import pandas as pd
import datashader as ds
test_gpu = bool(int(os.getenv("DATASHADER_TEST_GPU", 0)))
@pytest.fixture
def time_series():
n = 10**7
signal = np.random.normal(0, 0.3, size=n).cumsum() + 50
noise = lambda var, bias, n: np.random.normal(bias, var, n)
ys = signal + noise(1, 10*(np.random.random() - 0.5), n)
df = pd.DataFrame({'y': ys})
df['x'] = df.index
return df
@pytest.mark.benchmark(group="canvas")
def test_line(benchmark, time_series):
cvs = ds.Canvas(plot_height=300, plot_width=900)
benchmark(cvs.line, time_series, 'x', 'y')
@pytest.mark.benchmark(group="canvas")
def test_points(benchmark, time_series):
cvs = ds.Canvas(plot_height=300, plot_width=900)
benchmark(cvs.points, time_series, 'x', 'y')
@pytest.mark.skipif(not test_gpu, reason="DATASHADER_TEST_GPU not set")
@pytest.mark.benchmark(group="canvas")
def test_line_gpu(benchmark, time_series):
from cudf import from_pandas
time_series = from_pandas(time_series)
cvs = ds.Canvas(plot_height=300, plot_width=900)
benchmark(cvs.line, time_series, 'x', 'y')
@pytest.mark.skipif(not test_gpu, reason="DATASHADER_TEST_GPU not set")
@pytest.mark.benchmark(group="canvas")
def test_points_gpu(benchmark, time_series):
from cudf import from_pandas
time_series = from_pandas(time_series)
cvs = ds.Canvas(plot_height=300, plot_width=900)
benchmark(cvs.points, time_series, 'x', 'y')
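# Usage sketch (added for illustration; not part of the benchmark suite, the
# function name is hypothetical). Outside of benchmarking, the same Canvas
# calls return an xarray aggregate that is typically rendered with
# datashader's transfer functions.
def example_render(df):
    import datashader.transfer_functions as tf
    cvs = ds.Canvas(plot_height=300, plot_width=900)
    agg = cvs.line(df, 'x', 'y')  # xarray.DataArray of per-pixel counts
    return tf.shade(agg)          # rasterized datashader Image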
---
blob_id: 492c7df777e7e8ccad28116f17d5baa122d6c81d | directory_id: 850fb312d6cfa25546369b4950c47b04231dce8e
path: /src/gt4sd/frameworks/granular/dataloader/sampler.py | content_id: ce9fa3edc78605e9058a9c844efd38c88765c95b
detected_licenses: ["MIT"] | license_type: permissive | repo_name: GT4SD/gt4sd-core
snapshot_id: 825418303547c36cf64575ac4f8711877fd7e16b | revision_id: 0b69b7d5b261f2f9af3984793c1295b9b80cd01a | branch_name: refs/heads/main
visit_date: 2023-09-02T21:23:46.156469 | revision_date: 2023-08-30T08:28:40 | committer_date: 2023-08-30T08:28:40
github_id: 458,309,249 | star_events_count: 239 | fork_events_count: 50
gha_license_id: MIT | gha_event_created_at: 2023-08-25T06:14:52 | gha_created_at: 2022-02-11T19:06:58 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,216 | extension: py | filename: sampler.py
content:
#
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Sampler implementation.
Reimplemented starting from: https://github.com/ncullen93/torchsample/blob/ea4d1b3975f68be0521941e733887ed667a1b46e/torchsample/samplers.py.
The main reason for the reimplementation is to avoid adding a dependency and to better control the logger.
"""
import logging
from typing import Iterator
import numpy as np
import torch
from sklearn.model_selection import StratifiedShuffleSplit
from torch.utils.data import Sampler
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class StratifiedSampler(Sampler):
"""Implementation of a sampler for tensors based on scikit-learn StratifiedShuffleSplit."""
def __init__(
self, targets: torch.Tensor, batch_size: int, test_size: float = 0.5
) -> None:
"""Construct a StratifiedSampler.
Args:
targets: targets tensor.
batch_size: size of the batch.
test_size: proportion of samples in the test set. Defaults to 0.5.
"""
self.targets = targets
self.number_of_splits = int(self.targets.size(0) / batch_size)
self.test_size = test_size
def gen_sample_array(self) -> np.ndarray:
"""Get sample array.
Returns:
sample array.
"""
splitter = StratifiedShuffleSplit(
n_splits=self.number_of_splits, test_size=self.test_size
)
data_placeholder = torch.randn(self.targets.size(0), 2).numpy()
targets = self.targets.numpy()
splitter.get_n_splits(data_placeholder, targets)
train_index, test_index = next(splitter.split(data_placeholder, targets))
return np.hstack([train_index, test_index])
def __iter__(self) -> Iterator[np.ndarray]:
"""Get an iterator over the sample array.
Returns:
sample array iterator.
Yields:
a sample array.
"""
return iter(self.gen_sample_array())
def __len__(self) -> int:
"""Length of the sampler.
Returns:
the sampler length.
"""
return len(self.targets)
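# Usage sketch (added for illustration; tensors are hypothetical, not from the
# repo). The sampler plugs into a standard DataLoader, which then iterates the
# stratified index order produced by gen_sample_array().
def _example_usage():
    from torch.utils.data import DataLoader, TensorDataset

    targets = torch.tensor([0] * 50 + [1] * 50)
    features = torch.randn(100, 8)
    sampler = StratifiedSampler(targets=targets, batch_size=10, test_size=0.5)
    loader = DataLoader(TensorDataset(features, targets), batch_size=10, sampler=sampler)
    return next(iter(loader))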
---
blob_id: 57840be1bc5a20916597b7477e38bff812f534a7 | directory_id: bb33e6be8316f35decbb2b81badf2b6dcf7df515
path: /source/res/scripts/client/bwobsolete_helpers/PyGUI/IME.py | content_id: 3dbe0d35c3f7c62c8cd56a44e440047e8ecb73c1
detected_licenses: [] | license_type: no_license | repo_name: StranikS-Scan/WorldOfTanks-Decompiled
snapshot_id: 999c9567de38c32c760ab72c21c00ea7bc20990c | revision_id: d2fe9c195825ececc728e87a02983908b7ea9199 | branch_name: refs/heads/1.18
visit_date: 2023-08-25T17:39:27.718097 | revision_date: 2022-09-22T06:49:44 | committer_date: 2022-09-22T06:49:44
github_id: 148,696,315 | star_events_count: 103 | fork_events_count: 39
gha_license_id: null | gha_event_created_at: 2022-09-14T17:50:03 | gha_created_at: 2018-09-13T20:49:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 17,084 | extension: py | filename: IME.py
content:
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/IME.py
import BigWorld
import GUI
import Math
import Utils
from Utils import BlinkingCursor
from Utils import blinkingColourProvider
from Utils import getHPixelScalar, getVPixelScalar
from bwdebug import *
ALWAYS_ON_TOP_Z = 0.01
BACKGROUND_COLOUR = (32, 32, 32, 255)
DEFAULT_FONT_NAME = 'default_small.font'
SELECTED_CANDIDATE_BGCOLOUR = (220, 220, 220, 255)
SELECTED_CANDIDATE_FGCOLOUR = (16, 16, 16, 255)
ATTR_INPUT = 0
ATTR_TARGET_CONVERTED = 1
ATTR_CONVERTED = 2
ATTR_TARGET_NOTCONVERTED = 3
ATTR_INPUT_ERROR = 4
ATTR_FIXEDCONVERTED = 5
def _attrToColours(attr):
bgColour = None
fgColour = None
if attr in [ATTR_INPUT, ATTR_CONVERTED]:
bgColour = (32, 32, 32, 255)
fgColour = (255, 255, 255, 255)
else:
bgColour = (220, 220, 220, 255)
fgColour = (16, 16, 16, 255)
return (bgColour, fgColour)
def _compositionStringBlocks():
comp = BigWorld.ime.composition
attr = BigWorld.ime.compositionAttr
if len(comp) == 0:
return []
ret = []
currentAttr = attr[0]
currentBlock = u''
for idx in range(len(comp)):
if attr[idx] != currentAttr:
ret.append((currentAttr, currentBlock))
currentAttr = attr[idx]
currentBlock = u''
currentBlock += comp[idx]
if len(currentBlock) > 0:
ret.append((currentAttr, currentBlock))
return ret
def _bgText(text, font, bgcolour, fgcolour):
w = Utils.createTextWithBackground(text, font, bgcolour, fgcolour)
w.colouriser = GUI.ColourShader()
w.colouriser.colourProvider = bgcolour
w.text.colouriser = GUI.ColourShader()
w.text.colouriser.colourProvider = fgcolour
return w
def logIMEEvent(event):
print str(event) + ':'
print ' stateChanged=' + str(event.stateChanged) + " (value='" + str(BigWorld.ime.state) + "')"
print ' candidatesVisibilityChanged=' + str(event.candidatesVisibilityChanged) + ' (value=' + str(BigWorld.ime.candidatesVisible) + ')'
print ' candidatesChanged=' + str(event.candidatesChanged) + " (value='" + str(BigWorld.ime.candidates) + "', attrs=" + str(BigWorld.ime.compositionAttr) + ')'
print ' selectedCandidateChanged=' + str(event.selectedCandidateChanged) + ' (value=' + str(BigWorld.ime.selectedCandidate) + ')'
print ' compositionChanged=' + str(event.compositionChanged) + " (value='" + str(BigWorld.ime.composition) + "')"
print ' compositionCursorPositionChanged=' + str(event.compositionCursorPositionChanged) + ' (value=' + str(BigWorld.ime.compositionCursorPosition) + ')'
print ' readingVisibilityChanged=' + str(event.readingVisibilityChanged) + ' (value=' + str(BigWorld.ime.readingVisible) + ')'
print ' readingChanged=' + str(event.readingChanged) + " (value='" + str(BigWorld.ime.reading) + "')"
class CompositionWindow(object):
def __init__(self):
self.comp = GUI.Window('system/maps/col_white.bmp')
self.comp.materialFX = GUI.Simple.eMaterialFX.BLEND
self.comp.visible = False
self.comp.script = self
self.comp.horizontalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.verticalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.verticalAnchor = GUI.Simple.eVAnchor.CENTER
self.comp.widthMode = GUI.Simple.eSizeMode.PIXEL
self.comp.heightMode = GUI.Simple.eSizeMode.PIXEL
self.comp.position.z = ALWAYS_ON_TOP_Z
self.comp.addChild(GUI.Text(), 'text')
self.comp.text.font = DEFAULT_FONT_NAME
self.comp.text.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.text.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
self.comp.text.colouriser = GUI.ColourShader()
self.comp.text.colouriser.colourProvider = self.comp.text.colour
self.cursor = BlinkingCursor()
self.comp.addChild(self.cursor.comp, 'cursor')
self.cursor.enable(True)
self.comp.backgroundColouriser = GUI.ColourShader()
self.comp.backgroundColouriser.colourProvider = BACKGROUND_COLOUR
self._firstTargetConvertedBlock = -1
def populate(self, fontName):
if len(BigWorld.ime.composition) == 0:
self.comp.visible = False
return False
for name, comp in self.comp.children:
if name not in ('cursor', 'text'):
self.comp.delChild(comp)
if BigWorld.ime.language == 'KOREAN':
self._populateKorean(fontName)
else:
self._populateChineseJapanese(fontName)
self.comp.visible = True
return True
def _populateChineseJapanese(self, fontName):
compString = BigWorld.ime.composition
self.comp.text.font = fontName
self.comp.text.text = ''
compBlocks = _compositionStringBlocks()
self._firstTargetConvertedBlock = -1
fullWidth = 0
idx = 0
for attr, str in compBlocks:
bgColour, fgColour = _attrToColours(attr)
if self._firstTargetConvertedBlock < 0 and attr not in [ATTR_INPUT, ATTR_CONVERTED]:
self._firstTargetConvertedBlock = idx
w = _bgText(str, fontName, bgColour, fgColour)
w.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
w.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
w.position.x = fullWidth
self.comp.addChild(w, 'compBlock%d' % idx)
idx += 1
fullWidth += w.width
self.comp.widthMode = GUI.Simple.eSizeMode.PIXEL
self.comp.width = fullWidth
_, self.comp.height = self.comp.compBlock0.text.stringDimensions(compString)
self.comp.height = self.comp.height * getVPixelScalar()
cursorIndex = BigWorld.ime.compositionCursorPosition
cursorOffset = self.comp.compBlock0.text.stringWidth(compString[:cursorIndex])
self.cursor.comp.position.x = cursorOffset * getHPixelScalar()
self.comp.backgroundColouriser.colourProvider = BACKGROUND_COLOUR
self.cursor.enable(True)
self.cursor.touch()
def _populateKorean(self, fontName):
compString = BigWorld.ime.composition
self.comp.text.font = fontName
self.comp.text.text = compString
sw, sh = self.comp.text.stringDimensions(compString)
hratio = getHPixelScalar()
vratio = getVPixelScalar()
self.comp.width = sw * hratio
self.comp.height = sh * vratio
self.comp.text.position.x = 0
self.comp.backgroundColouriser.colourProvider = blinkingColourProvider(BACKGROUND_COLOUR, Utils.CURSOR_BLINK_PERIOD)
self.cursor.enable(False)
def reposition(self, positionClip, minClip, maxClip):
widthInClip, heightInClip = Utils.clipSize(self.comp)
if positionClip[0] + widthInClip > maxClip[0]:
positionClip = minClip
self.comp.position.x = positionClip.x
self.comp.position.y = positionClip.y
def clipBounds(self):
return Utils.clipRegion(self.comp)
def candidateClipBounds(self):
clipMins, clipMaxs = self.cursor.clipBounds()
if BigWorld.ime.language == 'JAPANESE':
ftcb = self._firstTargetConvertedBlock
block = getattr(self.comp, 'compBlock%d' % ftcb, None)
if block is not None:
widthMode = block.widthMode
block.widthMode = GUI.Simple.eSizeMode.CLIP
blockMins = block.localToScreen((-1, -1))
block.widthMode = widthMode
clipMins[0] = blockMins[0]
clipMaxs[0] = blockMins[0]
return (clipMins, clipMaxs)
def hide(self):
self.comp.visible = False
class ReadingWindow(object):
MARGIN_SIZE = 2
def __init__(self):
self.comp = GUI.Window('system/maps/col_white.bmp')
self.comp.materialFX = GUI.Simple.eMaterialFX.BLEND
self.comp.colour = BACKGROUND_COLOUR
self.comp.visible = False
self.comp.script = self
self.comp.horizontalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.verticalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.verticalAnchor = GUI.Simple.eVAnchor.RIGHT
self.comp.widthMode = GUI.Simple.eSizeMode.PIXEL
self.comp.heightMode = GUI.Simple.eSizeMode.PIXEL
self.comp.position.z = ALWAYS_ON_TOP_Z
self.comp.addChild(GUI.Text(''), 'text')
self.comp.text.multiline = True
self.comp.text.font = DEFAULT_FONT_NAME
self.comp.text.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.text.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
self.comp.text.verticalAnchor = GUI.Simple.eVAnchor.TOP
self.comp.text.verticalPositionMode = GUI.Simple.eSizeMode.PIXEL
def populate(self, fontName):
readingString = BigWorld.ime.reading
if not BigWorld.ime.readingVisible or len(readingString) == 0:
self.comp.visible = False
return False
self.comp.text.font = fontName
if BigWorld.ime.readingVertical:
readingString = '\n'.join([ c for c in readingString ])
horzMargin = self.MARGIN_SIZE * getHPixelScalar()
vertMargin = self.MARGIN_SIZE * getVPixelScalar()
self.comp.text.text = readingString
self.comp.text.position.x = horzMargin
self.comp.text.position.y = vertMargin
self.comp.widthMode = GUI.Simple.eSizeMode.PIXEL
textWidth, textHeight = self.comp.text.stringDimensions(readingString)
self.comp.width = horzMargin * 2 + textWidth * getHPixelScalar()
self.comp.height = vertMargin * 2 + textHeight * getVPixelScalar()
self.comp.visible = True
return True
def reposition(self, screenClip):
self.comp.position[0] = screenClip[0]
self.comp.position[1] = screenClip[1]
self.comp.visible = True
def clipBounds(self):
return Utils.clipRegion(self.comp)
def hide(self):
self.comp.visible = False
class CandidateWindow(object):
def __init__(self):
self.comp = GUI.Window('system/maps/col_white.bmp')
self.comp.colour = (32, 32, 32, 255)
self.comp.materialFX = GUI.Simple.eMaterialFX.BLEND
self.comp.visible = False
self.comp.script = self
self.comp.horizontalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.position.x = 0
self.comp.verticalPositionMode = GUI.Simple.ePositionMode.CLIP
self.comp.verticalAnchor = GUI.Simple.eVAnchor.TOP
self.comp.position.y = 0
self.comp.position.z = ALWAYS_ON_TOP_Z
self.comp.widthMode = GUI.Simple.eSizeMode.PIXEL
self.comp.heightMode = GUI.Simple.eSizeMode.PIXEL
self.comp.addChild(GUI.Text(''), 'candidateText')
self.comp.candidateText.font = DEFAULT_FONT_NAME
self.comp.candidateText.multiline = True
self.comp.candidateText.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
self.comp.candidateText.verticalPositionMode = GUI.Simple.eSizeMode.PIXEL
self.comp.candidateText.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
self.comp.candidateText.verticalAnchor = GUI.Simple.eVAnchor.TOP
self.comp.candidateText.position = (0, 0, 0.5)
def populate(self, fontName):
if not BigWorld.ime.candidatesVisible:
self.comp.visible = False
return False
candidates = BigWorld.ime.candidates
selectedIdx = BigWorld.ime.selectedCandidate
fullText = u''
preSelectWidth = 0
for i in range(len(candidates)):
fullText += str(i + 1) + candidates[i]
if i < len(candidates) - 1:
fullText += u'\n' if BigWorld.ime.candidatesVertical else u' '
if i == selectedIdx - 1:
preSelectWidth, _ = self.comp.candidateText.stringDimensions(fullText)
self.comp.candidateText.font = fontName
self.comp.candidateText.text = fullText
sw, sh = self.comp.candidateText.stringDimensions(fullText)
self._resize(sw, sh)
if hasattr(self.comp, 'selectedCandidate'):
self.comp.delChild(self.comp.selectedCandidate)
if len(candidates) > 0:
selStr = str(selectedIdx + 1) + candidates[selectedIdx]
selectedComp = _bgText(selStr, fontName, SELECTED_CANDIDATE_BGCOLOUR, SELECTED_CANDIDATE_FGCOLOUR)
selectedComp.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
selectedComp.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
selectedComp.verticalAnchor = GUI.Simple.eVAnchor.TOP
selectedComp.verticalPositionMode = GUI.Simple.eSizeMode.PIXEL
selectedComp.position.z = 0.01
selectedComp.text.horizontalPositionMode = GUI.Simple.eSizeMode.PIXEL
selectedComp.text.horizontalAnchor = GUI.Simple.eHAnchor.LEFT
selectedComp.text.position.x = 0
fontWidth, fontHeight = self.comp.candidateText.stringDimensions(selStr)
if BigWorld.ime.candidatesVertical:
selectedComp.position.x = 0
selectedComp.position.y = fontHeight * selectedIdx * getVPixelScalar()
selectedComp.widthMode = self.comp.widthMode
selectedComp.width = self.comp.width
else:
selectedComp.position.x = preSelectWidth * getHPixelScalar()
selectedComp.position.y = 0
selectedComp.widthMode = GUI.Simple.eSizeMode.PIXEL
selectedComp.width = (fontWidth + 1) * getHPixelScalar()
selectedComp.heightMode = self.comp.heightMode
selectedComp.height = self.comp.height
self.comp.addChild(selectedComp, 'selectedCandidate')
self.comp.visible = True
return True
def reposition(self, screenClipMins, screenClipMaxs):
clipWidth, clipHeight = Utils.clipSize(self.comp)
cx, cy = screenClipMaxs.x, screenClipMins.y
if cy - clipHeight < -1.0:
cy = screenClipMaxs.y + clipHeight
self.comp.position[0] = cx
self.comp.position[1] = cy
def _resize(self, pixelW, pixelH):
self.comp.width = pixelW * getHPixelScalar()
self.comp.height = pixelH * getVPixelScalar()
def hide(self):
self.comp.visible = False
_composition = None
_reading = None
_candidates = None
def init():
global _composition
global _reading
global _candidates
TRACE_MSG('Initialising IME components')
if _composition is None:
_composition = CompositionWindow()
GUI.addRoot(_composition.comp)
if _reading is None:
_reading = ReadingWindow()
GUI.addRoot(_reading.comp)
if _candidates is None:
_candidates = CandidateWindow()
GUI.addRoot(_candidates.comp)
return
def fini():
global _reading
global _candidates
global _composition
if _composition is not None:
GUI.delRoot(_composition.comp)
_composition = None
if _reading is not None:
GUI.delRoot(_reading.comp)
_reading = None
if _candidates is not None:
GUI.delRoot(_candidates.comp)
_candidates = None
return
def refresh(positionClip, minClip, maxClip, fontName):
if _composition is None or _candidates is None or _reading is None:
ERROR_MSG('IME components have not been initialised.')
return
else:
gotComposition = _composition.populate(fontName)
gotReading = _reading.populate(fontName)
gotCandidates = _candidates.populate(fontName)
if gotComposition:
_composition.reposition(positionClip, minClip, maxClip)
else:
return
compPosMins, compPosMaxs = _composition.clipBounds()
candPosMins, candPosMaxs = _composition.candidateClipBounds()
if gotReading:
pos = (compPosMins[0] + 0.025, compPosMins[1] + (compPosMaxs[1] - compPosMins[1]) * 0.5)
_reading.reposition(pos)
readingMins, readingMaxs = _reading.clipBounds()
candPosMins[1] = readingMins[1]
candPosMaxs[1] = compPosMaxs[1]
else:
candPosMins[1] = compPosMins[1]
candPosMaxs[1] = compPosMaxs[1]
if gotCandidates:
_candidates.reposition(candPosMins, candPosMaxs)
return
def handleIMEEvent(event, positionClip, minClip, maxClip, fontName):
    if (event.compositionChanged or event.compositionCursorPositionChanged
            or event.candidatesVisibilityChanged or event.candidatesChanged
            or event.selectedCandidateChanged or event.readingChanged):
        refresh(positionClip, minClip, maxClip, fontName)
def hideAll():
_composition.hide()
_candidates.hide()
_reading.hide()
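# --- Usage sketch (illustrative; not part of the original module) ---
# Assuming a BigWorld client environment, the expected call sequence is:
#
#   init()                                               # create the three IME windows once
#   handleIMEEvent(ev, posClip, minClip, maxClip, font)  # on each BigWorld IME event
#   hideAll()                                            # when the focused edit field goes away
#   fini()                                               # tear down the GUI roots on shutdown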
|
a224763d19f9b9400e4626573dcf2834608ce0fe
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/monai/handlers/parameter_scheduler.py
|
d12e6e072c0a12edf3e1d2b608178a0bff4065a8
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,119
|
py
|
parameter_scheduler.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from bisect import bisect_right
from collections.abc import Callable
from typing import TYPE_CHECKING
from monai.config import IgniteInfo
from monai.utils import min_version, optional_import
if TYPE_CHECKING:
from ignite.engine import Engine, Events
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
class ParamSchedulerHandler:
"""
    General-purpose scheduler for parameter values. By default it can schedule with a linear,
    exponential, step or multistep function. One can also pass Callables to have customized scheduling logic.
Args:
parameter_setter (Callable): Function that sets the required parameter
value_calculator (Union[str,Callable]): Either a string ('linear', 'exponential', 'step' or 'multistep')
or Callable for custom logic.
vc_kwargs (Dict): Dictionary that stores the required parameters for the value_calculator.
epoch_level (bool): Whether the step is based on epoch or iteration. Defaults to False.
name (Optional[str]): Identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
event (Optional[str]): Event to which the handler attaches. Defaults to Events.ITERATION_COMPLETED.
"""
def __init__(
self,
parameter_setter: Callable,
value_calculator: str | Callable,
vc_kwargs: dict,
epoch_level: bool = False,
name: str | None = None,
event: str | None = None,
):
self.epoch_level = epoch_level
self.event = event if event is not None else Events.ITERATION_COMPLETED
self._calculators = {
"linear": self._linear,
"exponential": self._exponential,
"step": self._step,
"multistep": self._multistep,
}
self._parameter_setter = parameter_setter
self._vc_kwargs = vc_kwargs
self._value_calculator = self._get_value_calculator(value_calculator=value_calculator)
self.logger = logging.getLogger(name)
self._name = name
def _get_value_calculator(self, value_calculator):
if isinstance(value_calculator, str):
return self._calculators[value_calculator]
if callable(value_calculator):
return value_calculator
raise ValueError(
f"value_calculator must be either a string from {list(self._calculators.keys())} or a Callable."
)
def __call__(self, engine: Engine) -> None:
if self.epoch_level:
self._vc_kwargs["current_step"] = engine.state.epoch
else:
self._vc_kwargs["current_step"] = engine.state.iteration
new_value = self._value_calculator(**self._vc_kwargs)
self._parameter_setter(new_value)
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine that is used for training.
"""
if self._name is None:
self.logger = engine.logger
engine.add_event_handler(self.event, self)
@staticmethod
def _linear(
initial_value: float, step_constant: int, step_max_value: int, max_value: float, current_step: int
) -> float:
"""
        Keeps the parameter at initial_value until step_constant steps have passed, then increases it
        linearly until it reaches max_value at step_max_value, and keeps it constant afterwards.
        For example, with initial_value=0.0, max_value=1.0, step_constant=2 and step_max_value=5,
        the value is 0.0 through step 2, rises to 1/3 at step 3, and holds 1.0 from step 5 onwards.
Args:
initial_value (float): Starting value of the parameter.
step_constant (int): Step index until parameter's value is kept constant.
step_max_value (int): Step index at which parameter's value becomes max_value.
max_value (float): Max parameter value.
current_step (int): Current step index.
Returns:
float: new parameter value
"""
if current_step <= step_constant:
delta = 0.0
elif current_step > step_max_value:
delta = max_value - initial_value
else:
delta = (max_value - initial_value) / (step_max_value - step_constant) * (current_step - step_constant)
return initial_value + delta
@staticmethod
def _exponential(initial_value: float, gamma: float, current_step: int) -> float:
"""
Decays the parameter value by gamma every step.
Based on the closed form of ExponentialLR from Pytorch:
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html.
Args:
initial_value (float): Starting value of the parameter.
gamma (float): Multiplicative factor of parameter value decay.
current_step (int): Current step index.
Returns:
float: new parameter value
"""
return initial_value * gamma**current_step
@staticmethod
def _step(initial_value: float, gamma: float, step_size: int, current_step: int) -> float:
"""
Decays the parameter value by gamma every step_size.
Based on StepLR from Pytorch:
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html.
Args:
initial_value (float): Starting value of the parameter.
gamma (float): Multiplicative factor of parameter value decay.
step_size (int): Period of parameter value decay.
current_step (int): Current step index.
        Returns:
float: new parameter value
"""
return initial_value * gamma ** (current_step // step_size)
@staticmethod
def _multistep(initial_value: float, gamma: float, milestones: list[int], current_step: int) -> float:
"""
Decays the parameter value by gamma once the number of steps reaches one of the milestones.
Based on MultiStepLR from Pytorch.
https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.MultiStepLR.html.
Args:
initial_value (float): Starting value of the parameter.
gamma (float): Multiplicative factor of parameter value decay.
milestones (List[int]): List of step indices. Must be increasing.
current_step (int): Current step index.
Returns:
float: new parameter value
"""
return initial_value * gamma ** bisect_right(milestones, current_step)
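# --- Usage sketch (illustrative; assumes ignite is installed) ---
#
# trainer = Engine(lambda engine, batch: None)        # stand-in training loop
# target = {"weight": 0.0}                            # hypothetical parameter holder
# handler = ParamSchedulerHandler(
#     parameter_setter=lambda v: target.update(weight=v),
#     value_calculator="linear",
#     vc_kwargs={"initial_value": 0.0, "step_constant": 2,
#                "step_max_value": 5, "max_value": 1.0},
#     epoch_level=True,
#     event=Events.EPOCH_COMPLETED,
# )
# handler.attach(trainer)
# trainer.run([0] * 4, max_epochs=10)                 # weight ramps 0.0 -> 1.0 over epochs 2..5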
|
1c435f5d216b1f722aa75d85026bf1e22154ec95
|
71b8b60c5627ace1bbda39f679f93f60b55543ca
|
/tensorflow_federated/python/core/impl/computation/computation_impl.py
|
ceb97a946cb846abe3f8be880d5aedf7addaf150
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/federated
|
ff94b63e9f4af448795bae77cee5b627dcae9051
|
ad4bca66f4b483e09d8396e9948630813a343d27
|
refs/heads/main
| 2023-08-31T11:46:28.559047
| 2023-08-31T02:04:38
| 2023-08-31T02:09:59
| 161,556,784
| 2,297
| 631
|
Apache-2.0
| 2023-09-13T22:54:14
| 2018-12-12T23:15:35
|
Python
|
UTF-8
|
Python
| false
| false
| 6,062
|
py
|
computation_impl.py
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the implementation of the base Computation interface."""
from typing import Any, Optional
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.computation import computation_base
from tensorflow_federated.python.core.impl.computation import function_utils
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
class ConcreteComputation(computation_base.Computation):
"""A representation of a `pb.Computation` in the `tff.Computation` interface.
This implementation exposes methods to retrieve the backing `pb.Computation`,
as well as the Python representation of this protocol buffer represented by
an instance of `building_blocks.ComputationBuildingBlock`. Leverages the
implementation of `__call__` inherited from `function_utils.ConcreteFunction`
to pass `self` to the currently installed context.
"""
@classmethod
def get_proto(cls, value: 'ConcreteComputation') -> pb.Computation:
py_typecheck.check_type(value, cls)
return value._computation_proto # pylint: disable=protected-access
@classmethod
def with_type(
cls,
value: 'ConcreteComputation',
type_spec: computation_types.FunctionType,
) -> 'ConcreteComputation':
py_typecheck.check_type(value, cls)
py_typecheck.check_type(type_spec, computation_types.Type)
# Ensure we are assigning a type-safe signature.
value.type_signature.check_assignable_from(type_spec)
# pylint: disable=protected-access
return cls(
value._computation_proto, value._context_stack, annotated_type=type_spec
)
# pylint: enable=protected-access
@classmethod
def from_building_block(
cls, building_block: building_blocks.ComputationBuildingBlock
) -> 'ConcreteComputation':
"""Converts a computation building block to a computation impl."""
py_typecheck.check_type(
building_block, building_blocks.ComputationBuildingBlock
)
return cls(
building_block.proto,
context_stack_impl.context_stack,
annotated_type=building_block.type_signature, # pytype: disable=wrong-arg-types
)
def to_building_block(self):
# TODO: b/161560999 - currently destroys annotated type.
# This should perhaps be fixed by adding `type_parameter` to `from_proto`.
return building_blocks.ComputationBuildingBlock.from_proto(
self._computation_proto
)
def to_compiled_building_block(self):
return building_blocks.CompiledComputation(
self._computation_proto, type_signature=self.type_signature
)
def __init__(
self,
computation_proto: pb.Computation,
context_stack: context_stack_base.ContextStack,
annotated_type: Optional[computation_types.FunctionType] = None,
):
"""Constructs a new instance of ConcreteComputation from the computation_proto.
Args:
computation_proto: The protocol buffer that represents the computation, an
instance of pb.Computation.
context_stack: The context stack to use.
annotated_type: Optional, type information with additional annotations
that replaces the information in `computation_proto.type`.
Raises:
TypeError: If `annotated_type` is not `None` and is not compatible with
`computation_proto.type`.
ValueError: If `computation_proto.type` is `None`.
"""
py_typecheck.check_type(computation_proto, pb.Computation)
py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
if computation_proto.type is None:
raise ValueError('Expected `computation_proto.type` to not be `None`.')
type_spec = type_serialization.deserialize_type(computation_proto.type)
if annotated_type is not None:
if type_spec is None or not type_spec.is_assignable_from(annotated_type):
raise TypeError(
'annotated_type not compatible with computation_proto.type\n'
f'computation_proto.type: {type_spec}\n'
f'annotated_type: {annotated_type}'
)
type_spec = annotated_type
if not isinstance(type_spec, computation_types.FunctionType):
raise TypeError(
f'{type_spec} is not a functional type, from proto: '
f'{computation_proto}'
)
self._type_signature = type_spec
self._context_stack = context_stack
self._computation_proto = computation_proto
def __eq__(self, other: Any) -> bool:
if self is other:
return True
elif not isinstance(other, ConcreteComputation):
return NotImplemented
return self._computation_proto == other._computation_proto
@property
def type_signature(self) -> computation_types.FunctionType:
return self._type_signature
def __call__(self, *args, **kwargs):
arg = function_utils.pack_args(
self._type_signature.parameter, # pytype: disable=attribute-error
args,
kwargs,
)
return self._context_stack.current.invoke(self, arg)
def __hash__(self) -> int:
return hash(self._computation_proto.SerializeToString(deterministic=True))
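# --- Illustrative sketch (not part of the original module) ---
# Round-tripping uses only the constructors defined above; `some_building_block`
# is a hypothetical `building_blocks.ComputationBuildingBlock` instance:
#
#   comp = ConcreteComputation.from_building_block(some_building_block)
#   proto = ConcreteComputation.get_proto(comp)   # the backing pb.Computation
#   bb = comp.to_building_block()                 # note: drops any annotated type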
|
54d73c0ea7a45df241f44749d8503f616e0189bf
|
d718da620c1adffdd202355e2b641d1a1fc18cbe
|
/src/pyfme/models/euler_flat_earth.py
|
f0d16231a5158d84d01caf05f7c6b8d6f2bca88e
|
[
"MIT"
] |
permissive
|
AeroPython/PyFME
|
af46465725ee9adb5ac149757d02032a8a40ddc3
|
156fa9f1db097f107c20ad7354c71b1eaee4cbb1
|
refs/heads/master
| 2021-01-24T06:47:30.243437
| 2020-08-13T11:34:37
| 2020-08-13T11:34:37
| 42,995,365
| 210
| 103
|
MIT
| 2020-08-13T11:34:39
| 2015-09-23T10:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 8,491
|
py
|
euler_flat_earth.py
|
# -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Euler Flat Earth
----------------
Classical aircraft motion equations assuming no Earth rotation
inertial effects, representing attitude with Euler angles (not valid for
all-attitude flight) and integrating aircraft position in Earth axis (Flat
Earth).
"""
import numpy as np
from numpy import sin, cos
from pyfme.models.dynamic_system import AircraftDynamicSystem
from pyfme.models.state import (
AircraftState, EarthPosition, EulerAttitude, BodyVelocity,
BodyAngularVelocity, BodyAcceleration, BodyAngularAcceleration
)
class EulerFlatEarth(AircraftDynamicSystem):
"""Euler Flat Earth Dynamic System.
Classical aircraft motion equations assuming no Earth rotation, no Earth
curvature and modelling attitude with Euler angles. Aircraft position is
performed on Earth axis.
"""
def fun(self, t, x):
self._update_full_system_state_from_state(x, self.state_vector_dot)
updated_simulation = self.update_simulation(t, self.full_state)
mass = updated_simulation.aircraft.mass
inertia = updated_simulation.aircraft.inertia
forces = updated_simulation.aircraft.total_forces
moments = updated_simulation.aircraft.total_moments
rv = _system_equations(t, x, mass, inertia, forces, moments)
return rv
def steady_state_trim_fun(self, full_state, environment, aircraft,
controls):
environment.update(full_state)
aircraft.calculate_forces_and_moments(full_state, environment,
controls)
mass = aircraft.mass
inertia = aircraft.inertia
forces = aircraft.total_forces
moments = aircraft.total_moments
t0 = 0
x0 = self._get_state_vector_from_full_state(full_state)
rv = _system_equations(t0, x0, mass, inertia, forces, moments)
return rv[:6]
def _update_full_system_state_from_state(self, state, state_dot):
self.full_state.position.update(state[9:12])
self.full_state.attitude.update(state[6:9])
att = self.full_state.attitude
self.full_state.velocity.update(state[0:3], att)
self.full_state.angular_vel.update(state[3:6], att)
self.full_state.acceleration.update(state_dot[0:3], att)
self.full_state.angular_accel.update(state_dot[3:6], att)
def _adapt_full_state_to_dynamic_system(self, full_state):
pos = EarthPosition(full_state.position.x_earth,
full_state.position.y_earth,
full_state.position.height,
full_state.position.lat,
full_state.position.lon)
att = EulerAttitude(full_state.attitude.theta,
full_state.attitude.phi,
full_state.attitude.psi)
vel = BodyVelocity(full_state.velocity.u,
full_state.velocity.v,
full_state.velocity.w,
att)
ang_vel = BodyAngularVelocity(full_state.angular_vel.p,
full_state.angular_vel.q,
full_state.angular_vel.r,
att)
accel = BodyAcceleration(full_state.acceleration.u_dot,
full_state.acceleration.v_dot,
full_state.acceleration.w_dot,
att)
ang_accel = BodyAngularAcceleration(full_state.angular_accel.p_dot,
full_state.angular_accel.q_dot,
full_state.angular_accel.r_dot,
att)
full_state = AircraftState(pos, att, vel, ang_vel, accel, ang_accel)
return full_state
def _get_state_vector_from_full_state(self, full_state):
x0 = np.array(
[
full_state.velocity.u,
full_state.velocity.v,
full_state.velocity.w,
full_state.angular_vel.p,
full_state.angular_vel.q,
full_state.angular_vel.r,
full_state.attitude.theta,
full_state.attitude.phi,
full_state.attitude.psi,
full_state.position.x_earth,
full_state.position.y_earth,
full_state.position.z_earth
]
)
return x0
# TODO: numba jit
def _system_equations(time, state_vector, mass, inertia, forces, moments):
"""Euler flat earth equations: linear momentum equations, angular momentum
equations, angular kinematic equations, linear kinematic
equations.
Parameters
----------
time : float
Current time (s).
    state_vector : array_like, shape(12)
        Current value of absolute velocity and angular velocity, both
        expressed in body axes, Euler angles and position in Earth axis.
        (u, v, w, p, q, r, theta, phi, psi, x, y, z)
        (m/s, m/s, m/s, rad/s, rad/s, rad/s, rad, rad, rad, m, m, m).
mass : float
Current mass of the aircraft (kg).
inertia : array_like, shape(3, 3)
3x3 tensor of inertia of the aircraft (kg * m2)
Current equations assume that the aircraft has a symmetry plane
(x_b - z_b), thus J_xy and J_yz must be null.
    forces : array_like, shape(3)
        3 dimensional vector containing the total forces (including
        gravity) in x_b, y_b, z_b axes (N).
    moments : array_like, shape(3)
        3 dimensional vector containing the total moments in x_b,
        y_b, z_b axes (N·m).
Returns
-------
    dstate_dt : array_like, shape(12)
Derivative with respect to time of the state vector.
Current value of absolute acceleration and angular acceleration,
both expressed in body axes, Euler angles derivatives and velocity
with respect to Earth Axis.
(du_dt, dv_dt, dw_dt, dp_dt, dq_dt, dr_dt, dtheta_dt, dphi_dt,
dpsi_dt, dx_dt, dy_dt, dz_dt)
        (m/s², m/s², m/s², rad/s², rad/s², rad/s², rad/s, rad/s, rad/s,
m/s, m/s, m/s).
References
----------
.. [1] B. Etkin, "Dynamics of Atmospheric Flight", Courier Corporation,
p. 149 (5.8 The Flat-Earth Approximation), 2012.
.. [2] M. A. Gómez Tierno y M. Pérez Cortés, "Mecánica del Vuelo",
Garceta Grupo Editorial, pp.18-25 (Tema 2: Ecuaciones Generales del
        Movimiento), 2012.
"""
    # Note definition of moments of inertia p.21 Gomez Tierno, et al
# Mecánica de vuelo
Ix = inertia[0, 0]
Iy = inertia[1, 1]
Iz = inertia[2, 2]
Jxz = - inertia[0, 2]
Fx, Fy, Fz = forces
L, M, N = moments
u, v, w = state_vector[0:3]
p, q, r = state_vector[3:6]
theta, phi, psi = state_vector[6:9]
# Linear momentum equations
du_dt = Fx / mass + r * v - q * w
dv_dt = Fy / mass - r * u + p * w
dw_dt = Fz / mass + q * u - p * v
# Angular momentum equations
dp_dt = (L * Iz + N * Jxz - q * r * (Iz ** 2 - Iz * Iy + Jxz ** 2) +
p * q * Jxz * (Ix + Iz - Iy)) / (Ix * Iz - Jxz ** 2)
dq_dt = (M + (Iz - Ix) * p * r - Jxz * (p ** 2 - r ** 2)) / Iy
dr_dt = (L * Jxz + N * Ix + p * q * (Ix ** 2 - Ix * Iy + Jxz ** 2) -
q * r * Jxz * (Iz + Ix - Iy)) / (Ix * Iz - Jxz ** 2)
# Angular Kinematic equations
dtheta_dt = q * cos(phi) - r * sin(phi)
dphi_dt = p + (q * sin(phi) + r * cos(phi)) * np.tan(theta)
dpsi_dt = (q * sin(phi) + r * cos(phi)) / cos(theta)
# Linear kinematic equations
dx_dt = (cos(theta) * cos(psi) * u +
(sin(phi) * sin(theta) * cos(psi) - cos(phi) * sin(psi)) * v +
(cos(phi) * sin(theta) * cos(psi) + sin(phi) * sin(psi)) * w)
dy_dt = (cos(theta) * sin(psi) * u +
(sin(phi) * sin(theta) * sin(psi) + cos(phi) * cos(psi)) * v +
(cos(phi) * sin(theta) * sin(psi) - sin(phi) * cos(psi)) * w)
dz_dt = -u * sin(theta) + v * sin(phi) * cos(theta) + w * cos(
phi) * cos(theta)
return np.array([du_dt, dv_dt, dw_dt, dp_dt, dq_dt, dr_dt, dtheta_dt,
dphi_dt, dpsi_dt, dx_dt, dy_dt, dz_dt])
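if __name__ == "__main__":
    # Minimal numeric sketch (illustrative; values are placeholders): with zero
    # forces and moments and a pure forward body velocity u = 60 m/s at level
    # attitude, only dx_dt is non-zero and equals u.
    mass = 1000.0                                # kg
    inertia = np.diag([5000.0, 7000.0, 9000.0])  # kg*m2, symmetric aircraft (Jxz = 0)
    state = np.zeros(12)
    state[0] = 60.0                              # u (m/s)
    print(_system_equations(0.0, state, mass, inertia, np.zeros(3), np.zeros(3)))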
|
ba0f076c28ff8381530cf2813423889bd705ee41
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/_FTServices.py
|
f95d8680cd90992fae2ec4a80c973a4c74dce3d7
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
_FTServices.py
|
"""
Classes from the 'FTServices' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
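    # rubicon raises ValueError when libobjc cannot be loaded (e.g. when running
    # off-device), so fall back to a stub that lets this module import anywhere.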
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
FTPasswordManager = _Class("FTPasswordManager")
_FTPasswordManagerCachedAuthTokenInfo = _Class("_FTPasswordManagerCachedAuthTokenInfo")
FTRegionSupport = _Class("FTRegionSupport")
FTRegion = _Class("FTRegion")
FTUserConfiguration = _Class("FTUserConfiguration")
FTEntitlementSupport = _Class("FTEntitlementSupport")
FTMessageDeliveryRemoteURLConnectionFactory = _Class(
"FTMessageDeliveryRemoteURLConnectionFactory"
)
FTAuthKitManager = _Class("FTAuthKitManager")
FTEmbeddedReachability = _Class("FTEmbeddedReachability")
FTiMessageStatus = _Class("FTiMessageStatus")
FTServiceStatus = _Class("FTServiceStatus")
FTNetworkSupport = _Class("FTNetworkSupport")
FTSelectedPNRSubscription = _Class("FTSelectedPNRSubscription")
FTSelectedPNRSubscriptionCache = _Class("FTSelectedPNRSubscriptionCache")
FTDeviceSupport = _Class("FTDeviceSupport")
FTMessageDelivery_DualMode = _Class("FTMessageDelivery_DualMode")
FTMessageQueue = _Class("FTMessageQueue")
FTMessageDelivery = _Class("FTMessageDelivery")
FTMessageDelivery_APS = _Class("FTMessageDelivery_APS")
FTMessageDelivery_HTTP = _Class("FTMessageDelivery_HTTP")
FTServerBag = _Class("FTServerBag")
FTGetRegionMetadataMessage = _Class("FTGetRegionMetadataMessage")
FTIDSMessage = _Class("FTIDSMessage")
FTURLRequestMessage = _Class("FTURLRequestMessage")
IDSWebTunnelRequestMessage = _Class("IDSWebTunnelRequestMessage")
|
5003606e679db8b94be0bb59574ddd8683415f02
|
fb52e7d82f88bf59e43a0cf6762e1926e2188f11
|
/adam-python/bdgenomics/adam/test/variantDataset_test.py
|
de8c22152da0670e28344ed03e12f8accb3b4574
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
bigdatagenomics/adam
|
d621fea3d988da8917417d8ebd46434919b917fa
|
02f4048ac7ddcc1d507427bb86893691a4f6160a
|
refs/heads/master
| 2023-08-12T18:20:18.696000
| 2023-02-23T16:30:44
| 2023-02-23T17:10:52
| 14,541,530
| 929
| 315
|
Apache-2.0
| 2023-08-21T17:07:36
| 2013-11-19T23:47:57
|
Scala
|
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
variantDataset_test.py
|
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdgenomics.adam.adamContext import ADAMContext
from bdgenomics.adam.test import SparkTestCase
class VariantDatasetTest(SparkTestCase):
def test_vcf_round_trip(self):
testFile = self.resourceFile("small.vcf")
ac = ADAMContext(self.ss)
variants = ac.loadVariants(testFile)
tmpPath = self.tmpFile() + ".vcf"
variants.toVariantContexts().saveAsVcf(tmpPath)
        # reload from the saved path so the VCF round trip is actually exercised
        savedVariants = ac.loadVariants(tmpPath)
self.assertEqual(variants._jvmDataset.jrdd().count(),
savedVariants._jvmDataset.jrdd().count())
def test_transform(self):
variantPath = self.resourceFile("small.vcf")
ac = ADAMContext(self.ss)
variants = ac.loadVariants(variantPath)
transformedVariants = variants.transform(lambda x: x.filter(x.start < 19190))
self.assertEqual(transformedVariants.toDF().count(), 3)
|
52be4b3da6973e7dd17fa5f058da92e4db4f3ede
|
2218f5f93ded47519e29a682e68f5e4eff82be8e
|
/docs/sphinx/conf.py
|
09222a459f2b012743ff9f2feb25f2cd5c16adce
|
[
"Apache-2.0"
] |
permissive
|
ddinu/observable
|
56bb113c28a8f3e716d625c0a633318e72df103d
|
8bb82fdf8a7680af5f1957a497cf7fecc7d80010
|
refs/heads/master
| 2022-08-07T15:56:02.730886
| 2022-01-24T23:37:18
| 2022-01-24T23:37:18
| 66,405,755
| 301
| 32
|
Apache-2.0
| 2022-01-24T23:09:33
| 2016-08-23T21:45:22
|
C++
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
conf.py
|
project = "Observable"
master_doc = 'index'
html_theme = 'alabaster'
html_theme_options = {
'description': "Generic observable objects for C++",
'github_user': 'ddinu',
'github_repo': 'observable',
'github_button': True,
'font_family': 'Helvetica, Arial, sans-serif',
'head_font_family': 'Helvetica, Arial, sans-serif',
}
pygments_style = 'xcode'
html_sidebars = { '**': ['globaltoc.html', 'searchbox.html'] }
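# Typical local build from the repository root (output path is a placeholder):
#   sphinx-build -b html docs/sphinx docs/_build/html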
|
d90799b491e43b127f0c9ac17658e88d2cae1ef2
|
ef2c1a0ae0f1746e58fcc160844788ab92a8d488
|
/archai/trainers/lamb_optimizer.py
|
04f225db7056a21688694e826c41af701a0e4b84
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/archai
|
4d04476ef6a434148638ef91df0ef3bf2c948422
|
95d6e19a1523a701b3fbc249dd1a7d1e7ba44aee
|
refs/heads/main
| 2023-09-03T13:23:48.576626
| 2023-07-27T01:30:01
| 2023-07-27T01:30:01
| 245,036,506
| 439
| 97
|
MIT
| 2023-05-09T21:10:10
| 2020-03-05T00:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 8,706
|
py
|
lamb_optimizer.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
#
# Copyright (c) 2019 cybertronai.
# Licensed under the MIT license.
from typing import Iterable, Optional, Tuple
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
"""Lamb algorithm for large batch optimization.
It has been proposed in `Large Batch Optimization for Deep Learning:
Training BERT in 76 minutes`.
Reference:
https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params: Iterable,
lr: Optional[float] = 1e-3,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
eps: Optional[float] = 1e-6,
weight_decay: Optional[float] = 0.0,
adam: Optional[bool] = False,
) -> None:
"""Initialize the optimizer.
Args:
params: An iterable of parameters to optimize.
lr: The learning rate.
betas: Coefficients used for computing running averages.
eps: Term added to the denominator to improve numerical stability.
weight_decay: Weight decay.
adam: Whether to turn current optimizer into Adam.
Raises:
ValueError: If the learning rate, epsilon value, or beta parameters are invalid.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure: Optional[callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Lamb does not support sparse gradients.")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast
# * math.sqrt(bias_correction2) / bias_correction1
step_size = group["lr"]
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"])
if group["weight_decay"] != 0:
adam_step.add_(p.data, alpha=group["weight_decay"])
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group["eps"])
state["weight_norm"] = weight_norm
state["adam_norm"] = adam_norm
state["trust_ratio"] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(adam_step, alpha=-step_size * trust_ratio)
return loss
@torch.jit.script
def _lamb_kernel(
param: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
exp_avg_sq: torch.Tensor,
beta1: float,
beta2: float,
step_size: float,
eps: float,
weight_decay: float,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
"""JIT-based version of the Lamb algorithm for large batch optimization.
It has been proposed in `Large Batch Optimization for Deep Learning:
Training BERT in 76 minutes`.
Reference:
https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params: Iterable,
lr: Optional[float] = 1e-3,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
eps: Optional[float] = 1e-6,
weight_decay: Optional[float] = 0.0,
adam: Optional[bool] = False,
) -> None:
"""Initialize the optimizer.
Args:
params: An iterable of parameters to optimize.
lr: The learning rate.
betas: Coefficients used for computing running averages.
eps: Term added to the denominator to improve numerical stability.
weight_decay: Weight decay.
adam: Whether to turn current optimizer into Adam.
Raises:
ValueError: If the learning rate, epsilon value, or beta parameters are invalid.
"""
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure: Optional[callable] = None) -> torch.FloatTensor:
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("JITLamb does not support sparse gradients.")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
step_size = group["lr"]
param, exp_avg, exp_avg_sq = _lamb_kernel(
p.data,
grad,
exp_avg,
exp_avg_sq,
beta1,
beta2,
step_size,
group["eps"],
group["weight_decay"],
)
state["exp_avg"] = exp_avg
state["exp_avg_sq"] = exp_avg_sq
p.data = param
return loss
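if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module).
    model = torch.nn.Linear(4, 2)
    optimizer = Lamb(model.parameters(), lr=1e-2, weight_decay=0.01)
    for _ in range(3):
        optimizer.zero_grad()
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
    print("final loss:", float(loss))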
|
f3c9f33cd5dba0dc9e0e39d086df27783391b20c
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/tools/model_converters/swin2mmseg.py
|
d434f9465bbdad6bebc7d5962e8bfaf63c7c9e72
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
swin2mmseg.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmengine
import torch
from mmengine.runner import CheckpointLoader
def convert_swin(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
        description='Convert keys in official pretrained swin models to '
        'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert_swin(state_dict)
mmengine.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
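# Example invocation (file names are placeholders):
#   python tools/model_converters/swin2mmseg.py \
#       swin_tiny_patch4_window7_224.pth swin_tiny_mmseg.pth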
|
d927f496b0644872007c881f037084a89791d819
|
f7dc806f341ef5dbb0e11252a4693003a66853d5
|
/modules/mobile_vr/SCsub
|
e6c43228b4c9a2ff92663a44041514d3c476bdec
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"CC-BY-4.0",
"OFL-1.1",
"Bison-exception-2.2",
"CC0-1.0",
"LicenseRef-scancode-nvidia-2002",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unicode",
"BSD-2-Clause",
"FTL",
"GPL-3.0-or-later",
"Bitstream-Vera",
"Zlib",
"MPL-2.0",
"MIT-Modern-Variant"
] |
permissive
|
godotengine/godot
|
8a2419750f4851d1426a8f3bcb52cac5c86f23c2
|
970be7afdc111ccc7459d7ef3560de70e6d08c80
|
refs/heads/master
| 2023-08-21T14:37:00.262883
| 2023-08-21T06:26:15
| 2023-08-21T06:26:15
| 15,634,981
| 68,852
| 18,388
|
MIT
| 2023-09-14T21:42:16
| 2014-01-04T16:05:36
|
C++
|
UTF-8
|
Python
| false
| false
| 158
|
SCsub
|
#!/usr/bin/env python
Import("env")
Import("env_modules")
env_mobile_vr = env_modules.Clone()
env_mobile_vr.add_source_files(env.modules_sources, "*.cpp")
|
|
a5d15ab27ee3902c45449aaf323c86fbdabb603c
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-green/aliyunsdkgreen/__init__.py
|
c8e17b56156faac648f0a58dc047f156f76e63cc
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 21
|
py
|
__init__.py
|
__version__ = '3.6.6'
|
50309e485e2260079e4b0d1f9af1138ccb688e7a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Calibration/HcalAlCaRecoProducers/python/alcagammajet_cfi.py
|
6afff74e0f025f01943f7da0e6a2c8687a6d5dde
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 429
|
py
|
alcagammajet_cfi.py
|
import FWCore.ParameterSet.Config as cms
# producer for alcagammajet (HCAL gamma-jet)
import Calibration.HcalAlCaRecoProducers.alcaGammaJetProducer_cfi
GammaJetProd = Calibration.HcalAlCaRecoProducers.alcaGammaJetProducer_cfi.alcaGammaJetProducer.clone()
import Calibration.HcalAlCaRecoProducers.alcaGammaJetSelector_cfi
GammaJetFilter = Calibration.HcalAlCaRecoProducers.alcaGammaJetSelector_cfi.alcaGammaJetSelector.clone()
|
23eb6cce60fbc6f48f622f62ba6bcfa5271323f2
|
c82aceed27b19578708f1aa1497a0a2e7268e891
|
/modules/api/src/test/functional/tests/shared_zone_test_context.py
|
a27c52018416e2a9601a8b0b892c1d8283d6d6af
|
[
"MPL-2.0",
"EPL-1.0",
"Apache-2.0"
] |
permissive
|
vinyldns/vinyldns
|
441ba87943db67aba61806f47d85de5c5380dd99
|
ec54b1d533f744fc7777aa747b6ad4f1c46d0c3e
|
refs/heads/master
| 2023-08-31T04:17:28.072961
| 2023-08-22T14:18:30
| 2023-08-22T14:18:30
| 142,474,323
| 353
| 134
|
Apache-2.0
| 2023-09-12T20:26:47
| 2018-07-26T17:43:18
|
Scala
|
UTF-8
|
Python
| false
| false
| 28,814
|
py
|
shared_zone_test_context.py
|
import copy
import inspect
import logging
from typing import MutableMapping, Mapping
from tests.list_batch_summaries_test_context import ListBatchChangeSummariesTestContext
from tests.list_groups_test_context import ListGroupsTestContext
from tests.list_recordsets_test_context import ListRecordSetsTestContext
from tests.list_zones_test_context import ListZonesTestContext
from tests.test_data import TestData
from utils import *
from vinyldns_python import VinylDNSClient
logger = logging.getLogger(__name__)
class SharedZoneTestContext(object):
"""
Creates multiple zones to test authorization / access to shared zones across users
"""
_data_cache: MutableMapping[str, MutableMapping[str, Mapping]] = {}
def __init__(self, partition_id: str):
self.partition_id = partition_id
self.setup_started = False
self.ok_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "okAccessKey", "okSecretKey")
self.dummy_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "dummyAccessKey", "dummySecretKey")
self.shared_zone_vinyldns_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "sharedZoneUserAccessKey", "sharedZoneUserSecretKey")
self.support_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "supportUserAccessKey", "supportUserSecretKey")
self.super_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "superUserAccessKey", "superUserSecretKey")
self.unassociated_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "listGroupAccessKey", "listGroupSecretKey")
self.test_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "testUserAccessKey", "testUserSecretKey")
self.history_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "history-key", "history-secret")
self.non_user_client = VinylDNSClient(VinylDNSTestContext.vinyldns_url, "not-exist-key", "not-exist-secret")
self.clients = [self.ok_vinyldns_client, self.dummy_vinyldns_client, self.shared_zone_vinyldns_client,
self.support_user_client, self.super_user_client, self.unassociated_client,
self.test_user_client, self.history_client, self.non_user_client]
self.list_zones = ListZonesTestContext(partition_id)
self.list_zones_client = self.list_zones.client
self.list_records_context = ListRecordSetsTestContext(partition_id)
self.list_groups_context = ListGroupsTestContext(partition_id)
self.list_batch_summaries_context = ListBatchChangeSummariesTestContext(partition_id)
self.dummy_group = None
self.ok_group = None
self.shared_record_group = None
self.history_group = None
self.group_activity_created = None
self.group_activity_updated = None
self.history_zone = None
self.ok_zone = None
self.dummy_zone = None
self.ip6_reverse_zone = None
self.ip6_16_nibble_zone = None
self.ip4_reverse_zone = None
self.classless_base_zone = None
self.classless_zone_delegation_zone = None
self.system_test_zone = None
self.parent_zone = None
self.ds_zone = None
self.requires_review_zone = None
self.shared_zone = None
self.ip4_10_prefix = None
self.ip4_classless_prefix = None
self.ip6_prefix = None
def setup(self):
if self.setup_started:
# Safeguard against reentrance
return
self.setup_started = True
partition_id = self.partition_id
try:
ok_group = {
"name": f"ok-group{partition_id}",
"email": "test@test.com",
"description": "this is a description",
"members": [{"id": "ok"}, {"id": "support-user-id"}],
"admins": [{"id": "ok"}]
}
self.ok_group = self.ok_vinyldns_client.create_group(ok_group, status=200)
            # in theory this shouldn't be needed, but getting "user is not in group" errors on zone creation
self.confirm_member_in_group(self.ok_vinyldns_client, self.ok_group)
dummy_group = {
"name": f"dummy-group{partition_id}",
"email": "test@test.com",
"description": "this is a description",
"members": [{"id": "dummy"}],
"admins": [{"id": "dummy"}]
}
self.dummy_group = self.dummy_vinyldns_client.create_group(dummy_group, status=200)
            # in theory this shouldn't be needed, but getting "user is not in group" errors on zone creation
self.confirm_member_in_group(self.dummy_vinyldns_client, self.dummy_group)
shared_record_group = {
"name": f"record-ownergroup{partition_id}",
"email": "test@test.com",
"description": "this is a description",
"members": [{"id": "sharedZoneUser"}, {"id": "ok"}, {"id": "support-user-id"}],
"admins": [{"id": "sharedZoneUser"}, {"id": "ok"}]
}
self.shared_record_group = self.ok_vinyldns_client.create_group(shared_record_group, status=200)
history_group = {
"name": f"history-group{partition_id}",
"email": "test@test.com",
"description": "this is a description",
"members": [{"id": "history-id"}],
"admins": [{"id": "history-id"}]
}
self.history_group = self.history_client.create_group(history_group, status=200)
self.confirm_member_in_group(self.history_client, self.history_group)
history_zone_change = self.history_client.create_zone(
{
"name": f"system-test-history{partition_id}.",
"email": "i.changed.this.1.times@history-test.com",
"shared": False,
"adminGroupId": self.history_group["id"],
"isTest": True,
"connection": {
"name": "vinyldns.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "vinyldns.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.history_zone = history_zone_change["zone"]
# initialize history
self.history_client.wait_until_zone_active(history_zone_change["zone"]["id"])
self.init_history()
ok_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"ok{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "ok.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "ok.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.ok_zone = ok_zone_change["zone"]
dummy_zone_change = self.dummy_vinyldns_client.create_zone(
{
"name": f"dummy{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.dummy_group["id"],
"isTest": True,
"acl": {
"rules": [
{
"accessLevel": "Delete",
"description": "some_test_rule",
"userId": "history-id"
}
]
},
"connection": {
"name": "dummy.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "dummy.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.dummy_zone = dummy_zone_change["zone"]
self.ip6_prefix = f"fd69:27cc:fe9{partition_id}"
ip6_reverse_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"{partition_id}.9.e.f.c.c.7.2.9.6.d.f.ip6.arpa.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "ip6.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "ip6.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202
)
self.ip6_reverse_zone = ip6_reverse_zone_change["zone"]
ip6_16_nibble_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"0.0.0.1.{partition_id}.9.e.f.c.c.7.2.9.6.d.f.ip6.arpa.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"backendId": "func-test-backend"
}, status=202
)
self.ip6_16_nibble_zone = ip6_16_nibble_zone_change["zone"]
self.ip4_10_prefix = f"10.{partition_id}"
ip4_reverse_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"{partition_id}.10.in-addr.arpa.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "ip4.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "ip4.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202
)
self.ip4_reverse_zone = ip4_reverse_zone_change["zone"]
self.ip4_classless_prefix = f"192.0.{partition_id}"
classless_base_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"{partition_id}.0.192.in-addr.arpa.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "classless-base.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "classless-base.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202
)
self.classless_base_zone = classless_base_zone_change["zone"]
classless_zone_delegation_change = self.ok_vinyldns_client.create_zone(
{
"name": f"192/30.{partition_id}.0.192.in-addr.arpa.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "classless.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "classless.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202
)
self.classless_zone_delegation_zone = classless_zone_delegation_change["zone"]
system_test_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"system-test{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "system-test.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "system-test.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202
)
self.system_test_zone = system_test_zone_change["zone"]
# parent zone gives access to the dummy user, dummy user cannot manage ns records
parent_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"parent.com{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"acl": {
"rules": [
{
"accessLevel": "Delete",
"description": "some_test_rule",
"userId": "dummy"
}
]
},
"connection": {
"name": "parent.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "parent.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.parent_zone = parent_zone_change["zone"]
# mimicking the spec example
ds_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"example.com{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"connection": {
"name": "example.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "example.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.ds_zone = ds_zone_change["zone"]
# zone with name configured for manual review
requires_review_zone_change = self.ok_vinyldns_client.create_zone(
{
"name": f"zone.requires.review{partition_id}.",
"email": "test@test.com",
"shared": False,
"adminGroupId": self.ok_group["id"],
"isTest": True,
"backendId": "func-test-backend"
}, status=202)
self.requires_review_zone = requires_review_zone_change["zone"]
# Shared zone
shared_zone_change = self.support_user_client.create_zone(
{
"name": f"shared{partition_id}.",
"email": "test@test.com",
"shared": True,
"adminGroupId": self.shared_record_group["id"],
"isTest": True,
"connection": {
"name": "shared.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
},
"transferConnection": {
"name": "shared.",
"keyName": VinylDNSTestContext.dns_key_name,
"key": VinylDNSTestContext.dns_key,
"algorithm": VinylDNSTestContext.dns_key_algo,
"primaryServer": VinylDNSTestContext.name_server_ip
}
}, status=202)
self.shared_zone = shared_zone_change["zone"]
# wait until our zones are created
self.ok_vinyldns_client.wait_until_zone_active(system_test_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(ok_zone_change["zone"]["id"])
self.dummy_vinyldns_client.wait_until_zone_active(dummy_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(ip6_reverse_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(ip6_16_nibble_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(ip4_reverse_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(classless_base_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(classless_zone_delegation_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(system_test_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(parent_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(ds_zone_change["zone"]["id"])
self.ok_vinyldns_client.wait_until_zone_active(requires_review_zone_change["zone"]["id"])
self.shared_zone_vinyldns_client.wait_until_zone_active(shared_zone_change["zone"]["id"])
# initialize group activity
self.init_group_activity()
            # initialize list zones; only do this when constructing the whole test context
self.list_zones.setup()
            # note: there is no state to load; the tests only need the client
self.list_zones_client = self.list_zones.client
# build the list of records; note: we do need to save the test records
self.list_records_context.setup()
# build the list of groups
self.list_groups_context.setup()
except Exception:
# Cleanup if setup fails
self.tear_down()
traceback.print_exc()
raise
def init_history(self):
# Initialize the zone history
        # change the zone nine times so we have update events in zone change history,
# ten total changes including creation
for i in range(2, 11):
zone_update = copy.deepcopy(self.history_zone)
zone_update["connection"]["key"] = VinylDNSTestContext.dns_key
zone_update["transferConnection"]["key"] = VinylDNSTestContext.dns_key
zone_update["email"] = "i.changed.this.{0}.times@history-test.com".format(i)
self.history_client.update_zone(zone_update, status=202)
# create some record sets
test_a = TestData.A.copy()
test_a["zoneId"] = self.history_zone["id"]
test_aaaa = TestData.AAAA.copy()
test_aaaa["zoneId"] = self.history_zone["id"]
test_cname = TestData.CNAME.copy()
test_cname["zoneId"] = self.history_zone["id"]
a_record = self.history_client.create_recordset(test_a, status=202)["recordSet"]
aaaa_record = self.history_client.create_recordset(test_aaaa, status=202)["recordSet"]
cname_record = self.history_client.create_recordset(test_cname, status=202)["recordSet"]
# wait here for all the record sets to be created
self.history_client.wait_until_recordset_exists(a_record["zoneId"], a_record["id"])
self.history_client.wait_until_recordset_exists(aaaa_record["zoneId"], aaaa_record["id"])
self.history_client.wait_until_recordset_exists(cname_record["zoneId"], cname_record["id"])
# update the record sets
a_record_update = copy.deepcopy(a_record)
a_record_update["ttl"] += 100
a_record_update["records"][0]["address"] = "9.9.9.9"
a_change = self.history_client.update_recordset(a_record_update, status=202)
aaaa_record_update = copy.deepcopy(aaaa_record)
aaaa_record_update["ttl"] += 100
aaaa_record_update["records"][0]["address"] = "2003:db8:0:0:0:0:0:4"
aaaa_change = self.history_client.update_recordset(aaaa_record_update, status=202)
cname_record_update = copy.deepcopy(cname_record)
cname_record_update["ttl"] += 100
cname_record_update["records"][0]["cname"] = "changed-cname."
cname_change = self.history_client.update_recordset(cname_record_update, status=202)
self.history_client.wait_until_recordset_change_status(a_change, "Complete")
self.history_client.wait_until_recordset_change_status(aaaa_change, "Complete")
self.history_client.wait_until_recordset_change_status(cname_change, "Complete")
# delete the recordsets
self.history_client.delete_recordset(a_record["zoneId"], a_record["id"])
self.history_client.delete_recordset(aaaa_record["zoneId"], aaaa_record["id"])
self.history_client.delete_recordset(cname_record["zoneId"], cname_record["id"])
self.history_client.wait_until_recordset_deleted(a_record["zoneId"], a_record["id"])
self.history_client.wait_until_recordset_deleted(aaaa_record["zoneId"], aaaa_record["id"])
self.history_client.wait_until_recordset_deleted(cname_record["zoneId"], cname_record["id"])
def init_group_activity(self):
client = self.ok_vinyldns_client
group_name = f"test-list-group-activity-max-item-success{self.partition_id}"
members = [{"id": "ok"}]
new_group = {
"name": group_name,
"email": "test@test.com",
"members": members,
"admins": [{"id": "ok"}]
}
created_group = client.create_group(new_group, status=200)
update_groups = []
updated_groups = []
# each update changes the member
for runner in range(0, 10):
members = [{"id": "dummy{0:0>3}".format(runner)}]
update_groups.append({
"id": created_group["id"],
"name": group_name,
"email": "test@test.com",
"members": members,
"admins": [{"id": "ok"}]
})
updated_groups.append(client.update_group(update_groups[runner]["id"], update_groups[runner], status=200))
self.group_activity_created = created_group
self.group_activity_updated = updated_groups
def tear_down(self):
"""
The ok_vinyldns_client is a zone admin on _all_ the zones.
We shouldn't have to do any checks now, as zone admins have full rights to all zones, including
deleting all records (even in the old shared model)
"""
try:
self.list_zones.tear_down()
self.list_records_context.tear_down()
if self.list_batch_summaries_context:
self.list_batch_summaries_context.tear_down(self)
if self.list_groups_context:
self.list_groups_context.tear_down()
for client in self.clients:
client.clear_zones()
for client in self.clients:
client.clear_groups()
# Close all clients
for client in self.clients:
client.tear_down()
except Exception:
traceback.print_exc()
raise
@staticmethod
def confirm_member_in_group(client, group):
retries = 2
success = group in client.list_all_my_groups(status=200)
while retries >= 0 and not success:
success = group in client.list_all_my_groups(status=200)
time.sleep(.05)
retries -= 1
assert_that(success, is_(True))
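
# A minimal, generalized sketch of the wait-until polling pattern used by
# confirm_member_in_group above (hypothetical helper, not part of the shared
# context; assumes `time` is already imported in this module):
def wait_until(predicate, retries=2, delay=0.05):
    """Re-evaluate predicate until it returns a truthy value or retries run out."""
    success = predicate()
    while retries >= 0 and not success:
        success = predicate()
        time.sleep(delay)
        retries -= 1
    return success

# Usage, mirroring confirm_member_in_group:
#   wait_until(lambda: group in client.list_all_my_groups(status=200))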
|
aee22ec13955f79a040eb5b70f2f55372036fb37
|
163bad17c2ba0aeeb05e29d1a7f870e675ee28eb
|
/hikyuu/trade_manage/trade.py
|
92dbce57ddd996e90963d73c595f7e75e494c975
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fasiondog/hikyuu
|
8b7bc4fd99ff915c621586a480c3663ef3fae464
|
86b0fa5b0e847d9a04905bca93660a7a33fc9fc2
|
refs/heads/master
| 2023-09-03T15:04:33.983389
| 2023-09-03T11:17:46
| 2023-09-03T11:17:46
| 5,103,141
| 1,884
| 547
|
MIT
| 2023-09-06T16:53:51
| 2012-07-18T23:21:42
|
C++
|
UTF-8
|
Python
| false
| false
| 4,755
|
py
|
trade.py
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
#
# The MIT License (MIT)
#
# Copyright (c) 2010-2017 fasiondog
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#===============================================================================
# History:
# 1. 20130213, Added by fasiondog
#===============================================================================
from hikyuu.util.slice import list_getitem
from hikyuu import *
BorrowRecordList.__getitem__ = list_getitem
PositionRecordList.__getitem__ = list_getitem
TradeRecordList.__getitem__ = list_getitem
BorrowRecordList.__str__ = lambda self: str(list(self))
BorrowRecordList.__repr__ = lambda self: repr(list(self))
PositionRecordList.__str__ = lambda self: str(list(self))
PositionRecordList.__repr__ = lambda self: repr(list(self))
TradeRecordList.__str__ = lambda self: str(list(self))
TradeRecordList.__repr__ = lambda self: repr(list(self))
try:
import numpy as np
import pandas as pd
def TradeList_to_np(t_list):
"""转化为numpy结构数组"""
t_type = np.dtype(
{
'names': [
'交易日期', '证券代码', '证券名称', '业务名称', '计划交易价格', '实际成交价格', '目标价格', '成交数量', '佣金', '印花税',
'过户费', '其它成本', '交易总成本', '止损价', '现金余额', '信号来源'
],
'formats': [
'datetime64[D]', 'U10', 'U20', 'U10', 'd', 'd', 'd', 'i', 'd', 'd', 'd', 'd',
'd', 'd', 'd', 'U5'
]
}
)
return np.array(
[
(
t.datetime, t.stock.market_code, t.stock.name, get_business_name(t.business),
t.planPrice, t.realPrice, t.goalPrice, t.number, t.cost.commission,
t.cost.stamptax, t.cost.transferfee, t.cost.others, t.cost.total, t.stoploss,
t.cash, get_system_part_name(t.part)
) for t in t_list
],
dtype=t_type
)
def TradeList_to_df(t):
"""转化为pandas的DataFrame"""
return pd.DataFrame.from_records(TradeList_to_np(t), index='交易日期')
TradeRecordList.to_np = TradeList_to_np
TradeRecordList.to_df = TradeList_to_df
def PositionList_to_np(pos_list):
"""转化为numpy结构数组"""
t_type = np.dtype(
{
'names': ['证券代码', '证券名称', '买入日期', '已持仓天数', '持仓数量', '投入金额', '当前市值', '盈亏金额', '盈亏比例'],
'formats': ['U10', 'U20', 'datetime64[D]', 'i', 'i', 'd', 'd', 'd', 'd']
}
)
sm = StockManager.instance()
query = Query(-1)
data = []
for pos in pos_list:
invest = pos.buy_money - pos.sell_money + pos.total_cost
k = pos.stock.get_kdata(query)
cur_val = k[0].close * pos.number
bonus = cur_val - invest
date_list = sm.get_trading_calendar(Query(Datetime(pos.take_datetime.date())))
data.append(
(
pos.stock.market_code, pos.stock.name, pos.take_datetime, len(date_list),
pos.number, invest, cur_val, bonus, 100 * bonus / invest
)
)
return np.array(data, dtype=t_type)
def PositionList_to_df(pos_list):
"""转化为pandas的DataFrame"""
return pd.DataFrame.from_records(PositionList_to_np(pos_list), index='证券代码')
PositionRecordList.to_np = PositionList_to_np
PositionRecordList.to_df = PositionList_to_df
except ImportError:
pass
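
# A minimal usage sketch (illustrative only: assumes a hikyuu TradeManager
# `tm` with recorded trades; `get_trade_list` is a stand-in name, and any
# TradeRecordList behaves the same way after the monkey-patching above):
#
#   t_list = tm.get_trade_list()
#   first = t_list[0]        # list_getitem enables indexing and slicing
#   arr = t_list.to_np()     # numpy structured array, one row per trade
#   df = t_list.to_df()      # pandas DataFrame indexed by trade date (交易日期)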
|
05dd191bc612827e1b09eb9c062b501d472ab72c
|
1543b53d145e1783ba0faa8a3b84b5c49ee29084
|
/kedro/extras/datasets/api/__init__.py
|
ccd799b2c95741dd06b6afd513b6066a87d23c6d
|
[
"Apache-2.0"
] |
permissive
|
kedro-org/kedro
|
9ed5920ac713e66861039ba4901a5347d3cda28e
|
0293dc15812b27330bba31a01c7b332b3165af2a
|
refs/heads/main
| 2023-09-01T08:57:52.258279
| 2023-08-31T09:19:39
| 2023-08-31T09:19:39
| 182,067,506
| 4,099
| 332
|
Apache-2.0
| 2023-09-14T12:12:52
| 2019-04-18T10:29:56
|
Python
|
UTF-8
|
Python
| false
| false
| 316
|
py
|
__init__.py
|
"""``APIDataSet`` loads the data from HTTP(S) APIs
and returns them into either as string or json Dict.
It uses the python requests library: https://requests.readthedocs.io/en/latest/
"""
__all__ = ["APIDataSet"]
from contextlib import suppress
with suppress(ImportError):
from .api_dataset import APIDataSet
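
# A self-contained sketch of the same optional-import pattern used above
# (``some_optional_package`` is a made-up name; a missing optional dependency
# leaves the module importable, and callers can check for None at use time):
_optional_feature = None
with suppress(ImportError):
    from some_optional_package import feature as _optional_feature  # noqa: F401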
|
d634f2b3d934856e70e6967929f96e3a4c238dd9
|
9d5ae8add5868d56af20f38f0bc1841d5ed3b4c2
|
/tests/unit/utils/test_stat_buffer.py
|
b263f2e33de64ee522c693272200a9e54b8e1230
|
[
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] |
permissive
|
powerapi-ng/powerapi
|
5c6e30a48716f0c06449c489820991ed3ca2167d
|
be3f1852ad38894c2bc487bbb3a30508ed8d6b50
|
refs/heads/master
| 2023-08-16T12:14:16.940876
| 2023-08-16T11:25:51
| 2023-08-16T11:25:51
| 175,017,297
| 143
| 29
|
BSD-3-Clause
| 2023-09-12T12:00:45
| 2019-03-11T14:27:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,041
|
py
|
test_stat_buffer.py
|
# Copyright (c) 2021, INRIA
# Copyright (c) 2021, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
from powerapi.utils import StatBuffer
M1 = {
'tags': {'t1': 'a', 't2': 'b'},
'time': 1,
'value': 1.0
}
M2 = {
'tags': {'t1': 'a', 't2': 'b'},
'time': 2,
'value': 2.0
}
M3 = {
'tags': {'t1': 'a', 't2': 'b'},
'time': 3,
'value': 3.0
}
M4 = {
'tags': {'t1': 'a', 't2': 'b'},
'time': 4,
'value': 4.0
}
def test_asking_if_stat_is_available_on_a_key_that_was_never_append_must_raise_KeyError():
buffer = StatBuffer(3)
buffer.append(M2, 'ab')
with pytest.raises(KeyError):
buffer.is_available('qlksjdq')
def test_asking_if_stat_is_available_on_a_stat_buffer_with_aggregation_periode_of_3_while_2_measure_where_append_on_2_seconds_return_false():
buffer = StatBuffer(3)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
assert not buffer.is_available('ab')
def test_asking_if_stat_is_available_on_a_stat_buffer_with_aggregation_periode_of_1_while_2_measure_where_append_on_1_seconds_return_true():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
assert buffer.is_available('ab')
def test_asking_if_stat_is_available_on_a_stat_buffer_with_aggregation_periode_of_1_while_2_measure_where_append_on_2_seconds_return_true():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M3, 'ab')
assert buffer.is_available('ab')
def test_get_stats_on_a_stat_buffer_with_aggregation_periode_of_3_while_2_measure_where_append_on_2_seconds_return_None():
buffer = StatBuffer(3)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
assert buffer.get_stats('ab') is None
def test_get_stats_on_a_key_that_was_never_append_must_raise_KeyError():
buffer = StatBuffer(3)
buffer.append(M2, 'ab')
with pytest.raises(KeyError):
buffer.get_stats('qlksjdq')
def test_get_stats_on_a_stat_buffer_with_aggregation_periode_of_1_while_2_measure_where_append_on_2_seconds_return_good_results():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
assert buffer.get_stats('ab') == {
'mean': 1.5,
'std': 0.5,
'min': 1.0,
'max': 2.0,
'tags': {'t1': 'a', 't2': 'b'},
'time': 2
}
def test_get_stats_on_a_stat_buffer_with_aggregation_periode_of_1_while_3_measure_where_append_on_2_seconds_return_stats_on_two_first_results():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
buffer.append(M3, 'ab')
assert buffer.get_stats('ab') == {
'mean': 1.5,
'std': 0.5,
'min': 1.0,
'max': 2.0,
'tags': {'t1': 'a', 't2': 'b'},
'time': 2
}
def test_get_stats_second_times_on_a_stat_buffer_with_aggregation_periode_of_1_while_3_measure_where_append_on_2_seconds_return_None():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
buffer.append(M3, 'ab')
buffer.get_stats('ab')
assert buffer.get_stats('ab') is None
def test_get_stat_buffer_with_aggregation_periode_of_1_while_4_measure_append_good_result_for_two_last_measure():
buffer = StatBuffer(1)
buffer.append(M1, 'ab')
buffer.append(M2, 'ab')
buffer.append(M3, 'ab')
buffer.append(M4, 'ab')
buffer.get_stats('ab')
assert buffer.get_stats('ab') == {
'mean': 3.5,
'std': 0.5,
'min': 3.0,
'max': 4.0,
'tags': {'t1': 'a', 't2': 'b'},
'time': 4
}
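
# A quick numpy cross-check of the aggregates asserted above (a sketch;
# assumes numpy is available in the test environment). The expected values
# imply that StatBuffer reports population statistics: std([1.0, 2.0]) == 0.5.
def test_numpy_agrees_with_expected_mean_and_std():
    import numpy as np
    values = np.array([M1['value'], M2['value']])  # [1.0, 2.0]
    assert np.mean(values) == 1.5
    assert np.std(values) == 0.5  # population std (ddof=0)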
|
e942c32abda797626d0bcbfd1265894a8d35ce2c
|
39b021eabbb8e3be1734cf92fd641965a796b0eb
|
/deepchem/feat/molecule_featurizers/raw_featurizer.py
|
b6cf3908c407660406484cae98d858229d4d832c
|
[
"MIT"
] |
permissive
|
deepchem/deepchem
|
066cbf42316b2f6bec0166727e0264a485d5266f
|
ee6e67ebcf7bf04259cf13aff6388e2b791fea3d
|
refs/heads/master
| 2023-09-02T01:32:17.860111
| 2023-08-31T18:49:00
| 2023-08-31T18:49:00
| 43,098,215
| 4,876
| 1,905
|
MIT
| 2023-09-14T19:10:44
| 2015-09-24T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
raw_featurizer.py
|
import warnings
from typing import Union
from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class RawFeaturizer(MolecularFeaturizer):
"""Encodes a molecule as a SMILES string or RDKit mol.
  This featurizer can be useful when you're trying to transform a large
  collection of RDKit mol objects into SMILES strings, or when you need a
  "no-op" featurizer in your molecular pipeline.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self, smiles: bool = False):
"""Initialize this featurizer.
Parameters
----------
smiles: bool, optional (default False)
      If True, encode this molecule as a SMILES string. Otherwise, pass it through as an RDKit mol.
"""
self.smiles = smiles
def _featurize(self, datapoint: RDKitMol, **kwargs) -> Union[str, RDKitMol]:
"""Calculate either smiles string or pass through raw molecule.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
str or rdkit.Chem.rdchem.Mol
SMILES string or RDKit Mol object.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
    if 'mol' in kwargs:
      datapoint = kwargs.get("mol")
      warnings.warn(
        'Mol is being phased out as a parameter, please pass "datapoint" instead.',
        DeprecationWarning
      )
if self.smiles:
return Chem.MolToSmiles(datapoint)
else:
return datapoint
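
# A minimal usage sketch (requires RDKit; "CCO" is an arbitrary example
# SMILES, and `featurize` is the public entry point inherited from the
# featurizer base class):
#
#   featurizer = RawFeaturizer(smiles=True)
#   smiles = featurizer.featurize(["CCO"])  # mols round-tripped to SMILES
#
# With smiles=False (the default), the RDKit Mol objects pass through
# unchanged, which is what makes this class a useful "no-op" pipeline step.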
|
a4d04a01622c177f98a2e7854409fc7119f1d7ba
|
76b467dff2c870e85f2f5a5cf70229626b966a25
|
/sota_extractor/scrapers/nlp_progress/markdown.py
|
7eadbab4f57e4703eee764030efa67187888fb30
|
[
"Apache-2.0",
"MIT",
"CC-BY-SA-3.0",
"CC-BY-SA-4.0"
] |
permissive
|
paperswithcode/sota-extractor
|
6909eeb34c1ea3c6b2c5f833b02e3785eaada5ba
|
e6374da129cedab1736e97883bfbcddb7ddd2a72
|
refs/heads/master
| 2022-05-13T02:38:38.047255
| 2022-03-09T15:23:26
| 2022-03-09T15:23:26
| 160,834,210
| 271
| 35
|
Apache-2.0
| 2022-03-09T15:23:27
| 2018-12-07T14:29:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,722
|
py
|
markdown.py
|
import io
import logging
from typing import List
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.extensions.tables import TableExtension
from sota_extractor.taskdb.v01 import Task, Dataset, TaskDB
from sota_extractor.scrapers.nlp_progress.fixer import fix_task
from sota_extractor.scrapers.nlp_progress.parsers import (
Text,
parse_sota,
parse_subdatasets,
)
logger = logging.getLogger(__name__)
class ParserProcessor(Treeprocessor):
def __init__(self, md=None):
super().__init__(md=md)
self.parsed: List[Task] = []
def run(self, root):
# Assumptions:
# 1) H1 are tasks
# 2) Everything until the next heading is the task description
# 3) H2 are subtasks, H3 are datasets, H4 are subdatasets
# Algorithm:
# 1) Split the document by headings
sections = []
cur = []
for el in root:
if el.tag in {"h1", "h2", "h3", "h4", "h5"}:
if cur:
sections.append(cur)
cur = [el]
else:
cur = [el]
else:
cur.append(el)
if cur:
sections.append(cur)
# 2) Parse each heading section one-by-one
task = None # current task element being parsed
subtask = None # current subtask being parsed
dataset = None # current dataset being parsed
for section_index in range(len(sections)):
section = sections[section_index]
header = section[0]
if header.text is None:
# Invalid section
continue
# Task definition
if header.tag == "h1":
if task is not None:
self.parsed.append(task)
task = Task(
name=header.text.strip().title(),
description=Text.parse(
[e for e in section if e.tag == "p"]
).text,
)
# reset subtasks and datasets
subtask = None
dataset = None
# Subtask definition
if header.tag == "h2":
                if task is None:
                    logger.error(
                        "Unexpected subtask without a parent task at: %s",
                        header.text,
                    )
                    continue
# new substask
subtask = Task(
name=header.text.strip().title(),
description=Text.parse(
[e for e in section if e.tag == "p"]
).text,
parent=task,
)
task.subtasks.append(subtask)
# reset the last dataset
dataset = None
# Dataset definition
if header.tag == "h3" and "Table of content" not in header.text:
                if task is None:
                    logger.error(
                        "Unexpected dataset without a parent task at: %s",
                        header.text,
                    )
                    continue
tables = [t for t in section if t.tag == "table"]
n_tables = len(tables)
if n_tables < 2:
text = Text.parse([e for e in section if e.tag == "p"])
dataset = Dataset(
name=header.text.strip().strip(":"),
description=text.text,
links=text.links,
)
if n_tables == 1:
dataset.sota = parse_sota(tables[0])
else:
table_idxs = [
i for i, el in enumerate(section) if el.tag == "table"
]
pairs = []
for idx in table_idxs:
if idx >= 2 and section[idx - 1].tag == "p":
pairs.append((section[idx - 1], section[idx]))
description_idxs = set(range(1, len(section))) - set(
table_idxs
)
description_ps = [
el
for i, el in enumerate(section)
if i in description_idxs
]
text = Text.parse(description_ps)
dataset = Dataset(
name=header.text.strip().strip(":"),
description=text.text,
links=text.links,
)
dataset.subdatasets = parse_subdatasets(
parent=dataset, pairs=pairs
)
if subtask is not None:
# we are in a subtask, add everything here
subtask.datasets.append(dataset)
else:
task.datasets.append(dataset)
if task:
self.parsed.append(task)
class Markdown(markdown.Markdown):
def __init__(self):
super().__init__(extensions=[TableExtension()])
self.parser_processor = ParserProcessor(self)
self.treeprocessors.register(
self.parser_processor, "parser_processor", 1
)
def parse_file(filename: str) -> TaskDB:
"""Parse an NLP-Progress markdown file and return a TaskDB instance."""
md = Markdown()
with io.open("/dev/null", "wb") as f:
md.convertFile(filename, output=f)
tdb = TaskDB()
for task in md.parser_processor.parsed:
for t in fix_task(task):
tdb.add_task(t)
return tdb
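
# A minimal usage sketch (the path is illustrative; iterating the parsed
# tasks assumes TaskDB exposes them via a `tasks` mapping):
#
#   tdb = parse_file("english/named_entity_recognition.md")
#   for task in tdb.tasks.values():
#       print(task.name, len(task.datasets))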
|
35a7398f2569acffe9328f5940ae8a817a51b364
|
f00747d1406b3211983f1255855fce0b5346c13d
|
/src/modules/iqy_gen.py
|
b310d90579a99783625b0c6be4d57f1433a4ae6d
|
[
"Apache-2.0"
] |
permissive
|
sevagas/macro_pack
|
d594c34fed6f3e684130323d96efa27d2d98af95
|
071fd4aa16dc74815c2a860ddafe4358d6454c89
|
refs/heads/master
| 2023-08-15T08:04:35.577175
| 2022-01-20T17:15:59
| 2022-01-20T17:15:59
| 105,688,016
| 1,992
| 413
|
Apache-2.0
| 2022-05-12T13:20:41
| 2017-10-03T18:30:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
iqy_gen.py
|
#!/usr/bin/env python
# encoding: utf-8
import logging
from common.utils import MPParam, getParamValue
from modules.payload_builder import PayloadBuilder
"""
http://www.labofapenetrationtester.com/2015/08/abusing-web-query-iqy-files.html
https://inquest.net/blog/2018/08/23/hunting-iqy-files-with-yara
"""
IQY_TEMPLATE = \
r"""WEB
1
<<<URL>>>
"""
class IqyGenerator(PayloadBuilder):
""" Module used to generate malicious IQY Excel web query"""
def check(self):
return True
def generate(self):
logging.info(" [+] Generating %s file..." % self.outputFileType)
paramArray = [MPParam("targetUrl")]
self.fillInputParams(paramArray)
# Fill template
urlContent = IQY_TEMPLATE
urlContent = urlContent.replace("<<<URL>>>", getParamValue(paramArray, "targetUrl"))
        # Write the rendered template to the output file
        with open(self.outputFilePath, 'w') as f:
            f.write(urlContent)
logging.info(" [-] Generated URL file: %s" % self.outputFilePath)
logging.info(" [-] Test with : \n Click on %s file to test.\n" % self.outputFilePath)
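
# For reference, with targetUrl = "http://example.com/a" (an illustrative
# value) the rendered file body is just the three-line web query:
#
#   WEB
#   1
#   http://example.com/a
#
# Excel interprets any .iqy file of this shape as a web query and fetches the
# URL when the file is opened.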
|
76d95e14b64b98e8ae67636dd770efb4d174e57f
|
c544b4a171b55fb98e2a17aa3068b599301585ac
|
/plasTeX/Renderers/ManPage/__init__.py
|
4dec52eba4c6aa6f1966500bac576dab8408eded
|
[
"MIT"
] |
permissive
|
plastex/plastex
|
3c9dbbfd47a2cb83ef919bcfbb1b57723a8a6979
|
a882a62b81e6ae7b8c9454ae2b222ef5c2c14bb1
|
refs/heads/master
| 2023-07-20T04:53:20.325023
| 2023-07-03T10:03:34
| 2023-07-03T10:03:34
| 17,483,722
| 129
| 41
|
NOASSERTION
| 2023-09-04T10:49:23
| 2014-03-06T16:10:23
|
Python
|
UTF-8
|
Python
| false
| false
| 12,585
|
py
|
__init__.py
|
from plasTeX.Renderers import Renderer as BaseRenderer
from plasTeX import encoding
import textwrap, re, string
class ManPageRenderer(BaseRenderer):
""" Renderer for UNIX man pages """
outputType = str
fileExtension = '.man'
aliases = {
'superscript': 'active::^',
'subscript': 'active::_',
'dollar': '$',
'percent': '%',
'opencurly': '{',
'closecurly': '}',
'underscore': '_',
'ampersand': '&',
'hashmark': '#',
'space': ' ',
'tilde': 'active::~',
'at': '@',
'backslash': '\\',
}
def __init__(self, *args, **kwargs):
BaseRenderer.__init__(self, *args, **kwargs)
# Load dictionary with methods
for key in vars(type(self)):
if key.startswith('do__'):
self[self.aliases[key[4:]]] = getattr(self, key)
elif key.startswith('do_'):
self[key[3:]] = getattr(self, key)
self['default-layout'] = self['document-layout'] = self.default
self.footnotes = []
self.blocks = []
def default(self, node):
""" Rendering method for all non-text nodes """
# Handle characters like \&, \$, \%, etc.
if len(node.nodeName) == 1 and node.nodeName not in encoding.stringletters():
return self.textDefault(node.nodeName)
# Render child nodes
return str(node)
def textDefault(self, node):
return str(node)
def processFileContent(self, document, s):
s = BaseRenderer.processFileContent(self, document, s)
# Clean up newlines
s = re.sub(r'\s*\n(\s*\n)+', r'\n\n', s)
s = re.sub(r'(\s*\n)+(\.B[ld])', r'\n\2', s)
s = re.sub(r'(\.E[ld])\s*(\.B[ld])', r'\1\n\n\2', s)
s = re.sub(r'\.Ed\s*\.Bd', r'.Ed\n.Bd', s)
s = s.lstrip()
return s
# Alignment
def do_flushleft(self, node):
return '\n.Bd -ragged\n%s\n.Ed\n' % node
do_raggedbottom = do_raggedright = do_leftline = do_flushleft
def center(self, text):
return '\n.Bd -centered\n%s\n.Ed\n' % text
def do_center(self, node):
return self.center(str(node))
do_centering = do_centerline = do_center
def do_flushright(self, node):
return '\n.Bd -offset right\n%s\n.Ed\n' % node
do_raggedleft = do_llap = do_flushright
# Arrays
def do_array(self, node, render=str):
output = ['.TS']
# Process colspecs
if node.colspec:
alignments = [x.style['text-align'] for x in node.colspec]
else:
alignments = ['l']*100
for row in node:
colspec = []
for i, cell in enumerate(row):
colspec.append(cell.style.get('text-align', alignments[i])[0])
output.append(' '.join(colspec))
output[-1] += '.'
# Render table
for row in node:
content = []
for cell in row:
content.append(render(cell).strip())
output.append('\t'.join(content))
output.append('.TE')
output.append('')
return re.sub(r'\s*.TE\s*', r'\n.TE\n', '\n'.join(output))
do_tabular = do_tabularx = do_longtable = do_array
def do_cline(self, node):
return ''
def do_multicolumn(self, node):
return str(node)
# Bibliography
def do_thebibliography(self, node):
output = ['','.Sh Bibliography','']
output.append('.Bl -tag -width indent')
for item in node:
output.append('.It %s' % str(item.bibcite).strip())
output.append(str(item).strip())
output.append('.El')
output.append('')
return '\n'.join(output)
def do_bibliographystyle(self, node):
return ''
def do_bibliography(self, node):
return self.default(node)
def do_cite(self, node):
output = []
for item in node.citation():
output.append(str(item))
return ''.join(output)
def do_bibliographyref(self, node):
return self.default(node)
# Boxes
    do_mbox = do_makebox = do_fbox = do_framebox = do_parbox = default
do_minipage = do_raisebox = do_rule = default
# Breaking
def do_linebreak(self, node):
return '\n\n'
do_newline = do_pagebreak = do_newpage = do_clearpage = do_cleardoublepage = do_linebreak
# Crossref
def do_ref(self, node):
return str(node.idref['label'].ref)
def do_pageref(self, node):
return '*'
def do_label(self, node):
return ''
# Floats
def do_figure(self, node):
return str(node)
do_table = do_marginpar = do_figure
def do_caption(self, node):
return '\n%s %s: %s\n' % (node.title, node.ref, str(node).strip())
# Font Selection
do_sffamily = do_textsf = default
do_upshape = do_textup = default
do_scshape = do_textsc = default
do_sc = default
do_tiny = do_scriptsize = do_footnotesize = do_small = default
do_normalsize = do_large = do_Large = do_LARGE = do_huge = do_HUGE = default
def do_textbf(self, node):
return '\\fB%s\\fP' % node
do_bfseries = do_bf = do_textbf
def do_textit(self, node):
return '\\fI%s\\fP' % node
do_itshape = do_it = do_slshape = do_textsl = do_sl = do_cal = do_textit
def do_texttt(self, node):
return '\\fC%s\\fP' % node
do_ttfamily = do_tt = do_texttt
def do_textmd(self, node):
return '\\fR%s\\fP' % node
do_mdseries = do_rmfamily = do_textrm = do_textnormal = do_rm = do_textmd
def do_symbol(self, node):
return '*'
# Footnotes
def do_footnote(self, node):
mark = '[%s]' % (len(self.footnotes)+1)
self.footnotes.append(str(node))
return mark
def do_footnotetext(self, node):
        self.do_footnote(node)
return ''
def do_footnotemark(self, node):
return '[%s]' % (len(self.footnotes)+1)
# Index
def do_theindex(self, node):
return ''
do_printindex = do_index = do_theindex
# Lists
def do_itemize(self, node):
output = ['', '.Bl -bullet -offset 3n -compact']
for item in node:
output.append('.It')
output.append(str(item).strip())
output.append('.El')
output.append('')
return '\n'.join(output)
def do_enumerate(self, node):
output = ['','.Bl -enum -offset 3n -compact']
for item in node:
output.append('.It')
output.append(str(item).strip())
output.append('.El')
output.append('')
return '\n'.join(output)
def do_description(self, node):
output = ['','.Bl -tag -width 3n']
for item in node:
output.append('.It %s' % str(item.attributes.get('term','')).strip())
output.append(str(item).strip())
output.append('.El')
output.append('')
return '\n'.join(output)
do_list = do_trivlist = do_description
# Math
def do_math(self, node):
return re.sub(r'\s*(_|\^)\s*', r'\1', node.source.replace('\\','\\\\'))
do_ensuremath = do_math
def do_equation(self, node):
s = ' %s' % re.compile(r'^\s*\S+\s*(.*?)\s*\S+\s*$', re.S).sub(r'\1', node.source.replace('\\','\\\\'))
return re.sub(r'\s*(_|\^)\s*', r'\1', s)
do_displaymath = do_equation
def do_eqnarray(self, node):
def render(node):
s = re.compile(r'^\$\\\\displaystyle\s*(.*?)\s*\$\s*$', re.S).sub(r'\1', node.source.replace('\\','\\\\'))
return re.sub(r'\s*(_|\^)\s*', r'\1', s)
return self.do_array(node, render=render)
do_align = do_gather = do_falign = do_multiline = do_eqnarray
do_multline = do_alignat = do_split = do_eqnarray
# Misc
do_bgroup = default
def do_def(self, node):
return ''
do_tableofcontents = do_input = do_protect = do_let = do_def
do_newcommand = do_hfill = do_hline = do_openout = do_renewcommand = do_def
do_write = do_appendix = do_global = do_noindent = do_def
do_include = do_markboth = do_setcounter = do_refstepcounter = do_def
do_medskip = do_smallskip = do_parindent = do_indent = do_setlength = do_def
do_settowidth = do_addtolength = do_nopagebreak = do_newwrite = do_def
do_newcounter = do_typeout = do_sloppypar = do_hfil = do_thispagestyle = do_def
def do_egroup(self, node):
return ''
# Pictures
def do_picture(self, node):
return ''
# Primitives
def do_par(self, node):
return '\n%s\n' % str(node).strip()
def do__superscript(self, node):
return self.default(node)
def do__subscript(self, node):
return self.default(node)
# Quotations
def do_quote(self, node):
backslash = self['\\']
self['\\'] = lambda *args: '\001'
output = []
for par in [x.strip() for x in str(node).split('\n\n')]:
for item in [x.strip() for x in par.split('\001')]:
output.append(self.fill(item, initial_indent=' ', subsequent_indent=' '))
output.append('')
output.pop()
self['\\'] = backslash
return '\n'.join(output)
do_quotation = do_verse = do_quote
# Sectioning
def do_document(self, node):
content = str(node).rstrip()
footnotes = ''
if self.footnotes:
output = ['','.Bl -tag -offset indent']
for i, item in enumerate(self.footnotes):
output.append('.It [%s]' % (i+1))
output.append(item)
output.append('.El')
output.append('')
footnotes = '\n'.join(output)
return '%s%s' % (content, footnotes)
def do_maketitle(self, node):
output = []
metadata = node.ownerDocument.userdata
if 'date' in metadata:
output.append('.Dd %s' % metadata['date'])
if 'title' in metadata:
output.append('.Dt %s' % str(metadata['title']).upper())
output.append('')
return '\n'.join(output)
def do_section(self, node):
return '.Sh %s\n%s' % (node.title, node)
do_part = do_chapter = do_section
def do_subsection(self, node):
return '.Ss %s\n%s' % (node.title, node)
do_subsubsection = do_paragraph = do_subparagraph = do_subsubparagraph = do_subsection
def do_title(self, node):
return ''
do_author = do_date = do_thanks = do_title
def do_abstract(self, node):
return self.center(str(node).strip())
# Sentences
def do__dollar(self, node):
return '$'
def do__percent(self, node):
return '%'
def do__opencurly(self, node):
return '{'
def do__closecurly(self, node):
return '}'
def do__underscore(self, node):
return '_'
def do__ampersand(self, node):
return '&'
def do__hashmark(self, node):
return '#'
def do__space(self, node):
return ' '
def do_LaTeX(self, node):
return 'LaTeX'
def do_TeX(self, node):
return 'TeX'
def do_emph(self, node):
return self.default(node)
do_em = do_emph
def do__tilde(self, node):
return ' '
def do_enspace(self, node):
return ' '
do_quad = do_qquad = do_enspace
def do_enskip(self, node):
return ''
do_thinspace = do_enskip
def do_underbar(self, node):
return self.default(node)
# Space
def do_hspace(self, node):
return ' '
def do_vspace(self, node):
return ''
do_bigskip = do_medskip = do_smallskip = do_vspace
# Tabbing - not implemented yet
# Verbatim
def do_verbatim(self, node):
return '\n.Bd -literal%s.Ed\n' % node
do_alltt = do_verbatim
def do_mbox(self, node):
return self.default(node)
def do__at(self, node):
return ''
def do__backslash(self, node):
return '\\'
Renderer = ManPageRenderer
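
# A minimal end-to-end sketch (the filename is illustrative; plasTeX's usual
# flow is to parse a source file into a document, then hand it to a renderer):
#
#   from plasTeX.TeX import TeX
#
#   doc = TeX(file="manual.tex").parse()
#   Renderer().render(doc)  # emits mdoc-style .man output (.Sh, .Bl, .Dd, ...)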
|
ac7cb63f440dd3cd5f2a86b287790588f91c3a5e
|
b4cfd4949cab5dc5bd27fb028596a9fc02f4e1db
|
/skfda/exploratory/outliers/_directional_outlyingness.py
|
30e8338d34415cc11facf54244ed437b0db9077a
|
[
"BSD-3-Clause"
] |
permissive
|
GAA-UAM/scikit-fda
|
dabfd995f2c82efb0d44fa1d2005b2a8ca67442b
|
dfbce35cc9e67d93306dddf0edf4f95aaacd8aff
|
refs/heads/develop
| 2023-08-31T09:11:31.407423
| 2023-08-18T08:19:21
| 2023-08-18T08:19:21
| 96,133,420
| 231
| 55
|
BSD-3-Clause
| 2023-08-18T08:19:22
| 2017-07-03T17:06:56
|
Python
|
UTF-8
|
Python
| false
| false
| 19,535
|
py
|
_directional_outlyingness.py
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import scipy.integrate
import scipy.stats
from numpy import linalg as la
from sklearn.covariance import MinCovDet
from ..._utils._sklearn_adapter import BaseEstimator, OutlierMixin
from ...misc.validation import validate_random_state
from ...representation import FDataGrid
from ...typing._base import RandomStateLike
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ..depth.multivariate import Depth, ProjectionDepth
from . import _directional_outlyingness_experiment_results as experiments
@dataclass
class DirectionalOutlyingnessStats:
"""Directional outlyingness statistical measures."""
directional_outlyingness: NDArrayFloat
functional_directional_outlyingness: NDArrayFloat
mean_directional_outlyingness: NDArrayFloat
variation_directional_outlyingness: NDArrayFloat
def directional_outlyingness_stats( # noqa: WPS218
fdatagrid: FDataGrid,
*,
multivariate_depth: Depth[NDArrayFloat] | None = None,
pointwise_weights: NDArrayFloat | None = None,
) -> DirectionalOutlyingnessStats:
r"""
Compute the directional outlyingness of the functional data.
    Furthermore, it calculates the functional, mean, and variational
    directional outlyingness of the samples in the data set, which are also
    returned.
The functional directional outlyingness can be seen as the overall
outlyingness, analog to other functional outlyingness measures.
The mean directional outlyingness, describes the relative
position (including both distance and direction) of the samples on average
to the center curve; its norm can be regarded as the magnitude
outlyingness.
The variation of the directional outlyingness, measures
the change of the directional outlyingness in terms of both norm and
direction across the whole design interval and can be regarded as the
shape outlyingness.
Firstly, the directional outlyingness is calculated as follows:
.. math::
\mathbf{O}\left(\mathbf{X}(t) , F_{\mathbf{X}(t)}\right) =
\left\{\frac{1}{d\left(\mathbf{X}(t) , F_{\mathbf{X}(t)}\right)} - 1
\right\} \cdot \mathbf{v}(t)
where :math:`\mathbf{X}` is a stochastic process with probability
distribution :math:`F`, :math:`d` a depth function and :math:`\mathbf{v}(t)
= \left\{ \mathbf{X}(t) - \mathbf{Z}(t)\right\} / \lVert \mathbf{X}(t) -
\mathbf{Z}(t) \rVert` is the spatial sign of :math:`\left\{\mathbf{X}(t) -
\mathbf{Z}(t)\right\}`, :math:`\mathbf{Z}(t)` denotes the median and
:math:`\lVert \cdot \rVert` denotes the :math:`L_2` norm.
From the above formula, we define the mean directional outlyingness as:
.. math::
\mathbf{MO}\left(\mathbf{X} , F_{\mathbf{X}}\right) = \int_I
\mathbf{O}\left(\mathbf{X}(t) , F_{\mathbf{X}(t)}\right) \cdot w(t) dt;
and the variation of the directional outlyingness as:
.. math::
VO\left(\mathbf{X} , F_{\mathbf{X}}\right) = \int_I \lVert\mathbf{O}
\left(\mathbf{X}(t), F_{\mathbf{X}(t)}\right)-\mathbf{MO}\left(
\mathbf{X} , F_{\mathbf{X}}\right) \rVert^2 \cdot w(t) dt
where :math:`w(t)` a weight function defined on the :term:`domain` of
:math:`\mathbf{X}`, :math:`I`.
Then, the total functional outlyingness can be computed using these values:
.. math::
FO\left(\mathbf{X} , F_{\mathbf{X}}\right) = \lVert \mathbf{MO}\left(
\mathbf{X} , F_{\mathbf{X}}\right)\rVert^2 + VO\left(\mathbf{X} ,
F_{\mathbf{X}}\right) .
Args:
fdatagrid (FDataGrid): Object containing the samples to be ordered
according to the directional outlyingness.
multivariate_depth (:ref:`depth measure <depth-measures>`, optional):
Method used to order the data. Defaults to :func:`projection
depth <skfda.exploratory.depth.multivariate.ProjectionDepth>`.
pointwise_weights (array_like, optional): an array containing the
weights of each point of discretisation where values have been
recorded. Defaults to the same weight for each of the points:
1/len(interval).
Returns:
DirectionalOutlyingnessStats object.
Example:
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = FDataGrid(data_matrix, grid_points)
>>> stats = directional_outlyingness_stats(fd)
>>> stats.directional_outlyingness
array([[[ 1.33333333],
[ 1.33333333],
[ 2.33333333],
[ 1.5 ],
[ 1.66666667],
[ 1.66666667]],
[[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ]],
[[-1.33333333],
[-1.33333333],
[-1. ],
[-0.5 ],
[-0.33333333],
[-0.33333333]],
[[-0.66666667],
[-0.66666667],
[-1. ],
[-2.5 ],
[-3. ],
[-2.33333333]]])
>>> stats.functional_directional_outlyingness
array([ 6.58864198, 6.4608642 , 6.63753086, 7.40481481])
>>> stats.mean_directional_outlyingness
array([[ 1.66666667],
[ 0. ],
[-0.8 ],
[-1.74444444]])
>>> stats.variation_directional_outlyingness
array([ 0.12777778, 0. , 0.17666667, 0.94395062])
References:
Dai, Wenlin, and Genton, Marc G. "Directional outlyingness for
multivariate functional data." Computational Statistics & Data
Analysis 131 (2019): 50-65.
"""
if fdatagrid.dim_domain > 1:
        raise NotImplementedError("Only 1-dimensional domains are supported.")
if multivariate_depth is None:
multivariate_depth = ProjectionDepth()
if (
pointwise_weights is not None
and (
len(pointwise_weights) != len(fdatagrid.grid_points[0])
or pointwise_weights.sum() != 1
)
):
raise ValueError(
"There must be a weight in pointwise_weights for each recorded "
"time point and altogether must integrate to 1.",
)
if pointwise_weights is None:
pointwise_weights = np.ones(
len(fdatagrid.grid_points[0]),
) / (
fdatagrid.domain_range[0][1] - fdatagrid.domain_range[0][0]
)
depth_pointwise = multivariate_depth(fdatagrid.data_matrix)
assert depth_pointwise.shape == fdatagrid.data_matrix.shape[:-1]
# Obtaining the pointwise median sample Z, to calculate
# v(t) = {X(t) − Z(t)}/|| X(t) − Z(t) ||
median_index = np.argmax(depth_pointwise, axis=0)
pointwise_median = fdatagrid.data_matrix[
median_index,
range(fdatagrid.data_matrix.shape[1]),
]
assert pointwise_median.shape == fdatagrid.data_matrix.shape[1:]
v = fdatagrid.data_matrix - pointwise_median
assert v.shape == fdatagrid.data_matrix.shape
v_norm = la.norm(v, axis=-1, keepdims=True)
# To avoid ZeroDivisionError, the zeros are substituted by ones (the
# reference implementation also does this).
v_norm[np.where(v_norm == 0)] = 1
v_unitary = v / v_norm
    # Calculate the directional outlyingness
dir_outlyingness = (1 / depth_pointwise[..., np.newaxis] - 1) * v_unitary
    # Calculate the mean directional outlyingness
weighted_dir_outlyingness = (
dir_outlyingness * pointwise_weights[:, np.newaxis]
)
assert weighted_dir_outlyingness.shape == dir_outlyingness.shape
mean_dir_outlyingness = scipy.integrate.simps(
weighted_dir_outlyingness,
fdatagrid.grid_points[0],
axis=1,
)
assert mean_dir_outlyingness.shape == (
fdatagrid.n_samples,
fdatagrid.dim_codomain,
)
    # Calculate the variation of the directional outlyingness
norm = np.square(la.norm(
dir_outlyingness
- mean_dir_outlyingness[:, np.newaxis, :],
axis=-1,
))
weighted_norm = norm * pointwise_weights
variation_dir_outlyingness = scipy.integrate.simps(
weighted_norm,
fdatagrid.grid_points[0],
axis=1,
)
assert variation_dir_outlyingness.shape == (fdatagrid.n_samples,)
functional_dir_outlyingness = (
np.square(la.norm(mean_dir_outlyingness))
+ variation_dir_outlyingness
)
assert functional_dir_outlyingness.shape == (fdatagrid.n_samples,)
return DirectionalOutlyingnessStats(
directional_outlyingness=dir_outlyingness,
functional_directional_outlyingness=functional_dir_outlyingness,
mean_directional_outlyingness=mean_dir_outlyingness,
variation_directional_outlyingness=variation_dir_outlyingness,
)
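
# A quick numerical cross-check of the FO formula against the docstring
# example above (sketch only, not part of the public API). Note that the
# implementation squares the Frobenius norm of the *whole* MO matrix, so the
# same ||MO||^2 term is shared by every sample:
#
#   import numpy as np
#   mo = np.array([[1.66666667], [0.], [-0.8], [-1.74444444]])
#   vo = np.array([0.12777778, 0., 0.17666667, 0.94395062])
#   fo = np.square(np.linalg.norm(mo)) + vo
#   # fo -> array([ 6.58864198,  6.4608642 ,  6.63753086,  7.40481481])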
class MSPlotOutlierDetector( # noqa: WPS230
BaseEstimator,
OutlierMixin[FDataGrid],
):
r"""Outlier detector using directional outlyingness.
Considering :math:`\mathbf{Y} = \left(\mathbf{MO}^T, VO\right)^T`, the
outlier detection method is implemented as described below.
First, the square robust Mahalanobis distance is calculated based on a
sample of size :math:`h \leq fdatagrid.n_samples`:
.. math::
{RMD}^2\left( \mathbf{Y}, \mathbf{\tilde{Y}}^*_J\right) = \left(
\mathbf{Y} - \mathbf{\tilde{Y}}^*_J\right)^T {\mathbf{S}^*_J}^{-1}
\left( \mathbf{Y} - \mathbf{\tilde{Y}}^*_J\right)
where :math:`J` denotes the group of :math:`h` samples that minimizes the
determinant of the corresponding covariance matrix,
:math:`\mathbf{\tilde{Y}}^*_J = h^{-1}\sum_{i\in{J}}\mathbf{Y}_i` and
:math:`\mathbf{S}^*_J = h^{-1}\sum_{i\in{J}}\left( \mathbf{Y}_i - \mathbf{
\tilde{Y}}^*_J\right) \left( \mathbf{Y}_i - \mathbf{\tilde{Y}}^*_J
\right)^T`. The sub-sample of size h controls the robustness of the method.
Then, the tail of this distance distribution is approximated as follows:
.. math::
\frac{c\left(m - p\right)}{m\left(p + 1\right)}RMD^2\left(
\mathbf{Y}, \mathbf{\tilde{Y}}^*_J\right)\sim F_{p+1, m-p}
where :math:`p` is the dimension of the image plus one, and :math:`c` and
:math:`m` are parameters determining the degrees of freedom of the
:math:`F`-distribution and the scaling factor, given by empirical results
and an asymptotic formula.
    Finally, we choose a cutoff value, :math:`C`, to determine the outliers
    as the :math:`\alpha` quantile of :math:`F_{p+1, m-p}`. We set
:math:`\alpha = 0.993`, which is used in the classical boxplot for
detecting outliers under a normal distribution.
Parameters:
multivariate_depth: Method used to order the data. Defaults
to :class:`projection depth
<fda.depth_measures.multivariate.ProjectionDepth>`.
pointwise_weights: an array containing the
            weights of each point of discretisation where values have
            been recorded.
cutoff_factor: Factor that multiplies the cutoff value, in order to
consider more or less curves as outliers.
assume_centered: If True, the support of the
robust location and the covariance estimates is computed, and a
covariance estimate is recomputed from it, without centering
            the data. Useful to work with data whose mean is approximately
            zero but not exactly zero. If False (the default),
the robust location and covariance are directly computed with
the FastMCD algorithm without additional treatment.
support_fraction: The proportion of points to be included in the
support of the raw MCD estimate.
Default is None, which implies that the minimum value of
support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state: If int,
random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number
generator; If None, the random number generator is the
RandomState instance used by np.random. By default, it is 0.
Example:
Function :math:`f : \mathbb{R}\longmapsto\mathbb{R}`.
>>> import skfda
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> out_detector = MSPlotOutlierDetector()
>>> out_detector.fit_predict(fd)
array([1, 1, 1, 1])
References:
Dai, Wenlin, and Genton, Marc G. "Multivariate functional data
visualization and outlier detection." Journal of Computational
and Graphical Statistics 27.4 (2018): 923-934.
"""
def __init__(
self,
*,
multivariate_depth: Depth[NDArrayFloat] | None = None,
pointwise_weights: NDArrayFloat | None = None,
assume_centered: bool = False,
support_fraction: float | None = None,
num_resamples: int = 1000,
random_state: RandomStateLike = 0,
cutoff_factor: float = 1,
_force_asymptotic: bool = False,
) -> None:
self.multivariate_depth = multivariate_depth
self.pointwise_weights = pointwise_weights
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.num_resamples = num_resamples
self.random_state = random_state
self.cutoff_factor = cutoff_factor
self._force_asymptotic = _force_asymptotic
def _compute_points(self, X: FDataGrid) -> NDArrayFloat:
multivariate_depth = self.multivariate_depth
if multivariate_depth is None:
multivariate_depth = ProjectionDepth()
# The depths of the samples are calculated giving them an ordering.
stats = directional_outlyingness_stats(
X,
multivariate_depth=multivariate_depth,
pointwise_weights=self.pointwise_weights,
)
mean = stats.mean_directional_outlyingness
variation = stats.variation_directional_outlyingness[:, np.newaxis]
return np.concatenate((mean, variation), axis=1)
def _parameters_asymptotic( # noqa: WPS210
self,
sample_size: int,
dimension: int,
) -> Tuple[float, float]:
"""Return the scaling and cutoff parameters via asymptotic formula."""
n = sample_size
p = dimension
h = np.floor((n + p + 1) / 2)
# c estimation
xi_left = scipy.stats.chi2.rvs(
size=self.num_resamples,
df=p + 2,
random_state=self.random_state_,
)
xi_right = scipy.stats.ncx2.rvs(
size=self.num_resamples,
df=p,
nc=h / n,
random_state=self.random_state_,
)
c_numerator = np.sum(xi_left < xi_right) / self.num_resamples
c_denominator = h / n
estimated_c = c_numerator / c_denominator
# m estimation
alpha = (n - h) / n
alpha_compl = 1 - alpha
q_alpha = scipy.stats.chi2.ppf(alpha_compl, df=p)
dist_p2 = scipy.stats.chi2.cdf(q_alpha, df=p + 2)
dist_p4 = scipy.stats.chi2.cdf(q_alpha, df=p + 4)
c_alpha = alpha_compl / dist_p2
c2 = -dist_p2 / 2
c3 = -dist_p4 / 2
c4 = 3 * c3
b1 = (c3 - c4) / dist_p2
b2 = (
0.5 + 1 / dist_p2
* (c3 - q_alpha / p * (c2 + alpha_compl / 2))
)
v1 = (
alpha_compl * b1**2
* (alpha * (c_alpha * q_alpha / p - 1) ** 2 - 1)
- 2 * c3 * c_alpha**2
* (
3 * (b1 - p * b2)**2
+ (p + 2) * b2 * (2 * b1 - p * b2)
)
)
v2 = n * (b1 * (b1 - p * b2) * alpha_compl)**2 * c_alpha**2
v = v1 / v2
m_asympt = 2 / (c_alpha**2 * v)
estimated_m = (
m_asympt
* np.exp(0.725 - 0.00663 * p - 0.078 * np.log(n)) # noqa: WPS432
)
dfn = p
dfd = estimated_m - p + 1
# Calculation of the cutoff value and scaling factor to identify
# outliers.
scaling = estimated_c * dfd / estimated_m / dfn
cutoff_value = scipy.stats.f.ppf(
0.993, # noqa: WPS432
dfn,
dfd,
loc=0,
scale=1,
)
return scaling, cutoff_value
def _parameters_numeric(
self,
sample_size: int,
dimension: int,
) -> Tuple[float, float]:
key = sample_size // 5
use_asympt = True
if not self._force_asymptotic:
if dimension == 2:
scaling_list = experiments.dim2_scaling_list
cutoff_list = experiments.dim2_cutoff_list
assert len(scaling_list) == len(cutoff_list)
if key < len(scaling_list):
use_asympt = False
elif dimension == 3:
scaling_list = experiments.dim3_scaling_list
cutoff_list = experiments.dim3_cutoff_list
assert len(scaling_list) == len(cutoff_list)
if key < len(scaling_list):
use_asympt = False
if use_asympt:
return self._parameters_asymptotic(sample_size, dimension)
return scaling_list[key], cutoff_list[key]
def fit_predict( # noqa: D102
self,
X: FDataGrid,
y: object = None,
) -> NDArrayInt:
self.random_state_ = validate_random_state(self.random_state)
self.points_ = self._compute_points(X)
        # The squared Mahalanobis distances of the samples are
        # calculated using MCD.
self.cov_ = MinCovDet(
store_precision=False,
assume_centered=self.assume_centered,
support_fraction=self.support_fraction,
random_state=self.random_state_,
)
self.cov_.fit(self.points_)
# Calculation of the degrees of freedom of the F-distribution
# (approximation of the tail of the distance distribution).
# One per dimension (mean dir out) plus one (variational dir out)
dimension = X.dim_codomain + 1
if self._force_asymptotic:
scaling, cutoff_value = self._parameters_asymptotic(
sample_size=X.n_samples,
dimension=dimension,
)
else:
scaling, cutoff_value = self._parameters_numeric(
sample_size=X.n_samples,
dimension=dimension,
)
self.scaling_ = scaling
self.cutoff_value_ = cutoff_value * self.cutoff_factor
rmd_2: NDArrayFloat = self.cov_.mahalanobis(self.points_)
outliers = self.scaling_ * rmd_2 > self.cutoff_value_
# Predict as scikit-learn outlier detectors
return ~outliers + outliers * -1
|
96d85d395793d601f1466c2af747dd4a48cc357c
|
22531d5431acb0630c600be31ae3e8a9954b252c
|
/dpkt/hsrp.py
|
51154569828d26cd6e1d71df483665cdace7b893
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] |
permissive
|
kbandla/dpkt
|
f33cae70d587637fe46303ca9cd467ac1f2f0990
|
440447ae17afc83af9e8d0167e8f13a907d9b83b
|
refs/heads/master
| 2023-08-18T08:58:41.459495
| 2023-01-27T16:44:52
| 2023-01-27T16:44:52
| 4,576,441
| 1,071
| 350
|
NOASSERTION
| 2023-09-09T15:21:08
| 2012-06-06T18:39:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
hsrp.py
|
# $Id: hsrp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Hot Standby Router Protocol."""
from __future__ import absolute_import
from . import dpkt
# Opcodes
HELLO = 0
COUP = 1
RESIGN = 2
# States
INITIAL = 0x00
LEARN = 0x01
LISTEN = 0x02
SPEAK = 0x04
STANDBY = 0x08
ACTIVE = 0x10
class HSRP(dpkt.Packet):
"""Cisco Hot Standby Router Protocol.
It is a Cisco proprietary redundancy protocol for establishing a fault-tolerant default gateway. Version 1 of the
protocol was described in RFC 2281 in 1998. Version 2 of the protocol includes improvements and supports IPv6 but
there is no corresponding RFC published for this version.
Attributes:
__hdr__: Header fields of HSRP.
version: (int): Version. HSRP version number. (1 byte)
opcode: (int): Operation code. (Hello - 0, Coup - 1, Resign - 2) (1 byte)
state: (int): State. This field describes the current state of the router sending the message. (1 byte)
hello: (int): Hellotime. This field is only meaningful in Hello messages. It contains the approximate period
            between the Hello messages that the router sends. The time is given in seconds. (1 byte)
hold: (int): Holdtime. This field is only meaningful in Hello messages. It contains the amount of time that
the current Hello message should be considered valid. The time is given in seconds. (1 byte)
priority: (int): Priority. This field is used to elect the active and standby routers. (1 byte)
group: (int): Group. This field identifies the standby group. (1 byte)
rsvd: (int): Reserved. (1 byte)
auth: (bytes): Authentication Data. This field contains a clear text 8 character reused password. (8 bytes)
vip: (bytes): Virtual IP Address. The virtual IP address used by this group. (4 bytes)
"""
__hdr__ = (
('version', 'B', 0),
('opcode', 'B', 0),
('state', 'B', 0),
('hello', 'B', 0),
('hold', 'B', 0),
('priority', 'B', 0),
('group', 'B', 0),
('rsvd', 'B', 0),
('auth', '8s', b'cisco'),
('vip', '4s', b'')
)
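
# A minimal round-trip sketch (field values are illustrative; HSRP version 1
# uses 0 in the on-wire version field):
#
#   pkt = HSRP(opcode=HELLO, state=ACTIVE, hello=3, hold=10,
#              priority=120, group=1, vip=b'\xc0\xa8\x01\x01')  # 192.168.1.1
#   buf = bytes(pkt)                  # serialized per the __hdr__ layout
#   assert HSRP(buf).state == ACTIVE  # and parsed back from raw bytes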
|
42af354e3ebb8c508e32760cec2dd4680dbb3550
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/SimGeneral/DataMixingModule/python/DataMixer_DataConditions_3_8_X_data2010.py
|
dc9c02fcf255679225fc8edcc9fdc4540a5582c4
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 14,722
|
py
|
DataMixer_DataConditions_3_8_X_data2010.py
|
import FWCore.ParameterSet.Config as cms
def customise(process):
#
# IOV set based on GlobalTag GR_R_35X_V8B
#
# placeholder !!!!!! replace with the actual run number of
# the real run to be overlaid
process.source.firstRun = cms.untracked.uint32(132599)
process.ecalConditions1 = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('EcalADCToGeVConstantRcd'),
tag = cms.string('EcalADCToGeVConstant_v6_offline')
),
cms.PSet(
record = cms.string('EcalChannelStatusRcd'),
tag = cms.string('EcalChannelStatus_v04_offline')
),
cms.PSet(
record = cms.string('EcalGainRatiosRcd'),
tag = cms.string('EcalGainRatio_TestPulse2009_offline')
),
cms.PSet(
record = cms.string('EcalIntercalibConstantsRcd'),
tag = cms.string('EcalIntercalibConstants_v6_offline')
),
cms.PSet(
record = cms.string('EcalIntercalibErrorsRcd'),
tag = cms.string('EcalIntercalibErrors_mc')
),
cms.PSet(
record = cms.string('EcalMappingElectronicsRcd'),
tag = cms.string('EcalMappingElectronics_EEMap')
),
cms.PSet(
record = cms.string('EcalPedestalsRcd'),
tag = cms.string('EcalPedestals_2009runs_hlt')
),
cms.PSet(
record = cms.string('EcalTBWeightsRcd'),
tag = cms.string('EcalTBWeights_EBEE_v01_offline')
),
cms.PSet(
record = cms.string('EcalTimeCalibConstantsRcd'),
tag = cms.string('EcalTimeCalibConstants_v02_offline')
),
cms.PSet(
record = cms.string('EcalWeightXtalGroupsRcd'),
tag = cms.string('EcalWeightXtalGroups_EBEE_offline')
),
cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_p1p2p3_v2_mc')
),
),
connect = cms.string('frontier://FrontierProd/CMS_COND_31X_ECAL'),
authenticationMethod = cms.untracked.uint32(0)
)
process.ecalConditions2 = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('EcalTPGCrystalStatusRcd'),
tag = cms.string('EcalTPGCrystalStatus_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainEBGroupRcd'),
tag = cms.string('EcalTPGFineGrainEBGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainEBIdMapRcd'),
tag = cms.string('EcalTPGFineGrainEBIdMap_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainStripEERcd'),
tag = cms.string('EcalTPGFineGrainStripEE_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainTowerEERcd'),
tag = cms.string('EcalTPGFineGrainTowerEE_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLinearizationConstRcd'),
tag = cms.string('EcalTPGLinearizationConst_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLutGroupRcd'),
tag = cms.string('EcalTPGLutGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLutIdMapRcd'),
tag = cms.string('EcalTPGLutIdMap_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGPedestalsRcd'),
tag = cms.string('EcalTPGPedestals_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGPhysicsConstRcd'),
tag = cms.string('EcalTPGPhysicsConst_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGSlidingWindowRcd'),
tag = cms.string('EcalTPGSlidingWindow_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGTowerStatusRcd'),
tag = cms.string('EcalTPGTowerStatus_hlt')
),
cms.PSet(
record = cms.string('EcalTPGWeightGroupRcd'),
tag = cms.string('EcalTPGWeightGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGWeightIdMapRcd'),
tag = cms.string('EcalTPGWeightIdMap_v2_hlt')
),
),
connect = cms.string('frontier://FrontierProd/CMS_COND_34X_ECAL'),
authenticationMethod = cms.untracked.uint32(0)
)
process.es_prefer_ecal1 = cms.ESPrefer("PoolDBESSource","ecalConditions1")
process.es_prefer_ecal2 = cms.ESPrefer("PoolDBESSource","ecalConditions2")
process.hcalConditions = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('HcalChannelQualityRcd'),
tag = cms.string('HcalChannelQuality_v2.10_offline')
),
cms.PSet(
record = cms.string('HcalElectronicsMapRcd'),
tag = cms.string('HcalElectronicsMap_v7.03_hlt')
),
cms.PSet(
record = cms.string('HcalGainsRcd'),
tag = cms.string('HcalGains_v2.32_offline')
),
cms.PSet(
record = cms.string('HcalL1TriggerObjectsRcd'),
tag = cms.string('HcalL1TriggerObjects_v1.00_hlt')
),
cms.PSet(
record = cms.string('HcalLUTCorrsRcd'),
tag = cms.string('HcalLUTCorrs_v1.01_hlt')
),
cms.PSet(
record = cms.string('HcalPedestalsRcd'),
tag = cms.string('HcalPedestals_ADC_v9.12_offline')
),
cms.PSet(
record = cms.string('HcalPedestalWidthsRcd'),
tag = cms.string('HcalPedestalWidths_ADC_v7.01_hlt')
),
cms.PSet(
record = cms.string('HcalPFCorrsRcd'),
tag = cms.string('HcalPFCorrs_v2.00_express')
),
cms.PSet(
record = cms.string('HcalQIEDataRcd'),
tag = cms.string('HcalQIEData_NormalMode_v7.00_hlt')
),
cms.PSet(
record = cms.string('HcalRespCorrsRcd'),
tag = cms.string('HcalRespCorrs_v1.02_express')
),
cms.PSet(
record = cms.string('HcalTimeCorrsRcd'),
tag = cms.string('HcalTimeCorrs_v1.00_express')
),
cms.PSet(
record = cms.string('HcalZSThresholdsRcd'),
tag = cms.string('HcalZSThresholds_v1.01_hlt')
),
),
connect = cms.string('frontier://FrontierProd/CMS_COND_31X_HCAL'),
authenticationMethod = cms.untracked.uint32(0)
)
process.es_prefer_hcal = cms.ESPrefer("PoolDBESSource","hcalConditions")
try:
process.ecalRecHit.ChannelStatusToBeExcluded = [ 1, 2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 78, 142 ]
except AttributeError:
# Not every configuration defines ecalRecHit; leave the process unchanged.
return process
return process
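# Hedged usage sketch: a customisation function ending in `return process`,
# like the one above, is typically applied at the end of a cmsRun config or
# via the --customise option of cmsDriver (the function name is not shown
# here, so this call is illustrative only):
#   process = customise(process)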
| 64e67e713f1397847264a68fe79b0a4ecac10687 | 793155a64d67f08a27900b91309c45c4a80c4098 | /6/src6/1/mario1.py | 99002879c63ddc5219108934a5e58496eef2ef76 | [] | no_license | cs50/lectures | dad568d3179be2f821b861b4db5f63e66a03cfcd | deed42bc3d2dd2e0f56f7e3a0e421026a5ca8107 | refs/heads/2022/fall | 2023-07-08T11:48:40.398165 | 2023-03-22T15:27:22 | 2023-03-22T15:27:22 | 66,601,652 | 225 | 146 | null | 2022-09-19T21:00:56 | 2016-08-26T00:02:52 | C | UTF-8 | Python | false | false | 172 | py | mario1.py |
# Prints a column of n bricks with a loop
from cs50 import get_int
while True:
n = get_int("Height: ")
if n > 0:
break
for i in range(n):
print("#")
| b88e6a55321fa78c981fc914107f4dccc3437ca1 | ce0a34a4a1f44cda31042e4294e6cef334392a37 | /corpustools/gui/environments.py | 654a14afba3ea2d0d1f1ab4dc1b2366840a9c051 | ["GPL-3.0-only"] | permissive | PhonologicalCorpusTools/CorpusTools | ba6644f90a9790d3f61d923b3b5622eaeaa24caa | 314bd30be24b1cb7ee0c252a6529bbfe964056ad | refs/heads/master | 2022-09-29T20:36:12.148289 | 2022-09-16T01:57:47 | 2022-09-16T01:57:47 | 18,848,568 | 108 | 24 | BSD-3-Clause | 2021-05-07T23:58:03 | 2014-04-16T17:14:55 | Python | UTF-8 | Python | false | false | 56,641 | py | environments.py |
from .imports import *
from .widgets import SegmentSelectionWidget, SegmentSelectDialog
from corpustools.corpus.classes.lexicon import EnvironmentFilter, SyllableEnvironmentFilter
import sip
from pprint import pprint
import regex as re
SPECIAL_SYMBOL_RE = ['.', '^', '$', '*', '+', '?', '|', '{', '}', '[', ']', '#', '(', ')', '\'', '\"']
class EnvironmentDialog(QDialog):
rowToAdd = Signal(list)  # emitted with a one-element list: [(lhs, rhs)], see accept()
def __init__(self, inventory,parent=None):
QDialog.__init__(self,parent)
self.inventory = inventory
layout = QVBoxLayout()
layout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.lhsEnvFrame = QGroupBox('Left hand side')
self.rhsEnvFrame = QGroupBox('Right hand side')
lhsEnvLayout = QVBoxLayout()
lhsEnvLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
rhsEnvLayout = QVBoxLayout()
rhsEnvLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.lhs = SegmentSelectionWidget(self.inventory, exclusive = True)
self.rhs = SegmentSelectionWidget(self.inventory, exclusive = True)
lhsEnvLayout.addWidget(self.lhs)
rhsEnvLayout.addWidget(self.rhs)
self.lhsEnvFrame.setLayout(lhsEnvLayout)
self.rhsEnvFrame.setLayout(rhsEnvLayout)
envFrame = QFrame()
envLayout = QHBoxLayout()
envLayout.addWidget(self.lhsEnvFrame)
envLayout.addWidget(self.rhsEnvFrame)
envFrame.setLayout(envLayout)
layout.addWidget(envFrame)
self.oneButton = QPushButton('Add')
self.anotherButton = QPushButton('Add and create another')
self.cancelButton = QPushButton('Cancel')
self.acLayout = QHBoxLayout()
self.acLayout.addWidget(self.oneButton, alignment = Qt.AlignLeft)
self.acLayout.addWidget(self.anotherButton, alignment = Qt.AlignLeft)
self.acLayout.addWidget(self.cancelButton, alignment = Qt.AlignLeft)
self.oneButton.clicked.connect(self.one)
self.anotherButton.clicked.connect(self.another)
self.cancelButton.clicked.connect(self.reject)
acFrame = QFrame()
acFrame.setLayout(self.acLayout)
layout.addWidget(acFrame, alignment = Qt.AlignLeft)
self.addOneMore = False
self.setLayout(layout)
#self.setFixedSize(self.sizeHint())
self.setWindowTitle('Create bigram')
def one(self):
self.addOneMore = False
self.accept()
def another(self):
self.addOneMore = True
self.accept()
def reset(self):
self.lhs.clearAll()
self.rhs.clearAll()
def accept(self):
lhs = self.lhs.value()
rhs = self.rhs.value()
if lhs == '':
reply = QMessageBox.critical(self,
"Missing information", "Please specify a left hand of the bigram.")
return
if rhs == '':
reply = QMessageBox.critical(self,
"Missing information", "Please specify a right hand of the bigram.")
return
env = lhs, rhs
self.rowToAdd.emit([env])
if not self.addOneMore:
QDialog.accept(self)
else:
self.reset()
class EnvironmentSegmentWidget(QWidget):
segDeleted = Signal(list)
def __init__(self, inventory, parent=None, middle=False, enabled=True, preset_label=False,
show_full_inventory=False, side=None, allow_zero_match=False):
QWidget.__init__(self, parent)
self.inventory = inventory
self.segments = set()
self.features = set()
self.parent_ = parent
self.enabled = enabled
self.show_full_inventory = show_full_inventory
self.side = side
self.allowZeroMatch = allow_zero_match
self.middle = middle
layout = QVBoxLayout()
if self.middle:
lab = '_\n\n{}'
else:
lab = '{}'
self.mainLabel = QPushButton(lab)
self.mainLabel.setStyleSheet("padding: 4px")
layout.addWidget(self.mainLabel)
self.setLayout(layout)
if self.enabled:
self.menu = QMenu(self)
segmentAct = QAction("Add segments", self, triggered=self.selectSegments)
featureAct = QAction("Add features", self, triggered=self.selectFeatures)
clearAct = QAction("Clear selection", self, triggered=self.clearSelection)
matchAnythingAct = QAction("Match single wildcard", self, triggered=self.addArbitrary)
self.menu.addAction(segmentAct)
self.menu.addAction(featureAct)
if not self.middle:
nonSegSelectMenu = self.menu.addMenu('Add non-segment symbol')
for symbol in self.inventory.non_segment_symbols:
nonSegSelectMenu.addAction(QAction(symbol, self, triggered=self.addNonSegSymbol))
self.menu.addAction(matchAnythingAct)
self.menu.addAction(clearAct)
if not self.middle:
deleteAct = QAction("Delete", self, triggered=self.deleteSelection)
self.menu.addAction(deleteAct)
self.mainLabel.setMenu(self.menu)
addNewPosMenu = self.menu.addMenu("Add new environment position")
addToLeftAct = QAction("To the left", self, triggered=self.addToLeft)
addToRightAct = QAction("To the right", self, triggered=self.addToRight)
addNewPosMenu.addAction(addToLeftAct)
addNewPosMenu.addAction(addToRightAct)
if not self.middle:
self.allowZeroAct = QAction("Make this position optional", self, checkable=True, triggered=self.setZeroMatching)
self.menu.addAction(self.allowZeroAct)
else:
self.mainLabel.setEnabled(False)
if preset_label:
self.segments = preset_label.segments
self.features = preset_label.features
self.updateLabel()
def setZeroMatching(self, b):
self.allowZeroMatch = self.allowZeroAct.isChecked()
def addToLeft(self):
self.parent_.insertSegWidget(self, 'l')
def addToRight(self):
self.parent_.insertSegWidget(self, 'r')
def addNonSegSymbol(self):
self.segments.add(self.sender().text())
self.updateLabel()
def addArbitrary(self):
self.segments = set(self.inventory.segs)
self.updateLabel()
def clearSelection(self):
self.segments = set()
self.features = set()
self.updateLabel()
def deleteSelection(self):
self.segDeleted.emit([self]) #connected to EnvironmentSegmentWidget.deleteSeg()
def updateLabel(self):
labelText = self.generateDisplayText()
if not labelText:
labelText = '{}'
if self.middle:
labelText = '_\n\n{}'.format(labelText)
self.mainLabel.setText(labelText)
def generateDisplayText(self):
displayList = list()
if len(self.segments) == len(self.inventory.segs):
if self.show_full_inventory:
displayList = ','.join(self.segments)
else:
displayList = '{*}'
else:
displayList.extend(self.segments)
displayList.extend(self.features)
displayList = ','.join(displayList)
displayList = '{{{}}}'.format(displayList)
return displayList
def selectSegments(self):
dialog = SegmentSelectDialog(self.inventory, self.segments, self, start_pressed=self.segments)
if dialog.exec_():
self.segments = set(dialog.value())
self.updateLabel()
def selectFeatures(self):
dialog = SegmentSelectDialog(self.inventory, self.segments, self, use_features=True)
if dialog.exec_():
self.features = set(dialog.value())
self.updateLabel()
def value(self):
segs = [s for s in self.segments]
if self.features:
more_segs = self.inventory.features_to_segments(self.features)
segs.extend(more_segs)
segs = list(set(segs))
return segs
def displayValue(self):
return self.generateDisplayText()
def getData(self):
attrs = ['inventory', 'segments', 'features', 'middle', 'enabled',
'show_full_inventory', 'side', 'allowZeroMatch']
return {attr: getattr(self, attr) for attr in attrs}
def loadData(self, data):
for k, v in data.items(): #see the getData() function above for details
setattr(self, k, v)
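# Minimal round-trip sketch for the getData()/loadData() pair above; `w` and
# `w2` stand for two EnvironmentSegmentWidget instances and are assumptions:
#   state = w.getData()
#   w2.loadData(state)
#   w2.updateLabel()  # refresh the button text from the restored state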
class EnvironmentSelectWidget(QGroupBox):
def __init__(self, inventory, parent=None, middle=True, show_full_inventory=False, single_env=False, mode='segMode'):
QGroupBox.__init__(self, 'Environments', parent)
self.parent = parent
self.middle = middle
self.inventory = inventory
self.show_full_inventory = show_full_inventory
self.single_env = single_env
self.mode = mode
layout = QVBoxLayout()
scroll = QScrollArea()
self.environmentFrame = QWidget()
lay = QBoxLayout(QBoxLayout.TopToBottom)
self.addButton = QPushButton('New environment')
self.addButton.clicked.connect(self.addNewEnvironment)
lay.addWidget(self.addButton)
lay.addStretch()
self.environmentFrame.setLayout(lay)
scroll.setWidgetResizable(True)
scroll.setWidget(self.environmentFrame)
scroll.setMinimumWidth(140)
scroll.setMinimumHeight(200)
policy = scroll.sizePolicy()
policy.setVerticalStretch(1)
scroll.setSizePolicy(policy)
layout.addWidget(scroll)
self.setLayout(layout)
self.setMinimumWidth(500)
def addNewEnvironment(self):
if self.mode == 'segMode':
envWidget = EnvironmentWidget(self.inventory, middle=self.middle, parent=self, show_full_inventory=self.show_full_inventory)
else:
envWidget = EnvironmentSyllableWidget(self.inventory, middle=self.middle, parent=self, show_full_inventory=self.show_full_inventory)
pos = self.environmentFrame.layout().count() - 2
self.environmentFrame.layout().insertWidget(pos, envWidget)
if self.single_env:
self.addButton.setEnabled(False)
@Slot(list) # connected to EnvironmentWidget.envCopied()
def addCopiedEnvironment(self, args):
copy_data = args[0] if args else None
if self.mode == 'segMode':
envWidget = EnvironmentWidget(self.inventory, middle=copy_data.middle, parent=self, copy_data=copy_data)
else:
envWidget = EnvironmentSyllableWidget(self.inventory, middle=copy_data.middle, parent=self,
copy_data=copy_data)
pos = self.environmentFrame.layout().count() - 2
self.environmentFrame.layout().insertWidget(pos, envWidget)
def value(self):
envs = []
for ind in range(self.environmentFrame.layout().count() - 2):
wid = self.environmentFrame.layout().itemAt(ind).widget()
envs.append(wid.value()) # wid here is EnvironmentWidget, and value() returns an EnvFilter
return envs
def displayValue(self):
# TODO: need to change this part to fit the new sylmode
envs = []
for ind in range(self.environmentFrame.layout().count() - 2):
wid = self.environmentFrame.layout().itemAt(ind).widget()
envs.append(wid.displayValue())
return envs
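# Hedged usage sketch: value() above returns one EnvironmentFilter per row,
# while displayValue() returns the human-readable 'lhs_rhs' strings. Assuming
# a populated widget `envSelect`:
#   filters = envSelect.value()        # [EnvironmentFilter, ...]
#   labels = envSelect.displayValue()  # e.g. ['{p,t}_{a}'] (illustrative)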
class EnvironmentWidget(QWidget):
envCopied = Signal(list)
def __init__(self, inventory, parent=None, middle=True, copy_data=None, show_full_inventory=False):
QWidget.__init__(self)
self.inventory = inventory
self.parent = parent
self.middle = middle
self.show_full_inventory = show_full_inventory
self.envCopied.connect(self.parent.addCopiedEnvironment)
layout = QHBoxLayout()
self.lhsAddNew = QPushButton('+')
self.lhsAddNew.setFixedWidth(50)
self.lhsAddNew.clicked.connect(self.addLhs)
self.lhsWidget = QWidget()
self.lhsLayout = QHBoxLayout()
self.lhsWidget.setLayout(self.lhsLayout)
self.rhsAddNew = QPushButton('+')
self.rhsAddNew.setFixedWidth(50)
self.rhsAddNew.clicked.connect(self.addRhs)
self.rhsWidget = QWidget()
self.rhsLayout = QHBoxLayout()
self.rhsWidget.setLayout(self.rhsLayout)
self.middleWidget = EnvironmentSegmentWidget(self.inventory, parent=self, middle=True, enabled=middle,
show_full_inventory=show_full_inventory)
self.removeButton = QPushButton('Remove environment')
self.removeButton.clicked.connect(self.deleteEnvironment)
self.copyButton = QPushButton('Copy environment')
self.copyButton.clicked.connect(self.copyEnvironment)
if self.parent.single_env:
self.copyButton.setEnabled(False)
layout.addWidget(self.lhsAddNew)
layout.addWidget(self.lhsWidget)
layout.addWidget(self.middleWidget)
layout.addWidget(self.rhsWidget)
layout.addWidget(self.rhsAddNew)
layout.addStretch()
optionlayout = QVBoxLayout()
optionlayout.addWidget(self.removeButton)
optionlayout.addWidget(self.copyButton)
layout.addLayout(optionlayout)
self.setLayout(layout)
if copy_data:
self.loadfromCopy(copy_data)
def loadfromCopy(self, copy_data):
self.middleWidget.segments = copy_data.middleWidget.segments
self.middleWidget.features = copy_data.middleWidget.features
self.middleWidget.mainLabel.setText(copy_data.middleWidget.mainLabel.text())
for ind in range(copy_data.lhsWidget.layout().count()):
copy_wid = copy_data.lhsWidget.layout().itemAt(ind).widget()
wid = EnvironmentSegmentWidget(self.inventory, parent=self, preset_label=copy_wid, side='l',
allow_zero_match=copy_wid.allowZeroMatch)
wid.allowZeroAct.setChecked(copy_wid.allowZeroMatch)
self.lhsWidget.layout().insertWidget(ind, wid)
wid.segDeleted.connect(self.deleteSeg)
for ind in range(copy_data.rhsWidget.layout().count()):
copy_wid = copy_data.rhsWidget.layout().itemAt(ind).widget()
wid = EnvironmentSegmentWidget(self.inventory, parent=self, preset_label=copy_wid, side='r',
allow_zero_match=copy_wid.allowZeroMatch)
wid.allowZeroAct.setChecked(copy_wid.allowZeroMatch)
self.rhsWidget.layout().insertWidget(ind, wid)
wid.segDeleted.connect(self.deleteSeg)
def copyEnvironment(self):
self.envCopied.emit([self]) # connected to EnvironmentSelectWidget.addCopiedEnvironment()
def deleteEnvironment(self):
self.parent.addButton.setEnabled(True)
self.deleteLater()
def insertSegWidget(self, match_widget, add_to_side):
if match_widget.side is None: # middle widget
segWidget = EnvironmentSegmentWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side=add_to_side)
segWidget.segDeleted.connect(self.deleteSeg)
if add_to_side == 'r':
layout = self.rhsWidget.layout()
layout.insertWidget(0, segWidget)
elif add_to_side == 'l':
layout = self.lhsWidget.layout()
layout.insertWidget(layout.count(), segWidget)  # append (QLayout does not support len())
layout.update()
return
segWidget = EnvironmentSegmentWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side=match_widget.side)
segWidget.segDeleted.connect(self.deleteSeg)
if match_widget.side == 'r':
layout = self.rhsWidget.layout()
elif match_widget.side == 'l':
layout = self.lhsWidget.layout()
widgets = list()
for ind in range(layout.count()):
if layout.itemAt(ind).widget() == match_widget:
if add_to_side == 'l':
widgets.append(segWidget)
widgets.append(layout.itemAt(ind).widget())
elif add_to_side == 'r':
widgets.append(layout.itemAt(ind).widget())
widgets.append(segWidget)
else:
widgets.append(layout.itemAt(ind).widget())
for i, widget in enumerate(widgets):
layout.insertWidget(i, widget)
layout.update()
@Slot(list) #connected to EnvironmentSegmentWidget.segDeleted()
def deleteSeg(self, arg):
segWidget = arg[0]
if segWidget.side == 'r':
layout = self.rhsWidget.layout()
elif segWidget.side == 'l':
layout = self.lhsWidget.layout()
for ind in reversed(range(layout.count())):
if layout.itemAt(ind).widget() == segWidget:
layout.takeAt(ind)  # QLayout provides takeAt(), not removeAt()
break
segWidget.deleteLater()
def addLhs(self):
segWidget = EnvironmentSegmentWidget(self.inventory, parent=self,
show_full_inventory=self.show_full_inventory, side='l')
self.lhsWidget.layout().insertWidget(0, segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
return segWidget
def addRhs(self):
segWidget = EnvironmentSegmentWidget(self.inventory, parent=self,
show_full_inventory=self.show_full_inventory, side='r')
self.rhsWidget.layout().addWidget(segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
return segWidget
def value(self):
lhsZeroPositions = list()
rhsZeroPositions = list()
lhs = []
for ind in range(self.lhsWidget.layout().count()):
wid = self.lhsWidget.layout().itemAt(ind).widget()
lhs.append(wid.value())
if wid.allowZeroMatch:
lhsZeroPositions.append(ind)
rhs = []
for ind in range(self.rhsWidget.layout().count()):
wid = self.rhsWidget.layout().itemAt(ind).widget()
rhs.append(wid.value())
if wid.allowZeroMatch:
rhsZeroPositions.append(ind)
middle = self.middleWidget.value()
return EnvironmentFilter(middle, lhs, rhs, zeroPositions=(lhsZeroPositions, rhsZeroPositions))
def displayValue(self):
lhs = list()
rhs = list()
for ind in range(self.lhsWidget.layout().count()):
wid = self.lhsWidget.layout().itemAt(ind).widget()
lhs.append(wid.displayValue())
lhs = ','.join(lhs) if lhs else ''
for ind in range(self.rhsWidget.layout().count()):
wid = self.rhsWidget.layout().itemAt(ind).widget()
rhs.append(wid.displayValue())
rhs = ','.join(rhs) if rhs else ''
return '{}_{}'.format(lhs, rhs)
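# Sketch of the EnvironmentFilter built by value() above, for a middle of
# {a}, one left-hand position {p, t} marked optional, and no right-hand
# positions (all values illustrative):
#   EnvironmentFilter(['a'], [['p', 't']], [], zeroPositions=([0], []))
# displayValue() would render this environment as '{p,t}_'.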
class SyllableConstructDialog(QDialog):
def __init__(self, inventory, parent=None,
preselected_onset=dict(),
preselected_nucleus=dict(),
preselected_coda=dict(),
preselected_stress=set(),
preselected_tone=set()):
QDialog.__init__(self, parent)
self.setWindowTitle('Construct syllables')
mainLayout = QVBoxLayout()
self.setLayout(mainLayout)
layout = QHBoxLayout()
self.syllWidget = SyllableConstructWidget(inventory, parent=self,
preselected_onset=preselected_onset,
preselected_nucleus=preselected_nucleus,
preselected_coda=preselected_coda)
layout.addWidget(self.syllWidget)
optionFrame = QGroupBox('Options')
optionLayout = QVBoxLayout()
optionFrame.setLayout(optionLayout)
self.stressWidget = StressWidget(inventory, parent=self, preselected_stress=preselected_stress)
optionLayout.addWidget(self.stressWidget)
self.toneWidget = ToneWidget(inventory, parent=self, preselected_tone=preselected_tone)
optionLayout.addWidget(self.toneWidget)
layout.addWidget(optionFrame)
mainLayout.addLayout(layout)
acFrame = QFrame()
acLayout = QHBoxLayout()
acFrame.setLayout(acLayout)
self.acceptButton = QPushButton('Ok')
self.cancelButton = QPushButton('Cancel')
acLayout.addWidget(self.acceptButton, alignment=Qt.AlignLeft)
acLayout.addWidget(self.cancelButton, alignment=Qt.AlignLeft)
self.acceptButton.clicked.connect(self.accept)
self.cancelButton.clicked.connect(self.reject)
mainLayout.addWidget(acFrame, alignment=Qt.AlignCenter)
def value(self):
output = dict()
output['stress'] = self.stressWidget.value()
output['tone'] = self.toneWidget.value()
syllable_info = self.syllWidget.value()
output.update(syllable_info)
return output
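# Shape of the dict returned by value() above (keys come from this file;
# the concrete values are illustrative):
#   {'stress': {'Primary'}, 'tone': set(),
#    'onset': {'search_type': 'Minimally contains', 'contents': [...]},
#    'nucleus': {...}, 'coda': {...}}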
class SyllableConstructWidget(QGroupBox):
#sylCopied = Signal(list)
def __init__(self, inventory, parent=None, show_full_inventory=False,
preselected_onset=dict(),
preselected_nucleus=dict(),
preselected_coda=dict()):
QGroupBox.__init__(self, 'Syllable', parent)
self.parent = parent
self.inventory = inventory
self.show_full_inventory = show_full_inventory
layout = QHBoxLayout()
# onset part
onsetGroup = QGroupBox('Onset')
globalOnsetLayout = QVBoxLayout()
onsetGroup.setLayout(globalOnsetLayout)
self.onsetSearchType = SearchTypeWidget(parent=onsetGroup,
preselected_type=preselected_onset['search_type'])
globalOnsetLayout.addWidget(self.onsetSearchType)
onsetBottomLayout = QHBoxLayout()
self.onsetWidget = QWidget()
self.onsetLayout = QHBoxLayout()
self.onsetWidget.setLayout(self.onsetLayout)
for seg_dict in preselected_onset['contents']:
segWidget = SyllableSegmentWidget(self.inventory, "onsets", parent=self, root=False,
show_full_inventory=self.show_full_inventory,
preselected_segments=seg_dict['segments'],
preselected_features=seg_dict['features'],
preselected_negative=seg_dict['negative'])
self.onsetWidget.layout().addWidget(segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
self.onsetAddNew = QPushButton('+')
self.onsetAddNew.clicked.connect(self.addOnset)
self.onsetLabel = QLabel()
onsetBottomLayout.addWidget(self.onsetAddNew)
onsetBottomLayout.addWidget(self.onsetWidget)
onsetBottomLayout.addWidget(self.onsetLabel)
globalOnsetLayout.addLayout(onsetBottomLayout)
# coda part
codaGroup = QGroupBox('Coda')
globalCodaLayout = QVBoxLayout()
codaGroup.setLayout(globalCodaLayout)
self.codaSearchType = SearchTypeWidget(parent=codaGroup,
preselected_type=preselected_coda['search_type'])
globalCodaLayout.addWidget(self.codaSearchType)
codaBottomLayout = QHBoxLayout()
self.codaWidget = QWidget()
self.codaLayout = QHBoxLayout()
self.codaWidget.setLayout(self.codaLayout)
for seg_dict in preselected_coda['contents']:
segWidget = SyllableSegmentWidget(self.inventory, "codas", parent=self, root=False,
show_full_inventory=self.show_full_inventory,
preselected_segments=seg_dict['segments'],
preselected_features=seg_dict['features'],
preselected_negative=seg_dict['negative'])
self.codaWidget.layout().addWidget(segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
self.codaAddNew = QPushButton('+')
self.codaAddNew.clicked.connect(self.addCoda)
self.codaLabel = QLabel()
codaBottomLayout.addWidget(self.codaLabel)
codaBottomLayout.addWidget(self.codaWidget)
codaBottomLayout.addWidget(self.codaAddNew)
globalCodaLayout.addLayout(codaBottomLayout)
# nucleus part
nucleusGroup = QGroupBox('Nucleus')
globalNucleusLayout = QVBoxLayout()
nucleusGroup.setLayout(globalNucleusLayout)
self.nucleusSearchType = SearchTypeWidget(parent=nucleusGroup,
preselected_type=preselected_nucleus['search_type'])
globalNucleusLayout.addWidget(self.nucleusSearchType)
bottomLayout = QHBoxLayout()
self.nucleusWidget = QWidget()
self.nucleusLayout = QHBoxLayout()
self.nucleusWidget.setLayout(self.nucleusLayout)
if preselected_nucleus['contents']:
self.nucleus = SyllableSegmentWidget(self.inventory,
"nuclei",
parent=self,
root=True,
show_full_inventory=show_full_inventory,
preselected_segments=preselected_nucleus['contents'][0]['segments'],
preselected_features=preselected_nucleus['contents'][0]['features'],
preselected_negative=preselected_nucleus['contents'][0]['negative'])
else:
self.nucleus = SyllableSegmentWidget(self.inventory,
"nuclei",
parent=self,
root=True)
self.nucleusLayout.addWidget(self.nucleus)
bottomLayout.addWidget(self.nucleusWidget)
globalNucleusLayout.addLayout(bottomLayout)
layout.addWidget(onsetGroup)
layout.addWidget(nucleusGroup)
layout.addWidget(codaGroup)
layout.addStretch()
self.setLayout(layout)
self.setZeroSymbol()
def setZeroSymbol(self):
if self.onsetLayout.count() == 0:
self.onsetLabel.setText('\u2205')
else:
self.onsetLabel.setText('')
if self.codaLayout.count() == 0:
self.codaLabel.setText('\u2205')
else:
self.codaLabel.setText('')
def addOnset(self):
segWidget = SyllableSegmentWidget(self.inventory, "onsets", parent=self, root=False, show_full_inventory=self.show_full_inventory)
self.onsetWidget.layout().insertWidget(0, segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
self.setZeroSymbol()
return segWidget
def addCoda(self):
segWidget = SyllableSegmentWidget(self.inventory, "codas", parent=self, root=False, show_full_inventory=self.show_full_inventory)
self.codaWidget.layout().addWidget(segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
self.setZeroSymbol()
return segWidget
@Slot(list) # connected to SyllableWidget.segDeleted()
def deleteSeg(self, arg):
segWidget = arg[0]
if segWidget.constituent == "codas":
layout = self.codaWidget.layout()
elif segWidget.constituent == 'onsets':
layout = self.onsetWidget.layout()
for ind in reversed(range(layout.count())):
if layout.itemAt(ind).widget() == segWidget:
layout.removeWidget(segWidget)
break
segWidget.deleteLater()
self.setZeroSymbol()
def value(self):
output = {'onset': dict(),
'nucleus': dict(),
'coda': dict()}
output['onset']['search_type'] = self.onsetSearchType.value()
output['nucleus']['search_type'] = self.nucleusSearchType.value()
output['coda']['search_type'] = self.codaSearchType.value()
output['onset']['contents'] = list()
for i in range(self.onsetLayout.count()):
segWidget = self.onsetLayout.itemAt(i).widget()
output['onset']['contents'].append(segWidget.value())
output['nucleus']['contents'] = list()
for k in range(self.nucleusLayout.count()):
segWidget = self.nucleusLayout.itemAt(k).widget()
output['nucleus']['contents'].append(segWidget.value())
output['coda']['contents'] = list()
for j in range(self.codaLayout.count()):
segWidget = self.codaLayout.itemAt(j).widget()
output['coda']['contents'].append(segWidget.value())
return output
def displayValue(self):
pass
class SearchTypeWidget(QGroupBox):
def __init__(self, parent=None, preselected_type=''):
QGroupBox.__init__(self, 'Search type', parent)
layout = QVBoxLayout()
self.typeSelect = QComboBox()
for search_type in ['Exactly matches', 'Minimally contains', 'Starts with', 'Ends with']:
self.typeSelect.addItem(search_type)
layout.addWidget(self.typeSelect)
index = self.typeSelect.findText('Minimally contains')
self.typeSelect.setCurrentIndex(index)
if preselected_type:
preselected_index = self.typeSelect.findText(preselected_type)
self.typeSelect.setCurrentIndex(preselected_index)
self.setLayout(layout)
def value(self):
return str(self.typeSelect.currentText())
class StressWidget(QGroupBox):
def __init__(self, inventory, parent=None, preselected_stress=set()):
QGroupBox.__init__(self, 'Stress', parent)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.options = list(inventory.stress_types.keys()) + ['None']
for stress in self.options:
setattr(self, stress, QCheckBox(stress))
self.layout.addWidget(getattr(self, stress))
if stress in preselected_stress:
getattr(self, stress).setChecked(True)
# If there is no stress, then disable this widget
if len(inventory.stress_types.keys()) == 0:
self.setEnabled(False)
def value(self):
if not self.isEnabled():
return set()
selected = set()
for stress in self.options:
button = getattr(self, stress)
if button.isChecked():
selected.add(button.text())
return selected
class ToneWidget(QGroupBox):
def __init__(self, inventory, parent=None, preselected_tone=set()):
QGroupBox.__init__(self, 'Tone', parent)
self.inventory = inventory
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.options = list(inventory.tone_types.keys()) + ['None']
for tone in self.options:
setattr(self, tone, QCheckBox(tone))
self.layout.addWidget(getattr(self, tone))
if tone in preselected_tone:
getattr(self, tone).setChecked(True)
# If there is no tone, then disable this widget
if len(inventory.tone_types.keys()) == 0:
self.setEnabled(False)
def value(self):
if not self.isEnabled():
return set()
selected = set()
for tone in self.options:
button = getattr(self, tone)
if button.isChecked():
selected.add(button.text())
return selected
class SyllableSegmentWidget(QWidget):
segDeleted = Signal(list)
def __init__(self,
inventory,
constituent,
parent=None,
root=False,
show_full_inventory=False,
preselected_segments=set(),
preselected_features=set(),
preselected_negative=False):
QWidget.__init__(self, parent)
self.inventory = inventory
self.constituent = constituent
self.segments = preselected_segments
self.features = preselected_features
self.parent_ = parent
self.show_full_inventory = show_full_inventory
self.root = root
self.negative = preselected_negative
layout = QVBoxLayout()
self.setLayout(layout)
self.mainLabel = QPushButton("{}")
self.mainLabel.setStyleSheet("padding: 4px")
layout.addWidget(self.mainLabel)
self.menu = QMenu(self)
segmentAct = QAction("Add segments", self, triggered=self.selectSegments)
featureAct = QAction("Add features", self, triggered=self.selectFeatures)
clearAct = QAction("Clear selection", self, triggered=self.clearSelection)
matchAnythingAct = QAction("Match single wildcard", self, triggered=self.addArbitrary)
self.negAct = QAction("Set negative", self, checkable=True, triggered=self.setNegative)
if self.negative:
self.negAct.setChecked(True)
self.menu.addAction(segmentAct)
self.menu.addAction(featureAct)
self.menu.addAction(clearAct)
self.menu.addAction(matchAnythingAct)
self.menu.addAction(self.negAct)
if not self.root:
deleteAct = QAction("Delete", self, triggered=self.deleteSelection)
self.menu.addAction(deleteAct)
self.mainLabel.setMenu(self.menu)
self.updateLabel()
def setNegative(self):
self.negative = self.negAct.isChecked()
def addArbitrary(self):
self.segments = set(self.inventory.segs.keys()) - {'#'} # Add all segs except for the boundary symbol #
self.updateLabel()
def clearSelection(self):
self.segments = set()
self.features = set()
self.negative = False
self.updateLabel()
def deleteSelection(self):
self.segDeleted.emit([self])
def updateLabel(self):
labelText = self.generateDisplayText()
if not labelText:
labelText = '{}'
labelText = '{}'.format(labelText)
self.mainLabel.setText(labelText)
def generateDisplayText(self):
displayList = list()
if len(self.segments) == len(self.inventory.segs.keys()) - 1: # exclude '#'
if self.show_full_inventory:
displayList = ', '.join(self.segments)
else:
displayList = '{*}'
else:
displayList.extend(self.segments)
displayList.extend(self.features)
displayList = ', '.join(displayList)
displayList = '{{{}}}'.format(displayList)
return displayList
def selectSegments(self):
dialog = SegmentSelectDialog(self.inventory, self.segments, self, start_pressed=self.segments)
if dialog.exec_():
self.segments = set(dialog.value())
self.updateLabel()
def selectFeatures(self):
dialog = SegmentSelectDialog(self.inventory, self.segments, self, use_features=True)
if dialog.exec_():
self.features = set(dialog.value())
self.updateLabel()
def value(self):
# {'segments': ('p', 't', 'k'),
# 'features': ('+syllabic')}
output = dict()
output['segments'] = self.segments
output['features'] = self.features
output['negative'] = self.negative
return output
def displayValue(self):
return self.generateDisplayText()
def getData(self):
pass
"""
attrs = ['inventory', 'segments', 'features', 'inventory', 'middle', 'enabled',
'show_full_inventory', 'side', 'allowZeroMatch']
return {attr:getattr(self,attr) for attr in attrs}
"""
def loadData(self, data):
pass
"""
for k, v in data.items(): #see the getData() function above for details
setattr(self, k, v)
"""
class SyllableWidget(QWidget):
segDeleted = Signal(list)
def __init__(self, inventory, parent=None, middle=False, show_full_inventory=False, side=None, preset=False):
QWidget.__init__(self, parent)
self.inventory = inventory
# Crucial data
self.onset = {'search_type': '', 'contents': []}
self.nucleus = {'search_type': '', 'contents': []}
self.coda = {'search_type': '', 'contents': []}
self.stress = set()
self.tone = set()
self.nonSeg = set()
self.parent = parent
self.show_full_inventory = show_full_inventory
self.side = side
self.middle = middle
layout = QVBoxLayout()
self.specification = QFormLayout()
self.onsetLabel = QLabel()
self.nucleusLabel = QLabel()
self.codaLabel = QLabel()
self.stressLabel = QLabel()
self.toneLabel = QLabel()
self.specification.addRow('Onset:', self.onsetLabel)
self.specification.addRow('Nucleus:', self.nucleusLabel)
self.specification.addRow('Coda:', self.codaLabel)
self.specification.addRow('Stress:', self.stressLabel)
self.specification.addRow('Tone:', self.toneLabel)
if self.middle:
self.specification.addRow('POSITION', QLabel('MIDDLE'))
layout.addLayout(self.specification)
self.mainLabel = QPushButton('Edit')
#self.mainLabel.setStyleSheet("padding: 4px")
layout.addWidget(self.mainLabel)
self.setLayout(layout)
self.menu = QMenu(self)
unspecifiedSyllableAct = QAction('Add an unspecified syllable', self, triggered=self.addUnspecifiedSyllable)
syllableAct = QAction('Construct the syllable', self, triggered=self.constructSyllable)
clearAct = QAction('Clear selection', self, triggered=self.clearSelection)
self.menu.addAction(unspecifiedSyllableAct)
self.menu.addAction(syllableAct)
self.menu.addAction(clearAct)
if not self.middle:
nonSegSelectMenu = self.menu.addMenu('Add non-segment symbol')
for symbol in self.inventory.non_segment_symbols:
nonSegSelectMenu.addAction(QAction(symbol, self, triggered=self.addNonSegSymbol))
deleteAct = QAction('Delete', self, triggered=self.deleteSelection)
self.menu.addAction(deleteAct)
self.mainLabel.setMenu(self.menu)
addNewPosMenu = self.menu.addMenu('Add new environment position')
addToLeftAct = QAction('To the left', self, triggered=self.addToLeft)
addToRightAct = QAction('To the right', self, triggered=self.addToRight)
addNewPosMenu.addAction(addToLeftAct)
addNewPosMenu.addAction(addToRightAct)
self.updateLabel()
if preset:
self.onset = preset.onset
self.nucleus = preset.nucleus
self.coda = preset.coda
self.stress = preset.stress
self.tone = preset.tone
self.nonSeg = preset.nonSeg
self.updateLabel()
def addToLeft(self):
self.parent.insertSegWidget(self, 'l')
def addToRight(self):
self.parent.insertSegWidget(self, 'r')
def addNonSegSymbol(self):
self.nonSeg.add(self.sender().text())
self.onset = {'search_type': '', 'contents': []}
self.nucleus = {'search_type': '', 'contents': []}
self.coda = {'search_type': '', 'contents': []}
self.stress = set()
self.tone = set()
self.updateLabel()
def clearSelection(self):
self.onset = {'search_type': '', 'contents': []}
self.nucleus = {'search_type': '', 'contents': []}
self.coda = {'search_type': '', 'contents': []}
self.stress = set()
self.tone = set()
self.nonSeg = set()
self.mainLabel.setText('Edit')
self.updateLabel()
def deleteSelection(self):
self.segDeleted.emit([self]) # connected to SyllableConstructionWidget.deleteSeg()
def updateLabel(self):
self.generateDisplayText()
if (self.nonSeg == set() and self.onset == {'contents': [], 'search_type': 'Minimally contains'} and
self.nucleus == {'contents': [], 'search_type': 'Minimally contains'} and
self.coda == {'contents': [], 'search_type': 'Minimally contains'} and
self.stress == set(list(self.inventory.stress_types.keys()) + ['None']) and
self.tone == set(list(self.inventory.tone_types.keys()) + ['None'])):
label = '{' + '\u03C3' + '}'
self.mainLabel.setText(label)
def generateColorText(self, slot, neg_color='darkRed', pos_color='darkGreen'):
if len(slot['segments']) == len(self.inventory.segs.keys()) - 1: # exclude '#'
if self.show_full_inventory:
display_text = '{' + ', '.join(slot['segments']) + '}'
else:
display_text = '{*}'
else:
display_list = list()
display_list.extend(slot['segments'])
display_list.extend(slot['features'])
display_text = '{' + ', '.join(display_list) + '}'
if slot['negative']:
display_text = '<font color=\"' + neg_color + '\">' + display_text + '</font>'
else:
display_text = '<font color=\"' + pos_color + '\">' + display_text + '</font>'
return display_text
def generateDisplayText(self):
display_onset = ''
for slot in self.onset['contents']:
display_text = self.generateColorText(slot)
display_onset += display_text
self.onsetLabel.setText(display_onset)
if self.onset['search_type'] == 'Exactly matches':
self.onsetLabel.setStyleSheet('background:transparent')
elif self.onset['search_type'] == 'Minimally contains':
self.onsetLabel.setStyleSheet('background:white')
elif self.onset['search_type'] == 'Starts with':
self.onsetLabel.setStyleSheet('background:lightgreen')
elif self.onset['search_type'] == 'Ends with':
self.onsetLabel.setStyleSheet('background:#FAAFBE')
display_nucleus = ''
for slot in self.nucleus['contents']:
display_text = self.generateColorText(slot)
display_nucleus += display_text
self.nucleusLabel.setText(display_nucleus)
if self.nucleus['search_type'] == 'Exactly matches':
self.nucleusLabel.setStyleSheet('background:transparent')
elif self.nucleus['search_type'] == 'Minimally contains':
self.nucleusLabel.setStyleSheet('background:white')
elif self.nucleus['search_type'] == 'Starts with':
self.nucleusLabel.setStyleSheet('background:lightgreen')
elif self.nucleus['search_type'] == 'Ends with':
self.nucleusLabel.setStyleSheet('background:#FAAFBE')
display_coda = ''
for slot in self.coda['contents']:
display_text = self.generateColorText(slot)
display_coda += display_text
self.codaLabel.setText(display_coda)
if self.coda['search_type'] == 'Exactly matches':
self.codaLabel.setStyleSheet('background:transparent')
elif self.coda['search_type'] == 'Minimally contains':
self.codaLabel.setStyleSheet('background:white')
elif self.coda['search_type'] == 'Starts with':
self.codaLabel.setStyleSheet('background:lightgreen')
elif self.coda['search_type'] == 'Ends with':
self.codaLabel.setStyleSheet('background:#FAAFBE')
display_stress = '{' + ', '.join(self.stress) + '}'
display_tone = '{' + ', '.join(self.tone) + '}'
self.stressLabel.setText(display_stress)
self.toneLabel.setText(display_tone)
if self.nonSeg:
label = '{' + ', '.join(self.nonSeg) + '}'
self.mainLabel.setText(label)
def addUnspecifiedSyllable(self):
self.nonSeg = set()
self.onset = {'contents': [], 'search_type': 'Minimally contains'}
self.nucleus = {'contents': [], 'search_type': 'Minimally contains'}
self.coda = {'contents': [], 'search_type': 'Minimally contains'}
self.stress = set(list(self.inventory.stress_types.keys()) + ['None'])
self.tone = set(list(self.inventory.tone_types.keys()) + ['None'])
label = '{' + '\u03C3' + '}'
self.mainLabel.setText(label)
self.updateLabel()
def constructSyllable(self):
dialog = SyllableConstructDialog(self.inventory, parent=self,
preselected_onset=self.onset,
preselected_nucleus=self.nucleus,
preselected_coda=self.coda,
preselected_stress=self.stress,
preselected_tone=self.tone)
if dialog.exec_(): # Ok pressed
result = dialog.value()
self.onset = result['onset']
#print('Onset =')
#pprint(self.onset)
self.nucleus = result['nucleus']
#print('Nucleus =')
#pprint(self.nucleus)
self.coda = result['coda']
#print('Coda =')
#pprint(self.coda)
self.stress = result['stress']
#print('Stress =')
#pprint(self.stress)
self.tone = result['tone']
#print('Tone =')
#pprint(self.tone)
self.updateLabel()
def extract_unit_info(self, unit):
segs = unit['segments']
if unit['features']:
more_segs = self.inventory.features_to_segments(list(unit['features']))
segs = segs.union(more_segs)
if unit['negative']:
all_segs = set(self.inventory.segs.keys()) - {'#'}
segs = all_segs - segs
return segs
def value(self):
output = {'onset': {'contents': list(), 'search_type': self.onset['search_type']},
'nucleus': {'contents': list(), 'search_type': self.nucleus['search_type']},
'coda': {'contents': list(), 'search_type': self.coda['search_type']}}
for unit in self.onset['contents']:
output['onset']['contents'].append(self.extract_unit_info(unit))
for unit in self.coda['contents']:
output['coda']['contents'].append(self.extract_unit_info(unit))
for unit in self.nucleus['contents']:
output['nucleus']['contents'].append(self.extract_unit_info(unit))
output['stress'] = self.stress
output['tone'] = self.tone
output['nonsegs'] = self.nonSeg
return output
def generateSlotText(self, slot):
if len(slot['segments']) == len(self.inventory.segs.keys()) - 1: # exclude '#'
if self.show_full_inventory:
display_text = '(' + ','.join(slot['segments']) + ')'
else:
display_text = '(*)'
else:
display_list = list()
display_list.extend(slot['segments'])
display_list.extend(slot['features'])
display_text = '(' + ','.join(display_list) + ')'
return display_text
def displayValue(self):
if self.nonSeg:
text = '{' + ', '.join(self.nonSeg) + '}'
return text
elif self.mainLabel.text() == '{\u03C3}':
return '\u03C3'
else:
onset = ''
for slot in self.onset['contents']:
text = self.generateSlotText(slot)
onset += text
nucleus = ''
for slot in self.nucleus['contents']:
text = self.generateSlotText(slot)
nucleus += text
coda = ''
for slot in self.coda['contents']:
text = self.generateSlotText(slot)
coda += text
return '{' + onset + nucleus + coda + '}'
def getData(self):
attrs = ['inventory', 'onset', 'nucleus', 'coda', 'stress', 'tone', 'nonSeg',
'middle', 'show_full_inventory', 'side']
return {attr: getattr(self, attr) for attr in attrs}
def loadData(self, data):
for k, v in data.items(): # see the getData() function above for details
setattr(self, k, v)
class EnvironmentSyllableWidget(QWidget):
envCopied = Signal(list)
def __init__(self, inventory, parent=None, middle=True, copy_data=None, show_full_inventory=False):
QWidget.__init__(self)
self.inventory = inventory
self.parent = parent
self.middle = middle
self.show_full_inventory = show_full_inventory
self.envCopied.connect(self.parent.addCopiedEnvironment)
layout = QHBoxLayout()
self.lhsAddNew = QPushButton('+')
self.lhsAddNew.setFixedWidth(50)
self.lhsAddNew.clicked.connect(self.addLhs)
self.lhsWidget = QWidget()
self.lhsLayout = QHBoxLayout()
self.lhsWidget.setLayout(self.lhsLayout)
self.rhsAddNew = QPushButton('+')
self.rhsAddNew.setFixedWidth(50)
self.rhsAddNew.clicked.connect(self.addRhs)
self.rhsWidget = QWidget()
self.rhsLayout = QHBoxLayout()
self.rhsWidget.setLayout(self.rhsLayout)
self.middleWidget = SyllableWidget(self.inventory,
parent=self,
middle=True,
show_full_inventory=show_full_inventory)
self.removeButton = QPushButton('Remove environment')
self.removeButton.clicked.connect(self.deleteLater)
self.copyButton = QPushButton('Copy environment')
self.copyButton.clicked.connect(self.copyEnvironment)
layout.addWidget(self.lhsAddNew)
layout.addWidget(self.lhsWidget)
layout.addWidget(self.middleWidget)
layout.addWidget(self.rhsWidget)
layout.addWidget(self.rhsAddNew)
layout.addStretch()
optionlayout = QVBoxLayout()
optionlayout.addWidget(self.removeButton)
optionlayout.addWidget(self.copyButton)
layout.addLayout(optionlayout)
self.setLayout(layout)
self.sizeHint()
if copy_data:
self.loadfromCopy(copy_data)
def loadfromCopy(self, copy_data):
self.middleWidget.onset = copy_data.middleWidget.onset
self.middleWidget.nucleus = copy_data.middleWidget.nucleus
self.middleWidget.coda = copy_data.middleWidget.coda
self.middleWidget.stress = copy_data.middleWidget.stress
self.middleWidget.tone = copy_data.middleWidget.tone
self.middleWidget.nonSeg = copy_data.middleWidget.nonSeg
self.middleWidget.mainLabel.setText(copy_data.middleWidget.mainLabel.text())
for ind in range(copy_data.lhsWidget.layout().count()):
copy_wid = copy_data.lhsWidget.layout().itemAt(ind).widget()
wid = SyllableWidget(self.inventory, parent=self, side='l', preset=copy_wid)
self.lhsWidget.layout().insertWidget(ind, wid)
wid.segDeleted.connect(self.deleteSeg)
for ind in range(copy_data.rhsWidget.layout().count()):
copy_wid = copy_data.rhsWidget.layout().itemAt(ind).widget()
wid = SyllableWidget(self.inventory, parent=self, side='r', preset=copy_wid)
self.rhsWidget.layout().insertWidget(ind, wid)
wid.segDeleted.connect(self.deleteSeg)
def copyEnvironment(self):
self.envCopied.emit([self]) # connected to EnvironmentSelectWidget.addCopiedEnvironment()
def insertSegWidget(self, match_widget, add_to_side):
if match_widget.side is None: # middle widget
segWidget = SyllableWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side=add_to_side)
segWidget.segDeleted.connect(self.deleteSeg)
if add_to_side == 'r':
layout = self.rhsWidget.layout()
layout.insertWidget(0, segWidget)
elif add_to_side == 'l':
layout = self.lhsWidget.layout()
layout.insertWidget(layout.count(), segWidget)  # append (QLayout does not support len())
layout.update()
return
segWidget = SyllableWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side=match_widget.side)
segWidget.segDeleted.connect(self.deleteSeg)
if match_widget.side == 'r':
layout = self.rhsWidget.layout()
elif match_widget.side == 'l':
layout = self.lhsWidget.layout()
widgets = list()
for ind in range(layout.count()):
if layout.itemAt(ind).widget() == match_widget:
if add_to_side == 'l':
widgets.append(segWidget)
widgets.append(layout.itemAt(ind).widget())
elif add_to_side == 'r':
widgets.append(layout.itemAt(ind).widget())
widgets.append(segWidget)
else:
widgets.append(layout.itemAt(ind).widget())
for i, widget in enumerate(widgets):
layout.insertWidget(i, widget)
layout.update()
@Slot(list) # connected to SyllableWidget.segDeleted()
def deleteSeg(self, arg):
segWidget = arg[0]
if segWidget.side == 'r':
layout = self.rhsWidget.layout()
elif segWidget.side == 'l':
layout = self.lhsWidget.layout()
for ind in reversed(range(layout.count())):
if layout.itemAt(ind).widget() == segWidget:
layout.takeAt(ind)  # QLayout provides takeAt(), not removeAt()
break
segWidget.deleteLater()
def addLhs(self):
segWidget = SyllableWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side='l')
self.lhsWidget.layout().insertWidget(0, segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
return segWidget
def addRhs(self):
segWidget = SyllableWidget(self.inventory,
parent=self,
show_full_inventory=self.show_full_inventory,
side='r')
self.rhsWidget.layout().addWidget(segWidget)
segWidget.segDeleted.connect(self.deleteSeg)
return segWidget
def value(self):
lhs = []
for ind in range(self.lhsWidget.layout().count()):
wid = self.lhsWidget.layout().itemAt(ind).widget()
lhs.append(wid.value())
middle = [self.middleWidget.value()]
rhs = []
for ind in range(self.rhsWidget.layout().count()):
wid = self.rhsWidget.layout().itemAt(ind).widget()
rhs.append(wid.value())
return SyllableEnvironmentFilter(self.inventory, middle, lhs=lhs, rhs=rhs)
def displayValue(self):
lhs = list()
rhs = list()
for ind in range(self.lhsWidget.layout().count()):
wid = self.lhsWidget.layout().itemAt(ind).widget()
lhs.append(wid.displayValue())
lhs = ''.join(lhs) if lhs else ''
for ind in range(self.rhsWidget.layout().count()):
wid = self.rhsWidget.layout().itemAt(ind).widget()
rhs.append(wid.displayValue())
rhs = ''.join(rhs) if rhs else ''
return '{}_{}'.format(lhs, rhs)
| dc8ec2ecfa59d7fab6c305f95e2695f2178d85c8 | ec1c268f2b24ab1a636be3db21651748ccade65d | /QuickWall/__init__.py | 7f1ae9b9a42baed8f7e1a3da52bf00eb2f8af301 | ["MIT"] | permissive | deepjyoti30/QuickWall | f44a2d023d1b0f6a9ca9b7476c6415f1621d32ca | 3a51b33b29fce615f29df672fd68e9a4d687b3e9 | refs/heads/master | 2022-02-24T20:10:02.615520 | 2022-02-16T18:09:04 | 2022-02-16T18:09:04 | 201,968,469 | 210 | 12 | MIT | 2019-12-31T09:19:59 | 2019-08-12T16:30:18 | Python | UTF-8 | Python | false | false | 284 | py | __init__.py |
name = "QuickWall"
__all__ = ['download',
'utility',
'logger',
'nitrogen',
'feh',
'kde',
'setter',
'blacklist',
'SetPaper',
'search',
'wall',
'wal',
]
| 02329a124245531b26030ef4d48a5c92ca10f1f4 | 83b8b30ebb633eecd29ca0a7a20cc43a293c9333 | /tests/cmdline/repl_words_move.py | e1eb5295350c180e13d5bf861ac7a3d9c58be43e | ["MIT", "GPL-1.0-or-later"] | permissive | adafruit/circuitpython | 430ec895149d1eb814b505db39b4977a35ee88a7 | 506dca71b0cbb7af749bb51f86b01021db5483b3 | refs/heads/main | 2023-08-21T16:30:46.781068 | 2023-08-20T00:39:44 | 2023-08-20T00:39:44 | 66,166,069 | 3,806 | 1,560 | MIT | 2023-09-14T19:23:51 | 2016-08-20T20:10:40 | C | UTF-8 | Python | false | false | 709 | py | repl_words_move.py |
# word movement
# backward-word, start in word
234b1
# backward-word, don't start in word
234 b1
# backward-word on start of line. if cursor is moved, this will result in a SyntaxError
1 2 + 3b+
# forward-word, start in word
1+2 12+f+3
# forward-word, don't start in word
1+ 12 3f+
# forward-word on eol. if cursor is moved, this will result in a SyntaxError
1 + 2 3f+
# kill word
# backward-kill-word, start in word
100 + 45623
# backward-kill-word, don't start in word
100 + 456231
# forward-kill-word, start in word
100 + 256d3
# forward-kill-word, don't start in word
1 + 256d2
# extra move/kill shortcuts
# ctrl-left
234[1;5D1
# ctrl-right
12[1;5C3
# ctrl-w
1231
| f22f50273601130e284056268777f660ed9fc25d | 2337351b228818e41be3002bd38f68f77c2aa074 | /sa/interfaces/inotifysae.py | dfe566f7aec02931930cfd78d436ff6a0372d169 | ["BSD-3-Clause"] | permissive | nocproject/noc | 57d40c680a1499374463e472434f9595ed6d1374 | 6e6d71574e9b9d822bec572cc629a0ea73604a59 | refs/heads/master | 2023-08-31T01:11:33.544573 | 2023-08-30T17:31:11 | 2023-08-30T17:31:11 | 107,815,776 | 105 | 33 | BSD-3-Clause | 2023-07-31T07:57:45 | 2017-10-21T21:04:33 | Python | UTF-8 | Python | false | false | 522 | py | inotifysae.py |
|
# ---------------------------------------------------------------------
# INotifySAE interface
# ---------------------------------------------------------------------
# Copyright (C) 2007-2010 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC Modules
from noc.core.interface.base import BaseInterface
from .base import StringParameter, BooleanParameter
class INotifySAE(BaseInterface):
event = StringParameter()
returns = BooleanParameter()
| 345b4d1f0f460f6d0b355f6c2fd15ced8818aabc | 2d05050d0ada29f7680b4df20c10bb85b0530e45 | /python/tvm/auto_scheduler/search_task.py | 767baf916d58f0aa0c87537fd4fcf617275737d6 | ["Apache-2.0", "BSD-3-Clause", "MIT", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "Zlib", "LLVM-exception", "BSD-2-Clause"] | permissive | apache/tvm | 87cb617f9a131fa44e1693303aaddf70e7a4c403 | d75083cd97ede706338ab413dbc964009456d01b | refs/heads/main | 2023-09-04T11:24:26.263032 | 2023-09-04T07:26:00 | 2023-09-04T07:26:00 | 70,746,484 | 4,575 | 1,903 | Apache-2.0 | 2023-09-14T19:06:33 | 2016-10-12T22:20:28 | Python | UTF-8 | Python | false | false | 23,857 | py | search_task.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" The definiton of SearchTask """
import json
import os
import logging
import numpy as np
import tvm._ffi
from tvm.runtime import Object, ndarray
from tvm.driver.build_module import build
from tvm.target import Target
from .measure import LocalBuilder, LocalRunner
from .measure_record import load_best_record
from .workload_registry import make_workload_key
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .cost_model import XGBModel
from .search_policy import SketchPolicy
from .workload_registry import WORKLOAD_FUNC_REGISTRY, register_workload_tensors
from . import _ffi_api
# pylint: disable=invalid-name
logger = logging.getLogger("auto_scheduler")
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
"""The parameters of target hardware used to guide the search policy.
When a parameter isn't provided, it will instead use the
current machine's default value if target is specified.
TODO(jcf94): This is expected to be merged with the new Target specification:
https://discuss.tvm.apache.org/t/rfc-tvm-target-specification/6844
Parameters
----------
num_cores : int, optional
The number of device cores.
vector_unit_bytes : int, optional
The width of vector units in bytes.
cache_line_bytes : int, optional
The size of cache line in bytes.
max_shared_memory_per_block : int, optional
The max shared memory per block in bytes.
max_local_memory_per_block : int, optional
The max local memory per block in bytes.
max_threads_per_block : int, optional
The max number of threads per block.
max_vthread_extent : int, optional
The max vthread extent.
warp_size : int, optional
The thread numbers of a warp.
target : str or Target, optional
The compilation target. Used to determine default values if provided.
target_host : str or Target, optional
The compilation target host. Used to determine default values if provided.
"""
def __init__(
self,
num_cores=None,
vector_unit_bytes=None,
cache_line_bytes=None,
max_shared_memory_per_block=None,
max_local_memory_per_block=None,
max_threads_per_block=None,
max_vthread_extent=None,
warp_size=None,
target=None,
target_host=None,
):
# If target is provided, get the default parameters for this machine.
if target is not None:
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
default_params = _ffi_api.GetDefaultHardwareParams(target, target_host)
if num_cores is None:
num_cores = default_params.num_cores
if vector_unit_bytes is None:
vector_unit_bytes = default_params.vector_unit_bytes
if cache_line_bytes is None:
cache_line_bytes = default_params.cache_line_bytes
if max_shared_memory_per_block is None:
max_shared_memory_per_block = default_params.max_shared_memory_per_block
if max_local_memory_per_block is None:
max_local_memory_per_block = default_params.max_local_memory_per_block
if max_threads_per_block is None:
max_threads_per_block = default_params.max_threads_per_block
if max_vthread_extent is None:
max_vthread_extent = default_params.max_vthread_extent
if warp_size is None:
warp_size = default_params.warp_size
self.__init_handle_by_constructor__(
_ffi_api.HardwareParams,
num_cores,
vector_unit_bytes,
cache_line_bytes,
max_shared_memory_per_block,
max_local_memory_per_block,
max_threads_per_block,
max_vthread_extent,
warp_size,
)
def __str__(self):
"""Pretty printing for hardware parameter configuration."""
format_str = (
"HardwareParams:\n"
f" num_cores: {self.num_cores}\n"
f" vector_unit_bytes: {self.vector_unit_bytes}\n"
f" cache_line_bytes: {self.cache_line_bytes}\n"
f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n"
f" max_local_memory_per_block: {self.max_local_memory_per_block}\n"
f" max_threads_per_block: {self.max_threads_per_block}\n"
f" max_vthread_extent: {self.max_vthread_extent}\n"
f" warp_size: {self.warp_size}\n"
)
return format_str
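# Hedged example of constructing HardwareParams (values illustrative; when a
# target such as "llvm" is given, any unset field falls back to the defaults
# queried in __init__ above):
#   hw = HardwareParams(num_cores=8, vector_unit_bytes=32,
#                       cache_line_bytes=64, target="llvm")
#   print(hw)  # uses the __str__ defined above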
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
"""This controls the options of performance tuning.
Parameters
----------
num_measure_trials: int = 0
The number of measurement trials.
The search policy measures `num_measure_trials` schedules in total and returns the best one
among them.
With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
measurement. This can be used to get a runnable schedule quickly without auto-tuning.
early_stopping: Optional[int]
Stop the tuning early if getting no improvement after n measurements.
num_measures_per_round: int = 64
The number of schedules to be measured at each search round.
The whole schedule search process will try a total number of `num_measure_trials` in several
rounds.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
builder: Union[ProgramBuilder, str] = 'local'
ProgramBuilder which builds the program.
runner: Union[ProgramRunner, str] = 'local'
ProgramRunner which runs the program and measures time costs.
measure_callbacks: Optional[List[MeasureCallback]]
Callback functions called after each measurement.
Candidates:
- auto_scheduler.RecordToFile
"""
def __init__(
self,
num_measure_trials=0,
early_stopping=None,
num_measures_per_round=64,
verbose=1,
builder="local",
runner="local",
measure_callbacks=None,
):
if isinstance(builder, str):
if builder == "local":
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
raise ValueError(
"Invalid builder: "
+ builder
+ " . TuningOptions expects a ProgramBuilder or string."
)
if isinstance(runner, str):
if runner == "local":
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
raise ValueError(
"Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string."
)
self.__init_handle_by_constructor__(
_ffi_api.TuningOptions,
num_measure_trials,
early_stopping or -1,
num_measures_per_round,
verbose,
builder,
runner,
measure_callbacks,
)
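# Hedged example: a small tuning configuration that records measurements to a
# log file ("matmul.json" is illustrative; RecordToFile is the callback named
# in the docstring above):
#   from tvm.auto_scheduler import RecordToFile
#   tune_option = TuningOptions(num_measure_trials=64,
#                               measure_callbacks=[RecordToFile("matmul.json")])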
# The map stores special registered buffer for measurement.
# This can be used for sparse workloads when we cannot use random tensors for measurement.
# {
# "workload_key_0": {
# "task_input_0": Tensor(...),
# "task_input_1": Tensor(...)
# },
# "workload_key_1": {
# "task_input_2": Tensor(...),
# "task_input_3": Tensor(...)
# },
# ...
# }
TASK_INPUT_BUFFER_TABLE = {}
def _save_buffer_to_file(buffer_name, buffer_data):
"""Save the current Tensor buffer to a numpy file.
File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy
"""
np_data = buffer_data.numpy()
buffer_name += "."
for i in np_data.shape:
buffer_name += f"{i}_"
buffer_name += f"{np_data.dtype}.npy"
np_data.tofile(buffer_name, " ")
def _try_load_buffer_from_file(buffer_name):
"""Try to load buffer from a numpy file, if not found, return None.
    File name has the same format as `_save_buffer_to_file`.
"""
filelist = os.listdir()
for file in filelist:
if file.startswith(buffer_name + "."):
meta_info = file.split(".")[-2].split("_")
shape = [int(i) for i in meta_info[:-1]]
dtype = meta_info[-1]
buffer_data = np.fromfile(file, dtype=dtype, sep=" ")
buffer_data = buffer_data.reshape(shape)
return ndarray.array(buffer_data)
return None
def register_task_input_buffer(
workload_key, input_name, input_data, overwrite=False, save_to_file=False
):
"""Register special buffer for measurement.
Parameters
----------
workload_key : str
The workload key of the SearchTask.
input_name : str
The name of input buffer.
input_data : tvm.nd.NDArray
The input Tensor data.
overwrite : bool = False
        Whether to overwrite the data if the name has already been registered.
save_to_file : bool = False
Whether to save the data to a local file as well. This can be reused to resume the last
tuning process.
Returns
-------
tvm.nd.NDArray
        The actual registered Tensor data of this input_name. With `overwrite` set to False, this
        returns the original data if the name has already been registered.
"""
global TASK_INPUT_BUFFER_TABLE
if workload_key not in TASK_INPUT_BUFFER_TABLE:
TASK_INPUT_BUFFER_TABLE[workload_key] = {}
input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
if not overwrite:
if input_name not in input_table.keys():
# Try to load buffer data from local file
tensor_from_file = _try_load_buffer_from_file(input_name)
if tensor_from_file:
input_table[input_name] = tensor_from_file
elif input_name in input_table.keys():
        raise RuntimeError(
            f"Tensor {input_name} already exists in TASK_INPUT_BUFFER_TABLE; "
            "set overwrite to True to replace it"
        )
input_table[input_name] = input_data
if save_to_file:
_save_buffer_to_file(input_name, input_data)
return input_data
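# Illustrative registration (a sketch; the workload key, buffer name, and
# random tensor are assumptions, reusing the module's `np` and `ndarray`):
#   data = ndarray.array(np.random.rand(128, 128).astype("float32"))
#   register_task_input_buffer("workload_key_0", "task_input_0", data)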
def get_task_input_buffer(workload_key, input_name):
"""Get special buffer for measurement.
The buffers are registered by `register_task_input_buffer`.
Parameters
----------
workload_key : str
The workload key of the SearchTask.
input_name : str
The name of input buffer.
Returns
-------
tvm.nd.NDArray
The registered input buffer.
"""
global TASK_INPUT_BUFFER_TABLE
if workload_key not in TASK_INPUT_BUFFER_TABLE:
TASK_INPUT_BUFFER_TABLE[workload_key] = {}
input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
if input_name not in input_table:
# Try to load buffer data from local file
tensor_from_file = _try_load_buffer_from_file(input_name)
if tensor_from_file:
input_table[input_name] = tensor_from_file
    # Then check the default table; input names extracted from a Relay model are
    # stored there because the workload_key is not available at extraction time
if input_name not in input_table:
input_table = TASK_INPUT_BUFFER_TABLE["default"]
if input_name in input_table:
return input_table[input_name]
raise ValueError(
f"{input_name} not found in TASK_INPUT_BUFFER_TABLE, "
f"should provide with `SearchTask(..., task_inputs={{...}})`"
)
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
"""The computation information and hardware parameters for a schedule search task.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or the function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
compute_dag : ComputeDAG
The ComputeDAG for the corresponding compute declaration.
workload_key : str
The workload key for the corresponding compute declaration.
target : any target-like object, see Target.canon_target
The target device of this search task.
target_host : None or any target-like object, see Target.canon_target
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
layout_rewrite_option : Optional[LayoutRewriteOption]
The layout rewrite option used for measuring programs. If None, the default value will be
set depending on the specified target.
Auto_scheduler will find a better schedule for the specified layout rewrite option.
The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone
op, and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a
network.
task_inputs : Union[Dict[str, tvm.nd.NDArray], List[str]]
A dict maps the input names to input tensors or a list of input names.
        Some special Tensors are used as inputs during program measurement. Usually we do not
        need to care about them, but for special workloads such as sparse computation the
        sparse tensor inputs are meaningful and we cannot use random inputs directly.
task_inputs_overwrite : bool = False
        Whether to overwrite the data if the name is already in the global table.
task_inputs_save_to_file : bool = False
Whether to save the data to a local file as well. This can be reused to resume the last
tuning process.
desc: str = ""
The description string of this task.
Examples
--------
.. code-block:: python
# We support two ways to create a search task
# Way 1: create a task by a workload generation function.
# The `workload_func` is a function decorated by @auto_scheduler.register_workload
task = SearchTask(func=workload_func, args=args, target=target)
# Way 2: create a task by a workload_key.
# The `workload_key` is a string, which can be either a hash key or a json-serialized
# tuple(func, args).
task = SearchTask(workload_key=workload_key, target=target)
"""
def __init__(
self,
func=None,
args=None,
compute_dag=None,
workload_key=None,
target=None,
target_host=None,
hardware_params=None,
layout_rewrite_option=None,
task_inputs=None,
task_inputs_overwrite=False,
task_inputs_save_to_file=False,
desc="",
):
assert (
func is not None or workload_key is not None
), "Either a workload generation function or a workload key should be provided"
if func is not None:
workload_key = make_workload_key(func, args)
if compute_dag is None:
compute_dag = ComputeDAG(workload_key)
assert target is not None, "Must specify a target."
target, target_host = Target.canon_target_and_host(target, target_host)
if layout_rewrite_option is None:
layout_rewrite_option = LayoutRewriteOption.get_target_default(target)
task_input_names = []
if isinstance(task_inputs, list):
task_input_names = task_inputs
elif isinstance(task_inputs, dict):
for input_name in task_inputs:
register_task_input_buffer(
workload_key,
input_name,
task_inputs[input_name],
task_inputs_overwrite,
task_inputs_save_to_file,
)
task_input_names.append(input_name)
elif task_inputs is not None:
raise ValueError("task_inputs should be a dict or a list.")
self.__init_handle_by_constructor__(
_ffi_api.SearchTask,
compute_dag,
workload_key,
target,
target_host,
hardware_params,
layout_rewrite_option,
task_input_names,
desc,
)
def tune(self, tuning_options, search_policy=None, adaptive_training=False):
"""Run auto scheduling search for a task
Parameters
----------
tuning_options : TuningOptions
Tuning and measurement options.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
"""
if search_policy is None:
cost_model = XGBModel(adaptive_training=adaptive_training)
search_policy = SketchPolicy(self, cost_model)
_ffi_api.AutoSchedule(search_policy, tuning_options)
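    # Illustrative tuning flow (a sketch, not prescribed by this module: `task`
    # is an assumed existing SearchTask, "matmul.json" an assumed log file, and
    # RecordToFile the measure callback named in the TuningOptions docstring):
    #   tune_option = TuningOptions(
    #       num_measure_trials=64,
    #       measure_callbacks=[RecordToFile("matmul.json")],
    #   )
    #   task.tune(tune_option)
    #   sch, args = task.apply_best("matmul.json")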
def apply_best(self, log_file, include_compatible=False, layout_rewrite_option=None):
"""Apply the history best from a log file and return the schedule.
Parameters
----------
log_file : str
The name of the log file.
include_compatible: bool
When set to True, all compatible records in the log file will be considered.
layout_rewrite_option : Optional[LayoutRewriteOption]
The layout rewrite option.
Returns
-------
        A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
inp, _ = load_best_record(
log_file, self.workload_key, include_compatible=include_compatible
)
if inp is None:
raise RuntimeError(
f"Cannot find any valid schedule for {self.workload_key} in file {log_file}"
)
sch, args = self.compute_dag.apply_steps_from_state(
inp.state, layout_rewrite_option or self.layout_rewrite_option
)
return sch, args
def print_best(self, log_file, print_mode="schedule"):
"""Print the best schedule as python schedule API code or CUDA source code.
Parameters
----------
log_file : str
The name of the log file
print_mode: str
if "schedule", print the best schedule as python schedule API code.
if "cuda", print the best schedule as CUDA source code.
Returns
-------
code: str
The best schedule code in python API or CUDA source code
"""
inp, _ = load_best_record(log_file, self.workload_key)
if inp is None:
raise RuntimeError(
f"Cannot find any valid schedule for {self.workload_key} in file {log_file}"
)
if print_mode == "schedule":
return self.compute_dag.print_python_code_from_state(inp.state)
if print_mode == "cuda":
assert self.target.kind.name == "cuda"
sch, args = self.compute_dag.apply_steps_from_state(inp.state)
func = build(sch, args, "cuda")
return func.imported_modules[0].get_source()
raise ValueError(f"Invalid print_mode: {print_mode}")
def __getstate__(self):
self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host)
return {
"compute_dag": self.compute_dag,
"workload_key": self.workload_key,
"target": self.target,
"target_host": self.target_host,
"hardware_params": self.hardware_params,
"layout_rewrite_option": self.layout_rewrite_option,
"task_input_names": self.task_input_names,
"desc": self.desc,
}
def __setstate__(self, state):
# Register the workload if needed
try:
workload = json.loads(state["workload_key"])
except Exception: # pylint: disable=broad-except
raise RuntimeError(f"Invalid workload key {state['workload_key']}")
# workload[0] is either the compute function name or the ComputeDAG hash.
# The compute functions are already registered when importing TVM, so here
# we only register the ComputeDAG workloads. If the same workload has
# already been registered, the later registration overrides the previous one.
if workload[0] not in WORKLOAD_FUNC_REGISTRY:
register_workload_tensors(state["workload_key"], state["compute_dag"].tensors)
state["target"], state["target_host"] = Target.canon_target_and_host(
state["target"], state["target_host"]
)
self.__init_handle_by_constructor__(
_ffi_api.SearchTask,
state["compute_dag"],
state["workload_key"],
state["target"],
state["target"].host,
state["hardware_params"],
state["layout_rewrite_option"],
state["task_input_names"],
state["desc"],
)
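# Because of __getstate__/__setstate__ above, SearchTask instances can be
# pickled, e.g. to hand tasks to worker processes (illustrative sketch,
# assuming `task` is an existing SearchTask):
#   import pickle
#   restored = pickle.loads(pickle.dumps(task))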
def create_task(func, args, target, target_host=None, hardware_params=None):
"""THIS API IS DEPRECATED.
Create a search task.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or the function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
target : Union[tvm.target.Target, str]
The target device of this search task.
target_host : Optional[Union[tvm.target.Target, str]]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
Returns
-------
SearchTask: the created task
"""
    raise ValueError(
        'The API "auto_scheduler.create_task" is deprecated. '
        "See https://github.com/apache/tvm/pull/7028 for the upgrade guide."
    )
def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()):
"""THIS API IS DEPRECATED.
Run auto scheduling search for a task.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
tuning_options : Optional[TuningOptions]
Tuning and measurement options.
Returns
-------
    A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
    raise ValueError(
        'The API "auto_scheduler.auto_schedule" is deprecated. '
        "See https://github.com/apache/tvm/pull/7028 for the upgrade guide."
    )
|
bf435e5381b0bec0a03d81e530a64ab0eec5b26c
|
0f59e486ea9d7c96b8c3f7f92bf063fc8389f1e8
|
/envi/expression.py
|
c95a944b639a9f7036408b9e0580256891481431
|
[
"Apache-2.0"
] |
permissive
|
vivisect/vivisect
|
ac259918b6281d9431c32a0b2307c61f9cab0dec
|
b07e161cc28b19fdda0d047eefafed22c5b00f15
|
refs/heads/master
| 2023-08-25T09:02:00.526532
| 2023-07-26T03:07:07
| 2023-07-26T03:07:07
| 26,651,759
| 833
| 181
|
Apache-2.0
| 2023-09-07T03:43:53
| 2014-11-14T18:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,068
|
py
|
expression.py
|
"""
Unified expression helpers.
"""
class ExpressionFail(Exception):
def __init__(self, pycode, exception):
Exception.__init__(self)
self.pycode = pycode
self.exception = exception
def __repr__(self):
return "ExpressionFail: %r is not a valid expression in this context (%r)" % \
(self.pycode, self.exception)
def __str__(self):
return self.__repr__()
def evaluate(pycode, locvars):
try:
val = eval(pycode, {}, locvars)
except Exception:
try:
# check through the keys for anything we might want to replace
keys = list(locvars.keys())
# sort the keys in reverse order so that longer matching strings take priority
keys.sort(reverse=True)
# replace the substrings with the string versions of the lookup value
for key in keys:
if key in pycode:
pval = locvars[key]
pycode = pycode.replace(key, str(pval))
val = eval(pycode, {}, locvars)
except Exception as e:
raise ExpressionFail(pycode, e)
return val
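# Illustrative usage (a sketch; the register name and value are assumptions):
#   evaluate("eax + 4", {"eax": 0x1000})  # -> 0x1004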
class ExpressionLocals(dict):
"""
An object to act as the locals dictionary for the evaluation
of envi expressions. You may pass in an envi.symstore.resolver.SymbolResolver
object to automagically use symbols in your expressions.
"""
def __init__(self, symobj=None):
dict.__init__(self)
self.symobj = symobj
def __getitem__(self, name):
        if self.symobj is not None:
            ret = self.symobj.getSymByName(name)
            if ret is not None:
                # symtype 3: hand back the symbol object itself; any other
                # type resolves to its numeric value.
                if ret.symtype == 3:
                    return ret
                else:
                    return ret.value
return dict.__getitem__(self, name)
get = __getitem__
def __iter__(self):
if self.symobj is not None:
for va, name in self.symobj.getNames():
yield name
yield from dict.__iter__(self)
def keys(self):
return [key for key in self]
    def __contains__(self, key):
        # __getitem__ raises KeyError for unknown names; report those as
        # absent instead of letting the exception escape from `in` checks.
        try:
            return self.__getitem__(key) is not None
        except KeyError:
            return False
class MemoryExpressionLocals(ExpressionLocals):
def __init__(self, memobj, symobj=None):
ExpressionLocals.__init__(self, symobj=symobj)
self.memobj = memobj
self.update({
'mapbase': self.mapbase,
'maplen': self.maplen,
'ispoi': self.ispoi,
'mem': self.mem,
'poi': self.poi,
'sym': self.sym,
})
def sym(self, symstr):
'''
An easy to use utility for symbols which have un-pythonic names.
Example x = sym('kernel32.??2@$$FYAPAXI@Z')
'''
return int(evaluate(symstr, self))
def mapbase(self, address):
"""
The expression mapbase(address) returns the base address of the
memory mapped area containing "address"
"""
map = self.memobj.getMemoryMap(address)
if not map:
raise Exception("ERROR - un-mapped address in mapbase()")
return map[0]
def maplen(self, address):
"""
The expression maplen(address) returns the length of the
memory mapped area containing "address".
"""
map = self.memobj.getMemoryMap(address)
if not map:
raise Exception("ERROR - un-mapped address in maplen()")
return map[1]
def ispoi(self, addr):
"""
The expression ispoi(value) returns True if the specified value
is a valid pointer. Otherwise, False.
"""
return self.memobj.isValidPointer(addr)
def mem(self, addr, size):
"""
Read and return memory.
Example: mem(ecx, 20)
"""
return self.memobj.readMemory(addr, size)
def poi(self, address):
"""
When expression contains "poi(addr)" this will return
the address pointed to by addr.
"""
return self.memobj.readMemoryPtr(address)
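# Illustrative sketch (assumes `memobj` and `symobj` are objects implementing
# the vivisect memory and symbol-resolver interfaces; neither is built here):
#   locs = MemoryExpressionLocals(memobj, symobj=symobj)
#   evaluate("poi(esp) + maplen(esp)", locs)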
|
7161eb0d57293f31f0d479425eaf4ebf74a100d6
|
69e215d4d4f91501558bfa3f529f9569155c1da4
|
/installer/resources/src/find_existing_resources.py
|
dc1dc97d6b3b03c96bde6599870e25235c4daf19
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] |
permissive
|
awslabs/scale-out-computing-on-aws
|
a9ca83602a684c9067d48c19656b294bc12ac5fb
|
e1617b14bb1b4a29a5bc6e02fa76c1312a333389
|
refs/heads/main
| 2023-08-08T23:08:33.652011
| 2023-07-28T09:23:56
| 2023-07-28T09:23:56
| 212,436,034
| 110
| 66
|
Apache-2.0
| 2023-08-24T20:20:57
| 2019-10-02T20:34:06
|
Python
|
UTF-8
|
Python
| false
| false
| 38,319
|
py
|
find_existing_resources.py
|
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import sys
import os
installer_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split("/")[:-3])
sys.path.append(installer_path)
from installer.resources.src.prompt import get_input
try:
import boto3
from colored import fg, bg, attr
import ipaddress
except ImportError:
    print(
        "Missing required packages. Run 'pip install boto3 colored ipaddress' and try again"
    )
sys.exit(1)
class FindExistingResource:
def __init__(self, region, client_ip):
self.region = region
self.client_ip = client_ip
session = boto3.Session(region_name=self.region)
self.ec2 = session.client("ec2")
self.efs = session.client("efs")
self.fsx = session.client("fsx")
self.ds = session.client("ds")
self.es = session.client("es")
self.iam = session.client("iam")
self.install_parameters = {}
def find_vpc(self):
try:
print(
f"\n====== What {fg('misty_rose_3')}VPC{attr('reset')} in {self.region} do you want to use? ======\n"
)
vpcs_by_name = {}
vpc_paginator = self.ec2.get_paginator("describe_vpcs")
vpc_iterator = vpc_paginator.paginate(
Filters=[
{
"Name": "state",
"Values": ["available"],
},
]
)
for page in vpc_iterator:
for vpc in page["Vpcs"]:
resource_name = False
if "Tags" in vpc.keys():
for tag in vpc["Tags"]:
if tag["Key"] == "Name":
resource_name = tag["Value"]
# WARNING - This will skip unnamed VPCs
if not resource_name:
continue
vpcs_by_name[resource_name] = vpc
vpcs = {}
count = 1
for resource_name in sorted(vpcs_by_name):
vpc = vpcs_by_name[resource_name]
vpcs[count] = {
"id": vpc["VpcId"],
"description": f"{resource_name if resource_name else ''} {vpc['VpcId']} {vpc['CidrBlock']}",
"cidr": vpc["CidrBlock"],
}
count += 1
            for key, value in vpcs.items():
                print(" {:2} > {}".format(key, value["description"]))
allowed_choices = list(vpcs.keys())
choice = get_input(
f"Choose the VPC you want to use?", None, allowed_choices, int
)
return {"success": True, "message": vpcs[choice]}
except Exception as err:
return {"success": False, "message": str(err)}
def find_elasticsearch(self, vpc_id):
try:
print(
f"\n====== What {fg('misty_rose_3')}OpenSearch / ElasticSearch cluster{attr('reset')} do you want to use? [region: {self.region}, vpc: {vpc_id}] ======\n"
)
es = {}
count = 1
# note: list_domain_names() does not seem to support pagination
for es_cluster in self.es.list_domain_names()["DomainNames"]:
es[count] = {
"name": es_cluster["DomainName"],
"engine": es_cluster.get("EngineType", "unknown-engine"),
}
count += 1
            for key, value in es.items():
                print(" {} > {} ({})".format(key, value["name"], value["engine"]))
allowed_choices = list(es.keys())
choice = get_input(
f"Choose the OpenSearch/ElasticSearch Cluster you want to use?",
None,
allowed_choices,
int,
)
# note: describe_elasticsearch_domain() does not seem to support pagination
domain_info = self.es.describe_elasticsearch_domain(
DomainName=es[choice]["name"]
)
if domain_info["DomainStatus"]["VPCOptions"]["VPCId"] == vpc_id:
for scope, endpoint in domain_info["DomainStatus"]["Endpoints"].items():
es[choice]["endpoint"] = endpoint
return {"success": True, "message": es[choice]}
except Exception as err:
return {"success": False, "message": str(err)}
def find_directory_services(self, vpc_id):
try:
print(
f"\n====== What {fg('misty_rose_3')}Directory Services (Microsoft AD){attr('reset')} do you want to use? [region: {self.region}, vpc: {vpc_id}] ======\n"
)
ds = {}
count = 1
ds_paginator = self.ds.get_paginator("describe_directories")
ds_iterator = ds_paginator.paginate()
for page in ds_iterator:
for directory in page["DirectoryDescriptions"]:
# skip directories in flux
if directory.get("Stage", "unknown-stage").upper() not in {
"ACTIVE"
}:
continue
if directory["VpcSettings"]["VpcId"] == vpc_id:
ds[count] = {
"id": directory["DirectoryId"],
"name": directory["Name"],
"netbios": directory["ShortName"],
"dns": directory["DnsIpAddrs"],
"description": f"{directory['Name']} (Domain: {directory['ShortName']}, Id: {directory['DirectoryId']})",
}
count += 1
            for key, value in ds.items():
                print(" {:2} > {}".format(key, value["description"]))
allowed_choices = list(ds.keys())
choice = get_input(
f"Choose the directory you want to use?", None, allowed_choices, int
)
return {"success": True, "message": ds[choice]}
except Exception as err:
return {"success": False, "message": str(err)}
def get_subnets(self, vpc_id, environment, selected_subnets=None):
if selected_subnets is None:
selected_subnets = []
try:
if environment == "private":
print(
f"\n====== Select {fg('misty_rose_3')}3 subnets to use for your compute nodes (private subnets preferably) {attr('reset')} ======\n"
)
else:
print(
f"\n====== Select {fg('misty_rose_3')}3 subnets to use for the main Scheduler and Load Balancer (public subnets preferably) {attr('reset')} ======\n"
)
subnets_by_name = {}
subnet_paginator = self.ec2.get_paginator("describe_subnets")
subnet_iterator = subnet_paginator.paginate(
Filters=[
{
"Name": "vpc-id",
"Values": [vpc_id],
},
{"Name": "ipv6-native", "Values": ["false"]},
{
"Name": "state",
"Values": ["available"],
},
]
)
for page in subnet_iterator:
for subnet in page["Subnets"]:
resource_name = False
if "Tags" in subnet.keys():
for tag in subnet["Tags"]:
if tag["Key"] == "Name":
resource_name = tag["Value"]
# WARNING - This will skip unnamed subnets
if not resource_name:
continue
subnets_by_name[resource_name] = subnet
subnets = {}
count = 1
for resource_name in sorted(subnets_by_name):
subnet = subnets_by_name[resource_name]
if (
f"{subnet['SubnetId']},{subnet['AvailabilityZone']}"
not in selected_subnets
):
                    outpost_arn = subnet.get("OutpostArn", False)
                    is_outpost = bool(outpost_arn)
                    outpost_str = "Outpost" if is_outpost else ""
subnet_description = f"{resource_name if resource_name else ''} {subnet['CidrBlock']}, AZ: {subnet['AvailabilityZone']}/{subnet['AvailabilityZoneId']} {outpost_str}"
subnets[count] = {
"id": subnet["SubnetId"],
"availability_zone": subnet["AvailabilityZone"],
"availability_zone_id": subnet["AvailabilityZoneId"],
"is_outpost": is_outpost,
"description": subnet_description,
}
count += 1
            for key, value in subnets.items():
                print(" {:2} > {}".format(key, value["description"]))
selected_subnets_count = get_input(
f"How many of these subnets do you want to use?",
None,
list(range(1, count)),
int,
)
while selected_subnets_count < 2:
print(
f"{fg('red')} You must use at least 2 subnets for high availability {attr('reset')}"
)
selected_subnets_count = get_input(
f"How many of these subnets do you want to use?",
None,
list(range(1, count)),
int,
)
selected_subnets = []
while len(selected_subnets) != selected_subnets_count:
allowed_choices = list(subnets.keys())
if len(allowed_choices) == 0:
return {"success": False, "message": "Not enough subnets available"}
choice = get_input(
f"Choose your subnet #{len(selected_subnets) + 1} ?",
None,
allowed_choices,
int,
)
selected_subnets.append(
f"{subnets[choice]['id']},{subnets[choice]['availability_zone']}"
)
del subnets[choice]
return {"success": True, "message": selected_subnets}
except Exception as err:
return {"success": False, "message": str(err)}
def get_fs(self, environment, vpc_id, selected_fs=None):
if selected_fs is None:
selected_fs = []
try:
print(
f"\n====== What {fg('misty_rose_3')}Filesystem{attr('reset')} do you want to use for {fg('misty_rose_3')}{environment}{attr('reset')}? [region: {self.region}, vpc: {vpc_id}] ======\n"
)
filesystems = {}
count = 1
efs_paginator = self.efs.get_paginator("describe_file_systems")
efs_iterator = efs_paginator.paginate()
for page in efs_iterator:
for filesystem in page["FileSystems"]:
# check for lifecycle
if filesystem.get("LifeCycleState", "unknown").upper() not in {
"AVAILABLE",
"UPDATING",
}:
continue
verified_vpc = False
for mount_target in self.efs.describe_mount_targets(
FileSystemId=filesystem["FileSystemId"]
)["MountTargets"]:
if mount_target["VpcId"] == vpc_id:
verified_vpc = True
if verified_vpc is True:
if filesystem["FileSystemId"] not in selected_fs:
filesystems[count] = {
"id": f"{filesystem['FileSystemId']}",
"fs_type": "efs",
"description": f"EFS: {filesystem['Name'] if 'Name' in filesystem.keys() else 'EFS: '} {filesystem['FileSystemId']}.efs.{self.region}.amazonaws.com",
}
count += 1
efs_count = count - 1
fsx_paginator = self.fsx.get_paginator("describe_file_systems")
fsx_iterator = fsx_paginator.paginate()
for page in fsx_iterator:
for filesystem in page["FileSystems"]:
# Check for proper Lifecycle
if filesystem.get("Lifecycle", "unknown-lifecycle").upper() not in {
"AVAILABLE",
"UPDATING",
}:
continue
fsx_type = filesystem.get("FileSystemType", "unknown-type")
# TODO - Add more FSx support here
# if fsx_type.upper() not in {'WINDOWS', 'LUSTRE', 'ONTAP', 'OPENZFS'}:
if fsx_type.upper() not in {"LUSTRE"}:
continue
resource_name = False
if filesystem["VpcId"] == vpc_id:
if filesystem["FileSystemId"] not in selected_fs:
for tag in filesystem["Tags"]:
if tag["Key"] == "Name":
resource_name = tag["Value"]
filesystems[count] = {
"id": f"{filesystem['FileSystemId']}",
"fs_type": "fsx_lustre",
"description": f"FSx/{fsx_type.upper()}: {resource_name if resource_name else f'FSx/{fsx_type.upper()}: '} {filesystem['FileSystemId']}.fsx.{self.region}.amazonaws.com",
}
count += 1
            for key, value in filesystems.items():
                print(" {} > {}".format(key, value["description"]))
allowed_choices = list(filesystems.keys())
choice = get_input(
f"Choose the filesystem to use for {environment}?",
None,
allowed_choices,
int,
)
return {
"success": True,
"message": filesystems[choice]["id"],
"provider": filesystems[choice]["fs_type"],
}
except Exception as err:
return {"success": False, "message": str(err)}
def get_security_groups(self, vpc_id, environment, scheduler_sg=None):
if scheduler_sg is None:
scheduler_sg = []
try:
print(
f"\n====== Choose the {fg('misty_rose_3')}security group to use for the {environment.upper()}{attr('reset')} [region: {self.region}, vpc: {vpc_id}] ======\n"
)
sgs_by_name = {}
sg_paginator = self.ec2.get_paginator("describe_security_groups")
sg_iterator = sg_paginator.paginate()
for page in sg_iterator:
for sg in page["SecurityGroups"]:
resource_name = False
if "Tags" in sg.keys():
for tag in sg["Tags"]:
if tag["Key"] == "Name":
resource_name = tag["Value"]
if not resource_name:
continue
sgs_by_name[resource_name] = sg
sgs = {}
count = 1
for resource_name in sorted(sgs_by_name):
sg = sgs_by_name[resource_name]
if sg["GroupId"] not in scheduler_sg:
sgs[count] = {
"id": f"{sg['GroupId']}",
"description": f"{resource_name if resource_name else ''} {sg['GroupId']} {sg['GroupName']}",
}
count += 1
            for key in sorted(sgs):
                print(" {:2} > {}".format(key, sgs[key]["description"]))
allowed_choices = list(sgs.keys())
choice = get_input(
f"What security group for you want to use for {environment.upper()}",
None,
allowed_choices,
int,
)
return {"success": True, "message": sgs[choice]["id"]}
except Exception as err:
return {"success": False, "message": str(err)}
def get_iam_roles(self, environment, selected_roles=None):
if selected_roles is None:
selected_roles = []
try:
print(
f"\n====== Choose the {fg('misty_rose_3')}IAM role to use for the {environment.upper()}{attr('reset')} ======\n"
)
roles = {}
count = 1
iam_paginator = self.iam.get_paginator("list_roles")
iam_iterator = iam_paginator.paginate()
for page in iam_iterator:
for role in page["Roles"]:
if role["RoleName"] not in selected_roles:
roles[count] = {
"arn": f"{role['Arn']}",
"name": role["RoleName"],
"description": f"{role['RoleName']} - {role['Description'] if 'Description' in role.keys() else ''}",
}
count += 1
            for key, value in roles.items():
                print(" {} > {}".format(key, value["description"]))
allowed_choices = list(roles.keys())
choice = get_input(
f"What IAM Role for you want to use for {environment.upper()}",
None,
allowed_choices,
int,
)
return {"success": True, "message": roles[choice]}
except Exception as err:
print(err)
return {"success": False, "message": str(err)}
def get_rules_for_security_group(self, sg_ids):
try:
rules = {}
for sg_id in sg_ids:
for page in self.ec2.get_paginator(
"describe_security_groups"
).paginate():
for sg in page["SecurityGroups"]:
sg_rules = []
if sg["GroupId"] != sg_id:
continue
if "IpPermissions" in sg.keys():
for permission in sg["IpPermissions"]:
if "FromPort" in permission.keys():
from_port = permission["FromPort"]
to_port = permission["ToPort"]
else:
# IpProtocol = -1 -> All Traffic
from_port = 0
to_port = 65535
approved_ips = []
if permission["IpRanges"].__len__() > 0:
for r in permission["IpRanges"]:
if "CidrIp" in r.keys():
approved_ips.append(r["CidrIp"])
if permission["UserIdGroupPairs"].__len__() > 0:
for g in permission["UserIdGroupPairs"]:
if "GroupId" in g.keys():
approved_ips.append(g["GroupId"])
sg_rules.append(
{
"from_port": from_port,
"to_port": to_port,
"approved_ips": approved_ips,
"type": "ingress",
}
)
rules[sg_id] = sg_rules
if "IpPermissionsEgress" in sg.keys():
for permission in sg["IpPermissionsEgress"]:
if "FromPort" in permission.keys():
from_port = permission["FromPort"]
to_port = permission["ToPort"]
else:
# IpProtocol = -1 -> All Traffic
from_port = 0
to_port = 65535
approved_ips = []
if permission["IpRanges"].__len__() > 0:
for r in permission["IpRanges"]:
if "CidrIp" in r.keys():
approved_ips.append(r["CidrIp"])
if permission["UserIdGroupPairs"].__len__() > 0:
for g in permission["UserIdGroupPairs"]:
if "GroupId" in g.keys():
approved_ips.append(g["GroupId"])
sg_rules.append(
{
"from_port": from_port,
"to_port": to_port,
"approved_ips": approved_ips,
"type": "egress",
}
)
rules[sg_id] = sg_rules
return {"success": True, "message": rules}
except Exception as err:
return {"success": False, "message": str(err)}
def get_fs_security_groups(self, cfn_params):
try:
filesystems = {}
efs_ids = []
fsx_ids = []
sgs = []
for fs_mount in {"fs_apps", "fs_data"}:
fs_mount_provider = cfn_params.get(
f"{fs_mount}_provider", "unknown-provider"
)
if fs_mount_provider.lower() == "efs":
efs_ids.append(cfn_params[fs_mount])
elif fs_mount_provider.lower() == "fsx_lustre":
fsx_ids.append(cfn_params[fs_mount])
else:
print(f"ERROR: Do not know about provider: {fs_mount_provider}")
for efs_id in efs_ids:
for mount in self.efs.describe_mount_targets(
FileSystemId=efs_id.split(".")[0]
)["MountTargets"]:
for sg in self.efs.describe_mount_target_security_groups(
MountTargetId=mount["MountTargetId"]
)["SecurityGroups"]:
if sg not in sgs:
sgs.append(sg)
filesystems[efs_id] = sgs
for fsx_id in fsx_ids:
for network_interface in self.fsx.describe_file_systems(
FileSystemIds=[fsx_id]
)["FileSystems"][0]["NetworkInterfaceIds"]:
for groups in self.ec2.describe_network_interface_attribute(
Attribute="groupSet", NetworkInterfaceId=network_interface
)["Groups"]:
sg = groups["GroupId"]
if sg not in sgs:
sgs.append(sg)
filesystems[fsx_id] = sgs
return {"success": True, "message": filesystems}
except Exception as err:
return {"success": False, "message": str(err)}
def validate_sg_rules(self, cfn_params, check_fs=True):
try:
# Begin Verify Security Group Rules
print(
f"\n====== Please wait a little as we {fg('misty_rose_3')}validate your security group rules {attr('reset')} ======\n"
)
security_groups = [
cfn_params["scheduler_sg"],
cfn_params["compute_node_sg"],
]
if "vpc_endpoint_sg" in cfn_params:
security_groups.append(cfn_params["vpc_endpoint_sg"])
sg_rules = self.get_rules_for_security_group(security_groups)
if check_fs is True:
fs_sg = self.get_fs_security_groups(cfn_params)
if sg_rules["success"] is True:
scheduler_sg_rules = sg_rules["message"][cfn_params["scheduler_sg"]]
compute_node_sg_rules = sg_rules["message"][
cfn_params["compute_node_sg"]
]
vpc_endpoint_sg_rules = sg_rules["message"].get(
cfn_params.get("vpc_endpoint_sg", None), None
)
else:
print(f"{fg('red')}Error: {sg_rules['message']} {attr('reset')}")
sys.exit(1)
errors = {}
# status == True means that the check passed
errors["SCHEDULER_SG_IN_COMPUTE"] = {
"status": False,
"error": f"Compute Node SG must allow all TCP traffic from Scheduler SG",
"resolution": f"Add new rule on {cfn_params['compute_node_sg']} that allow TCP ports '0-65535' for {cfn_params['scheduler_sg']}",
}
errors["COMPUTE_SG_IN_SCHEDULER"] = {
"status": False,
"error": f"Scheduler SG must allow all TCP traffic from Compute Node SG",
"resolution": f"Add a new rule on {cfn_params['scheduler_sg']} that allow TCP ports '0-65535' for {cfn_params['compute_node_sg']}",
}
errors["CLIENT_IP_HTTPS_IN_SCHEDULER"] = {
"status": False,
"error": f"Client IP must be allowed for port 443 (80 optional) on Scheduler SG",
"resolution": f"Add two rules on {cfn_params['scheduler_sg']} that allow TCP ports 80 and 443 for {self.client_ip}",
}
errors["CLIENT_IP_SSH_IN_SCHEDULER"] = {
"status": False,
"error": f"Client IP must be allowed for port 22 (SSH) on Scheduler SG",
"resolution": f"Add one rule on {cfn_params['scheduler_sg']} that allow TCP port 22 for {self.client_ip}",
}
errors["SCHEDULER_SG_EQUAL_COMPUTE"] = {
"status": False,
"error": "Scheduler SG and Compute SG must be different",
"resolution": "You must choose two different security groups",
}
errors["COMPUTE_SG_EGRESS_EFA"] = {
"status": False,
"error": "Compute SG must reference egress traffic to itself for EFA",
"resolution": f"Add a new (EGRESS) rule on {cfn_params['compute_node_sg']} that allow TCP ports '0-65535' for {cfn_params['compute_node_sg']}. Make sure you configure EGRESS rule and not INGRESS",
}
if "vpc_endpoint_sg" in cfn_params:
errors["COMPUTE_EGRESS_TO_VPC_ENDPOINTS"] = {
"status": False,
"error": "Compute SG must allow port 443 egress to the vpc endpoints security group",
"resolution": f"Add a new (EGRESS) rule on {cfn_params['compute_node_sg']} that allows TCP port '443' for {cfn_params['vpc_endpoint_sg']}. Make sure you configure EGRESS rule and not INGRESS",
}
errors["VPC_ENDPOINTS_INGRESS_FROM_COMPUTE"] = {
"status": False,
"error": "vpc Endpoints SG must allow port 443 ingress from the Compute SG",
"resolution": f"Add a new (INGRESS) rule on {cfn_params['vpc_endpoint_sg']} that allows TCP port '443' from {cfn_params['compute_node_sg']}. Make sure you configure INGRESS rule and not EGRESS",
}
errors["SCHEDULER_EGRESS_TO_VPC_ENDPOINTS"] = {
"status": False,
"error": "Scheduler SG must allow port 443 egress to the vpc endpoints security group",
"resolution": f"Add a new (EGRESS) rule on {cfn_params['scheduler_sg']} that allows TCP port '443' for {cfn_params['vpc_endpoint_sg']}. Make sure you configure EGRESS rule and not INGRESS",
}
errors["VPC_ENDPOINTS_INGRESS_FROM_SCHEDULER"] = {
"status": False,
"error": "vpc Endpoints SG must allow port 443 ingress from the Scheduler SG",
"resolution": f"Add a new (INGRESS) rule on {cfn_params['vpc_endpoint_sg']} that allows TCP port '443' from {cfn_params['scheduler_sg']}. Make sure you configure INGRESS rule and not EGRESS",
}
if check_fs is True:
errors["FS_APP_SG"] = {
"status": False,
"error": f"SG assigned to EFS App {cfn_params['fs_apps']} must allow Scheduler SG and Compute SG",
"resolution": f"Add {cfn_params['scheduler_sg']} and {cfn_params['compute_node_sg']} on your EFS Apps {cfn_params['fs_apps']}",
}
errors["FS_DATA_SG"] = {
"status": False,
"error": f"SG assigned to EFS App {cfn_params['fs_data']} must allow Scheduler SG and Compute SG",
"resolution": f"Add {cfn_params['scheduler_sg']} and {cfn_params['compute_node_sg']} on your EFS Data {cfn_params['fs_data']}",
}
# Verify Scheduler Rules
for rules in scheduler_sg_rules:
if rules["from_port"] == 0 and rules["to_port"] == 65535:
for rule in rules["approved_ips"]:
if cfn_params["compute_node_sg"] in rule:
errors["COMPUTE_SG_IN_SCHEDULER"]["status"] = True
if rules["from_port"] == 443 or rules["from_port"] == 22:
for rule in rules["approved_ips"]:
                        # The original hard-coded `client_ip_netmask = 32` (an int)
                        # never matched the string "32" below; derive the address and
                        # netmask from the configured client IP instead (assumed to be
                        # in CIDR notation, e.g. "203.0.113.10/32").
                        client_ip_address = self.client_ip.split("/")[0]
                        client_ip_netmask = self.client_ip.split("/")[-1]
                        if client_ip_netmask == "32":
                            if ipaddress.IPv4Address(
                                client_ip_address
                            ) in ipaddress.IPv4Network(rule):
if rules["from_port"] == 443:
errors["CLIENT_IP_HTTPS_IN_SCHEDULER"][
"status"
] = True
if rules["from_port"] == 22:
errors["CLIENT_IP_SSH_IN_SCHEDULER"][
"status"
] = True
else:
if self.client_ip in rule:
if rules["from_port"] == 443:
errors["CLIENT_IP_HTTPS_IN_SCHEDULER"][
"status"
] = True
if rules["from_port"] == 22:
errors["CLIENT_IP_SSH_IN_SCHEDULER"][
"status"
] = True
# Verify Compute Node Rules
for rules in compute_node_sg_rules:
if rules["from_port"] == 0 and rules["to_port"] == 65535:
for rule in rules["approved_ips"]:
if cfn_params["scheduler_sg"] in rule:
errors["SCHEDULER_SG_IN_COMPUTE"]["status"] = True
if rules["type"] == "egress":
if cfn_params["compute_node_sg"] in rule:
errors["COMPUTE_SG_EGRESS_EFA"]["status"] = True
# Verify VPC Endpoint Rules
if "vpc_endpoint_sg" in cfn_params:
for rule in compute_node_sg_rules:
# Make sure compute node allows egress to vpc endpoints
if rule["type"] != "egress":
continue
for approved_ip in rule["approved_ips"]:
if rule["from_port"] <= 443 <= rule["to_port"]:
if cfn_params["vpc_endpoint_sg"] in approved_ip:
errors["COMPUTE_EGRESS_TO_VPC_ENDPOINTS"][
"status"
] = True
for rule in scheduler_sg_rules:
# Make sure scheduler allows egress to vpc endpoints
if rule["type"] != "egress":
continue
for approved_ip in rule["approved_ips"]:
if rule["from_port"] <= 443 <= rule["to_port"]:
if cfn_params["vpc_endpoint_sg"] in approved_ip:
errors["SCHEDULER_EGRESS_TO_VPC_ENDPOINTS"][
"status"
] = True
for rule in vpc_endpoint_sg_rules:
# Make sure endpoints allow ingress from compute nodes and scheduler
if rule["type"] != "ingress":
continue
for approved_ip in rule["approved_ips"]:
if rule["from_port"] <= 443 <= rule["to_port"]:
if cfn_params["scheduler_sg"] in approved_ip:
errors["VPC_ENDPOINTS_INGRESS_FROM_SCHEDULER"][
"status"
] = True
if cfn_params["compute_node_sg"] in approved_ip:
errors["VPC_ENDPOINTS_INGRESS_FROM_COMPUTE"][
"status"
] = True
if check_fs is True:
if (
cfn_params["scheduler_sg"]
in fs_sg["message"][cfn_params["fs_apps"]]
and cfn_params["compute_node_sg"]
in fs_sg["message"][cfn_params["fs_apps"]]
):
errors["FS_APP_SG"]["status"] = True
if (
cfn_params["scheduler_sg"]
in fs_sg["message"][cfn_params["fs_data"]]
and cfn_params["compute_node_sg"]
in fs_sg["message"][cfn_params["fs_data"]]
):
errors["FS_DATA_SG"]["status"] = True
if cfn_params["scheduler_sg"] != cfn_params["compute_node_sg"]:
errors["SCHEDULER_SG_EQUAL_COMPUTE"]["status"] = True
sg_errors = {}
confirm_sg_settings = False
for error_id, error_info in errors.items():
if error_info["status"] is False:
                    # Skip filesystem-related warnings when filesystem checks are
                    # disabled (the original tested for "EFS", which never matches
                    # the FS_* error ids).
                    if check_fs is False and error_id.startswith("FS_"):
pass
else:
print(
f"{fg('yellow')}ATTENTION!! {error_info['error']} {attr('reset')}\nHow to solve: {error_info['resolution']}\n"
)
sg_errors[error_info["error"]] = error_info["resolution"]
confirm_sg_settings = True
if confirm_sg_settings:
choice = get_input(
"Your security groups may not be configured correctly. Verify them and determine if the warnings listed above are false-positive.\n Do you still want to continue with the installation?",
None,
["yes", "no"],
str,
)
if choice.lower() == "no":
sys.exit(1)
else:
print(
f"{fg('green')} Security Groups seem to be configured correctly{attr('reset')}"
)
return {"success": True, "message": ""}
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(f"{exc_type} {fname} {exc_tb.tb_lineno}")
return {
"success": False,
"message": f"{exc_type} {fname} {exc_tb.tb_lineno}",
}
|
6303774c29919094d459b9685acffd5519d7e0cb
|
a4269b96a9eba49bbe692733d6777ec1917ef629
|
/semana-04/scripts/3-eventos-y-señales-ejemplo_3.py
|
9bea8cebaa84a13bf93fd20020cbc28f751f4527
|
[] |
no_license
|
IIC2233/contenidos
|
7bbc687c10d5fa2f394891507cf733f24b14080f
|
ee49033e3aa382f1dcbd03004601a7c084f824ab
|
refs/heads/main
| 2023-08-29T10:15:56.430976
| 2023-08-22T04:35:17
| 2023-08-22T04:37:00
| 286,314,095
| 118
| 193
| null | 2020-10-13T16:55:19
| 2020-08-09T20:45:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
3-eventos-y-señales-ejemplo_3.py
|
import sys
from PyQt6.QtWidgets import QApplication, QWidget, QLabel
class MiVentana(QWidget):
def __init__(self):
super().__init__()
self.setGeometry(100, 100, 110, 400)
self.label = QLabel("Haz clic en mí", self)
self.label.setGeometry(10, 10, 90, 100)
self.label.setStyleSheet("background-color: lightblue;")
self.label.show()
self.click_dentro_del_label = False
    def mousePressEvent(self, event):
        x = event.position().x()
        y = event.position().y()
        print(f"The mouse was pressed at {x},{y}")
        self.click_dentro_del_label = self.label.underMouse()
        if self.click_dentro_del_label:
            print("\tIt was pressed inside the QLabel")
        else:
            print("\tIt was pressed outside the QLabel")
    def mouseReleaseEvent(self, event):
        x = event.position().x()
        y = event.position().y()
        print(f"The mouse was released at {x},{y}")
        if self.click_dentro_del_label:
            print("\tThe press had started inside the QLabel")
        else:
            print("\tThe press had started outside the QLabel")
    def mouseMoveEvent(self, event):
        x = event.position().x()
        y = event.position().y()
        print(f"The mouse is moving... it is at {x},{y}")
if __name__ == "__main__":
    def hook(type, value, traceback):
        print(type)
        print(value)
        print(traceback)
sys.__excepthook__ = hook
app = QApplication([])
window = MiVentana()
window.show()
sys.exit(app.exec())
|
7239e03f976db7ede7f3570170f032049df1adcc
|
5dac15b472b4cb73c746e5fcc36c37c110b528c2
|
/tools/macos.py
|
34a755abefc50b48c2d103c162449afc9011d7ae
|
[
"MIT"
] |
permissive
|
godotengine/godot-cpp
|
c2f75545e8c469282f7629185b5a8c68d2975089
|
c370f0f24a6e4ce767e21673731838f1affc45fb
|
refs/heads/master
| 2023-08-17T19:40:25.532820
| 2023-08-17T15:14:07
| 2023-08-17T15:14:07
| 83,732,863
| 787
| 356
|
MIT
| 2023-09-13T11:44:23
| 2017-03-02T22:50:13
|
C++
|
UTF-8
|
Python
| false
| false
| 2,326
|
py
|
macos.py
|
import os
import sys
def has_osxcross():
return "OSXCROSS_ROOT" in os.environ
def options(opts):
opts.Add("macos_deployment_target", "macOS deployment target", "default")
opts.Add("macos_sdk_path", "macOS SDK path", "")
if has_osxcross():
opts.Add("osxcross_sdk", "OSXCross SDK version", "darwin16")
def exists(env):
return sys.platform == "darwin" or has_osxcross()
def generate(env):
if env["arch"] not in ("universal", "arm64", "x86_64"):
print("Only universal, arm64, and x86_64 are supported on macOS. Exiting.")
        # A bare Exit() is undefined in a plain Python module; use sys.exit.
        sys.exit(1)
if sys.platform == "darwin":
# Use clang on macOS by default
env["CXX"] = "clang++"
env["CC"] = "clang"
else:
# OSXCross
root = os.environ.get("OSXCROSS_ROOT", "")
if env["arch"] == "arm64":
basecmd = root + "/target/bin/arm64-apple-" + env["osxcross_sdk"] + "-"
else:
basecmd = root + "/target/bin/x86_64-apple-" + env["osxcross_sdk"] + "-"
env["CC"] = basecmd + "clang"
env["CXX"] = basecmd + "clang++"
env["AR"] = basecmd + "ar"
env["RANLIB"] = basecmd + "ranlib"
env["AS"] = basecmd + "as"
binpath = os.path.join(root, "target", "bin")
if binpath not in env["ENV"]["PATH"]:
# Add OSXCROSS bin folder to PATH (required for linking).
env.PrependENVPath("PATH", binpath)
# Common flags
if env["arch"] == "universal":
env.Append(LINKFLAGS=["-arch", "x86_64", "-arch", "arm64"])
env.Append(CCFLAGS=["-arch", "x86_64", "-arch", "arm64"])
else:
env.Append(LINKFLAGS=["-arch", env["arch"]])
env.Append(CCFLAGS=["-arch", env["arch"]])
if env["macos_deployment_target"] != "default":
env.Append(CCFLAGS=["-mmacosx-version-min=" + env["macos_deployment_target"]])
env.Append(LINKFLAGS=["-mmacosx-version-min=" + env["macos_deployment_target"]])
if env["macos_sdk_path"]:
env.Append(CCFLAGS=["-isysroot", env["macos_sdk_path"]])
env.Append(LINKFLAGS=["-isysroot", env["macos_sdk_path"]])
env.Append(
LINKFLAGS=[
"-framework",
"Cocoa",
"-Wl,-undefined,dynamic_lookup",
]
)
env.Append(CPPDEFINES=["MACOS_ENABLED", "UNIX_ENABLED"])
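# Illustrative invocation of a build that consumes this tool (the exact scons
# command line is an assumption about the surrounding build setup):
#   scons platform=macos arch=universal macos_deployment_target=10.13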
|
7cfd7bff3a6f6e466d95262e79b46a5ac8356719
|
6415c13547e6943f7b65337cbd2790c4e18723c8
|
/netbox/dcim/migrations/0167_module_status.py
|
c048b4bd83f60ff3a00ddd91336c28dcc0ddc2c2
|
[
"Apache-2.0"
] |
permissive
|
netbox-community/netbox
|
287254a9698270d51f57b1297118e9f01536da5a
|
506884bc4dc70299db3e2a7ad577dd7fd808065e
|
refs/heads/develop
| 2023-08-24T09:11:46.685121
| 2023-08-23T18:44:14
| 2023-08-23T18:44:14
| 52,796,596
| 8,122
| 1,817
|
Apache-2.0
| 2023-09-14T18:16:01
| 2016-02-29T14:15:46
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
0167_module_status.py
|
# Generated by Django 4.1.2 on 2022-12-09 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0166_virtualdevicecontext'),
]
operations = [
migrations.AddField(
model_name='module',
name='status',
field=models.CharField(default='active', max_length=50),
),
]
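# Applied through the standard Django migration workflow, e.g.:
#   python manage.py migrate dcim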
|
20e6010b1b4f3a78c0dbaa06319d3dcb7934ef77
|
65078b8087c2040cf0188e2550ea298d20518f62
|
/src/bentoml_cli/cli.py
|
303af2493ad16a2d9ffbc25b67808046445d39d5
|
[
"Apache-2.0"
] |
permissive
|
bentoml/BentoML
|
20ab6f8351b1c5cd116d6d60a28098246a1581b3
|
4a14f073d8a3e700aff29483b17ea053058c0c63
|
refs/heads/main
| 2023-09-05T16:03:08.909692
| 2023-09-04T18:54:33
| 2023-09-04T18:54:33
| 178,976,529
| 5,712
| 732
|
Apache-2.0
| 2023-09-14T20:07:54
| 2019-04-02T01:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
cli.py
|
from __future__ import annotations
import importlib.metadata
import click
import psutil
from bentoml_cli.bentos import add_bento_management_commands
from bentoml_cli.cloud import add_cloud_command
from bentoml_cli.containerize import add_containerize_command
from bentoml_cli.deployment import add_deployment_command
from bentoml_cli.env import add_env_command
from bentoml_cli.models import add_model_management_commands
from bentoml_cli.serve import add_serve_command
from bentoml_cli.start import add_start_command
from bentoml_cli.utils import BentoMLCommandGroup
def create_bentoml_cli() -> click.Group:
from bentoml._internal.context import component_context
component_context.component_type = "cli"
CONTEXT_SETTINGS = {"help_option_names": ("-h", "--help")}
@click.group(cls=BentoMLCommandGroup, context_settings=CONTEXT_SETTINGS)
@click.version_option(importlib.metadata.version("bentoml"), "-v", "--version")
def bentoml_cli():
"""
\b
██████╗ ███████╗███╗ ██╗████████╗ ██████╗ ███╗ ███╗██╗
██╔══██╗██╔════╝████╗ ██║╚══██╔══╝██╔═══██╗████╗ ████║██║
██████╔╝█████╗ ██╔██╗ ██║ ██║ ██║ ██║██╔████╔██║██║
██╔══██╗██╔══╝ ██║╚██╗██║ ██║ ██║ ██║██║╚██╔╝██║██║
██████╔╝███████╗██║ ╚████║ ██║ ╚██████╔╝██║ ╚═╝ ██║███████╗
╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝
"""
# Add top-level CLI commands
add_env_command(bentoml_cli)
add_cloud_command(bentoml_cli)
add_bento_management_commands(bentoml_cli)
add_model_management_commands(bentoml_cli)
add_start_command(bentoml_cli)
add_serve_command(bentoml_cli)
add_containerize_command(bentoml_cli)
add_deployment_command(bentoml_cli)
if psutil.WINDOWS:
import sys
sys.stdout.reconfigure(encoding="utf-8") # type: ignore
return bentoml_cli
cli = create_bentoml_cli()
if __name__ == "__main__":
cli()
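# The group assembled above backs the `bentoml` console entry point, e.g.
# (the service target below is an illustrative placeholder):
#   bentoml --help
#   bentoml serve service:svc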
|
f0315d76012b1902fe4cadeedca1ddbb4d8a6eed
|
11e227cc06dfce171fe521591909e347b67220ff
|
/build/scripts/documentation_release.py
|
1758f38475291df851fe2c52f28b695cf6e0099f
|
[
"Apache-2.0"
] |
permissive
|
approvals/ApprovalTests.cpp
|
45638b954cd84d2c154c250111c60b46d8efc5d0
|
d49cbcbd8a83ad8be12ad647e84162d6b2fe69b0
|
refs/heads/master
| 2023-08-17T17:13:51.713763
| 2023-08-05T17:38:09
| 2023-08-05T17:38:09
| 9,286,563
| 298
| 61
|
Apache-2.0
| 2023-09-04T17:23:47
| 2013-04-08T01:53:54
|
C++
|
UTF-8
|
Python
| false
| false
| 2,975
|
py
|
documentation_release.py
|
import shutil
import time
from scripts.release_constants import release_constants
from scripts.utilities import read_file, use_directory, replace_text_in_file, run, check_step
from scripts.release_details import ReleaseDetails
from typing import Callable
class PrepareDocumentationRelease:
@staticmethod
def prepare_documentation(details: ReleaseDetails) -> None:
if not details.project_details.update_documentation:
return
PrepareDocumentationRelease.update_features_page(details)
PrepareDocumentationRelease.update_readme_and_docs(details)
PrepareDocumentationRelease.prepare_release_notes(details)
PrepareDocumentationRelease.regenerate_markdown()
@staticmethod
def prepare_update_features_page(old_version: str, new_version: str, content: str) -> Callable:
missing_features = ('\n'
'## v.x.y.z\n'
'\n'
f'## v.'
)
if missing_features in content:
            def check(features_file: str, action: Callable = check_step) -> Callable:
return action("the Features page is empty: are you sure you want this?")
return check
else:
update_version = ('\n'
'## v.x.y.z\n'
'\n'
f'## {new_version}\n'
)
def replace(features_file: str, replace_text_in_file_action: Callable = replace_text_in_file) -> Callable:
return replace_text_in_file_action(features_file, '\n## v.x.y.z\n', update_version)
return replace
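    # Illustrative use of prepare_update_features_page (a sketch; the version
    # strings are assumptions):
    #   update = PrepareDocumentationRelease.prepare_update_features_page(
    #       "v.10.12.0", "v.10.13.0", content)
    #   update("../doc/Features.md")  # rewrites '## v.x.y.z' or asks to confirm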
@staticmethod
def update_features_page(details: ReleaseDetails) -> None:
features_file = '../doc/Features.md'
content = read_file(features_file)
update_file = PrepareDocumentationRelease.prepare_update_features_page(details.old_version_as_text(), details.new_version_as_text(), content)
update_file(features_file)
@staticmethod
def update_readme_and_docs(details: ReleaseDetails) -> None:
with use_directory(".."):
replace_text_in_file("README.md", details.old_version_as_text(), details.new_version_as_text())
@staticmethod
def prepare_release_notes(details: ReleaseDetails) -> None:
replace_text_in_file(release_constants.xxx_release_notes_path, 'v.x.y.z', details.new_version_as_text())
shutil.move(release_constants.xxx_release_notes_path, details.new_release_notes_path)
# Make sure the above move has finished, before we create the new xxx file:
time.sleep(1)
shutil.copyfile(release_constants.template_release_notes_path, release_constants.xxx_release_notes_path)
@staticmethod
def regenerate_markdown() -> None:
with use_directory(".."):
run(["./run_markdown_templates.sh"])
|
52b0277658a1a8c004f9bac868c6f25cc3a2eb7f
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/libbasisu/all/conanfile.py
|
6dfaf4ee7f3dec6ff53af5698f3bc4400a191c99
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,769
|
py
|
conanfile.py
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class LibBasisUniversalConan(ConanFile):
name = "libbasisu"
description = "Basis Universal Supercompressed GPU Texture Codec"
homepage = "https://github.com/BinomialLLC/basis_universal"
topics = ("conan", "basis", "textures", "compression")
url = "https://github.com/conan-io/conan-center-index"
license = "Apache-2.0"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
"shared": [True, False],
"use_sse4": [True, False],
"with_zstd": [True, False],
"enable_encoder": [True, False],
"custom_iterator_debug_level": [True, False]
}
default_options = {
"fPIC": True,
"shared": False,
"use_sse4": False,
"with_zstd": True,
"enable_encoder": True,
"custom_iterator_debug_level": False
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def _use_custom_iterator_debug_level(self):
return self.options.get_safe("custom_iterator_debug_level", default=self.default_options["custom_iterator_debug_level"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.compiler != "Visual Studio":
del self.options.custom_iterator_debug_level
    def _minimum_compiler_version(self) -> dict:
return {
"Visual Studio": "15",
"gcc": "5.4",
"clang": "3.9",
"apple-clang": "10"
}
def validate(self):
min_version = self._minimum_compiler_version().get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
elif tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} {} does not support compiler with version {} {}, minimum supported compiler version is {} ".format(self.name, self.version, self.settings.compiler, self.settings.compiler.version, min_version))
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def configure(self):
if self.options.shared:
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["SSE4"] = self.options.use_sse4
self._cmake.definitions["ZSTD"] = self.options.with_zstd
self._cmake.definitions["ENABLE_ENCODER"] = self.options.enable_encoder
self._cmake.definitions["NO_ITERATOR_DEBUG_LEVEL"] = not self._use_custom_iterator_debug_level()
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst=os.path.join("include", self.name, "transcoder"), src=os.path.join(self._source_subfolder, "transcoder"))
if self.options.enable_encoder:
self.copy("*.h", dst=os.path.join("include", self.name, "encoder"), src=os.path.join(self._source_subfolder, "encoder"))
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.so", dst="lib", keep_path=False)
self.copy(pattern="*.dylib*", dst="lib", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["cmake_find_package"] = self.name
self.cpp_info.names["cmake_find_package_multi"] = self.name
self.cpp_info.includedirs = ["include", os.path.join("include", self.name)]
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["m", "pthread"]
self.cpp_info.defines.append("BASISU_NO_ITERATOR_DEBUG_LEVEL={}".format("1" if self._use_custom_iterator_debug_level() else "0"))
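# Illustrative local build of this recipe (standard Conan v1 workflow; the
# version string below is an assumption):
#   conan create . libbasisu/1.16.3@ -o libbasisu:enable_encoder=True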
|
50459ca91fe0dfed99c09e4b675ceec28ceffd77
|
1543b53d145e1783ba0faa8a3b84b5c49ee29084
|
/tests/io/test_incremental_dataset.py
|
76218b63244213736372b9dd86f64bb3ba856d14
|
[
"Apache-2.0"
] |
permissive
|
kedro-org/kedro
|
9ed5920ac713e66861039ba4901a5347d3cda28e
|
0293dc15812b27330bba31a01c7b332b3165af2a
|
refs/heads/main
| 2023-09-01T08:57:52.258279
| 2023-08-31T09:19:39
| 2023-08-31T09:19:39
| 182,067,506
| 4,099
| 332
|
Apache-2.0
| 2023-09-14T12:12:52
| 2019-04-18T10:29:56
|
Python
|
UTF-8
|
Python
| false
| false
| 17,863
|
py
|
test_incremental_dataset.py
|
from __future__ import annotations
import os
import re
from pathlib import Path
from typing import Any
import boto3
import pandas as pd
import pytest
from moto import mock_s3
from pandas.testing import assert_frame_equal
from kedro.extras.datasets.pickle import PickleDataSet
from kedro.extras.datasets.text import TextDataSet
from kedro.io import AbstractDataset, DatasetError, IncrementalDataset
from kedro.io.data_catalog import CREDENTIALS_KEY
DATASET = "kedro.extras.datasets.pandas.CSVDataSet"
@pytest.fixture
def partitioned_data_pandas():
return {
f"p{counter:02d}/data.csv": pd.DataFrame(
{"part": counter, "col": list(range(counter + 1))}
)
for counter in range(5)
}
@pytest.fixture
def local_csvs(tmp_path, partitioned_data_pandas):
local_dir = Path(tmp_path / "csvs")
local_dir.mkdir()
for k, data in partitioned_data_pandas.items():
path = local_dir / k
path.parent.mkdir(parents=True)
data.to_csv(str(path), index=False)
return local_dir
class DummyDataset(AbstractDataset): # pragma: no cover
def __init__(self, filepath):
pass
def _describe(self) -> dict[str, Any]:
return {"dummy": True}
def _load(self) -> Any:
pass
def _save(self, data: Any) -> None:
pass
def dummy_gt_func(value1: str, value2: str):
return value1 > value2
def dummy_lt_func(value1: str, value2: str):
return value1 < value2
class TestIncrementalDatasetLocal:
def test_load_and_confirm(self, local_csvs, partitioned_data_pandas):
"""Test the standard flow for loading, confirming and reloading
an IncrementalDataset"""
pds = IncrementalDataset(str(local_csvs), DATASET)
loaded = pds.load()
assert loaded.keys() == partitioned_data_pandas.keys()
for partition_id, data in loaded.items():
assert_frame_equal(data, partitioned_data_pandas[partition_id])
checkpoint_path = local_csvs / pds.DEFAULT_CHECKPOINT_FILENAME
assert not checkpoint_path.exists()
pds.confirm()
assert checkpoint_path.is_file()
assert checkpoint_path.read_text() == pds._read_checkpoint() == "p04/data.csv"
reloaded = pds.load()
assert reloaded.keys() == loaded.keys()
pds.release()
reloaded_after_release = pds.load()
assert reloaded_after_release == {}
def test_save(self, local_csvs):
"""Test saving a new partition into an IncrementalDataset"""
df = pd.DataFrame({"dummy": [1, 2, 3]})
new_partition_key = "p05/data.csv"
new_partition_path = local_csvs / new_partition_key
pds = IncrementalDataset(str(local_csvs), DATASET)
assert not new_partition_path.exists()
assert new_partition_key not in pds.load()
pds.save({new_partition_key: df})
assert new_partition_path.exists()
loaded = pds.load()
assert_frame_equal(loaded[new_partition_key], df)
@pytest.mark.parametrize(
"filename_suffix,expected_partitions",
[
(
"",
{
"p00/data.csv",
"p01/data.csv",
"p02/data.csv",
"p03/data.csv",
"p04/data.csv",
},
),
(".csv", {"p00/data", "p01/data", "p02/data", "p03/data", "p04/data"}),
(".fake", set()),
],
)
def test_filename_suffix(self, filename_suffix, expected_partitions, local_csvs):
"""Test how specifying filename_suffix affects the available
partitions and their names"""
pds = IncrementalDataset(
str(local_csvs), DATASET, filename_suffix=filename_suffix
)
loaded = pds.load()
assert loaded.keys() == expected_partitions
@pytest.mark.parametrize(
"forced_checkpoint,expected_partitions",
[
(
"",
{
"p00/data.csv",
"p01/data.csv",
"p02/data.csv",
"p03/data.csv",
"p04/data.csv",
},
),
(
"p00/data.csv",
{"p01/data.csv", "p02/data.csv", "p03/data.csv", "p04/data.csv"},
),
("p03/data.csv", {"p04/data.csv"}),
],
)
def test_force_checkpoint_no_checkpoint_file(
self, forced_checkpoint, expected_partitions, local_csvs
):
"""Test how forcing checkpoint value affects the available partitions
if the checkpoint file does not exist"""
pds = IncrementalDataset(str(local_csvs), DATASET, checkpoint=forced_checkpoint)
loaded = pds.load()
assert loaded.keys() == expected_partitions
confirm_path = local_csvs / pds.DEFAULT_CHECKPOINT_FILENAME
assert not confirm_path.exists()
pds.confirm()
assert confirm_path.is_file()
assert confirm_path.read_text() == max(expected_partitions)
@pytest.mark.parametrize(
"forced_checkpoint,expected_partitions",
[
(
"",
{
"p00/data.csv",
"p01/data.csv",
"p02/data.csv",
"p03/data.csv",
"p04/data.csv",
},
),
(
"p00/data.csv",
{"p01/data.csv", "p02/data.csv", "p03/data.csv", "p04/data.csv"},
),
("p03/data.csv", {"p04/data.csv"}),
],
)
def test_force_checkpoint_checkpoint_file_exists(
self, forced_checkpoint, expected_partitions, local_csvs
):
"""Test how forcing checkpoint value affects the available partitions
if the checkpoint file exists"""
IncrementalDataset(str(local_csvs), DATASET).confirm()
checkpoint = local_csvs / IncrementalDataset.DEFAULT_CHECKPOINT_FILENAME
assert checkpoint.read_text() == "p04/data.csv"
pds = IncrementalDataset(str(local_csvs), DATASET, checkpoint=forced_checkpoint)
assert pds._checkpoint.exists()
loaded = pds.load()
assert loaded.keys() == expected_partitions
@pytest.mark.parametrize(
"forced_checkpoint", ["p04/data.csv", "p10/data.csv", "p100/data.csv"]
)
def test_force_checkpoint_no_partitions(self, forced_checkpoint, local_csvs):
"""Test that forcing the checkpoint to certain values results in no
partitions being returned"""
pds = IncrementalDataset(str(local_csvs), DATASET, checkpoint=forced_checkpoint)
loaded = pds.load()
assert loaded == {}
confirm_path = local_csvs / pds.DEFAULT_CHECKPOINT_FILENAME
assert not confirm_path.exists()
pds.confirm()
# confirming with no partitions available must have no effect
assert not confirm_path.exists()
def test_checkpoint_path(self, local_csvs, partitioned_data_pandas):
"""Test configuring a different checkpoint path"""
checkpoint_path = local_csvs / "checkpoint_folder" / "checkpoint_file"
assert not checkpoint_path.exists()
IncrementalDataset(
str(local_csvs), DATASET, checkpoint={"filepath": str(checkpoint_path)}
).confirm()
assert checkpoint_path.is_file()
assert checkpoint_path.read_text() == max(partitioned_data_pandas)
@pytest.mark.parametrize(
"checkpoint_config,expected_checkpoint_class",
[
(None, TextDataSet),
({"type": "kedro.extras.datasets.pickle.PickleDataSet"}, PickleDataSet),
({"type": "tests.io.test_incremental_dataset.DummyDataset"}, DummyDataset),
],
)
def test_checkpoint_type(
self, tmp_path, checkpoint_config, expected_checkpoint_class
):
"""Test configuring a different checkpoint dataset type"""
pds = IncrementalDataset(str(tmp_path), DATASET, checkpoint=checkpoint_config)
assert isinstance(pds._checkpoint, expected_checkpoint_class)
@pytest.mark.parametrize(
"checkpoint_config,error_pattern",
[
(
{"versioned": True},
"'IncrementalDataset' does not support versioning "
"of the checkpoint. Please remove 'versioned' key from the "
"checkpoint definition.",
),
(
{"version": None},
"'IncrementalDataset' does not support versioning "
"of the checkpoint. Please remove 'version' key from the "
"checkpoint definition.",
),
],
)
def test_version_not_allowed(self, tmp_path, checkpoint_config, error_pattern):
"""Test that invalid checkpoint configurations raise expected errors"""
with pytest.raises(DatasetError, match=re.escape(error_pattern)):
IncrementalDataset(str(tmp_path), DATASET, checkpoint=checkpoint_config)
@pytest.mark.parametrize(
"pds_config,fs_creds,dataset_creds,checkpoint_creds",
[
(
{"dataset": DATASET, "credentials": {"cred": "common"}},
{"cred": "common"},
{"cred": "common"},
{"cred": "common"},
),
(
{
"dataset": {"type": DATASET, "credentials": {"ds": "only"}},
"credentials": {"cred": "common"},
},
{"cred": "common"},
{"ds": "only"},
{"cred": "common"},
),
(
{
"dataset": DATASET,
"credentials": {"cred": "common"},
"checkpoint": {"credentials": {"cp": "only"}},
},
{"cred": "common"},
{"cred": "common"},
{"cp": "only"},
),
(
{
"dataset": {"type": DATASET, "credentials": {"ds": "only"}},
"checkpoint": {"credentials": {"cp": "only"}},
},
{},
{"ds": "only"},
{"cp": "only"},
),
(
{
"dataset": {"type": DATASET, "credentials": None},
"credentials": {"cred": "common"},
"checkpoint": {"credentials": None},
},
{"cred": "common"},
None,
None,
),
],
)
def test_credentials(self, pds_config, fs_creds, dataset_creds, checkpoint_creds):
"""Test correctness of credentials propagation into the dataset and
checkpoint constructors"""
pds = IncrementalDataset(str(Path.cwd()), **pds_config)
assert pds._credentials == fs_creds
assert pds._dataset_config[CREDENTIALS_KEY] == dataset_creds
assert pds._checkpoint_config[CREDENTIALS_KEY] == checkpoint_creds
@pytest.mark.parametrize(
"comparison_func,expected_partitions",
[
(
"tests.io.test_incremental_dataset.dummy_gt_func",
{"p03/data.csv", "p04/data.csv"},
),
(dummy_gt_func, {"p03/data.csv", "p04/data.csv"}),
(
"tests.io.test_incremental_dataset.dummy_lt_func",
{"p00/data.csv", "p01/data.csv"},
),
(dummy_lt_func, {"p00/data.csv", "p01/data.csv"}),
],
)
def test_comparison_func(self, comparison_func, expected_partitions, local_csvs):
"""Test that specifying a custom function for comparing the checkpoint value
to a partition id results in expected partitions being returned on load"""
checkpoint_config = {
"force_checkpoint": "p02/data.csv",
"comparison_func": comparison_func,
}
pds = IncrementalDataset(str(local_csvs), DATASET, checkpoint=checkpoint_config)
assert pds.load().keys() == expected_partitions
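def _example_incremental_flow(data_dir: str):  # pragma: no cover
    """Illustrative sketch, not a test: the load/process/confirm cycle the
    suite above exercises. ``data_dir`` is assumed to hold CSV partitions
    laid out like the ``local_csvs`` fixture."""
    ds = IncrementalDataset(data_dir, DATASET)
    batch = ds.load()  # returns only partitions newer than the checkpoint
    for partition_id, df in batch.items():
        _ = (partition_id, len(df))  # placeholder for real processing
    ds.confirm()  # persist the checkpoint so the next load skips this batch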
BUCKET_NAME = "fake_bucket_name"
@pytest.fixture
def mocked_s3_bucket():
"""Create a bucket for testing using moto."""
with mock_s3():
conn = boto3.client(
"s3",
region_name="us-east-1",
aws_access_key_id="fake_access_key",
aws_secret_access_key="fake_secret_key",
)
conn.create_bucket(Bucket=BUCKET_NAME)
yield conn
@pytest.fixture
def mocked_csvs_in_s3(mocked_s3_bucket, partitioned_data_pandas):
prefix = "csvs"
for key, data in partitioned_data_pandas.items():
mocked_s3_bucket.put_object(
Bucket=BUCKET_NAME,
Key=f"{prefix}/{key}",
Body=data.to_csv(index=False),
)
return f"s3://{BUCKET_NAME}/{prefix}"
class TestIncrementalDatasetS3:
os.environ["AWS_ACCESS_KEY_ID"] = "FAKE_ACCESS_KEY"
os.environ["AWS_SECRET_ACCESS_KEY"] = "FAKE_SECRET_KEY"
def test_load_and_confirm(self, mocked_csvs_in_s3, partitioned_data_pandas):
"""Test the standard flow for loading, confirming and reloading
        an IncrementalDataset in S3"""
pds = IncrementalDataset(mocked_csvs_in_s3, DATASET)
assert pds._checkpoint._protocol == "s3"
loaded = pds.load()
assert loaded.keys() == partitioned_data_pandas.keys()
for partition_id, data in loaded.items():
assert_frame_equal(data, partitioned_data_pandas[partition_id])
assert not pds._checkpoint.exists()
assert pds._read_checkpoint() is None
pds.confirm()
assert pds._checkpoint.exists()
assert pds._read_checkpoint() == max(partitioned_data_pandas)
def test_load_and_confirm_s3a(
self, mocked_csvs_in_s3, partitioned_data_pandas, mocker
):
s3a_path = f"s3a://{mocked_csvs_in_s3.split('://', 1)[1]}"
pds = IncrementalDataset(s3a_path, DATASET)
assert pds._protocol == "s3a"
assert pds._checkpoint._protocol == "s3"
mocked_ds = mocker.patch.object(pds, "_dataset_type")
mocked_ds.__name__ = "mocked"
loaded = pds.load()
assert loaded.keys() == partitioned_data_pandas.keys()
assert not pds._checkpoint.exists()
assert pds._read_checkpoint() is None
pds.confirm()
assert pds._checkpoint.exists()
assert pds._read_checkpoint() == max(partitioned_data_pandas)
@pytest.mark.parametrize(
"forced_checkpoint,expected_partitions",
[
(
"",
{
"p00/data.csv",
"p01/data.csv",
"p02/data.csv",
"p03/data.csv",
"p04/data.csv",
},
),
(
"p00/data.csv",
{"p01/data.csv", "p02/data.csv", "p03/data.csv", "p04/data.csv"},
),
("p03/data.csv", {"p04/data.csv"}),
],
)
def test_force_checkpoint_no_checkpoint_file(
self, forced_checkpoint, expected_partitions, mocked_csvs_in_s3
):
"""Test how forcing checkpoint value affects the available partitions
in S3 if the checkpoint file does not exist"""
pds = IncrementalDataset(
mocked_csvs_in_s3, DATASET, checkpoint=forced_checkpoint
)
loaded = pds.load()
assert loaded.keys() == expected_partitions
assert not pds._checkpoint.exists()
pds.confirm()
assert pds._checkpoint.exists()
assert pds._checkpoint.load() == max(expected_partitions)
@pytest.mark.parametrize(
"forced_checkpoint,expected_partitions",
[
(
"",
{
"p00/data.csv",
"p01/data.csv",
"p02/data.csv",
"p03/data.csv",
"p04/data.csv",
},
),
(
"p00/data.csv",
{"p01/data.csv", "p02/data.csv", "p03/data.csv", "p04/data.csv"},
),
("p03/data.csv", {"p04/data.csv"}),
],
)
def test_force_checkpoint_checkpoint_file_exists(
self, forced_checkpoint, expected_partitions, mocked_csvs_in_s3
):
"""Test how forcing checkpoint value affects the available partitions
in S3 if the checkpoint file exists"""
# create checkpoint and assert that it exists
IncrementalDataset(mocked_csvs_in_s3, DATASET).confirm()
checkpoint_path = (
f"{mocked_csvs_in_s3}/{IncrementalDataset.DEFAULT_CHECKPOINT_FILENAME}"
)
checkpoint_value = TextDataSet(checkpoint_path).load()
assert checkpoint_value == "p04/data.csv"
pds = IncrementalDataset(
mocked_csvs_in_s3, DATASET, checkpoint=forced_checkpoint
)
assert pds._checkpoint.exists()
loaded = pds.load()
assert loaded.keys() == expected_partitions
@pytest.mark.parametrize(
"forced_checkpoint", ["p04/data.csv", "p10/data.csv", "p100/data.csv"]
)
def test_force_checkpoint_no_partitions(self, forced_checkpoint, mocked_csvs_in_s3):
"""Test that forcing the checkpoint to certain values results in no
partitions returned from S3"""
pds = IncrementalDataset(
mocked_csvs_in_s3, DATASET, checkpoint=forced_checkpoint
)
loaded = pds.load()
assert loaded == {}
assert not pds._checkpoint.exists()
pds.confirm()
# confirming with no partitions available must have no effect
assert not pds._checkpoint.exists()
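def _example_s3_checkpoint_config():  # pragma: no cover
    """Illustrative sketch, not a test: an S3-backed IncrementalDataset with
    an explicit checkpoint configuration. The bucket, prefix and credentials
    are placeholders."""
    ds = IncrementalDataset(
        "s3://my-bucket/csvs",
        DATASET,
        credentials={"key": "...", "secret": "..."},
        checkpoint={"filepath": "s3://my-bucket/csvs/.checkpoint"},
    )
    return ds.load()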
|
d493c5c413a7d77cf2c63422924af6c04f52d3d1
|
54485e624e28529ff066d6d1ecab620c2ec14f36
|
/tests/test_outcome.py
|
f6f8500798025ec75eb0341eaa73549d884df587
|
[
"MIT"
] |
permissive
|
ucfopen/canvasapi
|
c63be9655672a1cd1f51028c2619523224af80ec
|
02d42cba3b0fd22e780ac0a5e904ea84fbc0b58d
|
refs/heads/develop
| 2023-08-19T03:08:34.936379
| 2023-05-25T14:46:58
| 2023-05-25T14:46:58
| 73,851,042
| 503
| 183
|
MIT
| 2023-06-15T06:25:10
| 2016-11-15T20:09:41
|
Python
|
UTF-8
|
Python
| false
| false
| 19,528
|
py
|
test_outcome.py
|
import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.outcome import Outcome, OutcomeGroup, OutcomeLink
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestOutcome(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris(
{
"course": ["get_by_id"],
"outcome": [
"account_root_outcome_group",
"canvas_root_outcome_group",
"course_root_outcome_group",
"course_outcome_links_in_context",
"outcome_example",
],
},
m,
)
self.course = self.canvas.get_course(1)
self.course_outcome_links = self.course.get_all_outcome_links_in_context()
self.example_outcome = self.course_outcome_links[0].get_outcome()
# __str__()
def test__str__(self, m):
string = str(self.example_outcome)
self.assertIsInstance(string, str)
# update()
def test_update(self, m):
register_uris({"outcome": ["outcome_update"]}, m)
self.assertEqual(self.example_outcome.title, "Outcome Show Example")
result = self.example_outcome.update(title="new_title")
self.assertTrue(result)
self.assertIsInstance(self.example_outcome, Outcome)
self.assertEqual(self.example_outcome.title, "new_title")
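def _example_outcome_update():  # pragma: no cover
    """Illustrative sketch, not a test: the same Outcome calls mocked above,
    run against a real Canvas instance. The URL and API key are placeholders."""
    canvas = Canvas("https://example.instructure.com", "<API_KEY>")
    course = canvas.get_course(1)
    link = course.get_all_outcome_links_in_context()[0]
    outcome = link.get_outcome()
    outcome.update(title="Renamed outcome")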
@requests_mock.Mocker()
class TestOutcomeLink(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris(
{
"account": ["get_by_id"],
"course": ["get_by_id"],
"outcome": [
"account_outcome_links_in_context",
"course_outcome_links_in_context",
],
},
m,
)
self.account = self.canvas.get_account(1)
self.account_outcome_links = self.account.get_all_outcome_links_in_context()
self.course = self.canvas.get_course(1)
self.course_outcome_links = self.course.get_all_outcome_links_in_context()
# __str__()
def test__str__(self, m):
register_uris({"outcome": ["course_outcome_links_in_context"]}, m)
string = str(self.course_outcome_links[0])
self.assertIsInstance(string, str)
# get_outcome()
def test_get_outcome(self, m):
register_uris(
{"outcome": ["outcome_example", "course_outcome_links_in_context"]}, m
)
result = self.course_outcome_links[0].get_outcome()
self.assertIsInstance(result, Outcome)
# get_outcome_group()
def test_get_outcome_group(self, m):
register_uris(
{
"outcome": [
"outcome_group_example_account",
"account_outcome_links_in_context",
"outcome_group_example_course",
"course_outcome_links_in_context",
]
},
m,
)
result = self.course_outcome_links[0].get_outcome_group()
self.assertIsInstance(result, OutcomeGroup)
result = self.account_outcome_links[0].get_outcome_group()
self.assertIsInstance(result, OutcomeGroup)
@requests_mock.Mocker()
class TestOutcomeGroup(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris(
{
"account": ["get_by_id"],
"course": ["get_by_id"],
"outcome": [
"account_root_outcome_group",
"canvas_root_outcome_group",
"course_root_outcome_group",
"course_outcome_links_in_context",
"outcome_example",
],
},
m,
)
self.canvas_outcome_group = self.canvas.get_root_outcome_group()
self.account = self.canvas.get_account(1)
self.account_outcome_group = self.account.get_root_outcome_group()
self.account_outcome_groups = self.account.get_outcome_groups_in_context()
self.account_outcome_links = self.account.get_all_outcome_links_in_context()
self.course = self.canvas.get_course(1)
self.course_outcome_group = self.course.get_root_outcome_group()
self.course_outcome_groups = self.course.get_outcome_groups_in_context()
self.course_outcome_links = self.course.get_all_outcome_links_in_context()
self.example_outcome = self.course_outcome_links[0].get_outcome()
# __str__()
def test__str__(self, m):
string = str(self.canvas_outcome_group)
self.assertIsInstance(string, str)
# update()
def test_update(self, m):
register_uris(
{
"outcome": [
"outcome_group_update_global",
"outcome_group_update_account",
"outcome_group_update_course",
]
},
m,
)
new_title = "New Outcome Group Title"
self.assertEqual(self.account_outcome_group.title, "ROOT")
result = self.account_outcome_group.update(title=new_title)
self.assertTrue(result)
self.assertIsInstance(self.account_outcome_group, OutcomeGroup)
self.assertEqual(self.account_outcome_group.title, new_title)
self.assertEqual(self.canvas_outcome_group.title, "ROOT")
result = self.canvas_outcome_group.update(title=new_title)
self.assertTrue(result)
self.assertIsInstance(self.canvas_outcome_group, OutcomeGroup)
self.assertEqual(self.canvas_outcome_group.title, new_title)
self.assertEqual(self.course_outcome_group.title, "ROOT")
result = self.course_outcome_group.update(title=new_title)
self.assertTrue(result)
self.assertIsInstance(self.course_outcome_group, OutcomeGroup)
self.assertEqual(self.course_outcome_group.title, new_title)
# delete()
def test_delete(self, m):
register_uris(
{
"outcome": [
"outcome_group_delete_global",
"outcome_group_delete_account",
"outcome_group_delete_course",
]
},
m,
)
self.assertEqual(self.account_outcome_group.title, "ROOT")
result = self.account_outcome_group.delete()
self.assertTrue(result)
self.assertEqual(self.canvas_outcome_group.title, "ROOT")
result = self.canvas_outcome_group.delete()
self.assertTrue(result)
self.assertEqual(self.course_outcome_group.title, "ROOT")
result = self.course_outcome_group.delete()
self.assertTrue(result)
# get_linked_outcomes()
def test_get_linked_outcomes(self, m):
register_uris(
{
"outcome": [
"outcome_group_list_linked_outcomes_account",
"outcome_group_list_linked_outcomes_global",
"outcome_group_list_linked_outcomes_courses",
]
},
m,
)
result = self.account_outcome_group.get_linked_outcomes()
self.assertIsInstance(result[0], OutcomeLink)
self.assertEqual(result[0].outcome_group["id"], 2)
self.assertEqual(result[0].outcome_group["title"], "Account Test Outcome Group")
result = self.canvas_outcome_group.get_linked_outcomes()
self.assertIsInstance(result[0], OutcomeLink)
self.assertEqual(result[0].outcome_group["id"], 2)
self.assertEqual(result[0].outcome_group["title"], "Global Test Outcome Group")
result = self.course_outcome_group.get_linked_outcomes()
self.assertIsInstance(result[0], OutcomeLink)
self.assertEqual(result[0].outcome_group["id"], 2)
self.assertEqual(result[0].outcome_group["title"], "Course Test Outcome Group")
# link_existing()
def test_link_existing(self, m):
register_uris(
{
"outcome": [
"outcome_example",
"outcome_group_link_existing_global",
"outcome_group_link_existing_account",
"outcome_group_link_existing_course",
]
},
m,
)
result = self.canvas_outcome_group.link_existing(self.example_outcome)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
result = self.account_outcome_group.link_existing(self.example_outcome)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
result = self.course_outcome_group.link_existing(self.example_outcome)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
result = self.canvas_outcome_group.link_existing(3)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
result = self.account_outcome_group.link_existing(3)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
result = self.course_outcome_group.link_existing(3)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 2)
# link_new()
def test_link_new(self, m):
register_uris(
{
"outcome": [
"outcome_group_link_new_global",
"outcome_group_link_new_account",
"outcome_group_link_new_course",
]
},
m,
)
new_title = "New Outcome"
result = self.canvas_outcome_group.link_new(title=new_title)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 1)
self.assertEqual(result.outcome["id"], 2)
self.assertEqual(result.outcome["context_type"], None)
result = self.account_outcome_group.link_new(title=new_title)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 1)
self.assertEqual(result.outcome["id"], 2)
self.assertEqual(result.outcome["context_type"], "Account")
result = self.course_outcome_group.link_new(title=new_title)
self.assertIsInstance(result, OutcomeLink)
self.assertEqual(result.outcome_group["id"], 1)
self.assertEqual(result.outcome["id"], 2)
self.assertEqual(result.outcome["context_type"], "Course")
# unlink_outcome()
def test_unlink_outcome(self, m):
register_uris(
{
"outcome": [
"outcome_example",
"outcome_group_unlink_outcome_global",
"outcome_group_unlink_outcome_account",
"outcome_group_unlink_outcome_course",
]
},
m,
)
result = self.canvas_outcome_group.unlink_outcome(self.example_outcome)
self.assertTrue(result)
result = self.account_outcome_group.unlink_outcome(self.example_outcome)
self.assertTrue(result)
result = self.course_outcome_group.unlink_outcome(self.example_outcome)
self.assertTrue(result)
result = self.canvas_outcome_group.unlink_outcome(3)
self.assertTrue(result)
result = self.account_outcome_group.unlink_outcome(3)
self.assertTrue(result)
result = self.course_outcome_group.unlink_outcome(3)
self.assertTrue(result)
# get_subgroups()
def test_get_subgroups(self, m):
register_uris(
{
"outcome": [
"outcome_group_list_subgroups_global",
"outcome_group_list_subgroups_account",
"outcome_group_list_subgroups_course",
]
},
m,
)
result = self.canvas_outcome_group.get_subgroups()
self.assertIsInstance(result[0], OutcomeGroup)
self.assertEqual(result[0].id, 2)
self.assertEqual(result[0].title, "Global Listed Subgroup Title 1")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, None)
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, None)
self.assertIsInstance(result[1], OutcomeGroup)
self.assertEqual(result[1].id, 3)
self.assertEqual(result[1].title, "Global Listed Subgroup Title 2")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, None)
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, None)
result = self.account_outcome_group.get_subgroups()
self.assertIsInstance(result[0], OutcomeGroup)
self.assertEqual(result[0].id, 2)
self.assertEqual(result[0].title, "Account Listed Subgroup Title 1")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, "Account")
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, self.account.id)
self.assertIsInstance(result[1], OutcomeGroup)
self.assertEqual(result[1].id, 3)
self.assertEqual(result[1].title, "Account Listed Subgroup Title 2")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, "Account")
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, self.account.id)
result = self.course_outcome_group.get_subgroups()
self.assertIsInstance(result[0], OutcomeGroup)
self.assertEqual(result[0].id, 2)
self.assertEqual(result[0].title, "Course Listed Subgroup Title 1")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, "Course")
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, self.course.id)
self.assertIsInstance(result[1], OutcomeGroup)
self.assertEqual(result[1].id, 3)
self.assertEqual(result[1].title, "Course Listed Subgroup Title 2")
self.assertTrue(hasattr(result[0], "context_type"))
self.assertEqual(result[0].context_type, "Course")
self.assertTrue(hasattr(result[0], "context_id"))
self.assertEqual(result[0].context_id, self.course.id)
# create_subgroup()
def test_create_subgroup(self, m):
register_uris(
{
"outcome": [
"outcome_group_create_subgroup_global",
"outcome_group_create_subgroup_account",
"outcome_group_create_subgroup_course",
]
},
m,
)
new_title = "New Subgroup Title"
result = self.canvas_outcome_group.create_subgroup(new_title)
self.assertEqual(
self.canvas_outcome_group.id, result.parent_outcome_group["id"]
)
self.assertEqual(result.parent_outcome_group["title"], "Parent of Subgroup")
self.assertEqual(result.title, "New Subgroup Title")
result = self.account_outcome_group.create_subgroup(new_title)
self.assertEqual(
self.canvas_outcome_group.id, result.parent_outcome_group["id"]
)
self.assertEqual(result.parent_outcome_group["title"], "Parent of Subgroup")
self.assertEqual(result.title, "New Subgroup Title")
result = self.course_outcome_group.create_subgroup(new_title)
self.assertEqual(
self.canvas_outcome_group.id, result.parent_outcome_group["id"]
)
self.assertEqual(result.parent_outcome_group["title"], "Parent of Subgroup")
self.assertEqual(result.title, "New Subgroup Title")
# import_outcome_group()
def test_import_outcome_group(self, m):
register_uris(
{
"outcome": [
"outcome_group_import_outcome_group_global",
"outcome_group_import_outcome_group_account",
"outcome_group_import_outcome_group_course",
]
},
m,
)
result = self.canvas_outcome_group.import_outcome_group(3)
self.assertEqual(result.id, 4)
self.assertEqual(result.title, "Global Imported Subgroup Title")
self.assertEqual(
result.parent_outcome_group["id"], self.canvas_outcome_group.id
)
self.assertEqual(
result.parent_outcome_group["title"], self.canvas_outcome_group.title
)
result = self.account_outcome_group.import_outcome_group(3)
self.assertEqual(result.id, 4)
self.assertEqual(result.title, "Account Imported Subgroup Title")
self.assertEqual(
result.parent_outcome_group["id"], self.account_outcome_group.id
)
self.assertEqual(
result.parent_outcome_group["title"], self.account_outcome_group.title
)
result = self.course_outcome_group.import_outcome_group(3)
self.assertEqual(result.id, 4)
self.assertEqual(result.title, "Course Imported Subgroup Title")
self.assertEqual(
result.parent_outcome_group["id"], self.course_outcome_group.id
)
self.assertEqual(
result.parent_outcome_group["title"], self.course_outcome_group.title
)
result_by_obj = self.course_outcome_group.import_outcome_group(result)
self.assertEqual(result_by_obj.id, 4)
self.assertEqual(result_by_obj.title, "Course Imported Subgroup Title")
self.assertEqual(
result_by_obj.parent_outcome_group["id"], self.course_outcome_group.id
)
self.assertEqual(
result_by_obj.parent_outcome_group["title"], self.course_outcome_group.title
)
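def _example_group_tree():  # pragma: no cover
    """Illustrative sketch, not a test: building a small outcome-group tree
    with the calls exercised above. The URL and API key are placeholders."""
    canvas = Canvas("https://example.instructure.com", "<API_KEY>")
    root = canvas.get_course(1).get_root_outcome_group()
    subgroup = root.create_subgroup("Quarter 1")
    subgroup.link_new(title="New Outcome")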
@requests_mock.Mocker()
class TestOutcomeResult(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris(
{
"course": ["get_by_id"],
"outcome": ["outcome_example", "outcome_result_example"],
},
m,
)
self.course = self.canvas.get_course(1)
self.course_outcome_results = self.course.get_outcome_results()
self.outcome_result_example = self.course_outcome_results[0]
# __str__()
def test__str__(self, m):
string = str(self.outcome_result_example)
self.assertIsInstance(string, str)
|
31bc04de12a99fe8b72cc2fa76942b434a9fdca5
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/vue_cinemas.py
|
d1344a089a9e427fc91a1f7ff90806f8b32d91a8
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
vue_cinemas.py
|
from scrapy.spiders import SitemapSpider
from locations.google_url import extract_google_position
from locations.items import Feature
from locations.spiders.vapestore_gb import clean_address
class VueCinemasSpider(SitemapSpider):
name = "vue_cinemas"
item_attributes = {"brand": "Vue", "brand_wikidata": "Q2535134"}
sitemap_urls = ["https://www.myvue.com/sitemap.xml"]
sitemap_rules = [(r"/getting-here$", "parse")]
def parse(self, response, **kwargs):
item = Feature()
item["ref"] = response.xpath("//@data-selected-locationid").get()
item["name"] = response.xpath("//@data-selected-locationname").get()
item["website"] = response.url.replace("/getting-here", "")
cinema = response.xpath('//div[@data-scroll-id="cinema-details"]')
address_parts = cinema.xpath('.//img[@alt="location-pin"]/../text()').getall()
if not address_parts:
address_parts = cinema.xpath(
'.//div[contains(@data-page-url, "/getting-here")]/following-sibling::div//div[@class="container container--scroll"]/div/p/text()'
).getall()
item["addr_full"] = clean_address(address_parts)
extract_google_position(item, cinema)
yield item
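if __name__ == "__main__":  # pragma: no cover
    # Illustrative local run, assuming a standard alltheplaces checkout where
    # scrapy can resolve this spider by name; the output path is a placeholder.
    from scrapy.cmdline import execute
    execute(["scrapy", "crawl", "vue_cinemas", "-O", "vue_cinemas.geojson"])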
|
9953a1454756bc48cb92b218638c12cf30429ee5
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/orchestration/v1/_proxy.py
|
27200f2e5531d75bf3c48e87c89823a1f0f55291
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 22,746
|
py
|
_proxy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack.orchestration.util import template_utils
from openstack.orchestration.v1 import resource as _resource
from openstack.orchestration.v1 import software_config as _sc
from openstack.orchestration.v1 import software_deployment as _sd
from openstack.orchestration.v1 import stack as _stack
from openstack.orchestration.v1 import stack_environment as _stack_environment
from openstack.orchestration.v1 import stack_files as _stack_files
from openstack.orchestration.v1 import stack_template as _stack_template
from openstack.orchestration.v1 import template as _template
from openstack import proxy
from openstack import resource
class Proxy(proxy.Proxy):
_resource_registry = {
"resource": _resource.Resource,
"software_config": _sc.SoftwareConfig,
"software_deployment": _sd.SoftwareDeployment,
"stack": _stack.Stack,
"stack_environment": _stack_environment.StackEnvironment,
"stack_files": _stack_files.StackFiles,
"stack_template": _stack_template.StackTemplate,
}
def _extract_name_consume_url_parts(self, url_parts):
if (
len(url_parts) == 3
and url_parts[0] == 'software_deployments'
and url_parts[1] == 'metadata'
):
            # This endpoint uses a different URL naming scheme
            # (/software_deployments/metadata/<server_id>); replace the
            # server_id segment with 'metadata' so the generic logic below works
            return ['software_deployment', 'metadata']
if (
url_parts[0] == 'stacks'
and len(url_parts) > 2
and not url_parts[2] in ['preview', 'resources']
):
            # Orchestration embeds both the stack name and the id in the URL
            # (/stacks/<name>/<id>/everything_else), so if the third segment
            # is not a known keyword, discard it so as not to break the
            # generic logic below
            del url_parts[2]
return super(Proxy, self)._extract_name_consume_url_parts(url_parts)
def read_env_and_templates(
self,
template_file=None,
template_url=None,
template_object=None,
files=None,
environment_files=None,
):
"""Read templates and environment content and prepares
corresponding stack attributes
:param string template_file: Path to the template.
:param string template_url: URL of template.
:param string template_object: URL to retrieve template object.
:param dict files: dict of additional file content to include.
:param environment_files: Paths to environment files to apply.
:returns: Attributes dict to be set on the
:class:`~openstack.orchestration.v1.stack.Stack`
:rtype: dict
"""
stack_attrs = dict()
envfiles = dict()
tpl_files = None
if environment_files:
(
envfiles,
env,
) = template_utils.process_multiple_environments_and_files(
env_paths=environment_files
)
stack_attrs['environment'] = env
if template_file or template_url or template_object:
tpl_files, template = template_utils.get_template_contents(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
)
stack_attrs['template'] = template
if tpl_files or envfiles:
stack_attrs['files'] = dict(
list(tpl_files.items()) + list(envfiles.items())
)
return stack_attrs
def create_stack(self, preview=False, **attrs):
"""Create a new stack from attributes
:param bool preview: When ``True``, a preview endpoint will be used to
verify the template
*Default: ``False``*
:param dict attrs: Keyword arguments which will be used to create
a :class:`~openstack.orchestration.v1.stack.Stack`,
comprised of the properties on the Stack class.
:returns: The results of stack creation
:rtype: :class:`~openstack.orchestration.v1.stack.Stack`
"""
base_path = None if not preview else '/stacks/preview'
return self._create(_stack.Stack, base_path=base_path, **attrs)
def find_stack(
self, name_or_id, ignore_missing=True, resolve_outputs=True
):
"""Find a single stack
:param name_or_id: The name or ID of a stack.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
        :param resolve_outputs: Whether the stack should have its outputs
            resolved.
        :returns: One :class:`~openstack.orchestration.v1.stack.Stack` or None
"""
return self._find(
_stack.Stack,
name_or_id,
ignore_missing=ignore_missing,
resolve_outputs=resolve_outputs,
)
def stacks(self, **query):
"""Return a generator of stacks
:param kwargs query: Optional query parameters to be sent to limit
the resources being returned.
:returns: A generator of stack objects
:rtype: :class:`~openstack.orchestration.v1.stack.Stack`
"""
return self._list(_stack.Stack, **query)
def get_stack(self, stack, resolve_outputs=True):
"""Get a single stack
:param stack: The value can be the ID of a stack or a
:class:`~openstack.orchestration.v1.stack.Stack` instance.
:param resolve_outputs: Whether stack should contain outputs resolved.
:returns: One :class:`~openstack.orchestration.v1.stack.Stack`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
return self._get(_stack.Stack, stack, resolve_outputs=resolve_outputs)
def update_stack(self, stack, preview=False, **attrs):
"""Update a stack
:param stack: The value can be the ID of a stack or a
:class:`~openstack.orchestration.v1.stack.Stack` instance.
:param kwargs attrs: The attributes to update on the stack
represented by ``value``.
:returns: The updated stack
:rtype: :class:`~openstack.orchestration.v1.stack.Stack`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
res = self._get_resource(_stack.Stack, stack, **attrs)
return res.update(self, preview)
def delete_stack(self, stack, ignore_missing=True):
"""Delete a stack
:param stack: The value can be either the ID of a stack or a
:class:`~openstack.orchestration.v1.stack.Stack`
instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the stack does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent stack.
:returns: ``None``
"""
self._delete(_stack.Stack, stack, ignore_missing=ignore_missing)
def check_stack(self, stack):
"""Check a stack's status
Since this is an asynchronous action, the only way to check the result
is to track the stack's status.
:param stack: The value can be either the ID of a stack or an instance
of :class:`~openstack.orchestration.v1.stack.Stack`.
:returns: ``None``
"""
if isinstance(stack, _stack.Stack):
stk_obj = stack
else:
stk_obj = _stack.Stack.existing(id=stack)
stk_obj.check(self)
def abandon_stack(self, stack):
"""Abandon a stack's without deleting it's resources
:param stack: The value can be either the ID of a stack or an instance
of :class:`~openstack.orchestration.v1.stack.Stack`.
:returns: ``None``
"""
res = self._get_resource(_stack.Stack, stack)
return res.abandon(self)
def get_stack_template(self, stack):
"""Get template used by a stack
:param stack: The value can be the ID of a stack or an instance of
:class:`~openstack.orchestration.v1.stack.Stack`
:returns: One object of
:class:`~openstack.orchestration.v1.stack_template.StackTemplate`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when no resource can be found.
"""
if isinstance(stack, _stack.Stack):
obj = stack
else:
obj = self._find(_stack.Stack, stack, ignore_missing=False)
return self._get(
_stack_template.StackTemplate,
requires_id=False,
stack_name=obj.name,
stack_id=obj.id,
)
def get_stack_environment(self, stack):
"""Get environment used by a stack
:param stack: The value can be the ID of a stack or an instance of
:class:`~openstack.orchestration.v1.stack.Stack`
:returns: One object of
:class:`~openstack.orchestration.v1.stack_environment.StackEnvironment`
:raises: :class:`~openstack.exceptions.ResourceNotFound` when no
resource can be found.
"""
if isinstance(stack, _stack.Stack):
obj = stack
else:
obj = self._find(_stack.Stack, stack, ignore_missing=False)
return self._get(
_stack_environment.StackEnvironment,
requires_id=False,
stack_name=obj.name,
stack_id=obj.id,
)
def get_stack_files(self, stack):
"""Get files used by a stack
:param stack: The value can be the ID of a stack or an instance of
:class:`~openstack.orchestration.v1.stack.Stack`
:returns: A dictionary containing the names and contents of all files
used by the stack.
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when the stack cannot be found.
"""
if isinstance(stack, _stack.Stack):
stk = stack
else:
stk = self._find(_stack.Stack, stack, ignore_missing=False)
obj = _stack_files.StackFiles(stack_name=stk.name, stack_id=stk.id)
return obj.fetch(self)
def resources(self, stack, **query):
"""Return a generator of resources
:param stack: This can be a stack object, or the name of a stack
for which the resources are to be listed.
:param kwargs query: Optional query parameters to be sent to limit
the resources being returned.
:returns: A generator of resource objects if the stack exists and
there are resources in it. If the stack cannot be found,
an exception is thrown.
:rtype: A generator of
:class:`~openstack.orchestration.v1.resource.Resource`
:raises: :class:`~openstack.exceptions.ResourceNotFound`
when the stack cannot be found.
"""
# first try treat the value as a stack object or an ID
if isinstance(stack, _stack.Stack):
obj = stack
else:
obj = self._find(_stack.Stack, stack, ignore_missing=False)
return self._list(
_resource.Resource, stack_name=obj.name, stack_id=obj.id, **query
)
def create_software_config(self, **attrs):
"""Create a new software config from attributes
:param dict attrs: Keyword arguments which will be used to create a
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`,
comprised of the properties on the SoftwareConfig class.
:returns: The results of software config creation
:rtype:
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`
"""
return self._create(_sc.SoftwareConfig, **attrs)
def software_configs(self, **query):
"""Returns a generator of software configs
:param dict query: Optional query parameters to be sent to limit the
software configs returned.
:returns: A generator of software config objects.
:rtype:
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`
"""
return self._list(_sc.SoftwareConfig, **query)
def get_software_config(self, software_config):
"""Get details about a specific software config.
:param software_config: The value can be the ID of a software config
            or an instance of
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`,
:returns: An object of type
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`
"""
return self._get(_sc.SoftwareConfig, software_config)
def delete_software_config(self, software_config, ignore_missing=True):
"""Delete a software config
:param software_config: The value can be either the ID of a software
config or an instance of
:class:`~openstack.orchestration.v1.software_config.SoftwareConfig`
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the software config does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent software config.
:returns: ``None``
"""
self._delete(
_sc.SoftwareConfig, software_config, ignore_missing=ignore_missing
)
def create_software_deployment(self, **attrs):
"""Create a new software deployment from attributes
:param dict attrs: Keyword arguments which will be used to create a
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`,
comprised of the properties on the SoftwareDeployment class.
:returns: The results of software deployment creation
:rtype:
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
"""
return self._create(_sd.SoftwareDeployment, **attrs)
def software_deployments(self, **query):
"""Returns a generator of software deployments
:param dict query: Optional query parameters to be sent to limit the
software deployments returned.
:returns: A generator of software deployment objects.
:rtype:
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
"""
return self._list(_sd.SoftwareDeployment, **query)
def get_software_deployment(self, software_deployment):
"""Get details about a specific software deployment resource
:param software_deployment: The value can be the ID of a software
            deployment or an instance of
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`,
:returns: An object of type
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
"""
return self._get(_sd.SoftwareDeployment, software_deployment)
def delete_software_deployment(
self, software_deployment, ignore_missing=True
):
"""Delete a software deployment
:param software_deployment: The value can be either the ID of a
software deployment or an instance of
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the software deployment does not exist.
When set to ``True``, no exception will be set when
attempting to delete a nonexistent software deployment.
:returns: ``None``
"""
self._delete(
_sd.SoftwareDeployment,
software_deployment,
ignore_missing=ignore_missing,
)
def update_software_deployment(self, software_deployment, **attrs):
"""Update a software deployment
        :param software_deployment: Either the ID of a software deployment or an instance of
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
:param dict attrs: The attributes to update on the software deployment
represented by ``software_deployment``.
:returns: The updated software deployment
:rtype:
:class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment`
"""
return self._update(
_sd.SoftwareDeployment, software_deployment, **attrs
)
def validate_template(
self, template, environment=None, template_url=None, ignore_errors=None
):
"""Validates a template.
:param template: The stack template on which the validation is
performed.
:param environment: A JSON environment for the stack, if provided.
:param template_url: A URI to the location containing the stack
template for validation. This parameter is only
required if the ``template`` parameter is None.
This parameter is ignored if ``template`` is
specified.
:param ignore_errors: A string containing comma separated error codes
to ignore. Currently the only valid error code
is '99001'.
:returns: The result of template validation.
:raises: :class:`~openstack.exceptions.InvalidRequest` if neither
            `template` nor `template_url` is provided.
:raises: :class:`~openstack.exceptions.HttpException` if the template
fails the validation.
"""
if template is None and template_url is None:
raise exceptions.InvalidRequest(
"'template_url' must be specified when template is None"
)
tmpl = _template.Template.new()
return tmpl.validate(
self,
template,
environment=environment,
template_url=template_url,
ignore_errors=ignore_errors,
)
def wait_for_status(
self, res, status='ACTIVE', failures=None, interval=2, wait=120
):
"""Wait for a resource to be in a particular status.
:param res: The resource to wait on to reach the specified status.
The resource must have a ``status`` attribute.
:type resource: A :class:`~openstack.resource.Resource` object.
:param status: Desired status.
:param failures: Statuses that would be interpreted as failures.
:type failures: :py:class:`list`
        :param interval: Number of seconds to wait between consecutive
            checks. Defaults to 2.
        :param wait: Maximum number of seconds to wait for the transition.
            Defaults to 120.
:returns: The resource is returned on success.
        :raises: :class:`~openstack.exceptions.ResourceTimeout` if the
            transition to the desired status does not occur within ``wait``
            seconds.
:raises: :class:`~openstack.exceptions.ResourceFailure` if the resource
            has transitioned to one of the failure statuses.
:raises: :class:`~AttributeError` if the resource does not have a
``status`` attribute.
"""
failures = [] if failures is None else failures
return resource.wait_for_status(
self, res, status, failures, interval, wait
)
def wait_for_delete(self, res, interval=2, wait=120):
"""Wait for a resource to be deleted.
:param res: The resource to wait on to be deleted.
:type resource: A :class:`~openstack.resource.Resource` object.
        :param interval: Number of seconds to wait between consecutive
            checks. Defaults to 2.
        :param wait: Maximum number of seconds to wait for the deletion.
            Defaults to 120.
:returns: The resource is returned on success.
        :raises: :class:`~openstack.exceptions.ResourceTimeout` if the
            deletion does not occur within ``wait`` seconds.
"""
return resource.wait_for_delete(self, res, interval, wait)
def get_template_contents(
self,
template_file=None,
template_url=None,
template_object=None,
files=None,
):
try:
return template_utils.get_template_contents(
template_file=template_file,
template_url=template_url,
template_object=template_object,
files=files,
)
except Exception as e:
raise exceptions.SDKException(
"Error in processing template files: %s" % str(e)
)
def _get_cleanup_dependencies(self):
return {
'orchestration': {'before': ['compute', 'network', 'identity']}
}
def _service_cleanup(
self,
dry_run=True,
client_status_queue=None,
identified_resources=None,
filters=None,
resource_evaluation_fn=None,
skip_resources=None,
):
if self.should_skip_resource_cleanup("stack", skip_resources):
return
stacks = []
for obj in self.stacks():
need_delete = self._service_cleanup_del_res(
self.delete_stack,
obj,
dry_run=dry_run,
client_status_queue=client_status_queue,
identified_resources=identified_resources,
filters=filters,
resource_evaluation_fn=resource_evaluation_fn,
)
if not dry_run and need_delete:
stacks.append(obj)
for stack in stacks:
self.wait_for_delete(stack)
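def _example_stack_lifecycle():  # pragma: no cover
    """Illustrative sketch, not part of the proxy API: driving the calls
    above through a Connection. The cloud name and template path are
    placeholders."""
    import openstack
    conn = openstack.connect(cloud="mycloud")
    attrs = conn.orchestration.read_env_and_templates(template_file="stack.yaml")
    stack = conn.orchestration.create_stack(name="demo", **attrs)
    conn.orchestration.wait_for_status(
        stack, status="CREATE_COMPLETE", failures=["CREATE_FAILED"], wait=600
    )
    conn.orchestration.delete_stack(stack)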
|
8f39ffe2118c6b5dd76908ff1a9113973ccfc7d3
|
d1062421aed9448f583d1a084a073f12a5ac74e2
|
/sequana/enrichment/plot_go_terms.py
|
f27fc44b839a0fd4ce1f498f1fdb516c728ace18
|
[
"BSD-3-Clause"
] |
permissive
|
sequana/sequana
|
9d89cea55cf6987b832351ac35d34742620ce64a
|
8717094493d1993debd079f324c540541dece70f
|
refs/heads/main
| 2023-08-01T04:02:27.864027
| 2023-07-12T13:12:37
| 2023-07-12T13:12:37
| 53,329,678
| 155
| 41
|
BSD-3-Clause
| 2023-09-13T12:39:01
| 2016-03-07T14:00:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 16,481
|
py
|
plot_go_terms.py
|
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2021 - Sequana Development Team
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import colorlog
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
logger = colorlog.getLogger(__name__)
__all__ = ["UniProtEnrichment"]
class PlotGOTerms:
"""Used by :class:`sequana.enrichment.panther.PantherEnrichment`
and :class:`sequana.enrichment.panther.UniprotEnrichment`
"""
def __init__(self):
# will be defined by the parent class
self.enrichment = {}
self.gene_sets = {}
self.df_genes = None
def get_data(self, category, ontologies, include_negative_enrichment=True, fdr=0.05):
"""
        From all input GO terms that have been found and stored in
        enrichment[ONTOLOGY]['result'], keep those with fdr below the given
        threshold and exclude UNCLASSIFIED entries. The final dataframe is returned
::
pe.get_data("up", "MF")
"""
if isinstance(ontologies, str):
ontologies = [ontologies]
else:
assert isinstance(ontologies, list)
if category not in self.enrichment:
logger.warning(f"Category {category} not found. Have you called compute_enrichment ?")
return
# First, we select the required ontologies and build a common data set
all_data = []
for ontology in ontologies:
if ontology not in self.enrichment[category]:
logger.warning(f"Ontology {ontology} not found. Have you called compute_enrichment ?")
return
data = self.enrichment[category][ontology]["result"]
data["ontology"] = ontology
all_data.append(data)
df = pd.concat(all_data, axis=0)
if len(df) == 0:
return df
else:
logger.info("Found {} GO terms".format(len(df)))
logger.info("Found {} GO terms with at least 1 gene in reference".format(len(df)))
df.rename({"P-value": "pValue"}, axis=1, inplace=True)
df.rename({"Odds Ratio": "fold_enrichment"}, axis=1, inplace=True)
df.rename({"Adjusted P-value": "fdr"}, axis=1, inplace=True)
df["id"] = df["Term"]
# extract the ID and label
df["label"] = df["description"]
"""The field **number_in_reference** indicates from the reference, the number
of genes that have a given ontology term. For instance, 998 genes have
the term. This is stored in **number_in_reference**. If the reference
contains 4391 genes, and you provided 49
genes , the **expected** number of genes that have this ontology term is
49*998/4391 that is 11.1369, which is stored in **"expected**.
"""
nl = []
terms = df["Term"]
ontologies = df["ontology"]
for term, ontology in zip(terms, ontologies):
nl.append(len(self.gene_sets[ontology][term]))
df["number_in_reference"] = nl
df["number_in_list"] = len(df["Genes"])
df["total_genes"] = len(self.df_genes)
logger.warning("fold enrichment currently set to log10(adjusted pvalue)")
df["fold_enrichment"] = -pylab.log10(df["pValue"])
df["log2_fold_enrichment"] = pylab.log2(df["fold_enrichment"])
df["abs_log2_fold_enrichment"] = abs(pylab.log2(df["fold_enrichment"]))
# filter out FDR>0.05
df = df.query("fdr<=@fdr").copy()
logger.info("Found {} GO terms after keeping only FDR<{}".format(len(df), fdr))
return df
def _get_plot_go_terms_data(
self,
category,
ontologies=None,
max_features=50,
minimum_genes=0,
pvalue=0.05,
sort_by="fold_enrichment",
fdr_threshold=0.05,
include_negative_enrichment=False,
compute_levels=False,
):
if ontologies is None:
ontologies = {"MF", "BP", "CC"}
assert sort_by in ["pValue", "fold_enrichment", "fdr"]
df = self.get_data(
category,
ontologies,
include_negative_enrichment=include_negative_enrichment,
fdr=fdr_threshold,
)
if df is None or len(df) == 0:
return None, None
# df stores the entire data set
# subdf will store the subset (max of n_features, and add dummy values)
df = df.query("pValue<=@pvalue")
df = df.reset_index(drop=True)
subdf = df.copy()
logger.debug("Filtering out the 3 parent terms")
to_ignore = {"GO:0003674", "GO:0008150", "GO:0005575"}
subdf = subdf.query("id not in @to_ignore")
df = df.query("id not in @to_ignore")
if subdf is None or len(subdf) == 0:
return df, subdf
# Keeping only a part of the data, sorting by pValue
if sort_by in ["pValue", "fdr"]:
subdf = subdf.sort_values(by=sort_by, ascending=False).iloc[-max_features:]
df = df.sort_values(by=sort_by, ascending=False)
elif sort_by == "fold_enrichment":
subdf = subdf.sort_values(by="abs_log2_fold_enrichment", ascending=True).iloc[-max_features:]
df = df.sort_values(by="abs_log2_fold_enrichment", ascending=False)
subdf = subdf.reset_index(drop=True)
# We get all levels for each go id. They are stored by MF, CC or BP
subdf["level"] = ""
if compute_levels:
paths = self._get_graph(list(subdf["id"].values), ontologies=ontologies)
if paths:
levels = []
keys = list(paths.keys())
# FIXME this part is flaky. What would happen if the levels are
# different if several keys are found ? We use the last one...
goid_levels = paths[keys[0]]
if len(keys) > 1:
for k in keys[1:]:
goid_levels.update(paths[k])
                # FIXME: in rare cases, ids are not found by _get_graph();
                # add them back here with a dummy level.
for ID in subdf['id'].values:
if ID not in goid_levels:
goid_levels[ID] = 10
levels = [goid_levels[ID] for ID in subdf["id"].values]
subdf["level"] = levels
return df, subdf
def plot_go_terms(
self,
category,
ontologies=None,
max_features=50,
log=False,
fontsize=9,
minimum_genes=0,
pvalue=0.05,
cmap="summer_r",
sort_by="fold_enrichment",
show_pvalues=False,
include_negative_enrichment=False,
fdr_threshold=0.05,
compute_levels=True,
progress=True,
):
df, subdf = self._get_plot_go_terms_data(
category,
ontologies=ontologies,
max_features=max_features,
minimum_genes=minimum_genes,
pvalue=pvalue,
include_negative_enrichment=include_negative_enrichment,
sort_by=sort_by,
fdr_threshold=fdr_threshold,
compute_levels=compute_levels,
)
if df is None or subdf is None:
return
# now, for the subdf, which is used to plot the results, we add dummy
# rows to make the yticks range scale nicer.
        M = 10  # pad the plot to at least this many rows
datum = subdf.iloc[-1].copy()
datum.fdr = 0
datum.number_in_list = 0
datum.fold_enrichment = 1
datum.label = ""
datum["id"] = ""
datum["level"] = ""
        while len(subdf) < M:
subdf = pd.concat([datum.to_frame().T, subdf], axis=0)
# here, we try to figure out a proper layout
N = len(subdf)
size_factor = 10000 / len(subdf)
max_size = subdf.number_in_list.max()
# ignore the dummy values
min_size = min([x for x in subdf.number_in_list.values if x != 0])
        # Here we define a marker size for each GO entry. For the dummy
        # entries, the int(bool(x)) factor forces the size to zero so they
        # are not shown.
sizes = [
max(max_size * 0.2, x) * int(bool(x))
for x in size_factor * subdf.number_in_list.values / subdf.number_in_list.max()
]
m1 = min([x for x in sizes if x != 0])
m3 = max(sizes)
m2 = m1 + (m3 - m1) / 2
        # The plot itself; stretch the figure when there are many features.
if len(subdf) > 25:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(8)
else:
fig = pylab.figure(num=1)
fig.set_figwidth(10)
fig.set_figheight(6)
pylab.clf()
if log:
pylab.scatter(
[pylab.log2(x) if x else 0 for x in subdf.fold_enrichment],
range(len(subdf)),
c=subdf.fdr,
s=sizes,
cmap=cmap,
alpha=0.8,
ec="k",
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
else:
pylab.scatter(
subdf.fold_enrichment,
range(len(subdf)),
c=subdf.fdr,
cmap=cmap,
s=sizes,
ec="k",
alpha=0.8,
vmin=0,
vmax=fdr_threshold,
zorder=10,
)
# set color bar height
pylab.grid(zorder=-10)
ax2 = pylab.colorbar(shrink=0.5)
ax2.ax.set_ylabel("FDR")
# define the labels
max_label_length = 45
labels = [x if len(x) < max_label_length else x[0 : max_label_length - 3] + "..." for x in list(subdf.label)]
ticks = []
for level, ID, label in zip(subdf["level"], subdf.id, labels):
if ID:
if level:
ticks.append(f"{ID} ({level}) ; {label.title()}")
else:
ticks.append(f"{ID} ; {label.title()}")
else:
ticks.append("")
        pylab.yticks(range(N), ticks, fontsize=fontsize, ha="left")
yax = pylab.gca().get_yaxis()
        try:
            pad = [x.label1.get_window_extent().width for x in yax.majorTicks]
            yax.set_tick_params(pad=max(pad))
        except Exception:
            # fall back to a heuristic padding when tick extents are unavailable
            yax.set_tick_params(pad=60 * fontsize * 0.7)
        # deal with the x-axis now: what is the range?
fc_max = subdf.fold_enrichment.max(skipna=True)
fc_min = subdf.fold_enrichment.min(skipna=True)
# go into log2 space
fc_max = pylab.log2(fc_max)
fc_min = pylab.log2(fc_min)
abs_max = max(fc_max, abs(fc_min), 1)
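        # abs_max is in log2 units; on a linear axis we convert back with
        # 2**abs_max before padding the upper limit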
if log:
fc_max = abs_max * 1.5
else:
fc_max = 2**abs_max * 1.2
pylab.axvline(0, color="k", lw=2)
if log:
pylab.xlabel("Fold Enrichment (log2)")
else:
pylab.xlabel("Fold Enrichment")
        # deal with fold changes below 0
if include_negative_enrichment:
pylab.xlim([-fc_max, fc_max])
else:
pylab.xlim([0, fc_max])
pylab.tight_layout()
# The pvalues:
if show_pvalues:
ax = pylab.gca().twiny()
# ax.set_xlim([0, max(-pylab.log10(subdf.pValue))*1.2])
pvalues = [-pylab.log10(pv) if pv > 0 else 0 for pv in subdf.pValue]
ax.set_xlim([0, max(pvalues) * 1.2])
ax.set_xlabel("p-values (log10)", fontsize=12)
ax.plot(pvalues, range(len(subdf)), label="pvalue", lw=2, color="k")
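            # 1.33 ~ -log10(0.05): dashed reference line for the 5% level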
ax.axvline(1.33, lw=1, ls="--", color="grey", label="pvalue=0.05")
pylab.tight_layout()
pylab.legend(loc="lower right")
# now, let us add a legend
s1 = pylab.scatter([], [], s=m1, marker="o", color="#555555", ec="k")
s2 = pylab.scatter([], [], s=m2, marker="o", color="#555555", ec="k")
s3 = pylab.scatter([], [], s=m3, marker="o", color="#555555", ec="k")
if len(subdf) <= 10:
labelspacing = 1.5 * 2
borderpad = 1.5
handletextpad = 2
elif len(subdf) < 20:
labelspacing = 1.5 * 2
borderpad = 1
handletextpad = 2
else:
labelspacing = 1.5
borderpad = 2
handletextpad = 2
# get back the dataframe without the dummies
subdf = subdf.query("number_in_list>0")
if len(subdf) >= 3:
leg = pylab.legend(
(s1, s2, s3),
(
str(int(min_size)),
str(int(min_size + (max_size - min_size) / 2)),
str(int(max_size)),
),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
elif len(subdf) >= 2:
leg = pylab.legend(
(s1, s3),
(str(int(min_size)), str(int(max_size))),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
else:
leg = pylab.legend(
(s1,),
(str(int(min_size)),),
scatterpoints=1,
loc="lower right",
ncol=1,
frameon=True,
title="gene-set size",
labelspacing=labelspacing,
borderpad=borderpad,
handletextpad=handletextpad,
fontsize=8,
)
frame = leg.get_frame()
frame.set_facecolor("#b4aeae")
frame.set_edgecolor("black")
frame.set_alpha(1)
return df
def save_chart(self, df, filename="chart.png"):
self.quick_go_graph.save_chart(df, filename)
def _get_graph(self, df, ontologies):
return self.quick_go_graph._get_graph(df, ontologies=ontologies)
def _get_go_description(self, goids):
return self.quick_go_graph.get_go_description(goids)
def _get_data(self, category, ontologies, include_negative_enrichment=True, fdr=0.05):
"""
From all input GO term that have been found and stored in
enrichment[ONTOLOGY]['result'], we keep those with fdr<0.05. We also
exclude UNCLASSIFIED entries. The final dataframe is returned
::
pe.get_data("up", "MF")
"""
if isinstance(ontologies, str):
ontologies = [ontologies]
else:
assert isinstance(ontologies, list)
if category not in self.enrichment:
logger.warning(f"Category {category} not found. Have you called compute_enrichment ?")
return
# First, we select the required ontologies and build a common data set
all_data = []
for ontology in ontologies:
if ontology not in self.enrichment[category]:
logger.warning(f"Ontology {ontology} not found. Have you called compute_enrichment ?")
return
data = self.enrichment[category][ontology]["result"]
data["ontology"] = ontology
all_data.append(data)
df = pd.concat(all_data, axis=0)
        if len(df) == 0:
            return df
        logger.info("Found {} GO terms".format(len(df)))
return df
|
cbec670b567899eb9f908bddce9a05a7c875659f
|
bed3ac926beac0f4e0293303d7b2a6031ee476c9
|
/Modules/Core/Mesh/wrapping/test/itkMeshArrayPixelTypeTest.py
|
f18b498c0441ccb961d3730ddac25553cf333f78
|
[
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"SMLNJ",
"BSD-3-Clause",
"BSD-4.3TAHOE",
"LicenseRef-scancode-free-unknown",
"Spencer-86",
"LicenseRef-scancode-llnl",
"FSFUL",
"Libpng",
"libtiff",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-hdf5",
"MIT",
"NTP",
"LicenseRef-scancode-mit-old-style",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MPL-2.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
InsightSoftwareConsortium/ITK
|
ed9dbbc5b8b3f7511f007c0fc0eebb3ad37b88eb
|
3eb8fd7cdfbc5ac2d0c2e5e776848a4cbab3d7e1
|
refs/heads/master
| 2023-08-31T17:21:47.754304
| 2023-08-31T00:58:51
| 2023-08-31T14:12:21
| 800,928
| 1,229
| 656
|
Apache-2.0
| 2023-09-14T17:54:00
| 2010-07-27T15:48:04
|
C++
|
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
itkMeshArrayPixelTypeTest.py
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
import numpy as np
Dimension = 3
PixelType = itk.Array.D
NumberOfPoints = 10
PixelDataSize = 5
MeshType = itk.Mesh[PixelType, Dimension]
mesh = MeshType.New()
# Create a VectorContainer and store pixel values in it for each point.
# The identifier type depends on the available wrapping: use itk.UL when
# the (unsigned long, Array[double]) instantiation exists, otherwise fall
# back to itk.ULL (e.g. on Windows, where unsigned long is 32-bit).
if hasattr(itk.VectorContainer, "ULAD"):
    IdentifierType = itk.UL
else:
    IdentifierType = itk.ULL
v = itk.VectorContainer[IdentifierType, PixelType].New()
v.Reserve(NumberOfPoints)
for i in range(NumberOfPoints):
pixel_data_reference = v.CreateElementAt(i)
pixel_data_reference.SetSize(PixelDataSize)
pixel_data_reference.Fill(0)
pixel_data_reference[0] = i
pixel_data_reference[4] = i + 4
# Set the point data container
mesh.SetPointData(v)
assert mesh.GetPointData().Size() == NumberOfPoints
assert mesh.GetPointData().ElementAt(0)[0] == 0
assert mesh.GetPointData().ElementAt(0)[4] == 4
assert mesh.GetPointData().ElementAt(2)[0] == 2 + 0
assert mesh.GetPointData().ElementAt(2)[4] == 2 + 4
# resize the PixelDataSize to see if it can be altered successfully
PixelDataSize = 10
for i in range(NumberOfPoints):
pixel_data_reference = v.CreateElementAt(i)
pixel_data_reference.SetSize(PixelDataSize)
pixel_data_reference.Fill(0)
pixel_data_reference[0] = i
pixel_data_reference[9] = i + 10
assert mesh.GetPointData().Size() == NumberOfPoints
assert mesh.GetPointData().ElementAt(0)[0] == 0
assert mesh.GetPointData().ElementAt(0)[9] == 10
assert mesh.GetPointData().ElementAt(2)[0] == 2 + 0
assert mesh.GetPointData().ElementAt(2)[9] == 2 + 10
|
ccf86655111a6473f5a1ece300f641f803fe0a5e
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py
|
0a36716e87718052b208c853c01bcacaccde00de
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,550
|
py
|
test_auto_dtiestim.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..diffusion import dtiestim
def test_dtiestim_inputs():
input_map = dict(
B0=dict(
argstr="--B0 %s",
hash_files=False,
),
B0_mask_output=dict(
argstr="--B0_mask_output %s",
hash_files=False,
),
DTI_double=dict(
argstr="--DTI_double ",
),
args=dict(
argstr="%s",
),
bad_region_mask=dict(
argstr="--bad_region_mask %s",
extensions=None,
),
brain_mask=dict(
argstr="--brain_mask %s",
extensions=None,
),
correction=dict(
argstr="--correction %s",
),
defaultTensor=dict(
argstr="--defaultTensor %s",
sep=",",
),
dwi_image=dict(
argstr="--dwi_image %s",
extensions=None,
),
environ=dict(
nohash=True,
usedefault=True,
),
idwi=dict(
argstr="--idwi %s",
hash_files=False,
),
method=dict(
argstr="--method %s",
),
shiftNeg=dict(
argstr="--shiftNeg ",
),
shiftNegCoeff=dict(
argstr="--shiftNegCoeff %f",
),
sigma=dict(
argstr="--sigma %f",
),
step=dict(
argstr="--step %f",
),
tensor_output=dict(
argstr="--tensor_output %s",
hash_files=False,
),
threshold=dict(
argstr="--threshold %d",
),
verbose=dict(
argstr="--verbose ",
),
weight_iterations=dict(
argstr="--weight_iterations %d",
),
)
inputs = dtiestim.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_dtiestim_outputs():
output_map = dict(
B0=dict(
extensions=None,
),
B0_mask_output=dict(
extensions=None,
),
idwi=dict(
extensions=None,
),
tensor_output=dict(
extensions=None,
),
)
outputs = dtiestim.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
242aa5607cdfc90ad387377c09442872cebe85cf
|
81a0a8218a45edcc8f295de5d41a3fd29cdc3ce6
|
/switch_model/hawaii/switch_patch.py
|
d0a85aa1fc7905e5901d6c1e1486788042933ecf
|
[
"Apache-2.0"
] |
permissive
|
switch-model/switch
|
af5ea212a141d97613ef1f13e550ee898fa352da
|
35bd3596a031fac7891f762cc87af610ded13615
|
refs/heads/master
| 2023-06-19T17:57:22.298285
| 2022-11-01T23:56:15
| 2022-11-01T23:56:15
| 33,576,546
| 114
| 81
|
NOASSERTION
| 2023-01-29T17:30:41
| 2015-04-08T00:59:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
switch_patch.py
|
from pyomo.environ import *
def define_components(m):
"""Make various changes to the model to support hawaii-specific modules."""
# # TODO: combine the following changes into a pull request for Pyomo
# # patch Pyomo's table-reading function to allow .csv files with headers but no data
# import os, re
# def new_tab_read(self):
# if not os.path.exists(self.filename):
# raise IOError("Cannot find file '%s'" % self.filename)
# self.FILE = open(self.filename, 'r')
# try:
# tmp=[]
# for line in self.FILE:
# line=line.strip()
# tokens = re.split("[,\t ]+",line)
# if tokens != ['']:
# tmp.append(tokens)
# if len(tmp) == 0:
# raise IOError("Empty *.csv file")
# else: # removed strange special handling for one-row files
# self._set_data(tmp[0], tmp[1:])
# except:
# raise
# finally:
# self.FILE.close()
# self.FILE = None
# from pyomo.core.plugins.data.text import TextTable
# TextTable.read = new_tab_read
#
# try:
# import inspect
# import pyomo.core.data.process_data
# pp_code = inspect.getsource(pyomo.core.data.process_data._process_param)
# start = pp_code.find('if singledef:', 0, 2000)
# if start < 0:
# raise RuntimeError('unable to find singledef statement')
# # patch to allow command to have no more arguments at this point (i.e., no data)
# srch, repl = 'if cmd[0] == "(tr)":', 'if cmd and cmd[0] == "(tr)":'
# start = pp_code.find(srch, start, start + 500)
# if start < 0:
# raise RuntimeError('unable to find (tr) statement')
# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):]
# # patch next line for the same reason
# srch, repl = 'if cmd[0] != ":":', 'if not cmd or cmd[0] != ":":'
# start = pp_code.find(srch, start, start + 500)
# if start < 0:
# raise RuntimeError('unable to find ":" statement')
# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):]
# # compile code to a function in the process_data module
# exec(pp_code, vars(pyomo.core.data.process_data))
# except Exception as e:
# print "Unable to patch current version of pyomo.core.data.process_data:"
# print '{}({})'.format(type(e).__name__, ','.join(repr(a) for a in e.args))
# print "Switch will not be able to read empty data files."
|
652eee0eddf6d587965f54d7ba0b0b85664f235d
|
21f35d6b81c94bd1ed07b923482c1a9e17423d4c
|
/sphinx_gallery/tests/test_load_style.py
|
713b03057b95887d207028c63d93c737fee13b36
|
[] |
permissive
|
sphinx-gallery/sphinx-gallery
|
06378c2ecedf9cb306b3958327a1ba294c8d6725
|
4e298a6ccee1c4ff8b33cd65371127118f626032
|
refs/heads/master
| 2023-08-17T06:30:59.195322
| 2023-08-15T16:00:25
| 2023-08-15T16:00:25
| 25,860,190
| 382
| 235
|
BSD-3-Clause
| 2023-09-12T15:29:13
| 2014-10-28T08:41:46
|
Python
|
UTF-8
|
Python
| false
| false
| 680
|
py
|
test_load_style.py
|
import os
import pytest
@pytest.mark.conf_file(extensions=["sphinx_gallery.load_style"])
def test_load_style(sphinx_app_wrapper):
"""Testing that style loads properly."""
sphinx_app = sphinx_app_wrapper.build_sphinx_app()
cfg = sphinx_app.config
assert cfg.project == "Sphinx-Gallery <Tests>"
build_warn = sphinx_app._warning.getvalue()
assert build_warn == ""
index_html = os.path.join(sphinx_app_wrapper.outdir, "index.html")
assert os.path.isfile(index_html)
with open(index_html) as fid:
content = fid.read()
assert (
'link rel="stylesheet" type="text/css" href="_static/sg_gallery.css' in content
) # noqa: E501
|
285e4fd662ef696aab4910008930fd12d4ecd076
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowIpOspfDatabaseDatabaseSummary/cli/equal/golden_output_expected.py
|
5ce27ae3693943997662c3141fcfc94eaf284378
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
golden_output_expected.py
|
expected_output = {
"instance": {
"1": {
"router_id": "1.1.1.1",
"area_summary": {
"0": {
"router": {"count": 2, "delete": 0, "maxage": 0},
"network": {"count": 1, "delete": 0, "maxage": 0},
"summary_net": {"count": 44, "delete": 0, "maxage": 0},
"summary_asbr": {"count": 1, "delete": 0, "maxage": 0},
"type_7_ext": {"count": 0, "delete": 0, "maxage": 0},
"prefixes_redist_type_7": 0,
"opaque_link": {"count": 0, "delete": 0, "maxage": 0},
"opaque_area": {"count": 0, "delete": 0, "maxage": 0},
"subtotal": {"count": 48, "delete": 0, "maxage": 0},
},
"1": {
"router": {"count": 23, "delete": 0, "maxage": 0},
"network": {"count": 2, "delete": 0, "maxage": 0},
"summary_net": {"count": 23, "delete": 0, "maxage": 0},
"summary_asbr": {"count": 0, "delete": 0, "maxage": 0},
"type_7_ext": {"count": 0, "delete": 0, "maxage": 0},
"prefixes_redist_type_7": 0,
"opaque_link": {"count": 0, "delete": 0, "maxage": 0},
"opaque_area": {"count": 0, "delete": 0, "maxage": 0},
"subtotal": {"count": 48, "delete": 0, "maxage": 0},
},
},
"process_summary": {
"1": {
"router": {"count": 25, "delete": 0, "maxage": 0},
"network": {"count": 3, "delete": 0, "maxage": 0},
"summary_net": {"count": 67, "delete": 0, "maxage": 0},
"summary_asbr": {"count": 1, "delete": 0, "maxage": 0},
"type_7_ext": {"count": 0, "delete": 0, "maxage": 0},
"opaque_link": {"count": 0, "delete": 0, "maxage": 0},
"opaque_area": {"count": 0, "delete": 0, "maxage": 0},
"type_5_ext": {"count": 0, "delete": 0, "maxage": 0},
"prefixes_redist_type_5": 0,
"opaque_as": {"count": 0, "delete": 0, "maxage": 0},
"total": {"count": 96, "delete": 0, "maxage": 0},
"non_self": 44,
}
},
}
}
}
|
0d89bc4d5fd6bf79e6ee59923397bb1d048afcae
|
79a6b3a5bb0c1c7b733502a06cdd2d4971ad9687
|
/Eclipse Projects/Section 3/strategies/auto_correlation.py
|
18f18d010dee13181c3ef09bf224871996d69fb5
|
[
"MIT"
] |
permissive
|
PacktPublishing/Machine-Learning-for-Algorithmic-Trading-Bots-with-Python
|
ebc15a6814f1b09bb232f97551d98cd947bd6a04
|
c01f1799cc85baaada1dd13b1fe91d885cfd69a3
|
refs/heads/master
| 2023-02-04T16:18:44.990198
| 2023-01-30T09:31:10
| 2023-01-30T09:31:10
| 160,665,937
| 361
| 170
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,150
|
py
|
auto_correlation.py
|
from zipline.api import order, symbol, record
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
class AutoCorrelation:
stocks = ['APL', 'MSFT', 'TSLA']
lag = 1
steps = 3
def initialize(self, context):
context.has_ordered = False
context.stocks = self.stocks
context.asset = symbol(self.stocks[-1])
def handle_data(self, context, data):
# loop stocks in portfolio
for stock in context.stocks:
# fetch history up to steps
s1 = data.history(
symbol(stock),
'price',
bar_count=self.steps,
frequency='1d')
# fetch history up to steps + lag and drop extra values
s2 = data.history(
symbol(stock),
'price',
bar_count=self.steps+self.lag,
frequency='1d').iloc[:-1 * self.lag]
# convert to numpy arrays
np_s1 = np.array(s1.values)
np_s2 = np.array(s2.values)
# calculate auto-correlation
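            # pearsonr returns (correlation, two-sided p-value); despite its
            # name, 'hypothesis' below holds the p-value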
corr, hypothesis = pearsonr(np_s1, np_s2)
# fetch our basket status
cpp = context.portfolio.positions
# map basket to symbol:shares pairs
cpp_symbols = map(lambda x: x.symbol, cpp)
# what is today price
curr_price = data.current(symbol(stock), 'price')
# what was yesterday closing price
last_price = data.history(
symbol(stock),
'price',
bar_count=2,
frequency='1d').iloc[0:1].values
# go short or long positions
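            # trade only on strong negative autocorrelation (mean reversion);
            # the p-value filter of 0.85 is a very loose significance gate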
if corr < -0.75 and hypothesis < 0.85:
# is stock falling? exit position
if curr_price < last_price:
order(symbol(stock), -1 * cpp[symbol(stock)].amount)
# is stock rising? enter position
elif curr_price > last_price:
order(symbol(stock), 1000)
record(ASSETME=data.current(context.asset, 'price'))
record(CORR=corr)
def _test_args(self):
return {
'start': pd.Timestamp('2017', tz='utc'),
'end': pd.Timestamp('2018', tz='utc'),
'capital_base': 1e7
}
def analyze(self, context, perf):
# init figure
fig = plt.figure()
# plot stock price
ax1 = fig.add_subplot(211)
perf['ASSETME'].plot(ax=ax1)
ax1.set_ylabel('price in $')
# plot correlation
ax2 = fig.add_subplot(212)
perf['CORR'].plot(ax=ax2)
ax2.set_ylabel('correlation')
# plot confidence levels
ax2.axhline(0.75, linestyle='dashed', color='k')
ax2.axhline(0, linestyle='dashed', color='b')
ax2.axhline(-0.75, linestyle='dashed', color='k')
# add spacing between plots
plt.subplots_adjust(hspace=1)
# display plot
plt.show()
|
21f8ab78d08217d548425e7f73b933f6ce5f7083
|
5327af5248f88082c81eaf6d0fcf5359cb7e4670
|
/src/libtmux/exc.py
|
0023cb3eb764e5efbb9f84ffdc9c4cb64973db3c
|
[
"MIT"
] |
permissive
|
tmux-python/libtmux
|
f272ee69d3c435f79972ed92441bee7244a13d0b
|
83a19fe78922f6e231172659a136db58ba232040
|
refs/heads/master
| 2023-09-06T01:40:40.500776
| 2023-09-03T10:07:39
| 2023-09-03T10:07:39
| 59,409,425
| 613
| 63
|
MIT
| 2023-05-23T06:57:08
| 2016-05-22T11:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
exc.py
|
"""libtmux exceptions.
libtmux.exc
~~~~~~~~~~~
"""
import typing as t
from libtmux._internal.query_list import ObjectDoesNotExist
if t.TYPE_CHECKING:
from libtmux.neo import ListExtraArgs
class LibTmuxException(Exception):
"""Base Exception for libtmux Errors."""
class TmuxSessionExists(LibTmuxException):
"""Session does not exist in the server."""
class TmuxCommandNotFound(LibTmuxException):
"""Application binary for tmux not found."""
class TmuxObjectDoesNotExist(ObjectDoesNotExist):
"""The query returned multiple objects when only one was expected."""
def __init__(
self,
obj_key: t.Optional[str] = None,
obj_id: t.Optional[str] = None,
list_cmd: t.Optional[str] = None,
list_extra_args: "t.Optional[ListExtraArgs]" = None,
*args: object,
):
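        # Build a detailed message only when every piece of context
        # (key, id, command, extra args) was supplied; otherwise fall
        # back to a generic message.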
if all(arg is not None for arg in [obj_key, obj_id, list_cmd, list_extra_args]):
return super().__init__(
f"Could not find {obj_key}={obj_id} for {list_cmd} "
f'{list_extra_args if list_extra_args is not None else ""}'
)
return super().__init__("Could not find object")
class VersionTooLow(LibTmuxException):
"""Raised if tmux below the minimum version to use libtmux."""
class BadSessionName(LibTmuxException):
"""Disallowed session name for tmux (empty, contains periods or colons)."""
def __init__(
self, reason: str, session_name: t.Optional[str] = None, *args: object
):
msg = f"Bad session name: {reason}"
if session_name is not None:
msg += f" (session name: {session_name})"
return super().__init__(msg)
class OptionError(LibTmuxException):
"""Root error for any error involving invalid, ambiguous or bad options."""
class UnknownOption(OptionError):
"""Option unknown to tmux show-option(s) or show-window-option(s)."""
class UnknownColorOption(UnknownOption):
"""Unknown color option."""
def __init__(self, *args: object):
return super().__init__("Server.colors must equal 88 or 256")
class InvalidOption(OptionError):
"""Option invalid to tmux, introduced in tmux v2.4."""
class AmbiguousOption(OptionError):
"""Option that could potentially match more than one."""
class WaitTimeout(LibTmuxException):
    """Function timed out without meeting the condition."""
class VariableUnpackingError(LibTmuxException):
    """Error unpacking a variable."""
def __init__(self, variable: t.Optional[t.Any] = None, *args: object):
return super().__init__(f"Unexpected variable: {variable!s}")
class PaneError(LibTmuxException):
    """Any type of pane-related error."""
class PaneNotFound(PaneError):
    """Pane not found."""
def __init__(self, pane_id: t.Optional[str] = None, *args: object):
if pane_id is not None:
return super().__init__(f"Pane not found: {pane_id}")
return super().__init__("Pane not found")
class WindowError(LibTmuxException):
    """Any type of window-related error."""
class MultipleActiveWindows(WindowError):
    """Multiple active windows."""
def __init__(self, count: int, *args: object):
return super().__init__(f"Multiple active windows: {count} found")
class NoActiveWindow(WindowError):
"""No active window found"""
def __init__(self, *args: object):
return super().__init__("No active windows found")
class NoWindowsExist(WindowError):
"""No windows exist for object"""
def __init__(self, *args: object):
return super().__init__("No windows exist for object")
|
3fa7ec2db08405d80efa582bccc31a102d30de54
|
e94df9dbed523a66b497928c2bf948f52349c319
|
/examples/dedup_deterministic.py
|
14db0176abaef00f40171bcdcfee212be4e72c9e
|
[
"BSD-3-Clause"
] |
permissive
|
J535D165/recordlinkage
|
dc12f44c3fe1f96ece2da5acd4d9050ec5b92c9f
|
b93d97641952f8c85106be5794ca93b1f1298fbc
|
refs/heads/master
| 2023-09-06T09:53:30.028866
| 2023-07-20T12:59:41
| 2023-07-20T12:59:41
| 44,471,657
| 830
| 165
|
BSD-3-Clause
| 2023-07-20T12:53:13
| 2015-10-18T09:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
dedup_deterministic.py
|
"""Example: Deterministic record linkage to find links in a single file.
In deterministic record linkage, each compared attribute gets a certain
weight (coefficient). The higher the weight, the more discriminating the
variable is. A low weight indicates a less discriminating variable. For
example, the given name has a higher weight than the hometown.
This example uses the FEBRL3 dataset, which contains records about
individuals.
Deterministic RL parameters are:
intercept = -9.5
coefficients = [2.0, 3.0, 7.0, 6.0, 2.5, 5.0, 5.5]
"""
import recordlinkage as rl
from recordlinkage.compare import Exact
from recordlinkage.compare import String
from recordlinkage.datasets import load_febrl3
from recordlinkage.index import Block
# set logging
rl.logging.set_verbosity(rl.logging.INFO)
# load dataset
print("Loading data...")
dfA, true_links = load_febrl3(return_links=True)
print(len(dfA), "records in dataset A")
print(len(true_links), "links in dataset A")
# start indexing
print("Build index...")
indexer = rl.Index()
indexer.add(Block("given_name"))
indexer.add(Block("surname"))
indexer.add(Block("soc_sec_id"))
candidate_links = indexer.index(dfA)
# start comparing
print("Start comparing...")
comparer = rl.Compare()
comparer.add(Exact("given_name", "given_name", label="given_name"))
comparer.add(
String("surname", "surname", method="jarowinkler", threshold=0.85, label="surname")
)
comparer.add(Exact("date_of_birth", "date_of_birth", label="date_of_birth"))
comparer.add(Exact("suburb", "suburb", label="suburb"))
comparer.add(Exact("state", "state", label="state"))
comparer.add(String("address_1", "address_1", threshold=0.85, label="address_1"))
comparer.add(String("address_2", "address_2", threshold=0.85, label="address_2"))
features = comparer.compute(candidate_links, dfA)
print("feature shape", features.shape)
# use the Logistic Regression Classifier
# this classifier is equivalent to the deterministic record linkage approach
intercept = -9.5
coefficients = [2.0, 3.0, 7.0, 6.0, 2.5, 5.0, 5.5]
print("Deterministic classifier")
print("intercept", intercept)
print("coefficients", coefficients)
logreg = rl.LogisticRegressionClassifier(coefficients=coefficients, intercept=intercept)
links = logreg.predict(features)
print(len(links), "links/matches")
# return the confusion matrix
conf_logreg = rl.confusion_matrix(true_links, links, len(candidate_links))
print("confusion matrix")
print(conf_logreg)
# compute the F-score for this classification
fscore = rl.fscore(conf_logreg)
print("fscore", fscore)
recall = rl.recall(true_links, links)
print("recall", recall)
precision = rl.precision(true_links, links)
print("precision", precision)
|
e9a8c1fe06cbc1bcb2e98fb99ce8b73564371411
|
b8c8a1c26035d3efa424477e3fd0ca2e275562cf
|
/setup.py
|
cfd3d5a95c3735bc39931caf7fa96c5331a5ac83
|
[
"MIT"
] |
permissive
|
crs4/hl7apy
|
f8eecf7b91fb9ab4cfc2eba5099ef3ede55d19dd
|
522e296febb5245d62a970531dc88a1782dba749
|
refs/heads/develop
| 2023-08-13T16:00:05.869949
| 2021-11-11T08:44:13
| 2021-11-11T08:44:13
| 12,003,789
| 197
| 96
|
MIT
| 2023-08-10T20:08:13
| 2013-08-09T15:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,403
|
py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from setuptools import setup
import hl7apy
desc = "HL7apy: a lightweight Python library to parse, create and handle HL7 v2.x messages"
long_desc = """
HL7apy: a lightweight Python library to parse, create and handle HL7 v2.x messages
----------------------------------------------------------------------------------
HL7apy is a lightweight Python package to intuitively handle `HL7 <http://www.hl7.org>`_ v2 messages according to HL7 specifications.
The main features include:
* Message parsing
* Message creation
* Message validation following the HL7 xsd specifications
* Access to elements by name, long name or position
* Support to all simple and complex datatypes
* Encoding chars customization
* Message encoding in ER7 format and compliant with MLLP protocol
"""
def _get_version():
with open('VERSION') as f:
return f.read().strip()
setup(
name='hl7apy',
version=_get_version(),
author=hl7apy.__author__,
author_email=hl7apy.__author_email__,
description=desc,
long_description=long_desc,
url=hl7apy.__url__,
download_url='http://sourceforge.net/projects/hl7apy/files/',
license='MIT License',
keywords=['HL7', 'Health Level 7', 'healthcare', 'python'],
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Healthcare Industry',
'Topic :: Scientific/Engineering',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=['hl7apy', 'hl7apy.v2_1', 'hl7apy.v2_2', 'hl7apy.v2_3', 'hl7apy.v2_3_1', 'hl7apy.v2_4', 'hl7apy.v2_5',
'hl7apy.v2_5_1', 'hl7apy.v2_6', 'hl7apy.v2_7', 'hl7apy.v2_8', 'hl7apy.v2_8_1', 'hl7apy.v2_8_2'],
scripts=['utils/hl7apy_profile_parser'],
test_suite='tests',
)
|
48172a878d234527735d0771b1a58569af2e3ceb
|
5b6ba0f288b1e2ac236af846a9bf546a63228476
|
/prime/postrefine/mod_input.py
|
51fac0c403b70537d3354b4050206d9bbf4efb10
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
cctbx/cctbx_project
|
5b547b416cadbdf95cca21dace9f54272a08d98a
|
7f4dfb6c873fd560920f697cbfd8a5ff6eed82fa
|
refs/heads/master
| 2023-08-17T17:44:05.077010
| 2023-08-16T22:40:22
| 2023-08-16T22:40:22
| 39,508,026
| 206
| 131
|
NOASSERTION
| 2023-09-14T17:12:55
| 2015-07-22T13:36:27
|
Python
|
UTF-8
|
Python
| false
| false
| 20,727
|
py
|
mod_input.py
|
from __future__ import division, print_function, absolute_import
"""read PRIME input"""
#Define exceptions
class ReadInputError(Exception): pass
class InvalidData(ReadInputError): pass
class InvalidCrystalSystem(ReadInputError): pass
class InvalidPixelSize(ReadInputError): pass
class InvalidRunNo(ReadInputError): pass
class InvalidNumberOfResidues(ReadInputError): pass
import iotbx.phil
from libtbx.utils import Usage, Sorry
import sys, os, shutil, glob, tarfile
from six.moves import cPickle as pickle
from six.moves import input
master_phil = iotbx.phil.parse("""
data = None
.type = path
.multiple = True
.help = Directory containing integrated data in pickle format. Repeat to \
specify additional directories.
.style = input_list
run_no = None
.type = path
.help = Run no. is used as folder name that stores output files.
.optional = False
.alias = Output folder
.style = path:folder
title = None
.type = str
.help = Title of the run.
.multiple = False
.alias = Description
icering
.help = "Allowing exclusion of icering."
.alias = Exclude ice rings
.style = grid:auto has_scope_switch
{
flag_on = False
.type = bool
.help = Turn this flag on to allow exclusion of icering.
.style = scope_switch
d_upper = 3.9
.type = float
.help = Minimum resolution.
d_lower = 3.85
.type = float
.help = Maximum resolution.
}
scale
.help = "Parameters used to generate mean-intensity scaled reference set."
.alias = Initial Scaling Parameters
{
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
}
postref
.help = Post-refinement parameters
.alias = Post-refinement Parameters
{
residual_threshold = 5
.type = float
.help = Percent increase in residual allowed during microcycle.
.alias = Residual threshold
residual_threshold_xy = 5
.type = float
.help = Percent increase in residual (xy) allowed during microcycle.
.alias = Residual XY threshold
scale
.help = Scale factors
.style = grid:auto
.alias = Scale factors
{
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
}
crystal_orientation
.help = Crystal orientations
.alias = Crystal Orientation
.style = grid:auto has_scope_switch
.expert_level = 1
{
flag_on = True
.type = bool
.help = Set to False to turn post-refinement in this section off.
.alias = Refine crystal orientation
.style = scope_switch
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
}
reflecting_range
.help = Reflecting range
.alias = Reflecting Range
.style = grid:auto has_scope_switch
.expert_level = 1
{
flag_on = True
.type = bool
.help = Set to False to turn post-refinement in this section off.
.alias = Refine reflecting range
.style = scope_switch
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
}
unit_cell
.help = Unit-cell dimensions
.alias = Unit Cell Dimensions
.style = grid:auto has_scope_switch
.expert_level = 1
{
flag_on = True
.type = bool
.help = Set to False to turn post-refinement in this section off.
.style = scope_switch
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
uc_tolerance = 5
.type = float
.help = Unit-cell tolerance in percent.
.alias = Unit cell tolerance (%)
}
allparams
.help = All parameters
.alias = Refine All Parameters
.style = grid:auto has_scope_switch
.expert_level = 1
{
flag_on = True
.type = bool
.help = Set to True to refine all parameters together.
.style = scope_switch
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
uc_tolerance = 5
.type = float
.help = Unit-cell tolerance in percent.
.alias = Unit cell tolerance (%)
}
}
merge
.help = "Parameters used in merging"
.alias = Merging Parameters
{
d_min = 0.1
.type = float
.help = Minimum resolution.
.alias = High resolution limit
d_max = 99
.type = float
.help = Maximum resolution.
.alias = Low resolution limit
sigma_min = -3.0
.type = float
.help = Minimum I/sigI cutoff.
    .alias = Minimum I/sigI
partiality_min = 0.1
.type = float
.help = Minimum partiality cutoff.
.alias = Minimum partiality
uc_tolerance = 5
.type = float
.help = Unit-cell tolerance in percent.
.alias = Unit cell tolerance (%)
match_resolution = True
.type = bool
.help = GUI only: when True, merging resolution limits will be applied to \
all post-refinement operations
.alias = Use general resolution limits
}
target_unit_cell = None
.type = unit_cell
.help = Target unit-cell parameters are used to discard outlier cells.
.optional = False
.alias = Target Unit Cell
flag_override_unit_cell = False
.type = bool
  .help = Set to True to override the unit cell in observations with the target cell.
target_space_group = None
.type = space_group
.help = Target space group.
.optional = False
.alias = Target Space Group
target_anomalous_flag = False
.type = bool
.help = Target anomalous flag (False = Not anomalous data)
.optional = False
.alias = Anomalous
flag_weak_anomalous = False
.type = bool
.help = Set to True to indicate that you have weak anomalous signal.
target_crystal_system = None Triclinic Monoclinic Orthorhombic Tetragonal Trigonal Hexagonal Cubic
.type = choice
.help = Target crystal system
.optional = True
.alias = Crystal System
n_residues = None
.type = int
.help = No. of amino acid residues.
.alias = No. residues
indexing_ambiguity
.help = "Parameters used in resolving indexing ambiguity"
{
mode = Auto
.type = str
.help = Set to Forced to solve pseudo-twinning.
index_basis_in = None
.type = path
.help = Pickle file with basis solution or an mtz file of an isomorphous structure.
assigned_basis = None
.multiple = True
.type = str
.help = Specify list of basis formats for pseudo-twinning.
d_min = 3.0
.type = float
    .help = In case the given index_basis_in is an mtz file, you can specify the minimum resolution used to calculate correlation with the given mtz file.
d_max = 10.0
.type = float
    .help = In case the given index_basis_in is an mtz file, you can specify the maximum resolution used to calculate correlation with the given mtz file.
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
n_sample_frames = 300
.type = int
.help = No. of frames used in scoring r_matrix. Images (n_selected_frames) with the highest score will be used in the Brehm & Diederichs algorithm.
n_selected_frames = 100
.type = int
.help = No. of frames used in Auto solution mode. The rest of the frame data will be determined against this merged dataset.
}
hklisoin = None
.type = path
.help = Mtz file for the calculation of CCiso
hklrefin = None
.type = path
.help = Mtz file used as a reference in post-refinement.
.expert_level = 1
flag_plot = False
.type = bool
.help = Normal plots.
.expert_level = 1
flag_plot_expert = False
.type = bool
.help = Expert plots.
.expert_level = 2
n_postref_cycle = 3
.type = int
.help = No. of cycles for post-refinement.
n_postref_sub_cycle = 1
.type = int
.help = No. of cycles for the least-squares minimization in post-refinement.
.expert_level = 1
n_rejection_cycle = 1
.type = int
.help = No. of cycles for the outlier rejection.
.expert_level = 1
sigma_rejection = 5
.type = float
.help = Sigma level for outlier rejection.
.expert_level = 1
n_bins = 20
.type = int
.help = No. of bins used to report statistics.
pixel_size_mm = None
.type = float
.help = Pixel size in mm. (MAR = 0.079346)
.optional = False
.alias = Pixel Size
frame_accept_min_cc = 0.25
.type = float
.help = CC cut-off for the rejection of frames before merging.
flag_apply_b_by_frame = True
.type = bool
.help = Set to False to dismiss B-factor checking.
.expert_level = 2
flag_monte_carlo = False
.type = bool
.help = Set to True to turn on Monte-Carlo merge w/o partiality correction. Use n_postref_cycle=0 to output the merged mtz file without post-refinement and partiality correction.
.expert_level = 2
b_refine_d_min = 99
.type = float
.help = Minimum resolution.
.expert_level = 2
partiality_model = Lorentzian
.type = str
.help = Your choice of partiality model: Lorentzian (default), Voigt (in beta test), Lognormal (in beta test).
.expert_level = 2
flag_LP_correction = True
.type = bool
.help = Do polarization correction.
.expert_level = 2
flag_volume_correction = True
.type = bool
.help = Do volume correction.
.expert_level = 2
flag_beam_divergence = False
.type = bool
.help = Default is not to refine beam divergence. Set to True to allow gammaX and gammaY refinement.
.expert_level = 2
n_processors = 32
.type = int
.help = No. of processing units
.optional = True
.alias = No. processors
gamma_e = 0.003
.type = float
.help = Initial spread of the energy spectrum (1/Angstrom).
.expert_level = 2
voigt_nu = 0.5
.type = float
.help = If select Voigt for partiality model, the voigt_nu parameter determines the Lorentzian and Gaussian component of the function (0.0 [Lorentzian]<= voigt_nu <= 1.0 [Gaussian]). The default value is 0.5 and will be refined in the run.
.expert_level = 2
polarization_horizontal_fraction = 1.0
.type = float
.help = Polarization fraction in horizontal direction.
.expert_level = 2
flag_output_verbose = False
.type = bool
.help = Output full detail of the refinement results.
.expert_level = 1
flag_replace_sigI = False
.type = bool
.help = Replace to experimental errors I with sqrt(I).
.expert_level = 2
percent_cone_fraction = 5.0
.type = float
  .help = Percent used to select reflections inside a cone centered on each crystal axis.
.expert_level = 2
isoform_name = None
.type = str
.help = Use this isoform.
.expert_level = 1
flag_hush = False
.type = bool
  .help = Set to True to hush all the disk and elaborated stats operations.
.expert_level = 1
timeout_seconds = 300
.type = int
  .help = Time limit used when the queuing system is activated.
.expert_level = 1
.alias = Queue timeout (sec)
queue
.help = "Parameters used for submitting jobs to queuing system."
.alias = Multiprocessing options
{
mode = None
.type = str
    .help = Queuing system type. Only bsub is available now.
qname = psanaq
.type = str
    .help = For systems with named queues, specify your queue name here. For LCLS users, the primary queue is the default value, while the high-priority queues at NEH and FEH are psnehhiprioq and psfehhiprioq.
.alias = Queue
n_nodes = 12
.type = int
.help = No. of nodes used.
.alias = No. of nodes
}
isoform_cluster
.help = "Parameters used in clustering isoform"
.alias = Isoform Clustering
{
n_clusters = 2
.type = int
.help = No. of expected isoforms.
.alias = No of isoforms
isorefin = None
.multiple = True
.type = path
.help = Specify list of mtz files for identifying isoform clusters. Note that n_clusters will be replaced with no. of items in this list.
.alias = Reference data (MTZ)
d_min = 3.0
.type = float
.help = High resolution limit used in the calculation of r_metric.
.alias = High resolution limit
d_max = 10.0
.type = float
.help = Low resolution limit used in the calculation of r_metric.
.alias = Low resolution limit
sigma_min = 1.5
.type = float
.help = Minimum I/sigI cutoff.
.alias = Minimum I/sigI
n_sample_frames = 300
.type = int
.help = No. of frames used in scoring r_matrix. Images (n_selected_frames) with the highest score will be used in the Brehm & Diederichs algorithm.
.alias = No. of sample frames
n_selected_frames = 100
.type = int
.help = No. of frames used in Auto solution mode. The rest of the frame data will be determined against this merged dataset.
.alias = No. of selected frames
}
rejections = None
.type = str
.help = Dict of integration filenames and their rejected miller indices.
""")
txt_help = """**************************************************************************************************
Prime: post-refinement and merging.
For more detail and citation, see Enabling X-ray free electron laser crystallography
for challenging biological systems from a limited number of crystals
"DOI: https://doi.org/10.7554/eLife.05421".
Usage: prime.postrefine parameter.phil
With this command, you can specify all parameters required by prime in your parameter.phil file.
To obtain the template of these parameters, you can perform a dry run (simply run prime.postrefine).
You can then change the values of the parameters.
For feedback, please contact monarin@stanford.edu.
**************************************************************************************************
List of available parameters:
"""
def process_input(argv=None, flag_mkdir=True):
user_phil = []
if argv == None:
master_phil.show()
raise Usage("Use the above list of parameters to generate your input file (.phil). For more information, run prime.postrefine -h.")
else:
for arg in argv:
if os.path.isfile(arg):
user_phil.append(iotbx.phil.parse(open(arg).read()))
elif (os.path.isdir(arg)) :
user_phil.append(iotbx.phil.parse("""data=\"%s\" """ % arg))
else :
if arg == '--help' or arg == '-h':
print (txt_help)
master_phil.show(attributes_level=1)
raise Usage("Run prime.run to generate a list of initial parameters.")
else:
try:
user_phil.append(iotbx.phil.parse(arg))
except RuntimeError as e :
raise Sorry("Unrecognized argument '%s' (error: %s)" % (arg, str(e)))
#setup phil parameters
working_phil = master_phil.fetch(sources=user_phil)
params = working_phil.extract()
if not params.data:
raise InvalidData("Error: Data is required. Please specify path to your data folder (data=/path/to/integration/results).")
#check target_crystal_system
crystal_system_dict = {'Triclinic': 0, 'Monoclinic': 0, 'Orthorhombic': 0, 'Tetragonal': 0, 'Trigonal': 0, 'Hexagonal': 0, 'Cubic':0}
if params.target_crystal_system is not None:
if params.target_crystal_system not in crystal_system_dict:
      raise InvalidCrystalSystem("Error: Invalid input target_crystal_system. Please choose one of the following options: Triclinic, Monoclinic, Orthorhombic, Tetragonal, Trigonal, Hexagonal, or Cubic.")
#check n_residues
if not params.n_residues:
    raise InvalidNumberOfResidues("Error: Number of residues is required. Please specify the number of residues of your structure in the asymmetric unit (n_residues = xxx).")
#check pixel_size
if not params.pixel_size_mm:
#look in the new integration pickle format (2016-08-05)
try:
frame_files = read_pickles(params.data)
frame_0 = frame_files[0]
int_pickle = read_frame(frame_0)
params.pixel_size_mm = int_pickle['pixel_size']
print ('Info: Found pixel size in the integration pickles (override pixel_size_mm=%10.8f)'%(params.pixel_size_mm))
except Exception:
raise InvalidPixelSize("Error: Pixel size in millimeter is required. Use cctbx.image_viewer to view one of your images and note down the value (e.g. for marccd, set pixel_size_mm=0.079346).")
#check sigma rejection
if params.sigma_rejection < 5:
print("Warning: sigma below 5 will result in discarding too many reflections")
#generate run_no folder
if not params.run_no:
#use default name
default_run = 'Prime_Run_'
all_runs = glob.glob(default_run+'*')
new_run_no = 1
if all_runs: new_run_no = max([int(run_no.split('_')[-1]) for run_no in all_runs])+1
params.run_no = default_run+str(new_run_no)
elif os.path.exists(params.run_no):
print ("Warning: run number %s already exists."%(params.run_no))
    run_overwrite = input('Overwrite?: N/Y (Enter for default)')
if run_overwrite == 'Y':
shutil.rmtree(params.run_no)
else:
      raise InvalidRunNo("Error: Run number exists. Please specify a different run no.")
#make result folders
if flag_mkdir:
os.makedirs(params.run_no)
os.makedirs(params.run_no+'/index_ambiguity')
os.makedirs(params.run_no+'/isoform_cluster')
os.makedirs(params.run_no+'/stats')
#capture input read out by phil
from six.moves import cStringIO as StringIO
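  # Capturing temporarily swaps sys.stdout for a StringIO buffer so the
  # output of working_phil.show() can be collected as a list of lines.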
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
sys.stdout = self._stdout
with Capturing() as output:
working_phil.show()
txt_out = 'prime.postrefine input:\n'
for one_output in output:
txt_out += one_output + '\n'
return params, txt_out
def read_pickles(data):
frame_files = []
tar_files = []
for p in data:
is_tar = False
if p.find('tar') >= 0: is_tar = True
if os.path.isdir(p) == False:
if os.path.isfile(p):
#read all file paths in the given input file
with open(p,'r') as f: file_list = f.read().splitlines()
else:
# p is a glob
file_list = glob.glob(p)
else:
file_list = glob.glob(os.path.join(p,'*'))
frame_files.extend(file_list)
if len(frame_files) == 0:
raise InvalidData("Error: no integration results found in the specified data parameter.")
if not is_tar: return frame_files
#take care of tar files
for tar_filename in frame_files:
tarf = tarfile.open(name=tar_filename, mode='r')
for myindex in range(len(tarf.getmembers())):
tar_files.append(tar_filename+':ind'+str(myindex))
return tar_files
def read_frame(frame_file):
'''
A frame_file can be .pickle or .tar:ind#no.
  Read accordingly and return the integration pickle.
'''
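  # e.g. read_frame('shots.tar:ind3') loads the 4th member of shots.tar
  # (the filename here is purely illustrative)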
observations_pickle = None
try:
if frame_file.endswith('.pickle'):
observations_pickle = pickle.load(open(frame_file,"rb"))
else:
tar_filename, tar_index = frame_file.split(':ind')
tarf = tarfile.open(name=tar_filename, mode='r')
tar_member = tarf.extractfile(member=tarf.getmembers()[int(tar_index)])
observations_pickle = pickle.load(tar_member)
  except Exception:
    print ("Warning: unable to read %s"%(frame_file))
    return None
  if any([len(obs.data()) == 0 for obs in observations_pickle['observations']]):
    print ("Empty file %s"%(frame_file))
    return None
return observations_pickle
|
655767779bf082a0f7125a5badb6fcfe85b23605
|
3b07815fc01110db387261b9e5dc47ca8f2906cd
|
/nngen/onnx/concat.py
|
3b3d7cf3ffa1ffd6e2a90d9fc3c0146e82d442d0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NNgen/nngen
|
1580271dde93c79b484abfd191775802fb6e90f5
|
5cf1d66768ed07f64e7dcddf9a395675f57d85bd
|
refs/heads/develop
| 2023-07-10T06:22:48.408377
| 2023-06-23T01:42:38
| 2023-06-23T01:42:38
| 221,174,472
| 290
| 46
|
Apache-2.0
| 2023-06-16T08:03:22
| 2019-11-12T09:04:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
concat.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import nngen.operator as operator
from . import util
def Concat(visitor, node):
srcs = []
for src in node.input:
src_obj = visitor.visit(src)
srcs.append(src_obj)
axis = None
for attribute in node.attribute:
if attribute.name == 'axis':
axis = attribute.i
srcs = [util.optimize_to_raw_value(src) for src in srcs]
all_ndarray = True
for src in srcs:
if not isinstance(src, np.ndarray):
all_ndarray = False
break
if all_ndarray:
return np.concatenate(srcs, axis)
name = util.get_name(node)
scales = [1.0 for src in srcs]
shamt = 0
layout = None
onnx_layout = None
for src in srcs:
l = src.get_layout()
if l is None:
continue
if layout is None:
layout = l
elif layout != l:
raise ValueError("layout mismatch: '%s' != '%s'" % (layout, l))
l = src.get_onnx_layout()
if l is None:
continue
if onnx_layout is None:
onnx_layout = l
elif onnx_layout != l:
raise ValueError("onnx_layout mismatch: '%s' != '%s'" % (onnx_layout, l))
if layout is not None and onnx_layout is not None:
axis = layout.index(onnx_layout[axis])
c = operator.scaled_concat(srcs, scales, shamt, axis, name=name)
return c
|
1a25dc7cf30a673a4afcac2c42f5abbac06e6f06
|
64ab5b65afdf8d950c4b56ad2259133b95fc2fec
|
/zeus/db/func.py
|
7b2d16e7d919e8fa734635f48f59b85e98f7c948
|
[
"Apache-2.0"
] |
permissive
|
getsentry/zeus
|
3e88895443b23278fdb4c25121422ee214630512
|
6d4a490c19ebe406b551641a022ca08f26c21fcb
|
refs/heads/master
| 2023-09-01T14:20:11.396306
| 2021-04-30T17:08:33
| 2021-04-30T17:08:33
| 96,131,433
| 222
| 27
|
Apache-2.0
| 2022-06-01T03:17:16
| 2017-07-03T16:39:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
func.py
|
import re
from sqlalchemy.sql import func
from sqlalchemy.types import String, TypeDecorator
# https://bitbucket.org/zzzeek/sqlalchemy/issues/3729/using-array_agg-around-row-function-does
class ArrayOfRecord(TypeDecorator):
_array_regexp = re.compile(r"^\{(\".+?\")*\}$")
_chunk_regexp = re.compile(r'"(.*?)",?')
_param_regexp = re.compile(r"[^\(\),]+")
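    # Roughly: _array_regexp validates the outer {...} wrapper, _chunk_regexp
    # splits the quoted row records, and _param_regexp extracts the fields
    # of each "(v1,v2,...)" record.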
impl = String
def __init__(self, cols):
self.cols = cols
super().__init__()
def process_result_value(self, value, dialect):
# XXX(dcramer): if the trailing value(s?) of the returning array are NULL, postgres seems to
# not return them, and thus our output array does not match the same length as our column
# selection array
#
# For example if the input is:
# ARRAY_AGG_RESULT(col1, col2)
# And the value of col2 is NULL
# The resulting return value from this query will be:
# ({col1_value},)
elems = self._array_regexp.match(value).group(1)
elems = [e for e in self._chunk_regexp.split(elems) if e]
num_cols = len(self.cols)
padding = (None,) * num_cols
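        # Pad each parsed record with Nones and trim to num_cols so every
        # row keeps a fixed width even when postgres drops trailing NULLs
        # (see the note above).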
return [
(tuple(self._param_regexp.findall(e)) + padding)[:num_cols] for e in elems
]
def array_agg_row(*arg):
return func.array_agg(func.row(*arg), type_=ArrayOfRecord(arg))
|
cf599866f2926dc4016aa2e5e1a9fdd7ab8d798c
|
69d8d91954f6623f3674d52d734d589f72383628
|
/horizon/utils/secret_key.py
|
ca03b64a3c797d1496b6fd00f2680b04726f2bbd
|
[
"Apache-2.0"
] |
permissive
|
openstack/horizon
|
d031cebe126c06ad9717bbc52790b3d890e8661e
|
7896fd8c77a6766a1156a520946efaf792b76ca5
|
refs/heads/master
| 2023-09-04T06:57:58.069907
| 2023-09-01T20:17:10
| 2023-09-01T20:17:10
| 2,665,166
| 1,060
| 1,175
|
Apache-2.0
| 2023-08-07T02:33:44
| 2011-10-28T13:12:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,034
|
py
|
secret_key.py
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import random
import string
from oslo_concurrency import lockutils
class FilePermissionError(Exception):
"""The key file permissions are insecure."""
def generate_key(key_length=64):
"""Secret key generator.
The quality of randomness depends on operating system support,
see http://docs.python.org/library/random.html#random.SystemRandom.
"""
if hasattr(random, 'SystemRandom'):
logging.info('Generating a secure random key using SystemRandom.')
choice = random.SystemRandom().choice
else:
msg = "WARNING: SystemRandom not present. Generating a random "\
"key using random.choice (NOT CRYPTOGRAPHICALLY SECURE)."
logging.warning(msg)
choice = random.choice
return ''.join(map(lambda x: choice(string.digits + string.ascii_letters),
range(key_length)))
def read_from_file(key_file='.secret_key'):
if (os.stat(key_file).st_mode & 0o777) != 0o600:
raise FilePermissionError(
"Insecure permissions on key file %s, should be 0600." %
os.path.abspath(key_file))
with open(key_file, 'r') as f:
key = f.readline()
return key
def generate_or_read_from_file(key_file='.secret_key', key_length=64):
"""Multiprocess-safe secret key file generator.
Useful to replace the default (and thus unsafe) SECRET_KEY in settings.py
    upon first start. Safe to use even when multiple Python interpreters
serve the dashboard Django application (e.g. in a mod_wsgi + daemonized
environment). Also checks if file permissions are set correctly and
throws an exception if not.
"""
abspath = os.path.abspath(key_file)
# check, if key_file already exists
# if yes, then just read and return key
if os.path.exists(key_file):
key = read_from_file(key_file)
return key
# otherwise, first lock to make sure only one process
lock = lockutils.external_lock(key_file + ".lock",
lock_path=os.path.dirname(abspath))
with lock:
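        # Double-check inside the lock: another process may have generated
        # the key while we were waiting.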
if not os.path.exists(key_file):
key = generate_key(key_length)
old_umask = os.umask(0o177) # Use '0600' file permissions
with open(key_file, 'w') as f:
f.write(key)
os.umask(old_umask)
else:
key = read_from_file(key_file)
return key
|
8128630b1a6d9f309b204161d59143e89066efbf
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/robot-server/tests/runs/test_run_auto_deleter.py
|
8e15626834301cace1788306f671db8a162110e3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
test_run_auto_deleter.py
|
"""Unit tests for `run_auto_deleter`."""
from datetime import datetime
import logging
import pytest
from decoy import Decoy
from robot_server.deletion_planner import RunDeletionPlanner
from robot_server.runs.run_auto_deleter import RunAutoDeleter
from robot_server.runs.run_store import (
RunStore,
RunResource,
)
def _make_dummy_run_resource(run_id: str) -> RunResource:
"""Return a RunResource with the given ID."""
return RunResource(
run_id=run_id,
protocol_id=None,
created_at=datetime.min,
actions=[],
)
def test_make_room_for_new_run(decoy: Decoy, caplog: pytest.LogCaptureFixture) -> None:
"""It should get a deletion plan and enact it on the store."""
mock_run_store = decoy.mock(cls=RunStore)
mock_deletion_planner = decoy.mock(cls=RunDeletionPlanner)
subject = RunAutoDeleter(
run_store=mock_run_store,
deletion_planner=mock_deletion_planner,
)
run_resources = [
_make_dummy_run_resource("run-id-1"),
_make_dummy_run_resource("run-id-2"),
_make_dummy_run_resource("run-id-3"),
]
deletion_plan = set(["run-id-4", "run-id-5"])
decoy.when(mock_run_store.get_all()).then_return(run_resources)
decoy.when(
mock_deletion_planner.plan_for_new_run(
existing_runs=["run-id-1", "run-id-2", "run-id-3"]
)
).then_return(deletion_plan)
# Run the subject, capturing log messages at least as severe as INFO.
with caplog.at_level(logging.INFO):
subject.make_room_for_new_run()
decoy.verify(mock_run_store.remove(run_id="run-id-4"))
decoy.verify(mock_run_store.remove(run_id="run-id-5"))
# It should log the runs that it deleted.
assert "run-id-4" in caplog.text
assert "run-id-5" in caplog.text
|
a5a7f8c9b717995dbad855d47baf671364e4693a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/plugins/hg4idea/testData/bin/hgext/narrow/__init__.py
|
2b95b5983ffd6fe57081fd371e6ac3e754e4918e
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
__init__.py
|
# __init__.py - narrowhg extension
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
from __future__ import absolute_import
from mercurial import (
localrepo,
registrar,
requirements,
)
from . import (
narrowbundle2,
narrowcommands,
narrowrepo,
narrowtemplates,
narrowwirepeer,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
configtable = {}
configitem = registrar.configitem(configtable)
# Narrowhg *has* support for serving ellipsis nodes (which are used at
# least by Google's internal server), but that support is pretty
# fragile and has a lot of problems on real-world repositories that
# have complex graph topologies. This could probably be corrected, but
# absent someone needing the full support for ellipsis nodes in
# repositories with merges, it's unlikely this work will get done. As
# of this writing in late 2017, all repositories large enough for
# ellipsis nodes to be a hard requirement also enforce strictly linear
# history for other scaling reasons.
configitem(
b'experimental',
b'narrowservebrokenellipses',
default=False,
alias=[(b'narrow', b'serveellipses')],
)
# Export the commands table for Mercurial to see.
cmdtable = narrowcommands.table
def featuresetup(ui, features):
features.add(requirements.NARROW_REQUIREMENT)
def uisetup(ui):
"""Wraps user-facing mercurial commands with narrow-aware versions."""
localrepo.featuresetupfuncs.add(featuresetup)
narrowbundle2.setup()
narrowcommands.setup()
narrowwirepeer.uisetup()
def reposetup(ui, repo):
"""Wraps local repositories with narrow repo support."""
if not repo.local():
return
repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
if requirements.NARROW_REQUIREMENT in repo.requirements:
narrowrepo.wraprepo(repo)
narrowwirepeer.reposetup(repo)
templatekeyword = narrowtemplates.templatekeyword
revsetpredicate = narrowtemplates.revsetpredicate
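# Enabling the extension is plain hgrc configuration; a minimal sketch
# (standard Mercurial extension-loading syntax; the empty value loads this
# bundled copy of the extension):
#
#     [extensions]
#     narrow =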
|
06be59a890e639e72bff27ef523a6d5cd6cfbe07
|
163e851e35b1c7cf61051ad111f5928ce72ded0a
|
/azure/functions/kafka.py
|
4693e9d18d7cce4aa9183542b61ed1fd55245c85
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-functions-python-library
|
b410ae8c6103cdabdd790fecd55f0c945321ca01
|
88f96d360458a62a2139d1b9f60cffa6f7783bbe
|
refs/heads/dev
| 2023-08-28T12:49:45.754018
| 2023-08-10T16:05:01
| 2023-08-10T16:05:01
| 136,426,082
| 118
| 51
|
MIT
| 2023-08-31T22:57:29
| 2018-06-07T05:29:14
|
Python
|
UTF-8
|
Python
| false
| false
| 9,927
|
py
|
kafka.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import typing
import json
from typing import Any, List
from . import meta
from ._kafka import AbstractKafkaEvent
class KafkaEvent(AbstractKafkaEvent):
"""A concrete implementation of Kafka event message type."""
def __init__(self, *,
body: bytes,
trigger_metadata: typing.Optional[
typing.Mapping[str, meta.Datum]] = None,
key: typing.Optional[str] = None,
offset: typing.Optional[int] = None,
partition: typing.Optional[int] = None,
topic: typing.Optional[str] = None,
timestamp: typing.Optional[str] = None,
headers: typing.Optional[list] = None) -> None:
self.__body = body
self.__trigger_metadata = trigger_metadata
self.__key = key
self.__offset = offset
self.__partition = partition
self.__topic = topic
self.__timestamp = timestamp
self.__headers = headers
# Cache for trigger metadata after Python object conversion
self._trigger_metadata_pyobj: typing.Optional[
typing.Mapping[str, typing.Any]] = None
def get_body(self) -> bytes:
return self.__body
@property
def key(self) -> typing.Optional[str]:
return self.__key
@property
def offset(self) -> typing.Optional[int]:
return self.__offset
@property
def partition(self) -> typing.Optional[int]:
return self.__partition
@property
def topic(self) -> typing.Optional[str]:
return self.__topic
@property
def timestamp(self) -> typing.Optional[str]:
return self.__timestamp
@property
def headers(self) -> typing.Optional[list]:
return self.__headers
@property
def metadata(self) -> typing.Optional[typing.Mapping[str, typing.Any]]:
if self.__trigger_metadata is None:
return None
if self._trigger_metadata_pyobj is None:
self._trigger_metadata_pyobj = {}
for k, v in self.__trigger_metadata.items():
self._trigger_metadata_pyobj[k] = v.value
return self._trigger_metadata_pyobj
def __repr__(self) -> str:
return (
f'<azure.KafkaEvent '
f'key={self.key} '
            f'partition={self.partition} '
f'offset={self.offset} '
f'topic={self.topic} '
f'timestamp={self.timestamp} '
f'at 0x{id(self):0x}>'
)
class KafkaConverter(meta.InConverter, meta.OutConverter, binding='kafka'):
@classmethod
def check_input_type_annotation(cls, pytype) -> bool:
valid_types = (KafkaEvent)
return (
meta.is_iterable_type_annotation(pytype, valid_types)
or (isinstance(pytype, type) and issubclass(pytype, valid_types))
)
@classmethod
def check_output_type_annotation(cls, pytype) -> bool:
valid_types = (str, bytes)
return (
meta.is_iterable_type_annotation(pytype, str)
or (isinstance(pytype, type) and issubclass(pytype, valid_types))
)
@classmethod
def decode(
cls, data: meta.Datum, *, trigger_metadata
) -> typing.Union[KafkaEvent, typing.List[KafkaEvent]]:
data_type = data.type
if data_type in ['string', 'bytes', 'json']:
return cls.decode_single_event(data, trigger_metadata)
elif data_type in ['collection_bytes', 'collection_string']:
return cls.decode_multiple_events(data, trigger_metadata)
else:
raise NotImplementedError(
f'unsupported event data payload type: {data_type}')
@classmethod
def decode_single_event(cls, data: meta.Datum,
trigger_metadata) -> KafkaEvent:
data_type = data.type
if data_type in ['string', 'json']:
body = data.value.encode('utf-8')
elif data_type == 'bytes':
body = data.value
else:
raise NotImplementedError(
f'unsupported event data payload type: {data_type}')
return KafkaEvent(body=body)
@classmethod
def decode_multiple_events(cls, data: meta.Datum,
trigger_metadata) -> typing.List[KafkaEvent]:
parsed_data: List[bytes] = []
if data.type == 'collection_bytes':
parsed_data = data.value.bytes
elif data.type == 'collection_string':
parsed_data = [
d.encode('utf-8') for d in data.value.string
]
return [KafkaEvent(body=pd) for pd in parsed_data]
@classmethod
def encode(cls, obj: typing.Any, *,
expected_type: typing.Optional[type]) -> meta.Datum:
raise NotImplementedError('Output bindings are not '
'supported for Kafka')
class KafkaTriggerConverter(KafkaConverter,
binding='kafkaTrigger', trigger=True):
@classmethod
def decode(
cls, data: meta.Datum, *, trigger_metadata
) -> typing.Union[KafkaEvent, typing.List[KafkaEvent]]:
data_type = data.type
if data_type in ['string', 'bytes', 'json']:
return cls.decode_single_event(data, trigger_metadata)
elif data_type in ['collection_bytes', 'collection_string']:
return cls.decode_multiple_events(data, trigger_metadata)
else:
raise NotImplementedError(
f'unsupported event data payload type: {data_type}')
@classmethod
def decode_single_event(cls, data: meta.Datum,
trigger_metadata) -> KafkaEvent:
data_type = data.type
if data_type in ['string', 'json']:
body = data.value.encode('utf-8')
elif data_type == 'bytes':
body = data.value
else:
raise NotImplementedError(
f'unsupported event data payload type: {data_type}')
return KafkaEvent(
body=body,
timestamp=cls._decode_trigger_metadata_field(
trigger_metadata, 'Timestamp', python_type=str),
key=cls._decode_trigger_metadata_field(
trigger_metadata, 'Key', python_type=str),
partition=cls._decode_trigger_metadata_field(
trigger_metadata, 'Partition', python_type=int),
offset=cls._decode_trigger_metadata_field(
trigger_metadata, 'Offset', python_type=int),
topic=cls._decode_trigger_metadata_field(
trigger_metadata, 'Topic', python_type=str),
headers=cls._decode_trigger_metadata_field(
trigger_metadata, 'Headers', python_type=list),
trigger_metadata=trigger_metadata
)
@classmethod
def decode_multiple_events(cls, data: meta.Datum,
trigger_metadata) -> typing.List[KafkaEvent]:
parsed_data: List[bytes] = []
if data.type == 'collection_bytes':
parsed_data = data.value.bytes
elif data.type == 'collection_string':
parsed_data = [
d.encode('utf-8') for d in data.value.string
]
timestamp_props = trigger_metadata.get('TimestampArray')
key_props = trigger_metadata.get('KeyArray')
partition_props = trigger_metadata.get('PartitionArray')
offset_props = trigger_metadata.get('OffsetArray')
topic_props = trigger_metadata.get('TopicArray')
header_props = trigger_metadata.get('HeadersArray')
parsed_timestamp_props: List[Any] = cls.get_parsed_props(
timestamp_props, parsed_data)
parsed_key_props = cls.get_parsed_props(
key_props, parsed_data)
parsed_partition_props = cls.get_parsed_props(
partition_props, parsed_data)
        # Default each metadata list to one None per body so the indexed
        # access below stays in bounds when a metadata array is absent.
        parsed_offset_props: List[Any] = [None] * len(parsed_data)
        if offset_props is not None:
            parsed_offset_props = [v for v in offset_props.value.sint64]
            if len(parsed_offset_props) != len(parsed_data):
                raise AssertionError(
                    'Number of bodies and metadata mismatched')
        parsed_topic_props: List[Any] = [None] * len(parsed_data)
        if topic_props is not None:
            parsed_topic_props = [v for v in topic_props.value.string]
        parsed_headers_props: List[Any] = [None] * len(parsed_data)
        if header_props is not None:
            parsed_headers_props = cls.get_parsed_props(header_props,
                                                        parsed_data)
events = []
for i in range(len(parsed_data)):
event = KafkaEvent(
body=parsed_data[i],
timestamp=parsed_timestamp_props[i],
key=parsed_key_props[i],
partition=parsed_partition_props[i],
offset=parsed_offset_props[i],
topic=parsed_topic_props[i],
headers=parsed_headers_props[i],
trigger_metadata=trigger_metadata
)
events.append(event)
return events
@classmethod
def encode(cls, obj: typing.Any, *,
expected_type: typing.Optional[type]) -> meta.Datum:
raise NotImplementedError('Output bindings are not '
'supported for Kafka')
@classmethod
def get_parsed_props(
cls, props: meta.Datum, parsed_data) -> List[Any]:
        # An absent metadata array yields one None per body rather than an
        # empty list, so callers can index the result safely.
        if props is None:
            return [None] * len(parsed_data)
        parsed_props: List[Any] = json.loads(props.value)
        if len(parsed_data) != len(parsed_props):
            raise AssertionError('Number of bodies and metadata mismatched')
        return parsed_props
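# A minimal, self-contained sketch of the length check above. `_FakeDatum` is
# a stand-in for meta.Datum assumed only to carry a JSON string in `.value`;
# it is not part of this module or of the real bindings.
class _FakeDatum:
    def __init__(self, value: str) -> None:
        self.value = value
def _length_check_sketch() -> None:
    bodies = [b'body-1', b'body-2']
    keys = KafkaTriggerConverter.get_parsed_props(
        _FakeDatum('["key-1", "key-2"]'), bodies)
    assert keys == ['key-1', 'key-2']  # one metadata entry per body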
|
73576b5d11bdaaf114b41ef47e8e3f5ba08527bd
|
445a4a45ace6124d98569c16c24f53fb4d486ae9
|
/src/python/bezier/curve.py
|
94c4e9b9581f4a13059ee14709faa3c97133138e
|
[
"Apache-2.0"
] |
permissive
|
dhermes/bezier
|
7acd2d5a4020a3d1846e19a9f213344a0f3c93f4
|
bb9faa707916ef6f1b987677009e32cf68c54034
|
refs/heads/main
| 2023-08-08T22:41:00.943587
| 2023-08-01T20:56:28
| 2023-08-01T20:56:28
| 73,047,402
| 231
| 45
|
Apache-2.0
| 2023-08-02T03:19:48
| 2016-11-07T06:07:45
|
Python
|
UTF-8
|
Python
| false
| false
| 30,854
|
py
|
curve.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for B |eacute| zier Curves.
.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE
:trim:
See :doc:`../../algorithms/curve-curve-intersection` for examples using the
:class:`Curve` class to find intersections.
.. testsetup:: *
import numpy as np
import bezier
def binary_exponent(value):
if value == 0.0:
return -np.inf
_, result = np.frexp(value)
# Shift [1/2, 1) --> [1, 2) borrows one from exponent
return result - 1
"""
import numpy as np
from bezier import _base
from bezier import _curve_helpers
from bezier import _geometric_intersection
from bezier import _plot_helpers
from bezier import _symbolic
from bezier.hazmat import algebraic_intersection
from bezier.hazmat import geometric_intersection
from bezier.hazmat import intersection_helpers
_LOCATE_ERROR_TEMPLATE = (
"Dimension mismatch: This curve is {:d}-dimensional, so the point should "
"be a {:d} x 1 NumPy array. Instead the point {} has dimensions {}."
)
IntersectionStrategy = intersection_helpers.IntersectionStrategy
class Curve(_base.Base):
r"""Represents a B |eacute| zier `curve`_.
.. _curve: https://en.wikipedia.org/wiki/B%C3%A9zier_curve
We take the traditional definition: a B |eacute| zier curve is a mapping
from :math:`s \in \left[0, 1\right]` to convex combinations
of points :math:`v_0, v_1, \ldots, v_n` in some vector space:
.. math::
B(s) = \sum_{j = 0}^n \binom{n}{j} s^j (1 - s)^{n - j} \cdot v_j
.. image:: ../../images/curve_constructor.png
:align: center
.. doctest:: curve-constructor
>>> import bezier
>>> import numpy as np
>>> nodes = np.asfortranarray([
... [0.0, 0.625, 1.0],
... [0.0, 0.5 , 0.5],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve
<Curve (degree=2, dimension=2)>
.. testcleanup:: curve-constructor
import make_images
make_images.curve_constructor(curve)
Args:
nodes (Sequence[Sequence[numbers.Number]]): The nodes in the curve.
Must be convertible to a 2D NumPy array of floating point values,
where the columns represent each node while the rows are the
dimension of the ambient space.
degree (int): The degree of the curve. This is assumed to
correctly correspond to the number of ``nodes``. Use
:meth:`from_nodes` if the degree has not yet been computed.
copy (bool): Flag indicating if the nodes should be copied before
being stored. Defaults to :data:`True` since callers may
freely mutate ``nodes`` after passing in.
verify (bool): Flag indicating if the degree should be verified against
the number of nodes. Defaults to :data:`True`.
"""
__slots__ = ("_degree",) # From constructor
def __init__(self, nodes, degree, *, copy=True, verify=True):
super().__init__(nodes, copy=copy)
self._degree = degree
self._verify_degree(verify)
@classmethod
def from_nodes(cls, nodes, copy=True):
"""Create a :class:`.Curve` from nodes.
Computes the ``degree`` based on the shape of ``nodes``.
Args:
nodes (Sequence[Sequence[numbers.Number]]): The nodes in the curve.
Must be convertible to a 2D NumPy array of floating point
values, where the columns represent each node while the rows
are the dimension of the ambient space.
copy (bool): Flag indicating if the nodes should be copied before
being stored. Defaults to :data:`True` since callers may
freely mutate ``nodes`` after passing in.
Returns:
Curve: The constructed curve.
"""
nodes_np = _base.sequence_to_array(nodes)
_, num_nodes = nodes_np.shape
degree = cls._get_degree(num_nodes)
return cls(nodes_np, degree, copy=copy, verify=False)
@staticmethod
def _get_degree(num_nodes):
"""Get the degree of the current curve.
Args:
num_nodes (int): The number of nodes provided.
Returns:
int: The degree of the current curve.
"""
return num_nodes - 1
def _verify_degree(self, verify):
"""Verify that the number of nodes matches the degree.
Args:
verify (bool): Flag indicating if the degree should be verified
against the number of nodes.
Raises:
ValueError: If ``verify`` is :data:`True` and the number of nodes
does not match the degree.
"""
if not verify:
return
_, num_nodes = self._nodes.shape
expected_nodes = self._degree + 1
if expected_nodes == num_nodes:
return
msg = (
f"A degree {self._degree} curve should have "
f"{expected_nodes} nodes, not {num_nodes}."
)
raise ValueError(msg)
@property
def length(self):
r"""The length of the current curve.
Computes the length via:
.. math::
\int_{B\left(\left[0, 1\right]\right)} 1 \, d\mathbf{x} =
\int_0^1 \left\lVert B'(s) \right\rVert_2 \, ds
Returns:
float: The length of the current curve.
"""
return _curve_helpers.compute_length(self._nodes)
@property
def __dict__(self):
"""dict: Dictionary of current curve's property namespace.
This is just a stand-in property for the usual ``__dict__``. This
class defines ``__slots__`` so by default would not provide a
``__dict__``.
This also means that the current object can't be modified by the
returned dictionary.
"""
return {
"_dimension": self._dimension,
"_nodes": self._nodes,
"_degree": self._degree,
}
def copy(self):
"""Make a copy of the current curve.
Returns:
Curve: Copy of current curve.
"""
return Curve(self._nodes, self._degree, copy=True, verify=False)
def evaluate(self, s):
r"""Evaluate :math:`B(s)` along the curve.
This method acts as a (partial) inverse to :meth:`locate`.
See :meth:`evaluate_multi` for more details.
.. image:: ../../images/curve_evaluate.png
:align: center
.. doctest:: curve-eval
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.625, 1.0],
... [0.0, 0.5 , 0.5],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve.evaluate(0.75)
array([[0.796875],
[0.46875 ]])
.. testcleanup:: curve-eval
import make_images
make_images.curve_evaluate(curve)
Args:
s (float): Parameter along the curve.
Returns:
numpy.ndarray: The point on the curve (as a two dimensional
NumPy array with a single column).
"""
return _curve_helpers.evaluate_multi(
self._nodes, np.asfortranarray([s])
)
def evaluate_multi(self, s_vals):
r"""Evaluate :math:`B(s)` for multiple points along the curve.
This is done via a modified Horner's method (vectorized for
each ``s``-value).
.. doctest:: curve-eval-multi
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0],
... [0.0, 2.0],
... [0.0, 3.0],
... ])
>>> curve = bezier.Curve(nodes, degree=1)
>>> curve
<Curve (degree=1, dimension=3)>
>>> s_vals = np.linspace(0.0, 1.0, 5)
>>> curve.evaluate_multi(s_vals)
array([[0. , 0.25, 0.5 , 0.75, 1. ],
[0. , 0.5 , 1. , 1.5 , 2. ],
[0. , 0.75, 1.5 , 2.25, 3. ]])
Args:
s_vals (numpy.ndarray): Parameters along the curve (as a
1D array).
Returns:
numpy.ndarray: The points on the curve. As a two dimensional
NumPy array, with the columns corresponding to each ``s``
value and the rows to the dimension.
"""
return _curve_helpers.evaluate_multi(self._nodes, s_vals)
def evaluate_hodograph(self, s):
r"""Evaluate the tangent vector :math:`B'(s)` along the curve.
.. image:: ../../images/curve_evaluate_hodograph.png
:align: center
.. doctest:: curve-evaluate-hodograph
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.625, 1.0],
... [0.0, 0.5 , 0.5],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve.evaluate_hodograph(0.75)
array([[0.875],
[0.25 ]])
.. testcleanup:: curve-evaluate-hodograph
import make_images
make_images.curve_evaluate_hodograph(curve, 0.75)
Args:
s (float): Parameter along the curve.
Returns:
numpy.ndarray: The tangent vector along the curve (as a two
dimensional NumPy array with a single column).
"""
return _curve_helpers.evaluate_hodograph(s, self._nodes)
def plot(self, num_pts, color=None, alpha=None, ax=None):
"""Plot the current curve.
Args:
num_pts (int): Number of points to plot.
color (Optional[Tuple[float, float, float]]): Color as RGB profile.
alpha (Optional[float]): The alpha channel for the color.
ax (Optional[matplotlib.artist.Artist]): matplotlib axis object
to add plot to.
Returns:
matplotlib.artist.Artist: The axis containing the plot. This
may be a newly created axis.
Raises:
NotImplementedError: If the curve's dimension is not ``2``.
"""
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
s_vals = np.linspace(0.0, 1.0, num_pts)
points = self.evaluate_multi(s_vals)
if ax is None:
ax = _plot_helpers.new_axis()
ax.plot(points[0, :], points[1, :], color=color, alpha=alpha)
return ax
def subdivide(self):
r"""Split the curve :math:`B(s)` into a left and right half.
Takes the interval :math:`\left[0, 1\right]` and splits the curve into
:math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
:math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
order to do this, also reparameterizes the curve, hence the resulting
left and right halves have new nodes.
.. image:: ../../images/curve_subdivide.png
:align: center
.. doctest:: curve-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 2.0],
... [0.0, 3.0 , 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> left, right = curve.subdivide()
>>> left.nodes
array([[0. , 0.625, 1.125],
[0. , 1.5 , 1.75 ]])
>>> right.nodes
array([[1.125, 1.625, 2. ],
[1.75 , 2. , 1. ]])
.. testcleanup:: curve-subdivide
import make_images
make_images.curve_subdivide(curve, left, right)
Returns:
Tuple[Curve, Curve]: The left and right sub-curves.
"""
left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self._nodes)
left = Curve(left_nodes, self._degree, copy=False, verify=False)
right = Curve(right_nodes, self._degree, copy=False, verify=False)
return left, right
def intersect(
self, other, strategy=IntersectionStrategy.GEOMETRIC, verify=True
):
"""Find the points of intersection with another curve.
See :doc:`../../algorithms/curve-curve-intersection` for more details.
.. image:: ../../images/curve_intersect.png
:align: center
.. doctest:: curve-intersect
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [0.0, 0.375, 0.75 ],
... [0.0, 0.75 , 0.375],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [0.5, 0.5 ],
... [0.0, 0.75],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=1)
>>> intersections = curve1.intersect(curve2)
>>> 3.0 * intersections
array([[2.],
[2.]])
>>> s_vals = intersections[0, :]
>>> curve1.evaluate_multi(s_vals)
array([[0.5],
[0.5]])
.. testcleanup:: curve-intersect
import make_images
make_images.curve_intersect(curve1, curve2, s_vals)
Args:
other (Curve): Other curve to intersect with.
strategy (Optional[ \
~bezier.hazmat.intersection_helpers.IntersectionStrategy]): The
intersection algorithm to use. Defaults to geometric.
verify (Optional[bool]): Indicates if extra caution should be
used to verify assumptions about the input and current
curve. Can be disabled to speed up execution time.
Defaults to :data:`True`.
Returns:
numpy.ndarray: ``2 x N`` array of ``s``- and ``t``-parameters where
intersections occur (possibly empty).
Raises:
TypeError: If ``other`` is not a curve (and ``verify=True``).
NotImplementedError: If at least one of the curves
isn't two-dimensional (and ``verify=True``).
ValueError: If ``strategy`` is not a valid
:class:`.IntersectionStrategy`.
"""
if verify:
if not isinstance(other, Curve):
raise TypeError(
"Can only intersect with another curve", "Received", other
)
if self._dimension != 2 or other._dimension != 2:
raise NotImplementedError(
"Intersection only implemented in 2D"
)
if strategy == IntersectionStrategy.GEOMETRIC:
all_intersections = _geometric_intersection.all_intersections
elif strategy == IntersectionStrategy.ALGEBRAIC:
all_intersections = algebraic_intersection.all_intersections
else:
raise ValueError("Unexpected strategy.", strategy)
st_vals, _ = all_intersections(self._nodes, other._nodes)
return st_vals
def self_intersections(
self, strategy=IntersectionStrategy.GEOMETRIC, verify=True
):
"""Find the points where the curve intersects itself.
For curves in general position, there will be no self-intersections:
.. doctest:: curve-self-intersect1
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0, 0.0],
... [0.0, 1.0, 2.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve.self_intersections()
array([], shape=(2, 0), dtype=float64)
However, some curves do have self-intersections. Consider a cubic
with
.. math::
B\\left(\\frac{3 - \\sqrt{5}}{6}\\right) =
B\\left(\\frac{3 + \\sqrt{5}}{6}\\right)
.. image:: ../../images/curve_self_intersect2.png
:align: center
.. doctest:: curve-self-intersect2
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, -1.0, 1.0, -0.75 ],
... [2.0, 0.0, 1.0, 1.625],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> self_intersections = curve.self_intersections()
>>> sq5 = np.sqrt(5.0)
>>> expected = np.asfortranarray([
... [3 - sq5],
... [3 + sq5],
... ]) / 6.0
>>> max_err = np.max(np.abs(self_intersections - expected))
>>> binary_exponent(max_err)
-53
.. testcleanup:: curve-self-intersect2
import make_images
make_images.curve_self_intersect2(curve, self_intersections)
Some (somewhat pathological) curves can have multiple
self-intersections, though the number possible is largely constrained
by the degree. For example, this degree six curve has two
self-intersections:
.. image:: ../../images/curve_self_intersect3.png
:align: center
.. doctest:: curve-self-intersect3
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-300.0, 227.5 , -730.0, 0.0 , 730.0, -227.5 , 300.0],
... [ 150.0, 953.75, -2848.0, 4404.75, -2848.0, 953.75, 150.0],
... ])
>>> curve = bezier.Curve(nodes, degree=6)
>>> self_intersections = curve.self_intersections()
>>> 6.0 * self_intersections
array([[1., 4.],
[2., 5.]])
>>> curve.evaluate_multi(self_intersections[:, 0])
array([[-150., -150.],
[ 75., 75.]])
>>> curve.evaluate_multi(self_intersections[:, 1])
array([[150., 150.],
[ 75., 75.]])
.. testcleanup:: curve-self-intersect3
import make_images
make_images.curve_self_intersect3(curve, self_intersections)
Args:
strategy (Optional[ \
~bezier.hazmat.intersection_helpers.IntersectionStrategy]): The
intersection algorithm to use. Defaults to geometric.
verify (Optional[bool]): Indicates if extra caution should be
used to verify assumptions about the current curve. Can be
disabled to speed up execution time. Defaults to :data:`True`.
Returns:
numpy.ndarray: ``2 x N`` array of ``s1``- and ``s2``-parameters
where self-intersections occur (possibly empty). For each pair
we have :math:`s_1 \\neq s_2` and :math:`B(s_1) = B(s_2)`.
Raises:
NotImplementedError: If the curve isn't two-dimensional
(and ``verify=True``).
NotImplementedError: If ``strategy`` is not
:attr:`~.IntersectionStrategy.GEOMETRIC`.
"""
if strategy != IntersectionStrategy.GEOMETRIC:
raise NotImplementedError(
"Only geometric strategy for self-intersection detection"
)
if verify:
if self._dimension != 2:
raise NotImplementedError(
"Self-intersection only implemented in 2D",
"Current dimension",
self._dimension,
)
return geometric_intersection.self_intersections(self._nodes)
def elevate(self):
r"""Return a degree-elevated version of the current curve.
Does this by converting the current nodes :math:`v_0, \ldots, v_n`
to new nodes :math:`w_0, \ldots, w_{n + 1}` where
.. math::
\begin{align*}
w_0 &= v_0 \\
w_j &= \frac{j}{n + 1} v_{j - 1} + \frac{n + 1 - j}{n + 1} v_j \\
w_{n + 1} &= v_n
\end{align*}
.. image:: ../../images/curve_elevate.png
:align: center
.. testsetup:: curve-elevate
import numpy as np
import bezier
.. doctest:: curve-elevate
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.5, 3.0],
... [0.0, 1.5, 0.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> elevated = curve.elevate()
>>> elevated
<Curve (degree=3, dimension=2)>
>>> elevated.nodes
array([[0., 1., 2., 3.],
[0., 1., 1., 0.]])
.. testcleanup:: curve-elevate
import make_images
make_images.curve_elevate(curve, elevated)
Returns:
Curve: The degree-elevated curve.
"""
new_nodes = _curve_helpers.elevate_nodes(self._nodes)
return Curve(new_nodes, self._degree + 1, copy=False, verify=False)
def reduce_(self):
r"""Return a degree-reduced version of the current curve.
.. _pseudo-inverse:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse
Does this by converting the current nodes :math:`v_0, \ldots, v_n`
to new nodes :math:`w_0, \ldots, w_{n - 1}` that correspond to
reversing the :meth:`elevate` process.
This uses the `pseudo-inverse`_ of the elevation matrix. For example
when elevating from degree 2 to 3, the matrix :math:`E_2` is given by
.. math::
\mathbf{v} = \left[\begin{array}{c c c} v_0 & v_1 & v_2
\end{array}\right] \longmapsto \left[\begin{array}{c c c c}
v_0 & \frac{v_0 + 2 v_1}{3} & \frac{2 v_1 + v_2}{3} & v_2
\end{array}\right] = \frac{1}{3} \mathbf{v}
\left[\begin{array}{c c c c} 3 & 1 & 0 & 0 \\
0 & 2 & 2 & 0 \\ 0 & 0 & 1 & 3 \end{array}\right]
and the (right) pseudo-inverse is given by
.. math::
R_2 = E_2^T \left(E_2 E_2^T\right)^{-1} = \frac{1}{20}
\left[\begin{array}{c c c} 19 & -5 & 1 \\
3 & 15 & -3 \\ -3 & 15 & 3 \\ 1 & -5 & 19
\end{array}\right].
.. warning::
Though degree-elevation preserves the start and end nodes, degree
reduction has no such guarantee. Rather, the nodes produced are
"best" in the least squares sense (when solving the normal
equations).
.. image:: ../../images/curve_reduce.png
:align: center
.. testsetup:: curve-reduce, curve-reduce-approx
import numpy as np
import bezier
.. doctest:: curve-reduce
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-3.0, 0.0, 1.0, 0.0],
... [ 3.0, 2.0, 3.0, 6.0],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-3. , 1.5, 0. ],
[ 3. , 1.5, 6. ]])
.. testcleanup:: curve-reduce
import make_images
make_images.curve_reduce(curve, reduced)
In the case that the current curve **is not** degree-elevated.
.. image:: ../../images/curve_reduce_approx.png
:align: center
.. doctest:: curve-reduce-approx
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 3.75, 5.0],
... [2.5, 5.0 , 7.5 , 2.5],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> reduced = curve.reduce_()
>>> reduced
<Curve (degree=2, dimension=2)>
>>> reduced.nodes
array([[-0.125, 2.5 , 5.125],
[ 2.125, 8.125, 2.875]])
.. testcleanup:: curve-reduce-approx
import make_images
make_images.curve_reduce_approx(curve, reduced)
Returns:
Curve: The degree-reduced curve.
"""
new_nodes = _curve_helpers.reduce_pseudo_inverse(self._nodes)
return Curve(new_nodes, self._degree - 1, copy=False, verify=False)
def specialize(self, start, end):
"""Specialize the curve to a given sub-interval.
.. image:: ../../images/curve_specialize.png
:align: center
.. doctest:: curve-specialize
>>> nodes = np.asfortranarray([
... [0.0, 0.5, 1.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> new_curve = curve.specialize(-0.25, 0.75)
>>> new_curve.nodes
array([[-0.25 , 0.25 , 0.75 ],
[-0.625, 0.875, 0.375]])
.. testcleanup:: curve-specialize
import make_images
make_images.curve_specialize(curve, new_curve)
This is a generalized version of :meth:`subdivide`, and can even
match the output of that method:
.. testsetup:: curve-specialize2
import numpy as np
import bezier
nodes = np.asfortranarray([
[0.0, 0.5, 1.0],
[0.0, 1.0, 0.0],
])
curve = bezier.Curve(nodes, degree=2)
.. doctest:: curve-specialize2
>>> left, right = curve.subdivide()
>>> also_left = curve.specialize(0.0, 0.5)
>>> np.all(also_left.nodes == left.nodes)
True
>>> also_right = curve.specialize(0.5, 1.0)
>>> np.all(also_right.nodes == right.nodes)
True
Args:
start (float): The start point of the interval we
are specializing to.
end (float): The end point of the interval we
are specializing to.
Returns:
Curve: The newly-specialized curve.
"""
new_nodes = _curve_helpers.specialize_curve(self._nodes, start, end)
return Curve(new_nodes, self._degree, copy=False, verify=False)
def locate(self, point):
r"""Find a point on the current curve.
Solves for :math:`s` in :math:`B(s) = p`.
This method acts as a (partial) inverse to :meth:`evaluate`.
.. note::
A unique solution is only guaranteed if the current curve has no
self-intersections. This code assumes, but doesn't check, that
this is true.
.. image:: ../../images/curve_locate.png
:align: center
.. doctest:: curve-locate
>>> nodes = np.asfortranarray([
... [0.0, -1.0, 1.0, -0.75 ],
... [2.0, 0.0, 1.0, 1.625],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> point1 = np.asfortranarray([
... [-0.09375 ],
... [ 0.828125],
... ])
>>> curve.locate(point1)
0.5
>>> point2 = np.asfortranarray([
... [0.0],
... [1.5],
... ])
>>> curve.locate(point2) is None
True
>>> point3 = np.asfortranarray([
... [-0.25 ],
... [ 1.375],
... ])
        >>> curve.locate(point3)
Traceback (most recent call last):
...
ValueError: Parameters not close enough to one another
.. testcleanup:: curve-locate
import make_images
make_images.curve_locate(curve, point1, point2, point3)
Args:
point (numpy.ndarray): A (``D x 1``) point on the curve,
where :math:`D` is the dimension of the curve.
Returns:
Optional[float]: The parameter value (:math:`s`) corresponding
to ``point`` or :data:`None` if the point is not on
the ``curve``.
Raises:
ValueError: If the dimension of the ``point`` doesn't match the
dimension of the current curve.
"""
if point.shape != (self._dimension, 1):
point_dimensions = " x ".join(
str(dimension) for dimension in point.shape
)
msg = _LOCATE_ERROR_TEMPLATE.format(
self._dimension, self._dimension, point, point_dimensions
)
raise ValueError(msg)
return _curve_helpers.locate_point(self._nodes, point)
def to_symbolic(self):
"""Convert to a SymPy matrix representing :math:`B(s)`.
.. note::
This method requires SymPy.
.. doctest:: curve-to-symbolic
>>> nodes = np.asfortranarray([
... [0.0, -1.0, 1.0, -0.75 ],
... [2.0, 0.0, 1.0, 1.625],
... ])
>>> curve = bezier.Curve(nodes, degree=3)
>>> curve.to_symbolic()
Matrix([
[ -3*s*(3*s - 2)**2/4],
[-(27*s**3 - 72*s**2 + 48*s - 16)/8]])
Returns:
:class:`sympy.Matrix <sympy.matrices.dense.MutableDenseMatrix>`:
The curve :math:`B(s)`.
"""
_, b_polynomial = _symbolic.curve_as_polynomial(
self._nodes, self._degree
)
return b_polynomial
def implicitize(self):
r"""Implicitize the curve.
.. note::
This method requires SymPy.
.. doctest:: curve-implicitize
>>> nodes = np.asfortranarray([
... [0.0, 1.0, 1.0],
... [2.0, 0.0, 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve.implicitize()
9*x**2 + 6*x*y - 20*x + y**2 - 8*y + 12
Returns:
:class:`sympy.Expr <sympy.core.expr.Expr>`: The function that
defines the curve in :math:`\mathbf{R}^2` via :math:`f(x, y) = 0`.
Raises:
ValueError: If the curve's dimension is not ``2``.
"""
if self._dimension != 2:
raise ValueError(
"Only a planar (2D) curve can be implicitized",
"Current dimension",
self._dimension,
)
return _symbolic.implicitize_curve(self._nodes, self._degree)
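def _reduce_pseudo_inverse_sketch():
    """Numerically verify the ``reduce_`` docstring math (illustrative only).

    Builds the degree 2 -> 3 elevation matrix ``E_2`` and checks that its
    right pseudo-inverse matches the closed form given in the docstring.
    """
    e2 = np.asfortranarray([
        [3.0, 1.0, 0.0, 0.0],
        [0.0, 2.0, 2.0, 0.0],
        [0.0, 0.0, 1.0, 3.0],
    ]) / 3.0
    r2 = e2.T.dot(np.linalg.inv(e2.dot(e2.T)))
    expected = np.asfortranarray([
        [19.0, -5.0, 1.0],
        [3.0, 15.0, -3.0],
        [-3.0, 15.0, 3.0],
        [1.0, -5.0, 19.0],
    ]) / 20.0
    assert np.allclose(r2, expected)
    assert np.allclose(e2.dot(r2), np.eye(3))  # right-inverse property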
|
90ab7b918b0c70bfd67717bca3a3a3628cac9de0
|
68384147be31aadd870c0153ac75466b1f5de122
|
/keras_cv_attention_models/swin_transformer_v2/swin_transformer_v2_timm.py
|
e70987c73c8e1c33a25ed260e4240c7297d29079
|
[
"CC-BY-NC-4.0",
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
leondgarse/keras_cv_attention_models
|
cbee0b641e4ed727d9646669cf1614ddf3435a8a
|
ac1d1146ef10823ce83bffdc527f227e9732ab55
|
refs/heads/main
| 2023-08-31T07:09:57.302178
| 2023-08-25T12:51:31
| 2023-08-25T12:51:31
| 391,777,965
| 460
| 69
|
MIT
| 2022-10-13T14:31:01
| 2021-08-02T00:59:55
|
Python
|
UTF-8
|
Python
| false
| false
| 19,027
|
py
|
swin_transformer_v2_timm.py
|
import math
import numpy as np
from keras_cv_attention_models import backend
from keras_cv_attention_models.backend import layers, functional, models, initializers
from keras_cv_attention_models.models import register_model
from keras_cv_attention_models.attention_layers import (
ChannelAffine,
drop_block,
layer_norm,
mlp_block,
output_block,
add_pre_post_process,
)
from keras_cv_attention_models.download_and_load import reload_model_weights
PRETRAINED_DICT = {
"swin_transformer_v2_tiny_ns": {"imagenet": {224: "c3272af88ba0cf09c818ac558ca9970e"}},
"swin_transformer_v2_small_ns": {"imagenet": {224: "89d5a63d528bbb88a4a287871e868414"}},
}
@backend.register_keras_serializable(package="swinv2")
class DivideScale(layers.Layer):
def __init__(self, axis=-1, initializer="ones", min_value=0.01, **kwargs):
super().__init__(**kwargs)
self.axis, self.initializer, self.min_value = axis, initializer, min_value
def build(self, input_shape):
if self.axis == -1 or self.axis == len(input_shape) - 1:
weight_shape = (input_shape[-1],)
else:
weight_shape = [1] * len(input_shape)
axis = self.axis if isinstance(self.axis, (list, tuple)) else [self.axis]
for ii in axis:
weight_shape[ii] = input_shape[ii]
self.scale = self.add_weight(name="weight", shape=weight_shape, initializer=self.initializer, trainable=True)
super().build(input_shape)
def call(self, inputs, **kwargs):
return inputs / functional.maximum(self.scale, self.min_value)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis, "min_value": self.min_value}) # Not saving initializer in config
return config
@backend.register_keras_serializable(package="swinv2")
class PairWiseRelativePositionalEmbedding(layers.Layer):
def __init__(self, **kwargs):
        # No weights; wrapped as a layer so it shows up in the model structure
super().__init__(**kwargs)
self.use_layer_as_module = True
def build(self, input_shape):
# input_shape: [batch * window_patch, window_height, window_width, channel]
height, width = input_shape[1], input_shape[2]
        xx, yy = np.meshgrid(range(height), range(width))  # np.meshgrid's default 'xy' mode matches tf.meshgrid; torch.meshgrid uses 'ij' mode
coords = np.stack([yy, xx], axis=-1).astype("float32") # [14, 14, 2]
coords_flatten = np.reshape(coords, [-1, 2]) # [196, 2]
relative_coords = coords_flatten[:, None, :] - coords_flatten[None, :, :] # [196, 196, 2]
relative_coords_log = np.sign(relative_coords) * np.log(1.0 + np.abs(relative_coords))
if hasattr(self, "register_buffer"): # PyTorch
self.register_buffer("relative_coords_log", functional.convert_to_tensor(relative_coords_log, dtype=self.compute_dtype), persistent=False)
else:
self.relative_coords_log = functional.convert_to_tensor(relative_coords_log, dtype=self.compute_dtype)
self.height, self.width = height, width
super().build(input_shape)
def call(self, inputs, **kwargs):
return self.relative_coords_log
def compute_output_shape(self, input_shape):
return [self.height * self.width, self.height * self.width, 2]
@backend.register_keras_serializable(package="swinv2")
class WindowAttentionMask(layers.Layer):
def __init__(self, height, width, window_height, window_width, shift_height=0, shift_width=0, **kwargs):
        # No weights; wrapped as a layer to avoid errors when saving or loading the model
self.height, self.width, self.window_height, self.window_width = height, width, window_height, window_width
self.shift_height, self.shift_width = shift_height, shift_width
self.blocks = (self.height // self.window_height) * (self.width // self.window_width)
super().__init__(**kwargs)
def build(self, input_shape):
hh_split = [0, self.height - self.window_height, self.height - self.shift_height, self.height]
ww_split = [0, self.width - self.window_width, self.width - self.shift_width, self.width]
mask_value, total_ww, mask = 0, len(ww_split) - 1, []
for hh_id in range(len(hh_split) - 1):
hh = hh_split[hh_id + 1] - hh_split[hh_id]
            rr = [np.zeros([hh, ww_split[ww_id + 1] - ww_split[ww_id]], dtype="float32") + (ww_id + mask_value) for ww_id in range(total_ww)]
mask.append(np.concatenate(rr, axis=-1))
mask_value += total_ww
mask = np.concatenate(mask, axis=0)
mask = np.reshape(mask, [self.height // self.window_height, self.window_height, self.width // self.window_width, self.window_width])
mask = np.transpose(mask, [0, 2, 1, 3])
mask = np.reshape(mask, [-1, self.window_height * self.window_width])
attn_mask = np.expand_dims(mask, 1) - np.expand_dims(mask, 2)
attn_mask = np.where(attn_mask != 0, -100, 0)
attn_mask = np.expand_dims(np.expand_dims(attn_mask, 1), 0) # expand dims on batch and num_heads
if hasattr(self, "register_buffer"): # PyTorch
self.register_buffer("attn_mask", functional.convert_to_tensor(attn_mask, dtype=self.compute_dtype), persistent=False)
else:
self.attn_mask = functional.convert_to_tensor(attn_mask, dtype=self.compute_dtype)
self.num_heads, self.query_blocks = input_shape[1], input_shape[2]
super().build(input_shape)
def call(self, inputs, **kwargs):
# inputs: [batch_size * blocks, num_heads, query_blocks, query_blocks]
# where query_blocks = `window_height * window_width`, blocks = `(height // window_height) * (width // window_width)`
nn = functional.reshape(inputs, [-1, self.blocks, self.num_heads, self.query_blocks, self.query_blocks])
nn = nn + self.attn_mask
return functional.reshape(nn, [-1, self.num_heads, self.query_blocks, self.query_blocks])
def compute_output_shape(self, input_shape):
return [None, self.num_heads, self.query_blocks, self.query_blocks]
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"window_height": self.window_height,
"window_width": self.window_width,
"shift_height": self.shift_height,
"shift_width": self.shift_width,
}
)
return config
def window_mhsa_with_pair_wise_positional_embedding(
inputs, num_heads=4, key_dim=0, meta_hidden_dim=384, mask=None, out_bias=True, attn_dropout=0, out_dropout=0, name=None
):
input_channel = inputs.shape[-1]
key_dim = key_dim if key_dim > 0 else input_channel // num_heads
qk_out = key_dim * num_heads
qkv = layers.Dense(qk_out * 3, use_bias=True, name=name and name + "qkv")(inputs)
qkv = functional.reshape(qkv, [-1, qkv.shape[1] * qkv.shape[2], qkv.shape[-1]])
query, key, value = functional.split(qkv, 3, axis=-1)
query = functional.transpose(functional.reshape(query, [-1, query.shape[1], num_heads, key_dim]), [0, 2, 1, 3]) # [batch, num_heads, hh * ww, key_dim]
key = functional.transpose(functional.reshape(key, [-1, key.shape[1], num_heads, key_dim]), [0, 2, 3, 1]) # [batch, num_heads, key_dim, hh * ww]
value = functional.transpose(functional.reshape(value, [-1, value.shape[1], num_heads, key_dim]), [0, 2, 1, 3]) # [batch, num_heads, hh * ww, vv_dim]
norm_query, norm_key = functional.l2_normalize(query, axis=-1, epsilon=1e-6), functional.l2_normalize(key, axis=-2, epsilon=1e-6)
attn = functional.matmul(norm_query, norm_key)
attn = DivideScale(axis=1, name=name and name + "scale")(attn) # axis=1 means on head dimension
# _relative_positional_encodings
    pos_coord = PairWiseRelativePositionalEmbedding(name=name and name + "pos_emb")(inputs)  # Wrapped as a layer so it appears in the model structure
relative_position_bias = mlp_block(pos_coord, meta_hidden_dim, output_channel=num_heads, drop_rate=0.1, activation="relu", name=name and name + "meta_")
relative_position_bias = functional.expand_dims(functional.transpose(relative_position_bias, [2, 0, 1]), 0)
attn = attn + relative_position_bias
if mask is not None:
attn = mask(attn)
attention_scores = layers.Softmax(axis=-1, name=name and name + "attention_scores")(attn)
if attn_dropout > 0:
attention_scores = layers.Dropout(attn_dropout, name=name and name + "attn_drop")(attention_scores)
attention_output = functional.matmul(attention_scores, value)
attention_output = functional.transpose(attention_output, [0, 2, 1, 3])
attention_output = functional.reshape(attention_output, [-1, inputs.shape[1], inputs.shape[2], num_heads * key_dim])
# print(f">>>> {attention_output.shape = }, {attention_scores.shape = }")
# [batch, hh, ww, num_heads * vv_dim] * [num_heads * vv_dim, out] --> [batch, hh, ww, out]
attention_output = layers.Dense(qk_out, use_bias=out_bias, name=name and name + "output")(attention_output)
attention_output = layers.Dropout(out_dropout, name=name and name + "out_drop")(attention_output) if out_dropout > 0 else attention_output
return attention_output
def shifted_window_attention(inputs, window_size, num_heads=4, shift_size=0, name=None):
input_channel = inputs.shape[-1]
window_size = window_size if isinstance(window_size, (list, tuple)) else [window_size, window_size]
window_height = window_size[0] if window_size[0] < inputs.shape[1] else inputs.shape[1]
window_width = window_size[1] if window_size[1] < inputs.shape[2] else inputs.shape[2]
shift_size = 0 if (window_height == inputs.shape[1] and window_width == inputs.shape[2]) else shift_size
should_shift = shift_size > 0
# window_partition, partition windows, ceil mode padding if not divisible by window_size
# patch_height, patch_width = inputs.shape[1] // window_height, inputs.shape[2] // window_width
patch_height, patch_width = int(math.ceil(inputs.shape[1] / window_height)), int(math.ceil(inputs.shape[2] / window_width))
should_pad_hh, should_pad_ww = patch_height * window_height - inputs.shape[1], patch_width * window_width - inputs.shape[2]
# print(f">>>> shifted_window_attention {inputs.shape = }, {should_pad_hh = }, {should_pad_ww = }")
if should_pad_hh or should_pad_ww:
inputs = functional.pad(inputs, [[0, 0], [0, should_pad_hh], [0, should_pad_ww], [0, 0]])
if should_shift:
shift_height, shift_width = int(window_height * shift_size), int(window_width * shift_size)
# tf.roll is not supported by tflite
# inputs = tf.roll(inputs, shift=(shift_height * -1, shift_width * -1), axis=[1, 2])
inputs = functional.concat([inputs[:, shift_height:], inputs[:, :shift_height]], axis=1)
inputs = functional.concat([inputs[:, :, shift_width:], inputs[:, :, :shift_width]], axis=2)
# print(f">>>> shifted_window_attention {inputs.shape = }, {patch_height = }, {patch_width = }, {window_height = }, {window_width = }")
# [batch * patch_height, window_height, patch_width, window_width * channel], limit transpose perm <= 4
nn = functional.reshape(inputs, [-1, window_height, patch_width, window_width * input_channel])
nn = functional.transpose(nn, [0, 2, 1, 3]) # [batch * patch_height, patch_width, window_height, window_width * channel]
nn = functional.reshape(nn, [-1, window_height, window_width, input_channel]) # [batch * patch_height * patch_width, window_height, window_width, channel]
mask = WindowAttentionMask(inputs.shape[1], inputs.shape[2], window_height, window_width, shift_height, shift_width) if should_shift else None
nn = window_mhsa_with_pair_wise_positional_embedding(nn, num_heads=num_heads, mask=mask, name=name)
# window_reverse, merge windows
# [batch * patch_height, patch_width, window_height, window_width * input_channel], limit transpose perm <= 4
nn = functional.reshape(nn, [-1, patch_width, window_height, window_width * input_channel])
nn = functional.transpose(nn, [0, 2, 1, 3]) # [batch * patch_height, window_height, patch_width, window_width * input_channel]
nn = functional.reshape(nn, [-1, patch_height * window_height, patch_width * window_width, input_channel])
if should_shift:
# nn = tf.roll(nn, shift=(shift_height, shift_width), axis=[1, 2])
nn = functional.concat([nn[:, -shift_height:], nn[:, :-shift_height]], axis=1)
nn = functional.concat([nn[:, :, -shift_width:], nn[:, :, :-shift_width]], axis=2)
# print(f">>>> shifted_window_attention before: {nn.shape = }, {should_pad_hh = }, {should_pad_ww = }")
if should_pad_hh or should_pad_ww:
nn = nn[:, : nn.shape[1] - should_pad_hh, : nn.shape[2] - should_pad_ww, :] # In case should_pad_hh or should_pad_ww is 0
# print(f">>>> shifted_window_attention after: {nn.shape = }")
return nn
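def _shift_via_concat_sketch():
    # Standalone check (NumPy only, illustrative): the two-concat shift used
    # in shifted_window_attention above equals a negative np.roll along the
    # same axis, i.e. what tf.roll(shift=-s) would compute.
    x = np.arange(16).reshape(1, 4, 4, 1)
    shifted = np.concatenate([x[:, 2:], x[:, :2]], axis=1)
    assert np.array_equal(shifted, np.roll(x, shift=-2, axis=1))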
def swin_transformer_block(
inputs, window_size, num_heads=4, shift_size=0, mlp_ratio=4, mlp_drop_rate=0, attn_drop_rate=0, drop_rate=0, layer_scale=-1, name=None
):
input_channel = inputs.shape[-1]
attn = shifted_window_attention(inputs, window_size, num_heads, shift_size, name=name + "attn_")
attn = layer_norm(attn, zero_gamma=True, axis=-1, name=name + "attn_")
attn = ChannelAffine(use_bias=False, weight_init_value=layer_scale, axis=-1, name=name + "1_gamma")(attn) if layer_scale >= 0 else attn
attn = drop_block(attn, drop_rate=drop_rate, name=name + "attn_")
attn_out = layers.Add(name=name + "attn_out")([inputs, attn])
mlp = mlp_block(attn_out, int(input_channel * mlp_ratio), drop_rate=mlp_drop_rate, activation="gelu", name=name + "mlp_")
mlp = layer_norm(mlp, zero_gamma=True, axis=-1, name=name + "mlp_")
mlp = ChannelAffine(use_bias=False, weight_init_value=layer_scale, axis=-1, name=name + "2_gamma")(mlp) if layer_scale >= 0 else mlp
mlp = drop_block(mlp, drop_rate=drop_rate, name=name + "mlp_")
return layers.Add(name=name + "output")([attn_out, mlp])
def patch_merging(inputs, name=""):
input_channel = inputs.shape[-1]
should_pad_hh, should_pad_ww = inputs.shape[1] % 2, inputs.shape[2] % 2
# print(f">>>> patch_merging {inputs.shape = }, {should_pad_hh = }, {should_pad_ww = }")
if should_pad_hh or should_pad_ww:
inputs = functional.pad(inputs, [[0, 0], [0, should_pad_hh], [0, should_pad_ww], [0, 0]])
# limit transpose perm <= 4
nn = functional.reshape(inputs, [-1, 2, inputs.shape[2], input_channel]) # [batch * inputs.shape[1] // 2, height 2, inputs.shape[2], input_channel]
nn = functional.transpose(nn, [0, 2, 1, 3]) # [batch * inputs.shape[1] // 2, inputs.shape[2], height 2, input_channel]
nn = functional.reshape(nn, [-1, inputs.shape[1] // 2, inputs.shape[2] // 2, 2 * 2 * input_channel])
nn = layer_norm(nn, axis=-1, name=name)
nn = layers.Dense(2 * input_channel, use_bias=False, name=name + "dense")(nn)
return nn
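def _patch_merging_order_sketch():
    # Standalone check (NumPy only, illustrative) of the channel order the
    # reshape/transpose trick in patch_merging produces for a single 2x2
    # block with C=1: (top-left, bottom-left, top-right, bottom-right).
    x = np.arange(4).reshape(1, 2, 2, 1)  # [[0, 1], [2, 3]]
    nn = x.reshape(-1, 2, 2, 1).transpose(0, 2, 1, 3).reshape(1, 1, 1, 4)
    assert nn.ravel().tolist() == [0, 2, 1, 3]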
def SwinTransformerV2(
num_blocks=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
embed_dim=96,
# window_ratio=32,
window_size=7,
stem_patch_size=4,
use_stack_norm=False, # True for extra layer_norm on each stack end
extra_norm_period=0, # > 0 for extra layer_norm frequency in each stack. May combine with use_stack_norm=True
layer_scale=-1,
input_shape=(224, 224, 3),
num_classes=1000,
drop_connect_rate=0,
classifier_activation="softmax",
dropout=0,
pretrained=None,
model_name="swin_transformer_v2",
kwargs=None,
):
"""Patch stem"""
# Regard input_shape as force using original shape if len(input_shape) == 4,
# else assume channel dimension is the one with min value in input_shape, and put it first or last regarding image_data_format
input_shape = backend.align_input_shape_by_image_data_format(input_shape)
inputs = layers.Input(input_shape)
nn = layers.Conv2D(embed_dim, kernel_size=stem_patch_size, strides=stem_patch_size, use_bias=True, name="stem_conv")(inputs)
nn = nn if backend.image_data_format() == "channels_last" else layers.Permute([2, 3, 1])(nn) # channels_first -> channels_last
nn = layer_norm(nn, axis=-1, name="stem_")
# window_size = [input_shape[0] // window_ratio, input_shape[1] // window_ratio]
# window_size = [int(tf.math.ceil(input_shape[0] / window_ratio)), int(tf.math.ceil(input_shape[1] / window_ratio))]
window_size = window_size if isinstance(window_size, (list, tuple)) else [window_size, window_size]
""" stages """
total_blocks = sum(num_blocks)
global_block_id = 0
for stack_id, (num_block, num_head) in enumerate(zip(num_blocks, num_heads)):
stack_name = "stack{}_".format(stack_id + 1)
if stack_id > 0:
# height, width downsample * 0.5, channel upsample * 2
nn = patch_merging(nn, name=stack_name + "downsample")
for block_id in range(num_block):
block_name = stack_name + "block{}_".format(block_id + 1)
block_drop_rate = drop_connect_rate * global_block_id / total_blocks
shift_size = 0 if block_id % 2 == 0 else 0.5
nn = swin_transformer_block(nn, window_size, num_head, shift_size, drop_rate=block_drop_rate, layer_scale=layer_scale, name=block_name)
global_block_id += 1
if extra_norm_period > 0 and (block_id + 1) % extra_norm_period == 0 and not (use_stack_norm and block_id == num_block - 1):
nn = layer_norm(nn, axis=-1, name=block_name + "output_")
if use_stack_norm and stack_id != len(num_blocks) - 1: # Exclude last stack
nn = layer_norm(nn, axis=-1, name=stack_name + "output_")
nn = layer_norm(nn, axis=-1, name="pre_output_")
nn = nn if backend.image_data_format() == "channels_last" else layers.Permute([3, 1, 2])(nn) # channels_last -> channels_first
nn = output_block(nn, num_classes=num_classes, drop_rate=dropout, classifier_activation=classifier_activation)
model = models.Model(inputs, nn, name=model_name)
add_pre_post_process(model, rescale_mode="torch")
reload_model_weights(model, PRETRAINED_DICT, "swin_transformer_v2", pretrained)
return model
@register_model
def SwinTransformerV2Tiny_ns(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
use_stack_norm = True
return SwinTransformerV2(**locals(), model_name="swin_transformer_v2_tiny_ns", **kwargs)
@register_model
def SwinTransformerV2Small_ns(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
num_blocks = [2, 2, 18, 2]
use_stack_norm = True
return SwinTransformerV2(**locals(), model_name="swin_transformer_v2_small_ns", **kwargs)
|
69b24a0713e8b579aa1311a81e7fa3155d802436
|
74426bb6633fa41e6c3eb86e22d58e18a9aba3c8
|
/wsgidav/server/server_sample.py
|
b1865750b427473363ec20288facdbbacbdcd347
|
[
"MIT",
"LGPL-2.0-or-later"
] |
permissive
|
mar10/wsgidav
|
30e89b10cca8984c8af951475390ea3ca113d03c
|
dd3e53a5c57b03c19d1d240fbe949b8f75dd6850
|
refs/heads/master
| 2023-08-31T05:34:01.991675
| 2023-08-27T07:21:27
| 2023-08-27T07:21:27
| 15,376,784
| 728
| 162
|
MIT
| 2023-08-27T07:21:28
| 2013-12-22T14:38:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
server_sample.py
|
# -*- coding: utf-8 -*-
# (c) 2009-2023 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Simple example of how to run WsgiDAV in a 3rd-party WSGI server.
"""
from cheroot import wsgi
from wsgidav import __version__, util
from wsgidav.fs_dav_provider import FilesystemProvider
from wsgidav.wsgidav_app import WsgiDAVApp
def main():
root_path = "."
provider = FilesystemProvider(root_path, readonly=False, fs_opts={})
config = {
"host": "127.0.0.1",
"port": 8080,
"provider_mapping": {"/": provider},
"http_authenticator": {
"domain_controller": None # None: dc.simple_dc.SimpleDomainController(user_mapping)
},
"simple_dc": {"user_mapping": {"*": True}}, # anonymous access
"verbose": 4,
"logging": {
"enable": True,
"enable_loggers": [],
},
"property_manager": True, # True: use property_manager.PropertyManager
"lock_storage": True, # True: use LockManager(lock_storage.LockStorageDict)
}
app = WsgiDAVApp(config)
# For an example, use cheroot:
version = (
f"WsgiDAV/{__version__} {wsgi.Server.version} Python/{util.PYTHON_VERSION}"
)
server = wsgi.Server(
bind_addr=(config["host"], config["port"]),
wsgi_app=app,
server_name=version,
# "numthreads": 50,
)
app.logger.info(f"Running {version}")
app.logger.info(f"Serving on http://{config['host']}:{config['port']}/ ...")
try:
server.start()
except KeyboardInterrupt:
app.logger.info("Received Ctrl-C: stopping...")
finally:
server.stop()
if __name__ == "__main__":
main()
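# Quick smoke test from another shell once the server is running (hedged:
# exact headers vary by version, but a WebDAV server should advertise its
# DAV compliance classes in response to OPTIONS):
#
#     curl -i -X OPTIONS http://127.0.0.1:8080/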
|
462e43d9a46b6b39d3f0ae2656097571d5e19fbf
|
39ae7e85ac6967b24886e08f3af58735a9b66adc
|
/utils/cluster.py
|
06dee26533cf56ff47514de7e7c844d162affb9b
|
[
"MIT"
] |
permissive
|
varunagrawal/tiny-faces-pytorch
|
fd5b6edc0ccb451da28629e53ec0ab3a1927dc7a
|
83058fe5ff22df2d9203432cdaf16ea668c00453
|
refs/heads/master
| 2023-07-20T18:03:11.951615
| 2023-05-10T15:12:05
| 2023-05-10T15:12:05
| 143,350,153
| 171
| 49
|
MIT
| 2023-07-20T13:15:14
| 2018-08-02T22:18:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
cluster.py
|
import argparse
from datetime import datetime
from pathlib import Path
import numpy as np
from PIL import Image, ImageDraw
from pyclust import KMedoids
from pyclustering.cluster.kmedoids import kmedoids
import joblib
from tqdm import tqdm
from .k_medoids import kMedoids
from .metrics import jaccard_index, rect_dist
def centralize_bbox(bboxes):
"""
    Convert corner-format bounding boxes (x1, y1, x2, y2) to centered
    (-w/2, -h/2, w/2, h/2) form, matching how widths and heights are read below.
We perform clustering based on aspect ratio only.
"""
print("Centralize and vectorize")
hs = bboxes[:, 3] - bboxes[:, 1] + 1
ws = bboxes[:, 2] - bboxes[:, 0] + 1
rects = np.vstack([-(ws-1)/2, -(hs-1)/2, (ws-1)/2, (hs-1)/2]).T
return rects
def compute_distances(bboxes):
print("Computing distances")
distances = np.zeros((len(bboxes), len(bboxes)))
for i in tqdm(range(len(bboxes)), total=len(bboxes)):
for j in range(len(bboxes)):
distances[i, j] = 1 - jaccard_index(bboxes[i, :], bboxes[j, :], (i, j))
return distances
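# For orientation, a minimal sketch of the IoU that `jaccard_index` (imported
# from .metrics) is assumed to compute for corner-format boxes; the real
# implementation may differ in details such as the +1 pixel convention:
#
# def iou(a, b):
#     iw = max(0, min(a[2], b[2]) - max(a[0], b[0]) + 1)
#     ih = max(0, min(a[3], b[3]) - max(a[1], b[1]) + 1)
#     inter = iw * ih
#     area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
#     area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
#     return inter / float(area_a + area_b - inter)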
def draw_bboxes(clusters):
"""
Draw and save the clustered bounding boxes for inspection
:param clusters:
:return:
"""
im = Image.new('RGB', [512, 512])
d = ImageDraw.Draw(im)
for bbox in clusters['medoids']:
box = [(0, 0), (-bbox[0]+bbox[2], -bbox[1]+bbox[3])]
color = tuple(np.random.choice(range(256), size=3))
d.rectangle(box, outline=color)
im.save("canonical_bbox_clusters_{0}.jpg".format(len(clusters['medoids'])))
# im.show()
def compute_kmedoids(bboxes, cls, option='pyclustering', indices=15, max_clusters=35, max_limit=5000):
print("Performing clustering using", option)
clustering = [{} for _ in range(indices)]
bboxes = centralize_bbox(bboxes)
# subsample the number of bounding boxes so that it can fit in memory and is faster
if bboxes.shape[0] > max_limit:
sub_ind = np.random.choice(np.arange(bboxes.shape[0]), size=max_limit, replace=False)
bboxes = bboxes[sub_ind]
distances_cache = Path('distances_{0}.jbl'.format(cls))
if distances_cache.exists():
print("Loading distances")
dist = joblib.load(distances_cache)
else:
dist = compute_distances(bboxes)
joblib.dump(dist, distances_cache, compress=5)
if option == 'pyclustering':
for k in range(indices, max_clusters+1):
print(k, "clusters")
initial_medoids = np.random.choice(bboxes.shape[0], size=k, replace=False)
kmedoids_instance = kmedoids(dist, initial_medoids, ccore=True, data_type='distance_matrix')
print("Running KMedoids")
t1 = datetime.now()
kmedoids_instance.process()
dt = datetime.now() - t1
print("Total time taken for clustering {k} medoids: {0}min:{1}s"
.format(dt.seconds // 60, dt.seconds % 60, k=k))
medoids_idx = kmedoids_instance.get_medoids()
medoids = bboxes[medoids_idx]
clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
elif option == 'pyclust':
for k in range(indices, max_clusters+1):
print(k, "clusters")
kmd = KMedoids(n_clusters=k, distance=rect_dist, n_trials=1, max_iter=2)
t1 = datetime.now()
kmd.fit(bboxes)
dt = datetime.now() - t1
print("Total time taken for clustering {k} medoids: {0}min:{1}s"
.format(dt.seconds//60, dt.seconds % 60, k=k))
medoids = kmd.centers_
clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
elif option == 'local':
for k in range(indices, max_clusters+1):
print(k, "clusters")
curr_medoids, cluster_idxs = kMedoids(dist, k=k)
medoids = []
for m in curr_medoids:
medoids.append(bboxes[m, :])
clustering.append({'n_clusters': k, 'medoids': medoids, 'class': cls})
return clustering
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument('dataset_path')
# 3 is the category ID for cars
parser.add_argument('--cls', default=3, type=int, help="Indicate which category of objects we are interested in")
parser.add_argument('--clustering', default='pyclustering', choices=('pyclustering', 'pyclust', 'local'))
return parser.parse_args()
# def main():
# args = arguments()
#
# bboxes = get_class_data(cls=args.cls, dataset_path=args.dataset_path)
#
# clustering = compute_kmedoids(bboxes, args.cls, option=args.clustering)
#
# cluster_file = Path(args.dataset_path, 'clustering.jbl')
#
# joblib.dump(clustering, cluster_file, compress=5)
#
# ## For visualization
# # clusters = joblib.load('clustering.jbl')
# # draw_bboxes(clusters[25])
# #
# # for i in range(25, 36):
# # draw_bboxes(clusters[i])
# if __name__ == "__main__":
# main()
|
0d6794e01f6c9ec33d4774b3a2b23dc9a770bd8a
|
576764ad37667f8da2c63aaa1a9f96da211795a6
|
/tests/forte/data/readers/conllu_ud_reader_test.py
|
43ab6980e7afb6352c2e991900187c96150296cd
|
[
"Apache-2.0"
] |
permissive
|
asyml/forte
|
96f852601647836dda3bccf3bd7900b9d10e6fcb
|
13e50aebe2afd79a7a8b3c01f0bb2568addea54f
|
refs/heads/master
| 2023-04-09T17:52:31.203644
| 2023-04-06T15:04:49
| 2023-04-06T15:04:49
| 201,518,876
| 233
| 73
|
Apache-2.0
| 2023-04-06T15:04:51
| 2019-08-09T18:12:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,138
|
py
|
conllu_ud_reader_test.py
|
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for conllU reader
"""
import os
import unittest
from typing import List
from forte.pipeline import Pipeline
from ft.onto.base_ontology import Sentence, Document, Dependency
from forte.data.readers import ConllUDReader
from forte.data.data_pack import DataPack
class ConllUDReaderTest(unittest.TestCase):
def setUp(self):
"""
Reading the data into data_pack object to be used in the tests
"""
file_dir_path = os.path.dirname(__file__)
conll_ud_dir = os.path.abspath(
os.path.join(
file_dir_path, *([os.pardir] * 4), "data_samples/conll_ud"
)
)
pl = Pipeline()
pl.set_reader(ConllUDReader())
pl.initialize()
self.data_packs: List[DataPack] = [
data_pack for data_pack in pl.process_dataset(conll_ud_dir)
]
self.doc_ids = [
"weblog-blogspot.com_nominations_20041117172713_ENG_"
"20041117_172713",
"weblog-blogspot.com_nominations_20041117172713_ENG_"
"20041117_172714",
]
def test_reader_text(self):
expected_docs_text = [
[
"From the AP comes this story :",
"President Bush on Tuesday nominated two individuals to "
"replace retiring jurists on federal courts in the "
"Washington area .",
],
[
"Bush nominated Jennifer M. Anderson for a 15 - year "
"term as associate judge of the Superior Court of the "
"District of Columbia , replacing Steffen W. Graae ."
],
]
self.assertEqual(len(self.data_packs), 2)
for doc_index, expected_doc_id in enumerate(self.doc_ids):
data_pack = self.data_packs[doc_index]
self.assertTrue(data_pack.pack_name == expected_doc_id)
doc_entry = None
for d in data_pack.get(Document):
doc_entry = d
break
expected_doc_text = expected_docs_text[doc_index]
self.assertEqual(doc_entry.text, " ".join(expected_doc_text))
sent_entries = data_pack.get(Sentence)
for sent_entry, expected_sent_text in zip(
sent_entries, expected_doc_text
):
self.assertEqual(sent_entry.text, expected_sent_text)
def test_reader_dependency_tree(self):
doc_index = 1
data_pack = self.data_packs[doc_index]
expected_doc_id = self.doc_ids[doc_index]
self.assertTrue(data_pack.pack_name == expected_doc_id)
self.assertEqual(len(list(data_pack.get(Sentence))), 1)
dependencies = data_pack.get(Dependency)
for link in dependencies:
root_token = get_dependency_tree_root(link, data_pack)
self.assertEqual(root_token.text, "nominated")
def get_dependency_tree_root(link, data_pack):
"""
Returns the root token of the dependency tree.
Args:
link: The intermediate dependency link.
data_pack: The data pack to be worked on.
    Returns:
        The root token of the dependency tree that the link belongs to.
    """
# TODO: make it robust enough to handle cycles for enhanced dependencies
token = link.get_parent()
if token.is_root:
return token
parent_link = list(data_pack.get_links_by_child(token))[0]
return (
token
if token.is_root
else get_dependency_tree_root(parent_link, data_pack)
)
if __name__ == "__main__":
unittest.main()
|
8e739aa03f0237eacc74d69d601a40ee3b7b0a49
|
0349e502733a4c25f020fbcad4715f598d686799
|
/rl_coach/presets/CartPole_DFP.py
|
523c3e3da91dd7bf28b46c98b5480b1fa9819df0
|
[
"Apache-2.0"
] |
permissive
|
IntelLabs/coach
|
679592e9887f5788229fef9d77a1a7975e959bc4
|
2c60cb5acd8cd3c9c381a5066c208e69fc273c7b
|
refs/heads/master
| 2023-09-05T17:56:19.435416
| 2022-12-11T17:54:06
| 2022-12-11T17:54:06
| 105,468,219
| 497
| 102
|
Apache-2.0
| 2021-12-27T09:52:30
| 2017-10-01T19:27:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
CartPole_DFP.py
|
from rl_coach.agents.dfp_agent import DFPAgentParameters, HandlingTargetsAfterEpisodeEnd
from rl_coach.base_parameters import VisualizationParameters, EmbedderScheme, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import LinearSchedule
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(100)
#########
# Agent #
#########
agent_params = DFPAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.0001
agent_params.network_wrappers['main'].input_embedders_parameters['observation'].scheme = EmbedderScheme.Medium
agent_params.network_wrappers['main'].input_embedders_parameters['goal'].scheme = EmbedderScheme.Medium
agent_params.network_wrappers['main'].input_embedders_parameters['measurements'].scheme = EmbedderScheme.Medium
agent_params.exploration.epsilon_schedule = LinearSchedule(0.5, 0.01, 3000)
agent_params.exploration.evaluation_epsilon = 0.01
agent_params.algorithm.discount = 1.0
agent_params.algorithm.use_accumulated_reward_as_measurement = True
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(1)
agent_params.algorithm.goal_vector = [1] # accumulated_reward
agent_params.algorithm.handling_targets_after_episode_end = HandlingTargetsAfterEpisodeEnd.LastStep
###############
# Environment #
###############
env_params = GymVectorEnvironment(level='CartPole-v0')
########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 120
preset_validation_params.max_episodes_to_achieve_reward = 250
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
schedule_params=schedule_params, vis_params=VisualizationParameters(),
preset_validation_params=preset_validation_params)
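# Presets like this one are normally launched through the Coach CLI (a hedged
# note; assumes rl_coach is installed and its `coach` entry point is on PATH):
#
#   coach -p CartPole_DFP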
|
785d980ac78f3ff9e029a5119947dc47701b0f05
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/espnet2/enh/encoder/conv_encoder.py
|
ef3a44b9a6c011b2d7e11da056b026dcb84abf4b
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
conv_encoder.py
|
import math
import torch
from espnet2.enh.encoder.abs_encoder import AbsEncoder
class ConvEncoder(AbsEncoder):
"""Convolutional encoder for speech enhancement and separation"""
def __init__(
self,
channel: int,
kernel_size: int,
stride: int,
):
super().__init__()
self.conv1d = torch.nn.Conv1d(
1, channel, kernel_size=kernel_size, stride=stride, bias=False
)
self.stride = stride
self.kernel_size = kernel_size
self._output_dim = channel
@property
def output_dim(self) -> int:
return self._output_dim
def forward(self, input: torch.Tensor, ilens: torch.Tensor):
"""Forward.
Args:
input (torch.Tensor): mixed speech [Batch, sample]
ilens (torch.Tensor): input lengths [Batch]
Returns:
feature (torch.Tensor): mixed feature after encoder [Batch, flens, channel]
"""
assert input.dim() == 2, "Currently only support single channel input"
input = torch.unsqueeze(input, 1)
feature = self.conv1d(input)
feature = torch.nn.functional.relu(feature)
feature = feature.transpose(1, 2)
flens = (
torch.div(ilens - self.kernel_size, self.stride, rounding_mode="trunc") + 1
)
return feature, flens
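    # Worked example of the frame-length arithmetic above: with ilens=100,
    # kernel_size=32 and stride=10, flens = (100 - 32) // 10 + 1 = 7 frames,
    # matching the number of chunks produced by streaming_frame() below.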
def forward_streaming(self, input: torch.Tensor):
output, _ = self.forward(input, 0)
return output
def streaming_frame(self, audio: torch.Tensor):
"""streaming_frame. It splits the continuous audio into frame-level
audio chunks in the streaming *simulation*. It is noted that this
function takes the entire long audio as input for a streaming simulation.
You may refer to this function to manage your streaming input
buffer in a real streaming application.
Args:
audio: (B, T)
Returns:
chunked: List [(B, frame_size),]
"""
batch_size, audio_len = audio.shape
hop_size = self.stride
frame_size = self.kernel_size
audio = [
audio[:, i * hop_size : i * hop_size + frame_size]
for i in range((audio_len - frame_size) // hop_size + 1)
]
return audio
if __name__ == "__main__":
input_audio = torch.randn((2, 100))
ilens = torch.LongTensor([100, 98])
nfft = 32
win_length = 28
hop = 10
encoder = ConvEncoder(kernel_size=nfft, stride=hop, channel=16)
frames, flens = encoder(input_audio, ilens)
splited = encoder.streaming_frame(input_audio)
sframes = [encoder.forward_streaming(s) for s in splited]
sframes = torch.cat(sframes, dim=1)
torch.testing.assert_allclose(sframes, frames)
|
ceff232ea70ae3d8873b52c15a91a940cc043a75
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/mercado.py
|
abbe53f0722caa00265ceb1d9c707b7fbf59f56d
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 34,786
|
py
|
mercado.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.mercado import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import TICK_SIZE
class mercado(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(mercado, self).describe(), {
'id': 'mercado',
'name': 'Mercado Bitcoin',
'countries': ['BR'], # Brazil
'rateLimit': 1000,
'version': 'v3',
'has': {
'CORS': True,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createMarketOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': False,
'createStopMarketOrder': False,
'createStopOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarginMode': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': 'emulated',
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositionMode': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'withdraw': True,
},
'timeframes': {
'15m': '15m',
'1h': '1h',
'3h': '3h',
'1d': '1d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27837060-e7c58714-60ea-11e7-9192-f05e86adb83f.jpg',
'api': {
'public': 'https://www.mercadobitcoin.net/api',
'private': 'https://www.mercadobitcoin.net/tapi',
'v4Public': 'https://www.mercadobitcoin.com.br/v4',
'v4PublicNet': 'https://api.mercadobitcoin.net/api/v4',
},
'www': 'https://www.mercadobitcoin.com.br',
'doc': [
'https://www.mercadobitcoin.com.br/api-doc',
'https://www.mercadobitcoin.com.br/trade-api',
],
},
'api': {
'public': {
'get': [
'coins',
'{coin}/orderbook/', # last slash critical
'{coin}/ticker/',
'{coin}/trades/',
'{coin}/trades/{from}/',
'{coin}/trades/{from}/{to}',
'{coin}/day-summary/{year}/{month}/{day}/',
],
},
'private': {
'post': [
'cancel_order',
'get_account_info',
'get_order',
'get_withdrawal',
'list_system_messages',
'list_orders',
'list_orderbook',
'place_buy_order',
'place_sell_order',
'place_market_buy_order',
'place_market_sell_order',
'withdraw_coin',
],
},
'v4Public': {
'get': [
'{coin}/candle/',
],
},
'v4PublicNet': {
'get': [
'candles',
],
},
},
'fees': {
'trading': {
'maker': 0.003,
'taker': 0.007,
},
},
'options': {
'limits': {
'BTC': 0.001,
'BCH': 0.001,
'ETH': 0.01,
'LTC': 0.01,
'XRP': 0.1,
},
},
'precisionMode': TICK_SIZE,
})
def fetch_markets(self, params={}):
"""
retrieves data on all markets for mercado
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
response = self.publicGetCoins(params)
#
# [
# "BCH",
# "BTC",
# "ETH",
# "LTC",
# "XRP",
# "MBPRK01",
# "MBPRK02",
# "MBPRK03",
# "MBPRK04",
# "MBCONS01",
# "USDC",
# "WBX",
# "CHZ",
# "MBCONS02",
# "PAXG",
# "MBVASCO01",
# "LINK"
# ]
#
result = []
amountLimits = self.safe_value(self.options, 'limits', {})
for i in range(0, len(response)):
coin = response[i]
baseId = coin
quoteId = 'BRL'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
id = quote + base
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': None,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number('1e-8'),
'price': self.parse_number('1e-5'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(amountLimits, baseId),
'max': None,
},
'price': {
'min': self.parse_number('1e-5'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': coin,
})
return result
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'coin': market['base'],
}
response = self.publicGetCoinOrderbook(self.extend(request, params))
return self.parse_order_book(response, market['symbol'])
def parse_ticker(self, ticker, market=None):
#
# {
# "high":"103.96000000",
# "low":"95.00000000",
# "vol":"2227.67806598",
# "last":"97.91591000",
# "buy":"95.52760000",
# "sell":"97.91475000",
# "open":"99.79955000",
# "date":1643382606
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_timestamp(ticker, 'date')
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}, market)
def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'coin': market['base'],
}
response = self.publicGetCoinTicker(self.extend(request, params))
ticker = self.safe_value(response, 'ticker', {})
#
# {
# "ticker": {
# "high":"1549.82293000",
# "low":"1503.00011000",
# "vol":"81.82827101",
# "last":"1533.15000000",
# "buy":"1533.21018000",
# "sell":"1540.09000000",
# "open":"1524.71089000",
# "date":1643691671
# }
# }
#
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp_2(trade, 'date', 'executed_timestamp')
market = self.safe_market(None, market)
id = self.safe_string_2(trade, 'tid', 'operation_id')
type = None
side = self.safe_string(trade, 'type')
price = self.safe_string(trade, 'price')
amount = self.safe_string_2(trade, 'amount', 'quantity')
feeCost = self.safe_string(trade, 'fee_rate')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': None,
'fee': fee,
}, market)
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
method = 'publicGetCoinTrades'
request = {
'coin': market['base'],
}
if since is not None:
method += 'From'
request['from'] = self.parse_to_int(since / 1000)
to = self.safe_integer(params, 'to')
if to is not None:
method += 'To'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_balance(self, response):
data = self.safe_value(response, 'response_data', {})
balances = self.safe_value(data, 'balance', {})
result = {'info': response}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
if currencyId in balances:
balance = self.safe_value(balances, currencyId, {})
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['total'] = self.safe_string(balance, 'total')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
self.load_markets()
response = self.privatePostGetAccountInfo(params)
return self.parse_balance(response)
def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
}
method = self.capitalize(side) + 'Order'
if type == 'limit':
method = 'privatePostPlace' + method
request['limit_price'] = self.price_to_precision(market['symbol'], price)
request['quantity'] = self.amount_to_precision(market['symbol'], amount)
else:
method = 'privatePostPlaceMarket' + method
if side == 'buy':
if price is None:
raise InvalidOrder(self.id + ' createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount')
request['cost'] = self.price_to_precision(market['symbol'], amount * price)
else:
request['quantity'] = self.amount_to_precision(market['symbol'], amount)
response = getattr(self, method)(self.extend(request, params))
# TODO: replace self with a call to parseOrder for unification
return self.safe_order({
'info': response,
'id': str(response['response_data']['order']['order_id']),
}, market)
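    # Usage sketch for the market-buy path above (illustrative values; on this
    # exchange a market buy needs a reference price so that
    # cost = amount * price can be computed):
    #
    #   exchange.create_order('BTC/BRL', 'market', 'buy', 0.001, 150000)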
def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
'order_id': id,
}
response = self.privatePostCancelOrder(self.extend(request, params))
#
# {
# response_data: {
# order: {
# order_id: 2176769,
# coin_pair: 'BRLBCH',
# order_type: 2,
# status: 3,
# has_fills: False,
# quantity: '0.10000000',
# limit_price: '1996.15999',
# executed_quantity: '0.00000000',
# executed_price_avg: '0.00000',
# fee: '0.00000000',
# created_timestamp: '1536956488',
# updated_timestamp: '1536956499',
# operations: []
# }
# },
# status_code: 100,
# server_unix_timestamp: '1536956499'
# }
#
responseData = self.safe_value(response, 'response_data', {})
order = self.safe_value(responseData, 'order', {})
return self.parse_order(order, market)
def parse_order_status(self, status):
statuses = {
'2': 'open',
'3': 'canceled',
'4': 'closed',
}
return self.safe_string(statuses, status, status)
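    # e.g. parse_order_status('3') -> 'canceled'; unrecognized status codes
    # fall through unchanged because safe_string returns the input as default.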
def parse_order(self, order, market=None):
#
# {
# "order_id": 4,
# "coin_pair": "BRLBTC",
# "order_type": 1,
# "status": 2,
# "has_fills": True,
# "quantity": "2.00000000",
# "limit_price": "900.00000",
# "executed_quantity": "1.00000000",
# "executed_price_avg": "900.00000",
# "fee": "0.00300000",
# "created_timestamp": "1453838494",
# "updated_timestamp": "1453838494",
# "operations": [
# {
# "operation_id": 1,
# "quantity": "1.00000000",
# "price": "900.00000",
# "fee_rate": "0.30",
# "executed_timestamp": "1453838494",
# },
# ],
# }
#
id = self.safe_string(order, 'order_id')
order_type = self.safe_string(order, 'order_type')
side = None
if 'order_type' in order:
side = 'buy' if (order_type == '1') else 'sell'
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'coin_pair')
market = self.safe_market(marketId, market)
timestamp = self.safe_timestamp(order, 'created_timestamp')
fee = {
'cost': self.safe_string(order, 'fee'),
'currency': market['quote'],
}
price = self.safe_string(order, 'limit_price')
# price = self.safe_number(order, 'executed_price_avg', price)
average = self.safe_string(order, 'executed_price_avg')
amount = self.safe_string(order, 'quantity')
filled = self.safe_string(order, 'executed_quantity')
lastTradeTimestamp = self.safe_timestamp(order, 'updated_timestamp')
rawTrades = self.safe_value(order, 'operations', [])
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': market['symbol'],
'type': 'limit',
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'triggerPrice': None,
'cost': None,
'average': average,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': rawTrades,
}, market)
def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
'order_id': int(id),
}
response = self.privatePostGetOrder(self.extend(request, params))
responseData = self.safe_value(response, 'response_data', {})
order = self.safe_value(responseData, 'order')
return self.parse_order(order, market)
def withdraw(self, code: str, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str tag:
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns dict: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'quantity': format(amount, '.10f'),
'address': address,
}
if code == 'BRL':
account_ref = ('account_ref' in params)
if not account_ref:
raise ArgumentsRequired(self.id + ' withdraw() requires account_ref parameter to withdraw ' + code)
elif code != 'LTC':
tx_fee = ('tx_fee' in params)
if not tx_fee:
raise ArgumentsRequired(self.id + ' withdraw() requires tx_fee parameter to withdraw ' + code)
if code == 'XRP':
if tag is None:
if not ('destination_tag' in params):
raise ArgumentsRequired(self.id + ' withdraw() requires a tag argument or destination_tag parameter to withdraw ' + code)
else:
request['destination_tag'] = tag
response = self.privatePostWithdrawCoin(self.extend(request, params))
#
# {
# "response_data": {
# "withdrawal": {
# "id": 1,
# "coin": "BRL",
# "quantity": "300.56",
# "net_quantity": "291.68",
# "fee": "8.88",
# "account": "bco: 341, ag: 1111, cta: 23456-X",
# "status": 1,
# "created_timestamp": "1453912088",
# "updated_timestamp": "1453912088"
# }
# },
# "status_code": 100,
# "server_unix_timestamp": "1453912088"
# }
#
responseData = self.safe_value(response, 'response_data', {})
withdrawal = self.safe_value(responseData, 'withdrawal')
return self.parse_transaction(withdrawal, currency)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": 1,
# "coin": "BRL",
# "quantity": "300.56",
# "net_quantity": "291.68",
# "fee": "8.88",
# "account": "bco: 341, ag: 1111, cta: 23456-X",
# "status": 1,
# "created_timestamp": "1453912088",
# "updated_timestamp": "1453912088"
# }
#
currency = self.safe_currency(None, currency)
return {
'id': self.safe_string(transaction, 'id'),
'txid': None,
'timestamp': None,
'datetime': None,
'network': None,
'addressFrom': None,
'address': None,
'addressTo': None,
'amount': None,
'type': None,
'currency': currency['code'],
'status': None,
'updated': None,
'tagFrom': None,
'tag': None,
'tagTo': None,
'comment': None,
'fee': None,
'info': transaction,
}
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol: str, timeframe='15m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
request = {
'resolution': self.safe_string(self.timeframes, timeframe, timeframe),
'symbol': market['base'] + '-' + market['quote'], # exceptional endpoint, that needs custom symbol syntax
}
if limit is None:
            limit = 100  # set some default limit, it's required if the user doesn't provide one
if since is not None:
request['from'] = self.parse_to_int(since / 1000)
request['to'] = self.sum(request['from'], limit * self.parse_timeframe(timeframe))
else:
request['to'] = self.seconds()
request['from'] = request['to'] - (limit * self.parse_timeframe(timeframe))
response = self.v4PublicNetGetCandles(self.extend(request, params))
candles = self.convert_trading_view_to_ohlcv(response, 't', 'o', 'h', 'l', 'c', 'v')
return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def fetch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple orders made by the user
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
        :param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
}
response = self.privatePostListOrders(self.extend(request, params))
responseData = self.safe_value(response, 'response_data', {})
orders = self.safe_value(responseData, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open orders structures to retrieve
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
'status_list': '[2]', # open only
}
response = self.privatePostListOrders(self.extend(request, params))
responseData = self.safe_value(response, 'response_data', {})
orders = self.safe_value(responseData, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades structures to retrieve
:param dict [params]: extra parameters specific to the mercado api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'coin_pair': market['id'],
'has_fills': True,
}
response = self.privatePostListOrders(self.extend(request, params))
responseData = self.safe_value(response, 'response_data', {})
ordersRaw = self.safe_value(responseData, 'orders', [])
orders = self.parse_orders(ordersRaw, market, since, limit)
trades = self.orders_to_trades(orders)
return self.filter_by_symbol_since_limit(trades, market['symbol'], since, limit)
def orders_to_trades(self, orders):
result = []
for i in range(0, len(orders)):
trades = self.safe_value(orders[i], 'trades', [])
for y in range(0, len(trades)):
result.append(trades[y])
return result
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
query = self.omit(params, self.extract_params(path))
if (api == 'public') or (api == 'v4Public') or (api == 'v4PublicNet'):
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
url += self.version + '/'
nonce = self.nonce()
body = self.urlencode(self.extend({
'tapi_method': path,
'tapi_nonce': nonce,
}, params))
auth = '/tapi/' + self.version + '/' + '?' + body
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'TAPI-ID': self.apiKey,
'TAPI-MAC': self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
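    # For reference, the private-endpoint signature built above is equivalent
    # to this standalone sketch (hypothetical key material, illustrative nonce):
    #
    #   import hashlib, hmac
    #   body = 'tapi_method=get_account_info&tapi_nonce=1'
    #   auth = '/tapi/v3/?' + body
    #   mac = hmac.new(b'SECRET', auth.encode(), hashlib.sha512).hexdigest()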
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None
#
# todo add a unified standard handleErrors with self.exceptions in describe()
#
# {"status":503,"message":"Maintenancing, try again later","result":null}
#
errorMessage = self.safe_value(response, 'error_message')
if errorMessage is not None:
raise ExchangeError(self.id + ' ' + self.json(response))
return None
|
a794a0342974d4117750b205ce75a573ace2b619
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Eras/python/Modifier_run3_egamma_cff.py
|
5e0c0d5b734e4a0f76ace0fcd84e33fa71d332e5
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 70
|
py
|
Modifier_run3_egamma_cff.py
|
import FWCore.ParameterSet.Config as cms
run3_egamma = cms.Modifier()
|
690b9f5693ab7d43370a8b9acdf138167eb47053
|
483424524c70852cc043e0d77bf1b757a61d797a
|
/deepspeed/module_inject/containers/features/gated_mlp.py
|
24f0826db14ed08f373a2824f845ac6f9d7d9508
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/DeepSpeed
|
810f1af320020718d0794f5a97cde6f1d17af122
|
55d9964c59c0c6e23158b5789a5c36c28939a7b0
|
refs/heads/master
| 2023-09-06T07:40:52.145692
| 2023-09-05T23:51:23
| 2023-09-05T23:51:23
| 235,860,204
| 27,557
| 3,347
|
Apache-2.0
| 2023-09-14T21:38:46
| 2020-01-23T18:35:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,397
|
py
|
gated_mlp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
from .hybrid_engine import HybridEngineContainer
class HybridGatedMLPContainer(HybridEngineContainer):
"""
The HybridGatedMLPContainer supports models for which the first MLP layer
is represented with two separate weights, one for the activation function
and one for the gating function.
"""
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
super().set_mlp(_h4h_w, _h4h_b, _4hh_w, _4hh_b)
self.set_mlp_gate()
@abstractmethod
def set_mlp_gate(self):
"""
In `set_mlp_gate`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.inter_up_w: inter up weight
self.inter_up_b: inter up bias
self.inter_gate_w: inter gate weight
self.inter_gate_b: inter gate bias
If the parameter does not exist in the original model, set the attribute to None.
"""
raise NotImplementedError("A set_mlp_gate() function must be defined in the model container \
in order to set the unfused inter up and gate tensors.")
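    # A minimal illustration of what a concrete subclass's set_mlp_gate()
    # might look like for a LLaMA-style gated MLP (module names here are
    # hypothetical, not taken from any specific DeepSpeed container):
    #
    #   def set_mlp_gate(self):
    #       self.inter_up_w = self.policy.client_module.mlp.up_proj.weight
    #       self.inter_up_b = None
    #       self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight
    #       self.inter_gate_b = None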
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
# Only need to alter behavior if we can't do the normal destructive copy
if self.module.mlp.inter_w is None:
params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
for dst, src in params:
dst = mp_replace.copy(dst[:self.inter_up_w.shape[0] // mp_replace.mp_size],
src,
int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
self.module.mlp.inter_w = mp_replace.strided_copy(self.module.mlp.inter_w,
self._h4h_w,
num_splits=2,
int8=reversed_dim)
self.module.mlp.inter_b = mp_replace.strided_copy(self.module.mlp.inter_b,
self._h4h_b,
num_splits=2,
int8=reversed_dim)
def release_mlp(self):
super().release_mlp()
gated_mlp_params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
self._release_params(gated_mlp_params)
def reset_mlp(self):
self._h4h_w.data[:self.inter_up_w.shape[0]] = self.inter_up_w.data
self._h4h_w.data[self.inter_up_w.shape[0]:] = self.inter_gate_w.data
if self.inter_up_b is not None:
self._h4h_b.data[:self.inter_up_b.shape[0]] = self.inter_up_b.data
self._h4h_b.data[self.inter_up_b.shape[0]:] = self.inter_gate_b.data
inter_data = [self.inter_up_w.data, self.inter_gate_w.data]
if self.inter_up_b is not None:
inter_data.extend([self.inter_up_b.data, self.inter_gate_b.data])
self.inter_up_w.data = self._h4h_w.data[:self.inter_up_w.shape[0]]
self.inter_gate_w.data = self._h4h_w.data[self.inter_up_w.shape[0]:]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b.data[:self.inter_up_b.shape[0]]
self.inter_gate_b.data = self._h4h_b.data[self.inter_up_b.shape[0]:]
for data in inter_data:
del data
def set_mlp_params_wo_copy(self, Z3_enabled=False):
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
if not Z3_enabled:
# In initialize_tensors, we create a fused inter projection with the appropriate shape
# and copy the up projection and gate projection into it
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.inter_up_w.data = self._h4h_w[:self.inter_up_w.shape[0], :]
self.inter_gate_w.data = self._h4h_w[self.inter_up_w.shape[0]:, :]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b[:self.inter_up_w.shape[0]] if self._h4h_b is not None else None
self.inter_gate_b.data = self._h4h_b[self.inter_up_w.shape[0]:] if self._h4h_b is not None else None
else:
self.module.mlp.inter_up_w = self.inter_up_w
self.module.mlp.inter_up_b = self.inter_up_b
self.module.mlp.inter_gate_w = self.inter_gate_w
self.module.mlp.inter_gate_b = self.inter_gate_b
def get_mlp_params(self):
params = super().get_mlp_params()
params.extend([self.inter_up_w, self.inter_up_b, self.inter_gate_w, self.inter_gate_b])
return params
|
f854c9044de6c73000a53700be4260466dd3ad47
|
c1b77c0b1630c2e319e7ba7782a744f4ac867f7d
|
/gpytorch/module.py
|
ff431a4218c91555d865985c5e7562f745874ff9
|
[
"MIT",
"Python-2.0"
] |
permissive
|
cornellius-gp/gpytorch
|
6b9ab969b2888fa7f27f236a1b20041f00cc0253
|
5e93d2c04ac0634a7aeea9fd964be529bb250888
|
refs/heads/master
| 2023-08-31T21:13:02.741585
| 2023-08-25T19:24:53
| 2023-08-25T19:24:53
| 93,868,719
| 3,182
| 578
|
MIT
| 2023-09-13T01:06:00
| 2017-06-09T14:48:20
|
Python
|
UTF-8
|
Python
| false
| false
| 25,459
|
py
|
module.py
|
#!/usr/bin/env python3
import copy
import inspect
import itertools
import operator
from collections import OrderedDict
from typing import Union
import torch
from linear_operator.operators import LinearOperator
from torch import nn, Tensor
from torch.distributions import Distribution
from .constraints import Interval
class Module(nn.Module):
def __init__(self):
super().__init__()
self._added_loss_terms = OrderedDict()
self._priors = OrderedDict()
self._constraints = OrderedDict()
self._strict_init = True
self._load_strict_shapes = True
self._register_load_state_dict_pre_hook(self._load_state_hook_ignore_shapes)
def __call__(self, *inputs, **kwargs) -> Union[Tensor, Distribution, LinearOperator]:
outputs = self.forward(*inputs, **kwargs)
if isinstance(outputs, list):
return [_validate_module_outputs(output) for output in outputs]
return _validate_module_outputs(outputs)
def _clear_cache(self):
"""
Clear any precomputed caches.
Should be implemented by any module that caches any computation at test time.
"""
pass
def _get_module_and_name(self, parameter_name):
"""Get module and name from full parameter name."""
module, name = parameter_name.split(".", 1)
if module in self._modules:
return self.__getattr__(module), name
else:
raise AttributeError(
"Invalid parameter name {}. {} has no module {}".format(parameter_name, type(self).__name__, module)
)
def _strict(self, value):
_set_strict(self, value)
def added_loss_terms(self):
for _, strategy in self.named_added_loss_terms():
yield strategy
def forward(self, *inputs, **kwargs) -> Union[Tensor, Distribution, LinearOperator]:
raise NotImplementedError
def constraints(self):
for _, constraint in self.named_constraints():
yield constraint
def hyperparameters(self):
for _, param in self.named_hyperparameters():
yield param
def initialize(self, **kwargs):
"""
        Set a value for a parameter.
        kwargs: (param_name, value) - the parameter(s) to initialize.
Can also initialize recursively by passing in the full name of a
parameter. For example if model has attribute model.likelihood,
we can initialize the noise with either
`model.initialize(**{'likelihood.noise': 0.1})`
or
`model.likelihood.initialize(noise=0.1)`.
The former method would allow users to more easily store the
initialization values as one object.
Value can take the form of a tensor, a float, or an int
"""
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
if "." in name:
module, name = self._get_module_and_name(name)
if isinstance(module, nn.ModuleList):
idx, name = name.split(".", 1)
module[int(idx)].initialize(**{name: val})
else:
module.initialize(**{name: val})
elif not hasattr(self, name):
raise AttributeError("Unknown parameter {p} for {c}".format(p=name, c=self.__class__.__name__))
elif name not in self._parameters and name not in self._buffers:
setattr(self, name, val)
elif torch.is_tensor(val):
constraint = self.constraint_for_parameter_name(name)
if constraint is not None and constraint.enforced and not constraint.check_raw(val):
raise RuntimeError(
"Attempting to manually set a parameter value that is out of bounds of "
f"its current constraints, {constraint}. "
"Most likely, you want to do the following:\n likelihood = GaussianLikelihood"
"(noise_constraint=gpytorch.constraints.GreaterThan(better_lower_bound))"
)
try:
self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
except RuntimeError:
if not self._strict_init:
self.__getattr__(name).data = val
else:
self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
elif isinstance(val, float):
constraint = self.constraint_for_parameter_name(name)
if constraint is not None and not constraint.check_raw(val):
raise RuntimeError(
"Attempting to manually set a parameter value that is out of bounds of "
f"its current constraints, {constraint}. "
"Most likely, you want to do the following:\n likelihood = GaussianLikelihood"
"(noise_constraint=gpytorch.constraints.GreaterThan(better_lower_bound))"
)
self.__getattr__(name).data.fill_(val)
else:
raise AttributeError("Type {t} not valid for initializing parameter {p}".format(t=type(val), p=name))
# Ensure value is contained in support of prior (if present)
prior_name = "_".join([name, "prior"])
if prior_name in self._priors:
prior, closure, _ = self._priors[prior_name]
try:
prior._validate_sample(closure(self))
except ValueError as e:
raise ValueError("Invalid input value for prior {}. Error:\n{}".format(prior_name, e))
return self
def named_added_loss_terms(self):
"""Returns an iterator over module variational strategies, yielding both
the name of the variational strategy as well as the strategy itself.
Yields:
(string, VariationalStrategy): Tuple containing the name of the
strategy and the strategy
"""
return _extract_named_added_loss_terms(module=self, memo=None, prefix="")
def named_hyperparameters(self):
from .variational._variational_distribution import _VariationalDistribution
for module_prefix, module in self.named_modules():
if not isinstance(module, _VariationalDistribution):
for elem in module.named_parameters(prefix=module_prefix, recurse=False):
yield elem
def named_priors(self, memo=None, prefix=""):
"""Returns an iterator over the module's priors, yielding the name of the prior,
the prior, the associated parameter names, and the transformation callable.
Yields:
(string, Module, Prior, tuple((Parameter, callable)), callable): Tuple containing:
- the name of the prior
- the parent module of the prior
- the prior
- a tuple of tuples (param, transform), one for each of the parameters associated with the prior
- the prior's transform to be called on the parameters
"""
return _extract_named_priors(module=self, prefix="")
def named_constraints(self, memo=None, prefix=""):
return _extract_named_constraints(module=self, memo=None, prefix="")
def named_variational_parameters(self):
from .variational._variational_distribution import _VariationalDistribution
for module_prefix, module in self.named_modules():
if isinstance(module, _VariationalDistribution):
for elem in module.named_parameters(prefix=module_prefix, recurse=False):
yield elem
def register_added_loss_term(self, name):
self._added_loss_terms[name] = None
def register_parameter(self, name, parameter):
r"""
Adds a parameter to the module. The parameter can be accessed as an attribute using the given name.
Args:
name (str):
The name of the parameter
parameter (torch.nn.Parameter):
The parameter
"""
if "_parameters" not in self.__dict__:
raise AttributeError("Cannot assign parameter before Module.__init__() call")
super().register_parameter(name, parameter)
def register_prior(self, name, prior, param_or_closure, setting_closure=None):
"""
Adds a prior to the module. The prior can be accessed as an attribute using the given name.
Args:
name (str):
The name of the prior
prior (Prior):
The prior to be registered`
param_or_closure (string or callable):
                Either the name of the parameter, or a closure (which upon calling evaluates a function on
the module instance and one or more parameters):
single parameter without a transform: `.register_prior("foo_prior", foo_prior, "foo_param")`
transform a single parameter (e.g. put a log-Normal prior on it):
`.register_prior("foo_prior", NormalPrior(0, 1), lambda module: torch.log(module.foo_param))`
function of multiple parameters:
`.register_prior("foo2_prior", foo2_prior, lambda module: f(module.param1, module.param2)))`
setting_closure (callable, optional):
A function taking in the module instance and a tensor in (transformed) parameter space,
initializing the internal parameter representation to the proper value by applying the
                inverse transform. Enables setting parameters directly in the transformed space, as well
as sampling parameter values from priors (see `sample_from_prior`)
"""
if isinstance(param_or_closure, str):
if param_or_closure not in self._parameters and not hasattr(self, param_or_closure):
raise AttributeError(
"Unknown parameter {name} for {module}".format(
name=param_or_closure, module=self.__class__.__name__
)
+ " Make sure the parameter is registered before registering a prior."
)
def closure(module):
return getattr(module, param_or_closure)
if setting_closure is not None:
raise RuntimeError("Must specify a closure instead of a parameter name when providing setting_closure")
def setting_closure(module, val):
return module.initialize(**{param_or_closure: val})
else:
if len(inspect.signature(param_or_closure).parameters) == 0:
raise ValueError(
"""As of version 1.4, `param_or_closure` must operate on a module instance. For example:
likelihood.noise_covar.register_prior(
"noise_std_prior",
gpytorch.priors.NormalPrior(0, 1),
lambda module: module.noise.sqrt()
)
"""
)
if inspect.isfunction(setting_closure) and len(inspect.signature(setting_closure).parameters) < 2:
raise ValueError(
"""As of version 1.4, `setting_closure` must operate on a module instance and a tensor. For example:
kernel.register_prior(
"radius_prior",
gpytorch.priors.LogNormalPrior(0, 1),
lambda module: module.radius,
                    lambda module, value: module._set_radius(value),
)
"""
)
closure = param_or_closure
self.add_module(name, prior)
self._priors[name] = (prior, closure, setting_closure)
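    # Usage sketch (hedged: assumes a kernel exposing a `lengthscale` property
    # backed by a raw parameter, as gpytorch kernels conventionally do):
    #
    #   kernel.register_prior(
    #       "lengthscale_prior",
    #       gpytorch.priors.GammaPrior(3.0, 6.0),
    #       lambda module: module.lengthscale,
    #       lambda module, value: module._set_lengthscale(value),
    #   )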
def register_constraint(self, param_name, constraint, replace=True):
if param_name not in self._parameters:
raise RuntimeError("Attempting to register constraint for nonexistent parameter.")
constraint_name = param_name + "_constraint"
if constraint_name in self._constraints:
current_constraint = self._constraints[constraint_name]
else:
current_constraint = None
if isinstance(current_constraint, Interval) and not replace:
new_constraint = constraint.intersect(current_constraint)
else:
new_constraint = constraint
self.add_module(constraint_name, new_constraint)
self._constraints[constraint_name] = new_constraint
# re-initialize the parameter if the constraint specifies an initial value
if new_constraint.initial_value is not None:
self.initialize(**{param_name: new_constraint.initial_value})
def train(self, mode=True):
        # If we're going into training mode, we need to clear any pre-computed caches from eval mode
if (self.training and not mode) or mode:
self._clear_cache()
return super().train(mode=mode)
def constraint_for_parameter_name(self, param_name):
base_module = self
base_name = param_name
while "." in base_name:
components = base_name.split(".")
submodule_name = components[0]
submodule = getattr(base_module, submodule_name)
base_module = submodule
base_name = ".".join(components[1:])
try:
constraint_name = base_name + "_constraint"
return base_module._constraints.get(constraint_name)
except AttributeError: # submodule may not always be a gpytorch module
return None
def _load_state_hook_ignore_shapes(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
if not self._load_strict_shapes:
local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
local_state = {k: v for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if key in state_dict:
param.data = state_dict[key].data
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
# If we're loading from a state dict, we need to clear any precomputed caches
self._clear_cache()
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def load_strict_shapes(self, value):
def apply_fn(module):
module._load_strict_shapes = value
self.apply(apply_fn)
def named_parameters_and_constraints(self):
for name, param in self.named_parameters():
yield name, param, self.constraint_for_parameter_name(name)
def sample_from_prior(self, prior_name):
"""Sample parameter values from prior. Modifies the module's parameters in-place."""
if prior_name not in self._priors:
raise RuntimeError("Unknown prior name '{}'".format(prior_name))
prior, _, setting_closure = self._priors[prior_name]
if setting_closure is None:
raise RuntimeError("Must provide inverse transform to be able to sample from prior.")
setting_closure(self, prior.sample())
def to_pyro_random_module(self):
return self.to_random_module()
def to_random_module(self):
random_module_cls = type("_Random" + self.__class__.__name__, (RandomModuleMixin, self.__class__), {})
if not isinstance(self, random_module_cls):
new_module = copy.deepcopy(self)
new_module.__class__ = random_module_cls # hack
else:
# Unclear if this branch would ever get used in practice, but it semantically makes sense to have.
new_module = copy.deepcopy(self)
for mname, child in new_module.named_children():
if isinstance(child, Module):
setattr(new_module, mname, child.to_random_module())
return new_module
def pyro_sample_from_prior(self):
"""
        For each parameter in this module and its submodules that has a defined prior, sample a value for that
        parameter from its corresponding prior with a pyro.sample primitive and load the resulting value into the parameter.
This method can be used in a Pyro model to conveniently define pyro sample sites for all
parameters of the model that have GPyTorch priors registered to them.
"""
new_module = self.to_pyro_random_module()
return _pyro_sample_from_prior(module=new_module, memo=None, prefix="")
def local_load_samples(self, samples_dict, memo, prefix):
"""
Defines local behavior of this Module when loading parameters from a samples_dict generated by a Pyro
sampling mechanism.
The default behavior here should almost always be called from any overriding class. However, a class may
want to add additional functionality, such as reshaping things to account for the fact that parameters will
acquire an extra batch dimension corresponding to the number of samples drawn.
"""
self._strict(False)
for name, (prior, closure, setting_closure) in self._priors.items():
if prior is not None and prior not in memo:
memo.add(prior)
setting_closure(self, samples_dict[prefix + ("." if prefix else "") + name])
self._strict(True)
def pyro_load_from_samples(self, samples_dict):
"""
        Convert this Module into a batch Module by loading parameters from the given `samples_dict`. `samples_dict`
is typically produced by a Pyro sampling mechanism.
Note that the keys of the samples_dict should correspond to prior names (covar_module.outputscale_prior) rather
than parameter names (covar_module.raw_outputscale), because we will use the setting_closure associated with
the prior to properly set the unconstrained parameter.
Args:
samples_dict (dict): Dictionary mapping *prior names* to sample values.
"""
return _pyro_load_from_samples(module=self, samples_dict=samples_dict, memo=None, prefix="")
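    # Hedged example (not in the original source): per the docstring above, the
    # samples_dict keys map *prior names* to batched sample tensors, e.g.
    #   model.pyro_load_from_samples({"covar_module.outputscale_prior": samples})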
def update_added_loss_term(self, name, added_loss_term):
from .mlls import AddedLossTerm
if not isinstance(added_loss_term, AddedLossTerm):
raise RuntimeError("added_loss_term must be a AddedLossTerm")
if name not in self._added_loss_terms.keys():
raise RuntimeError("added_loss_term {} not registered".format(name))
self._added_loss_terms[name] = added_loss_term
def variational_parameters(self):
for _, param in self.named_variational_parameters():
yield param
def _validate_module_outputs(outputs):
if isinstance(outputs, tuple):
if not all(
torch.is_tensor(output) or isinstance(output, Distribution) or isinstance(output, LinearOperator)
for output in outputs
):
raise RuntimeError(
"All outputs must be a Distribution, torch.Tensor, or LinearOperator. "
"Got {}".format([output.__class__.__name__ for output in outputs])
)
if len(outputs) == 1:
outputs = outputs[0]
return outputs
elif torch.is_tensor(outputs) or isinstance(outputs, Distribution) or isinstance(outputs, LinearOperator):
return outputs
else:
raise RuntimeError(
"Output must be a Distribution, torch.Tensor, or LinearOperator. Got {}".format(outputs.__class__.__name__)
)
def _set_strict(module, value, memo=None):
    if memo is None:
        memo = set()
    if module in memo:
        return
    memo.add(module)
    if hasattr(module, "_strict_init"):
        module._strict_init = value
    for mname, module_ in module.named_children():
        # pass the memo through so shared submodules are only visited once
        _set_strict(module_, value, memo=memo)
def _pyro_sample_from_prior(module, memo=None, prefix=""):
try:
import pyro
except ImportError:
raise RuntimeError("Cannot call pyro_sample_from_prior without pyro installed!")
if memo is None:
memo = set()
if hasattr(module, "_priors"):
for prior_name, (prior, closure, setting_closure) in module._priors.items():
if prior is not None and prior not in memo:
if setting_closure is None:
raise RuntimeError(
"Cannot use Pyro for sampling without a setting_closure for each prior,"
f" but the following prior had none: {prior_name}, {prior}."
)
memo.add(prior)
prior = prior.expand(closure(module).shape)
value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
setting_closure(module, value)
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
_pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
return module
def _pyro_load_from_samples(module, samples_dict, memo=None, prefix=""):
if memo is None:
memo = set()
if hasattr(module, "_priors"):
module.local_load_samples(samples_dict, memo, prefix)
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
_pyro_load_from_samples(module_, samples_dict, memo=memo, prefix=submodule_prefix)
def _extract_named_added_loss_terms(module, memo=None, prefix=""):
if memo is None:
memo = set()
if hasattr(module, "_added_loss_terms"):
for name, strategy in module._added_loss_terms.items():
if strategy is not None and strategy not in memo:
memo.add(strategy)
yield prefix + ("." if prefix else "") + name, strategy
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
for name, strategy in _extract_named_added_loss_terms(module=module_, memo=memo, prefix=submodule_prefix):
yield name, strategy
def _extract_named_priors(module, prefix=""):
if hasattr(module, "_priors"):
for name, (prior, closure, inv_closure) in module._priors.items():
if prior is not None:
full_name = ("." if prefix else "").join([prefix, name])
yield full_name, module, prior, closure, inv_closure
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
for name, parent_module, prior, closure, inv_closure in _extract_named_priors(module_, prefix=submodule_prefix):
yield name, parent_module, prior, closure, inv_closure
def _extract_named_constraints(module, memo=None, prefix=""):
if memo is None:
memo = set()
if hasattr(module, "_constraints"):
for name, constraint in module._constraints.items():
if constraint is not None and constraint not in memo:
memo.add(constraint)
full_name = ("." if prefix else "").join([prefix, name])
yield full_name, constraint
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
for name, constraint in _extract_named_constraints(module_, memo=memo, prefix=submodule_prefix):
yield name, constraint
class RandomModuleMixin(object):
def initialize(self, **kwargs):
"""
Set a value for a parameter
kwargs: (param_name, value) - parameter to initialize.
Can also initialize recursively by passing in the full name of a
        parameter. For example, if the model has attribute model.likelihood,
we can initialize the noise with either
`model.initialize(**{'likelihood.noise': 0.1})`
or
`model.likelihood.initialize(noise=0.1)`.
The former method would allow users to more easily store the
initialization values as one object.
Value can take the form of a tensor, a float, or an int
"""
for name, value in kwargs.items():
if not torch.is_tensor(value):
raise RuntimeError("Initialize in RandomModules can only be done with tensor values.")
names = name.rsplit(".")
if len(names) > 1:
mod_name, param_name = names
mod = operator.attrgetter(mod_name)(self)
else:
mod, param_name = self, name
old_param = getattr(mod, param_name)
is_property = hasattr(type(self), name) and isinstance(getattr(type(self), name), property)
if not isinstance(old_param, torch.nn.Parameter) or is_property:
# Presumably we're calling a getter that will call initialize again on the actual parameter.
setattr(mod, param_name, value.expand(old_param.shape))
else:
delattr(mod, param_name)
setattr(mod, param_name, value.expand(old_param.shape))
return self
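# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the closure-based prior and constraint registration described
# above; assumes gpytorch and its standard kernels/priors are available.
def _example_prior_and_constraint_registration():
    import gpytorch
    kernel = gpytorch.kernels.RBFKernel()
    # A closure reads the (constrained) value; a setting_closure writes it back.
    kernel.register_prior(
        "lengthscale_prior",
        gpytorch.priors.GammaPrior(3.0, 6.0),
        lambda module: module.lengthscale,
        lambda module, value: module._set_lengthscale(value),
    )
    # Re-registering a constraint for an existing parameter replaces it; the
    # parameter is re-initialized only if the constraint has an initial value.
    kernel.register_constraint("raw_lengthscale", gpytorch.constraints.Positive())
    kernel.sample_from_prior("lengthscale_prior")  # modifies the kernel in-place
    return kernel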
|
0adb345f30e36adcffa3c874c45f74097d57c0d7
|
7d232f51e2330a4f537c50ede9c6bc023d656fd4
|
/examples/python/keep_alive/helloworld_pb2_grpc.py
|
47c186976e1c6ea1cf51aaa751506c49448a7c63
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
grpc/grpc
|
6975af3ba6f07a6fe965b875a0c09abf18999a52
|
e4d598ab64aa54f1da78c6ed6133b741742d11d4
|
refs/heads/master
| 2023-08-31T01:10:22.666618
| 2023-08-30T22:35:17
| 2023-08-30T22:35:17
| 27,729,880
| 42,330
| 13,022
|
Apache-2.0
| 2023-09-14T21:54:19
| 2014-12-08T18:58:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
helloworld_pb2_grpc.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import helloworld_pb2 as helloworld__pb2
class GreeterStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SayHello = channel.unary_unary(
'/helloworld.Greeter/SayHello',
request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
response_deserializer=helloworld__pb2.HelloReply.FromString,
)
class GreeterServicer(object):
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello,
request_deserializer=helloworld__pb2.HelloRequest.FromString,
response_serializer=helloworld__pb2.HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'helloworld.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Greeter(object):
"""The greeting service definition.
"""
@staticmethod
def SayHello(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/helloworld.Greeter/SayHello',
helloworld__pb2.HelloRequest.SerializeToString,
helloworld__pb2.HelloReply.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
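# --- Hedged usage sketch (not part of the generated file) ---
# Minimal server wiring for the generated service; assumes grpcio is
# installed and that helloworld_pb2 defines HelloRequest / HelloReply.
def _example_serve():
    from concurrent import futures
    class _Greeter(GreeterServicer):
        def SayHello(self, request, context):
            return helloworld__pb2.HelloReply(message='Hello, %s!' % request.name)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_GreeterServicer_to_server(_Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()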
|
ba50d9fe6539cfe409bc6e9cf939a7f9945aff39
|
6f2fef1b207299681f8d67d3831c400bb91de04b
|
/data_collection/gazette/spiders/sc_palhoca.py
|
ec388a54ea0f156acd9837815f228010c743386a
|
[
"MIT"
] |
permissive
|
okfn-brasil/querido-diario
|
76177747aa5ad47e99514f38402e6bc747b9a715
|
548a9b1b2718dc78ba8ccb06b36cf337543ad71d
|
refs/heads/main
| 2023-08-22T04:26:30.798196
| 2023-08-18T14:12:37
| 2023-08-18T14:12:37
| 127,598,755
| 402
| 233
|
MIT
| 2023-09-14T18:56:02
| 2018-04-01T05:01:21
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
sc_palhoca.py
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScPalhocaSpider(FecamGazetteSpider):
name = "sc_palhoca"
FECAM_QUERY = "cod_entidade:185"
TERRITORY_ID = "4211900"
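# Hedged usage note (not part of the original spider): within the
# querido-diario repository this would typically be run with
# `scrapy crawl sc_palhoca`, letting FecamGazetteSpider drive the crawl.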
|
11a77de00ad6ba402ecdb712fab0c9ac8f23f4ca
|
93b858db8d5fd4990c36a6c9eed3b75047152863
|
/tests/test_obj_Cuboid.py
|
280a2879a9884088e8196f02747a305a157c5842
|
[
"BSD-2-Clause"
] |
permissive
|
magpylib/magpylib
|
c4dc908c4e1a0109f65f15cd73584693ddc71f4b
|
7fa3514afceb28e8b7ba94f2821f5e9c789cc996
|
refs/heads/main
| 2023-09-01T00:04:39.420872
| 2023-08-27T05:32:14
| 2023-08-27T05:32:14
| 169,390,106
| 174
| 39
|
BSD-2-Clause
| 2023-09-03T18:34:52
| 2019-02-06T10:39:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,144
|
py
|
test_obj_Cuboid.py
|
import os
import pickle
import numpy as np
import magpylib as magpy
from magpylib._src.obj_classes.class_Sensor import Sensor
from magpylib.magnet import Cuboid
# # # """data generation for test_Cuboid()"""
# # N = 100
# # mags = (np.random.rand(N,3)-0.5)*1000
# # dims = np.random.rand(N,3)*5
# # posos = (np.random.rand(N,333,3)-0.5)*10 #readout at 333 positions
# # angs = (np.random.rand(N,18)-0.5)*2*10 # each step rote by max 10 deg
# # axs = (np.random.rand(N,18,3)-0.5)
# # anchs = (np.random.rand(N,18,3)-0.5)*5.5
# # movs = (np.random.rand(N,18,3)-0.5)*0.5
# # B = []
# # for mag,dim,ang,ax,anch,mov,poso in zip(mags,dims,angs,axs,anchs,movs,posos):
# # pm = magpy.magnet.Cuboid(mag,dim)
# # # 18 subsequent operations
# # for a,aa,aaa,mv in zip(ang,ax,anch,mov):
# # pm.move(mv).rotate_from_angax(a,aa,aaa)
# # B += [pm.getB(poso)]
# # B = np.array(B)
# # inp = [mags,dims,posos,angs,axs,anchs,movs,B]
# # pickle.dump(inp, open(os.path.abspath('testdata_Cuboid.p'), 'wb'))
def test_Cuboid_basics():
"""test Cuboid fundamentals"""
# data generated in comment above
data = pickle.load(
open(os.path.abspath("./tests/testdata/testdata_Cuboid.p"), "rb")
)
mags, dims, posos, angs, axs, anchs, movs, B = data
btest = []
for mag, dim, ang, ax, anch, mov, poso in zip(
mags, dims, angs, axs, anchs, movs, posos
):
pm = magpy.magnet.Cuboid(mag, np.abs(dim))
# 18 subsequent operations
for a, aa, aaa, mv in zip(ang, ax, anch, mov):
pm.move(mv).rotate_from_angax(a, aa, aaa, start=-1)
btest += [pm.getB(poso)]
btest = np.array(btest)
np.testing.assert_allclose(B, btest)
def test_Cuboid_add():
"""testing __add__"""
src1 = Cuboid((1, 2, 3), (1, 2, 3))
src2 = Cuboid((1, 2, 3), (1, 2, 3))
col = src1 + src2
    assert isinstance(col, magpy.Collection), "adding cuboids failed"
def test_Cuboid_squeeze():
"""testing squeeze output"""
src1 = Cuboid((1, 1, 1), (1, 1, 1))
sensor = Sensor(pixel=[(1, 2, 3), (1, 2, 3)])
B = src1.getB(sensor)
assert B.shape == (2, 3)
H = src1.getH(sensor)
assert H.shape == (2, 3)
B = src1.getB(sensor, squeeze=False)
assert B.shape == (1, 1, 1, 2, 3)
H = src1.getH(sensor, squeeze=False)
assert H.shape == (1, 1, 1, 2, 3)
def test_repr_cuboid():
"""test __repr__"""
pm1 = Cuboid((1, 2, 3), (1, 2, 3))
pm1.style.label = "cuboid_01"
assert pm1.__repr__()[:6] == "Cuboid", "Cuboid repr failed"
assert "label='cuboid_01'" in pm1.__repr__(), "Cuboid repr failed"
def test_cuboid_object_vs_lib():
"""
includes a test of the input copy problem
"""
a = 1
mag = np.array([(10, 20, 30)])
dim = np.array([(a, a, a)])
pos = np.array([(2 * a, 2 * a, 2 * a)])
B0 = magpy.core.magnet_cuboid_field("B", pos, mag, dim)
H0 = magpy.core.magnet_cuboid_field("H", pos, mag, dim)
src = magpy.magnet.Cuboid(mag[0], dim[0])
B1 = src.getB(pos)
H1 = src.getH(pos)
np.testing.assert_allclose(B0[0], B1)
np.testing.assert_allclose(H0[0], H1)
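# --- Hedged sketch (not part of the original tests) ---
# The object API and the functional core compared above should agree; this
# mirrors the positional (magnetization, dimension) style used in this file.
def _example_object_vs_core():
    mag = np.array([(10, 20, 30)])
    dim = np.array([(1, 1, 1)])
    pos = np.array([(2, 2, 2)])
    b_core = magpy.core.magnet_cuboid_field("B", pos, mag, dim)
    b_obj = magpy.magnet.Cuboid(mag[0], dim[0]).getB(pos)
    np.testing.assert_allclose(b_core[0], b_obj)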
|
c4a551d7eaaf2dcf7690734daeb8977e2b57fdf0
|
499f5402baed77d000c65f243b457c69dc3d2fe4
|
/pycatia/hybrid_shape_interfaces/hybrid_shape_boundary.py
|
4b51fbaeda1c429359e5b882e00a7d264bbc92b1
|
[
"MIT"
] |
permissive
|
evereux/pycatia
|
416189b34f3c60effea8a76258e36ffc5ae86e22
|
5f5726d5dc66265b3eba8a01910c4aeae424365d
|
refs/heads/master
| 2023-08-21T10:03:41.660445
| 2023-08-09T16:21:10
| 2023-08-09T16:21:10
| 159,069,580
| 141
| 42
|
MIT
| 2023-08-09T11:15:27
| 2018-11-25T20:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 7,565
|
py
|
hybrid_shape_boundary.py
|
#! usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeBoundary(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeBoundary
|
| Represents the hybrid shape boundary feature object.
| Role: To access the data of the hybrid shape boundary feature object. This data
| includes:
|
| The boundary propagation
| The initial element used for the boundary propagation
| The boundary support
|
| Use the CATIAHybridShapeFactory to create a HybridShapeBoundary
| object.
|
| See also:
| HybridShapeFactory
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_boundary = com_object
@property
def from_(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property From() As Reference
|
| Removes or sets the ending limit(i.e Limit2) of the boundary
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_boundary.From)
@from_.setter
def from_(self, reference: Reference):
"""
:param Reference reference:
"""
self.hybrid_shape_boundary.From = reference.com_object
@property
def from_orientation(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FromOrientation() As long
|
| Gets or sets the Ending Limit Orientation (i.e same or inverse)
:return: int
:rtype: int
"""
return self.hybrid_shape_boundary.FromOrientation
@from_orientation.setter
def from_orientation(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_boundary.FromOrientation = value
@property
def initial_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property InitialElement() As Reference
|
| Returns or sets the element used to initialize the boundary
| propagation.
| Sub-element(s) supported (see Boundary object):
| BiDimFeatEdge.
|
| Example:
| This example retrieves in InitElem the initial element of the
| ShpBoundary hybrid shape boundary feature.
|
| Dim InitElem As Reference
| InitElem = ShpBoundary.InitialElement
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_boundary.InitialElement)
@initial_element.setter
def initial_element(self, value: Reference):
"""
:param Reference value:
"""
self.hybrid_shape_boundary.InitialElement = value.com_object
@property
def propagation(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Propagation() As long
|
| Returns or sets the boundary propagation.
| Legal values: xxxxxxxxxx
|
| Example:
| This example retrieves in Prop the boundary propagation of the
| ShpBoundary hybrid shape boundary feature.
|
| Prop = ShpBoundary.Propagation
:return: int
:rtype: int
"""
return self.hybrid_shape_boundary.Propagation
@propagation.setter
def propagation(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_boundary.Propagation = value
@property
def support(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Support() As Reference
|
| Returns or sets the support surface around which the boundary is
| computed.
| Sub-element(s) supported (see Boundary object): Face.
|
| Example:
| This example retrieves in SupSurf the initial element of the
| ShpBoundary hybrid shape boundary feature.
|
| Dim SupSurf As Reference
| SupSurf = ShpBoundary.Support
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_boundary.Support)
@support.setter
def support(self, support_reference: Reference):
"""
:param Reference support_reference:
"""
self.hybrid_shape_boundary.Support = support_reference.com_object
@property
def to(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property To() As Reference
|
| Removes or sets the starting limit(i.e Limit1) of the boundary
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_boundary.To)
@to.setter
def to(self, value: Reference):
"""
:param Reference value:
"""
self.hybrid_shape_boundary.To = value.com_object
@property
def to_orientation(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property ToOrientation() As long
|
| Gets or sets the Starting Limit Orientation (i.e same or inverse)
:return: int
:rtype: int
"""
return self.hybrid_shape_boundary.ToOrientation
@to_orientation.setter
def to_orientation(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_boundary.ToOrientation = value
def __repr__(self):
return f'HybridShapeBoundary(name="{self.name}")'
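# --- Hedged usage sketch (not part of the original module) ---
# Shows how the wrapped properties round-trip through pycatia Reference
# objects; assumes a live CATIA session in which `shp_boundary` (a
# HybridShapeBoundary) and `face_ref` (a Reference) were obtained elsewhere,
# e.g. from a HybridShapeFactory.
def _example_round_trip(shp_boundary: 'HybridShapeBoundary', face_ref: Reference) -> Reference:
    shp_boundary.support = face_ref  # writes face_ref.com_object to the COM property
    return shp_boundary.support      # reads back as a pycatia Reference wrapper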
|
33058d52b6bcd478b75b4cc5ed290c149c9a9499
|
a6c84bfd01cf40a2ca32b538c02aa971a2abec88
|
/src/triage/component/catwalk/storage.py
|
81d60a79092c7e6b1ee5a85a8abb9b57615cf64f
|
[
"MIT"
] |
permissive
|
dssg/triage
|
01b480d103f1eba7e00822410a8ba462378c1f12
|
1b2049a9d10d8c6b70586e6fbc945ac4fa32fe68
|
refs/heads/master
| 2023-08-25T12:53:10.463078
| 2023-08-03T22:54:49
| 2023-08-03T22:54:49
| 71,394,134
| 177
| 64
|
NOASSERTION
| 2023-09-13T17:41:43
| 2016-10-19T19:55:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 20,731
|
py
|
storage.py
|
# coding: utf-8
import itertools
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
import os
import pathlib
from contextlib import contextmanager
from os.path import dirname
from urllib.parse import urlparse
import gzip
import pandas as pd
import s3fs
import wrapt
import yaml
import joblib
from triage.component.results_schema import (
TestEvaluation,
TrainEvaluation,
TestPrediction,
TrainPrediction,
ListPrediction,
TestPredictionMetadata,
TrainPredictionMetadata,
ListPredictionMetadata,
TestAequitas,
TrainAequitas
)
from triage.util.pandas import downcast_matrix
class Store:
"""Base class for classes which know how to access a file in a preset medium.
    Used to hold references to persisted objects with knowledge about how they can be accessed
    without loading them into memory. In this way, they can be easily and quickly serialized
    across processes while centralizing the reading/writing code.
    Each subclass should be scoped to a specific storage medium (e.g. Filesystem, S3)
and implement the access methods for that medium.
Implements write/load methods for interacting directly using bytestreams,
plus an open method that works as an open filehandle.
"""
def __init__(self, *pathparts):
self.pathparts = pathparts
    @classmethod
    def factory(cls, *pathparts):
path_parsed = urlparse(pathparts[0])
scheme = path_parsed.scheme
if scheme in ("", "file"):
return FSStore(*pathparts)
elif scheme == "s3":
return S3Store(*pathparts)
else:
raise ValueError("Unable to infer correct Store from project path")
def __str__(self):
return f"{self.__class__.__name__}(path={self.path})"
def __repr__(self):
return str(self)
def exists(self):
raise NotImplementedError
def load(self):
with self.open("rb") as fd:
return fd.read()
def write(self, bytestream):
with self.open("wb") as fd:
fd.write(bytestream)
def open(self, *args, **kwargs):
raise NotImplementedError
class S3Store(Store):
"""Store an object in S3.
Example:
```
store = S3Store('s3://my-bucket', 'models', 'model.pkl')
return store.load()
```
Args:
path_head, *path_parts: one or more path components,
(to be joined by PurePosixPath to create the final path).
**config: arguments to be passed to the S3Fs client constructor.
"""
class S3FileWrapper(wrapt.ObjectProxy):
# don't allow wrapped object to take wrapper's place
# upon __enter__
def __enter__(self):
return self
def write(self, data, block_size=(5 * 2 ** 20)):
out = 0
for offset in itertools.count(0, block_size):
chunk = data[offset:(offset + block_size)]
if not chunk:
return out
out += self.__wrapped__.write(chunk)
def __init__(self, path_head, *path_parts, **config):
self.path = str(
pathlib.PurePosixPath(path_head.replace('s3://', ''),
*path_parts)
)
self.config = config
@property
def client(self):
return s3fs.S3FileSystem(**self.config)
def exists(self):
return self.client.exists(self.path)
def delete(self):
self.client.rm(self.path)
def open(self, *args, **kwargs):
# NOTE: remove S3FileWrapper as soon as s3fs properly
# NOTE: chunks out too-large writes
# NOTE: see also: tests.catwalk_tests.test_storage.test_S3Store_large
s3file = self.client.open(self.path, *args, **kwargs)
return self.S3FileWrapper(s3file)
class FSStore(Store):
"""Store an object on the local filesystem.
Example:
```
store = FSStore('/mnt', 'models', 'model.pkl')
return store.load()
```
Args:
*pathparts: A variable length list of components of the path, to be processed in order.
All components will be joined using pathlib.Path to create the final path
using the correct separator for the operating system. However, if you pass
components that already contain a separator, those separators won't be modified
"""
def __init__(self, *pathparts):
self.path = pathlib.Path(*pathparts)
os.makedirs(dirname(self.path), exist_ok=True)
def exists(self):
return os.path.isfile(self.path)
def delete(self):
os.remove(self.path)
def open(self, *args, **kwargs):
return open(self.path, *args, **kwargs)
class ProjectStorage:
"""Store and access files associated with a project.
Args:
project_path (string): The base path for all files in the project.
The scheme prefix of the path will determine the storage medium.
"""
def __init__(self, project_path):
self.project_path = project_path
self.storage_class = Store.factory(self.project_path).__class__
def get_store(self, directories, leaf_filename):
"""Return a storage object for one filename
Args:
directories (list): A list of subdirectories
leaf_filename (string): The filename without any directory information
Returns:
triage.component.catwalk.storage.Store object
"""
return self.storage_class(self.project_path, *directories, leaf_filename)
def matrix_storage_engine(self, matrix_storage_class=None, matrix_directory=None):
"""Return a matrix storage engine bound to this project's storage
Args:
matrix_storage_class (class) A subclass of MatrixStore
matrix_directory (string, optional) A directory to store matrices.
If not passed will allow the MatrixStorageEngine to decide
Returns: triage.component.catwalk.storage.MatrixStorageEngine
"""
return MatrixStorageEngine(self, matrix_storage_class, matrix_directory)
def model_storage_engine(self, model_directory=None):
"""Return a model storage engine bound to this project's storage
Args:
model_directory (string, optional) A directory to store models
If not passed will allow the ModelStorageEngine to decide
Returns: triage.component.catwalk.storage.ModelStorageEngine
"""
return ModelStorageEngine(self, model_directory)
class ModelStorageEngine:
"""Store arbitrary models in a given project storage using joblib
Args:
project_storage (triage.component.catwalk.storage.ProjectStorage)
A project file storage engine
model_directory (string, optional) A directory name for models.
Defaults to 'trained_models'
"""
def __init__(self, project_storage, model_directory=None):
self.project_storage = project_storage
self.directories = [model_directory or "trained_models"]
self.should_cache = False
self.reset_cache()
def reset_cache(self):
self.cache = {}
@contextmanager
def cache_models(self):
"""Caches each model in memory as it is written.
Must be used as a context manager.
The cache is cleared when the context manager goes out of scope
"""
self.should_cache = True
try:
yield
finally:
self.reset_cache()
self.should_cache = False
def write(self, obj, model_hash):
"""Persist a model object using joblib. Also performs compression
Args:
obj (object) A picklable model object
model_hash (string) An identifier, unique within this project, for the model
"""
if self.should_cache:
logger.spam(f"Caching model {model_hash}")
self.cache[model_hash] = obj
with self._get_store(model_hash).open("wb") as fd:
joblib.dump(obj, fd, compress=True)
def load(self, model_hash):
"""Load a model object using joblib
Args:
model_hash (string) An identifier, unique within this project, for the model
Returns: (object) A model object
"""
if self.should_cache and model_hash in self.cache:
logger.spam(f"Returning model {model_hash} from cache")
return self.cache[model_hash]
with self._get_store(model_hash).open("rb") as fd:
return joblib.load(fd)
def exists(self, model_hash):
"""Check whether the model is persisted
Args:
model_hash (string) An identifier, unique within this project, for the model
Returns: (bool) Whether or not a model by that identifier exists in project storage
"""
return self._get_store(model_hash).exists()
def delete(self, model_hash):
"""Delete the model identified by this hash from project storage
Args:
model_hash (string) An identifier, unique within this project, for the model
"""
return self._get_store(model_hash).delete()
def _get_store(self, model_hash):
return self.project_storage.get_store(self.directories, model_hash)
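# --- Hedged usage sketch (not part of the original module) ---
# Models written inside cache_models() are also kept in memory, so the
# subsequent load is served from the cache instead of project storage.
def _example_model_cache(engine, obj, model_hash):
    with engine.cache_models():
        engine.write(obj, model_hash)   # persists via joblib and caches in memory
        return engine.load(model_hash)  # hits engine.cache, not storage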
class MatrixStorageEngine:
"""Store matrices in a given project storage
Args:
project_storage (triage.component.catwalk.storage.ProjectStorage)
A project file storage engine
matrix_storage_class (class) A subclass of MatrixStore
matrix_directory (string, optional) A directory to store matrices. Defaults to 'matrices'
"""
def __init__(
self, project_storage, matrix_storage_class=None, matrix_directory=None
):
self.project_storage = project_storage
self.matrix_storage_class = matrix_storage_class or CSVMatrixStore
self.directories = [matrix_directory or "matrices"]
def get_store(self, matrix_uuid):
"""Return a storage object for a given matrix uuid.
Args:
matrix_uuid (string) A unique identifier within the project for a matrix.
Returns: (MatrixStore) a reference to the matrix and its companion metadata
"""
return self.matrix_storage_class(
self.project_storage, self.directories, matrix_uuid
)
class MatrixStore:
"""Base class for classes that allow access of a matrix and its metadata.
Subclasses should be scoped to a storage format (e.g. CSV)
and implement the _load, save, and head_of_matrix methods for that storage format
Args:
project_storage (triage.component.catwalk.storage.ProjectStorage)
A project file storage engine
directories (list): A list of subdirectories
matrix_uuid (string): A unique identifier within the project for a matrix.
matrix (pandas.DataFrame, optional): The raw matrix.
Defaults to None, which means it will be loaded from storage on demand
        metadata (dict, optional). The matrix's metadata.
Defaults to None, which means it will be loaded from storage on demand.
"""
_matrix_label_tuple = None
indices = ['entity_id', 'as_of_date']
def __init__(
self, project_storage, directories, matrix_uuid, matrix=None, metadata=None
):
self.should_cache = False
self.matrix_uuid = matrix_uuid
self.matrix_base_store = project_storage.get_store(
directories, f"{matrix_uuid}.{self.suffix}"
)
self.metadata_base_store = project_storage.get_store(
directories, f"{matrix_uuid}.yaml"
)
self.metadata = metadata
if matrix is not None:
self._matrix_label_tuple = self._preprocess_and_split_matrix(matrix)
@contextmanager
def cache(self):
"""Enable caching
Must be used as a context manager.
The cache is cleared when the context manager goes out of scope
"""
self.should_cache = True
try:
yield
finally:
self.clear_cache()
self.should_cache = False
def _preprocess_and_split_matrix(self, matrix_with_labels):
"""Perform desired preprocessing that we generally want to do after loading a matrix
This includes setting the index (depending on the storage, may not be serializable)
and downcasting.
"""
if matrix_with_labels.index.names != self.indices:
matrix_with_labels.set_index(self.indices, inplace=True)
index_of_date = matrix_with_labels.index.names.index('as_of_date')
if matrix_with_labels.index.levels[index_of_date].dtype != "datetime64[ns]":
raise ValueError(f"Woah is {matrix_with_labels.index.levels[index_of_date].dtype}")
matrix_with_labels = downcast_matrix(matrix_with_labels)
labels = matrix_with_labels.pop(self.label_column_name)
design_matrix = matrix_with_labels
return design_matrix, labels
@property
def matrix_label_tuple(self):
if self._matrix_label_tuple:
return self._matrix_label_tuple
design_matrix, labels = self._preprocess_and_split_matrix(self._load())
if self.should_cache:
self._matrix_label_tuple = design_matrix, labels
return design_matrix, labels
@matrix_label_tuple.setter
def matrix_label_tuple(self, matrix_label_tuple):
self._matrix_label_tuple = matrix_label_tuple
@property
def design_matrix(self):
"""The matrix without the label vector, only the index and features"""
return self.matrix_label_tuple[0]
@property
def labels(self):
if type(self.matrix_label_tuple[1]) != pd.Series:
raise TypeError("Label stored as something other than pandas Series")
return self.matrix_label_tuple[1]
@property
def metadata(self):
"""The raw metadata. Will load from storage into memory if not already loaded"""
if self.__metadata is not None:
return self.__metadata
self.__metadata = self.load_metadata()
return self.__metadata
@metadata.setter
def metadata(self, metadata):
self.__metadata = metadata
    @property
    def head_of_matrix(self):
        """The first line of the matrix (including the label column)"""
        # `self.matrix` is not defined on MatrixStore; use the full matrix so
        # columns() can still see the label column (subclasses may override).
        return self.full_matrix_for_saving.head(1)
@property
def exists(self):
"""Whether or not the matrix and metadata exist in storage"""
return self.matrix_base_store.exists() and self.metadata_base_store.exists()
@property
def empty(self):
"""Whether or not the matrix has at least one row"""
if not self.matrix_base_store.exists():
return True
else:
head_of_matrix = self.head_of_matrix
return head_of_matrix.empty
def columns(self, include_label=False):
"""The matrix's column list"""
head_of_matrix = self.head_of_matrix
columns = head_of_matrix.columns.tolist()
if include_label:
return columns
else:
return [col for col in columns if col != self.metadata.get("label_name", None)]
@property
def label_column_name(self):
return self.metadata["label_name"]
@property
def index(self):
if self.metadata['indices'] != self.indices:
raise ValueError(f"Indices must be {self.indices}")
return self.design_matrix.index
@property
def uuid(self):
"""The matrix's unique id within the project"""
return self.matrix_uuid
@property
def as_of_dates(self):
"""All as-of-dates in the matrix. Will be converted to datetime.date"""
return sorted(set(
as_of_date.date() if hasattr(as_of_date, 'date') else as_of_date
for entity_id, as_of_date in self.design_matrix.index
))
@property
def num_entities(self):
"""The number of entities in the matrix"""
return len(
self.design_matrix.index.levels[self.design_matrix.index.names.index("entity_id")]
)
@property
def matrix_type(self):
"""The MatrixType (train or test). Returns an object with:
a string name,
evaluation ORM class
prediction ORM class
a boolean `is_test`
"""
if self.metadata["matrix_type"] == "train":
return TrainMatrixType
elif self.metadata["matrix_type"] == "test":
return TestMatrixType
elif self.metadata["matrix_type"] == "production":
return ProductionMatrixType
else:
raise Exception(
"""matrix metadata for matrix {} must contain 'matrix_type'
= "train" or "test" """.format(
self.uuid
)
)
def matrix_with_sorted_columns(self, columns):
"""Return the matrix with columns sorted in the given column order
Args:
columns (list) The order of column names to return.
Will error if this list does not contain the same elements as the matrix's columns
"""
columnset = set(self.columns())
desired_columnset = set(columns)
if columnset == desired_columnset:
if self.columns() != columns:
logger.debug("Column orders not the same, re-ordering")
return self.design_matrix[columns]
else:
            if columnset.issuperset(desired_columnset):
                raise ValueError(
                    f"Columnset is a superset of the desired columnset. Extra items: {columnset - desired_columnset}"
                )
            elif columnset.issubset(desired_columnset):
                raise ValueError(
                    f"Columnset is a subset of the desired columnset. Missing items: {desired_columnset - columnset}"
                )
            else:
                raise ValueError(
                    f"Columnset and desired columnset mismatch. Unique items: {columnset ^ desired_columnset}"
                )
@property
def full_matrix_for_saving(self):
if self.labels is not None:
return self.design_matrix.assign(**{self.label_column_name: self.labels})
else:
return self.design_matrix
def load_metadata(self):
"""Load metadata from storage"""
with self.metadata_base_store.open("rb") as fd:
return yaml.load(fd, Loader=yaml.Loader)
def save(self):
raise NotImplementedError
def clear_cache(self):
self._matrix_label_tuple = None
def __getstate__(self):
"""Remove object of a large size upon serialization.
This helps in a multiprocessing context.
"""
state = self.__dict__.copy()
state['_matrix_label_tuple'] = None
return state
class CSVMatrixStore(MatrixStore):
"""Store and access compressed matrices using CSV"""
suffix = "csv.gz"
@property
def head_of_matrix(self):
try:
with self.matrix_base_store.open("rb") as fd:
head_of_matrix = pd.read_csv(fd, compression="gzip", nrows=1)
head_of_matrix.set_index(self.indices, inplace=True)
except FileNotFoundError as fnfe:
logger.exception(f"Matrix {self.uuid} not found Returning Empty data frame")
head_of_matrix = pd.DataFrame()
return head_of_matrix
def _load(self):
with self.matrix_base_store.open("rb") as fd:
return pd.read_csv(fd, compression="gzip", parse_dates=["as_of_date"])
def save(self):
self.matrix_base_store.write(gzip.compress(self.full_matrix_for_saving.to_csv(None).encode("utf-8")))
with self.metadata_base_store.open("wb") as fd:
yaml.dump(self.metadata, fd, encoding="utf-8")
class TestMatrixType:
string_name = "test"
evaluation_obj = TestEvaluation
prediction_obj = TestPrediction
aequitas_obj = TestAequitas
prediction_metadata_obj = TestPredictionMetadata
is_test = True
class TrainMatrixType:
string_name = "train"
evaluation_obj = TrainEvaluation
prediction_obj = TrainPrediction
aequitas_obj = TrainAequitas
prediction_metadata_obj = TrainPredictionMetadata
is_test = False
class ProductionMatrixType(object):
string_name = "production"
prediction_obj = ListPrediction
prediction_metadata_obj = ListPredictionMetadata
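# --- Hedged usage sketch (not part of the original module) ---
# The scheme of the project path selects the Store subclass via Store.factory,
# and the engines above are all bound through ProjectStorage.
def _example_project_storage():
    local = ProjectStorage("/tmp/triage_demo")        # FSStore-backed paths
    remote = ProjectStorage("s3://my-bucket/triage")  # S3Store-backed paths
    matrices = local.matrix_storage_engine()          # defaults to CSVMatrixStore
    return local.storage_class, remote.storage_class, matrices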
|
d25e972c08499338ee523303aef22fc1843373d7
|
08f8d1203bae66dbb692f8c085b6bbe25934195a
|
/testSuite/scripts/utility.py
|
f708ac2904fc30e12c1a5d1b0f233a18ad9b0ea8
|
[
"LGPL-2.1-or-later",
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-storage-azcopy
|
a747b43ad5cf1162dafbb308024f864baec13ae1
|
f8b3875a8d30bb56fecc3b70023d4a20c53bc7d5
|
refs/heads/main
| 2023-08-16T18:55:37.997091
| 2023-08-15T19:08:15
| 2023-08-15T19:08:15
| 114,798,676
| 552
| 218
|
MIT
| 2023-09-14T21:46:12
| 2017-12-19T18:33:42
|
Go
|
UTF-8
|
Python
| false
| false
| 32,226
|
py
|
utility.py
|
import ctypes
import os
import platform
import shutil
import subprocess
import shlex
import uuid
import random
import json
from pathlib import Path
from collections import namedtuple
# Command Class is used to create azcopy commands and validator commands.
class Command(object):
def __init__(self, command_type):
self.command_type = command_type
# initializing dictionary to store flags and its values.
self.flags = dict()
# initializing list to store arguments for azcopy and validator.
self.args = list()
# this api is used by command class instance to add arguments.
def add_arguments(self, argument):
        if argument is None:
return
self.args.append(argument)
# auto-set MD5 checking flags, so that we always check when testing
ct = str.lower(self.command_type)
is_copy_or_sync = ct == "copy" or ct == "cp" or ct == "sync"
if is_copy_or_sync and (not str.startswith(argument, "http")): # this is a local location
if len(self.args) == 1:
self.add_flags("put-md5", "true") # this is an upload
else:
if "s3" in self.args[0]:
self.add_flags("check-md5", "FailIfDifferent")
else:
self.add_flags("check-md5", "FailIfDifferentOrMissing")
if ct == "create":
if len(self.args) == 1:
self.add_flags("generate-md5", "true") # We want to generate an MD5 on the way up.
return self
def add_flags(self, flag, value):
self.flags[flag] = value
return self
# returns the command by combining arguments and flags.
def string(self):
command = self.command_type
if len(self.args) > 0:
for arg in self.args:
if (len(arg) > 0):
# add '"' at start and end of each argument.
command += " " + '"' + arg + '"'
# iterating through all the values in dict and combining them.
if len(self.flags) > 0:
for key, value in self.flags.items():
command += " --" + key + "=" + '"' + str(value) + '"'
return command
    # this api is used to execute an azcopy copy command.
    # by default, the command executes an upload.
    # returns true or false for success or failure of the command.
def execute_azcopy_copy_command(self):
return execute_azcopy_command(self.string())
    # this api is used to execute an azcopy copy command.
    # by default, the command executes an upload.
    # returns azcopy console output on successful execution.
def execute_azcopy_copy_command_get_output(self):
return execute_azcopy_command_get_output(self.string())
def execute_azcopy_command_interactive(self):
return execute_azcopy_command_interactive(self.string())
# api execute other azcopy commands like cancel, pause, resume or list.
def execute_azcopy_operation_get_output(self):
return execute_azcopy_command_get_output(self.string())
# api executes the azcopy validator to verify the azcopy operation.
def execute_azcopy_verify(self):
return verify_operation(self.string())
# api executes the clean command to delete the blob/container/file/share contents.
def execute_azcopy_clean(self):
return verify_operation(self.string())
# api executes the create command to create the blob/container/file/share/directory contents.
def execute_azcopy_create(self):
return verify_operation(self.string())
# api executes the info command to get AzCopy binary embedded infos.
def execute_azcopy_info(self):
return verify_operation_get_output(self.string())
# api executes the testSuite's upload command to upload(prepare) data to source URL.
def execute_testsuite_upload(self):
return verify_operation(self.string())
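# --- Hedged usage sketch (not part of the original test suite) ---
# Builds the command string an upload would produce; note how add_arguments
# auto-sets put-md5 for a local source (the paths below are placeholders).
def _example_command_string():
    cmd = Command("copy")
    cmd.add_arguments("/tmp/data.txt")  # local first arg -> put-md5 flag
    cmd.add_arguments("https://account.blob.core.windows.net/container?sas=...")
    cmd.add_flags("recursive", "false")
    # e.g. copy "/tmp/data.txt" "https://..." --put-md5="true" --recursive="false"
    return cmd.string()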
# processes oauth command according to switches
def process_oauth_command(
cmd,
fromTo=""):
if fromTo!="":
cmd.add_flags("from-to", fromTo)
# api executes the clean command on validator which deletes all the contents of the container.
def clean_test_container(container):
# execute the clean command.
result = Command("clean").add_arguments(container).add_flags("serviceType", "Blob").add_flags("resourceType", "Bucket").execute_azcopy_clean()
if not result:
print("error cleaning the container. please check the container sas provided")
return False
return True
def clean_test_blob_account(account):
result = Command("clean").add_arguments(account).add_flags("serviceType", "Blob").add_flags("resourceType", "Account").execute_azcopy_clean()
if not result:
print("error cleaning the blob account. please check the account sas provided")
return False
return True
def clean_test_s3_account(account):
if 'S3_TESTS_OFF' in os.environ and os.environ['S3_TESTS_OFF'] != "":
return True
result = Command("clean").add_arguments(account).add_flags("serviceType", "S3").add_flags("resourceType", "Account").execute_azcopy_clean()
if not result:
print("error cleaning the S3 account.")
return False
return True
def clean_test_gcp_account(account):
if 'GCP_TESTS_OFF' in os.environ and os.environ['GCP_TESTS_OFF'] != "":
return True
result = Command("clean").add_arguments(account).add_flags("serviceType", "GCP").add_flags("resourceType", "Account").execute_azcopy_clean()
if not result:
print("error cleaning the GCP account.")
return False
return True
def clean_test_file_account(account):
result = Command("clean").add_arguments(account).add_flags("serviceType", "File").add_flags("resourceType", "Account").execute_azcopy_clean()
if not result:
print("error cleaning the file account. please check the account sas provided")
return False
return True
# api executes the clean command on validator which deletes all the contents of the container.
def clean_test_share(shareURLStr):
# execute the clean command.
result = Command("clean").add_arguments(shareURLStr).add_flags("serviceType", "File").add_flags("resourceType", "Bucket").execute_azcopy_clean()
if not result:
print("error cleaning the share. please check the share sas provided")
return False
return True
def clean_test_filesystem(fileSystemURLStr):
result = Command("clean").add_arguments(fileSystemURLStr).add_flags("serviceType", "BlobFS").add_flags("resourceType", "Bucket").execute_azcopy_clean()
if not result:
print("error cleaning the filesystem. please check the filesystem URL, user and key provided")
return False
return True
# initialize_test_suite initializes the setup for executing test cases.
def initialize_test_suite(test_dir_path, container_sas, container_oauth, container_oauth_validate, share_sas_url, premium_container_sas, filesystem_url, filesystem_sas_url,
s2s_src_blob_account_url, s2s_src_file_account_url, s2s_src_s3_service_url, s2s_src_gcp_service_url, s2s_dst_blob_account_url, azcopy_exec_location, test_suite_exec_location):
# test_directory_path is global variable holding the location of test directory to execute all the test cases.
# contents are created, copied, uploaded and downloaded to and from this test directory only
global test_directory_path
# test_container_url is a global variable used in the entire testSuite holding the user given container shared access signature.
# all files / directory are uploaded and downloaded to and from this container.
global test_container_url
# test_oauth_container_url is a global variable used in the entire testSuite holding the user given container for oAuth testing.
# all files / directory are uploaded and downloaded to and from this container.
global test_oauth_container_url
# test_container_oauth_validate_sas_url is same container as test_oauth_container_url, while for validation purpose.
global test_oauth_container_validate_sas_url
# test_premium_account_contaier_url is a global variable used in the entire test suite holding the user given container sas of premium storage account container.
global test_premium_account_contaier_url
# test_share_url is a global variable used in the entire testSuite holding the user given share URL with shared access signature.
# all files / directory are uploaded and downloaded to and from this share.
global test_share_url
# holds the name of the azcopy executable
global azcopy_executable_name
# holds the name of the test suite executable
global test_suite_executable_name
# holds the filesystem url to perform the operations for blob fs service
global test_bfs_account_url
global test_bfs_sas_account_url
# holds account for s2s copy tests
global test_s2s_src_blob_account_url
global test_s2s_dst_blob_account_url
global test_s2s_src_file_account_url
global test_s2s_src_s3_service_url
global test_s2s_src_gcp_service_url
# creating a test_directory in the location given by user.
# this directory will be used to created and download all the test files.
new_dir_path = os.path.join(test_dir_path, "test_data")
# todo finally
try:
# removing the directory and its contents, if directory exists
shutil.rmtree(new_dir_path)
os.mkdir(new_dir_path)
except:
os.mkdir(new_dir_path)
# copying the azcopy executable to the newly created test directory.
# this copying is done to avoid using the executables at location which might be used by the user
# while test suite is running.
if os.path.isfile(azcopy_exec_location):
shutil.copy2(azcopy_exec_location, new_dir_path)
azcopy_executable_name = parse_out_executable_name(azcopy_exec_location)
else:
print("please verify the azcopy executable location")
return False
# copying the test executable to the newly created test directory.
# this copying is done to avoid using the executables at location which might be used by the user
# while test suite is running.
if os.path.isfile(test_suite_exec_location):
shutil.copy2(test_suite_exec_location, new_dir_path)
test_suite_executable_name = parse_out_executable_name(test_suite_exec_location)
else:
print("please verify the test suite executable location")
return False
test_directory_path = new_dir_path
test_bfs_account_url = filesystem_url
test_bfs_sas_account_url = filesystem_sas_url
if not (test_bfs_account_url.endswith("/") and test_bfs_account_url.endswith("\\")):
test_bfs_account_url = test_bfs_account_url + "/"
test_container_url = container_sas
test_oauth_container_url = container_oauth
if not (test_oauth_container_url.endswith("/") and test_oauth_container_url.endwith("\\")):
test_oauth_container_url = test_oauth_container_url + "/"
test_oauth_container_validate_sas_url = container_oauth_validate
test_premium_account_contaier_url = premium_container_sas
test_s2s_src_blob_account_url = s2s_src_blob_account_url
test_s2s_src_file_account_url = s2s_src_file_account_url
test_s2s_dst_blob_account_url = s2s_dst_blob_account_url
test_s2s_src_s3_service_url = s2s_src_s3_service_url
test_s2s_src_gcp_service_url = s2s_src_gcp_service_url
test_share_url = share_sas_url
if not clean_test_filesystem(test_bfs_account_url.rstrip("/").rstrip("\\")): # rstrip because clean fails if trailing /
print("failed to clean test filesystem.")
if not clean_test_container(test_container_url):
print("failed to clean test blob container.")
if not clean_test_container(test_oauth_container_validate_sas_url):
print("failed to clean OAuth test blob container.")
if not clean_test_container(test_premium_account_contaier_url):
print("failed to clean premium container.")
if not clean_test_blob_account(test_s2s_src_blob_account_url):
print("failed to clean s2s blob source account.")
if not clean_test_file_account(test_s2s_src_file_account_url):
print("failed to clean s2s file source account.")
if not clean_test_blob_account(test_s2s_dst_blob_account_url):
print("failed to clean s2s blob destination account.")
if not clean_test_s3_account(test_s2s_src_s3_service_url):
print("failed to clean s3 account.")
if not clean_test_gcp_account(test_s2s_src_gcp_service_url):
print("failed to clean GCS account")
if not clean_test_share(test_share_url):
print("failed to clean test share.")
return True
# initialize_test_suite initializes the setup for executing test cases.
def initialize_interactive_test_suite(test_dir_path, container_oauth, container_oauth_validate,
filesystem_url, oauth_tenant_id, oauth_aad_endpoint, azcopy_exec_location, test_suite_exec_location):
# test_directory_path is global variable holding the location of test directory to execute all the test cases.
# contents are created, copied, uploaded and downloaded to and from this test directory only
global test_directory_path
# test_oauth_container_url is a global variable used in the entire testSuite holding the user given container for oAuth testing.
# all files / directory are uploaded and downloaded to and from this container.
global test_oauth_container_url
# test_container_oauth_validate_sas_url is same container as test_oauth_container_url, while for validation purpose.
global test_oauth_container_validate_sas_url
# holds the name of the azcopy executable
global azcopy_executable_name
# holds the name of the test suite executable
global test_suite_executable_name
# holds the filesystem url to perform the operations for blob fs service
global test_bfs_account_url
# holds the oauth tenant id
global test_oauth_tenant_id
# holds the oauth aad encpoint
global test_oauth_aad_endpoint
# creating a test_directory in the location given by user.
# this directory will be used to created and download all the test files.
new_dir_path = os.path.join(test_dir_path, "test_data")
# todo finally
try:
# removing the directory and its contents, if directory exists
shutil.rmtree(new_dir_path)
os.mkdir(new_dir_path)
except:
os.mkdir(new_dir_path)
# copying the azcopy executable to the newly created test directory.
# this copying is done to avoid using the executables at location which might be used by the user
# while test suite is running.
if os.path.isfile(azcopy_exec_location):
shutil.copy2(azcopy_exec_location, new_dir_path)
azcopy_executable_name = parse_out_executable_name(azcopy_exec_location)
else:
print("please verify the azcopy executable location")
return False
# copying the test executable to the newly created test directory.
# this copying is done to avoid using the executables at location which might be used by the user
# while test suite is running.
if os.path.isfile(test_suite_exec_location):
shutil.copy2(test_suite_exec_location, new_dir_path)
test_suite_executable_name = parse_out_executable_name(test_suite_exec_location)
else:
print("please verify the test suite executable location")
return False
test_directory_path = new_dir_path
test_oauth_tenant_id = oauth_tenant_id
test_oauth_aad_endpoint = oauth_aad_endpoint
# set the filesystem url
test_bfs_account_url = filesystem_url
if not clean_test_filesystem(test_bfs_account_url):
return False
    # append a trailing separator if the URL does not already end with one
    if not (test_bfs_account_url.endswith("/") or test_bfs_account_url.endswith("\\")):
        test_bfs_account_url = test_bfs_account_url + "/"
    test_oauth_container_url = container_oauth
    if not (test_oauth_container_url.endswith("/") or test_oauth_container_url.endswith("\\")):
        test_oauth_container_url = test_oauth_container_url + "/"
# as validate container URL point to same URL as oauth container URL, do clean up with validate container URL
test_oauth_container_validate_sas_url = container_oauth_validate
if not clean_test_container(test_oauth_container_validate_sas_url):
return False
return True
# given a path, parse out the name of the executable
def parse_out_executable_name(full_path):
head, tail = os.path.split(full_path)
return tail
# todo : find better way
# create_test_file creates a file with given file name and of given size inside the test directory.
# returns the local file path.
def create_test_file(filename, size):
# creating the file path
file_path = os.path.join(test_directory_path, filename)
# if file already exists, then removing the file.
if os.path.isfile(file_path):
os.remove(file_path)
f = open(file_path, 'w')
    # since the file can be very large, it is written in blocks of 1MB
    # rather than with a single write call.
if size > 1024 * 1024:
total_size = size
while total_size > 0:
num_chars = 1024 * 1024
if total_size < num_chars:
num_chars = total_size
f.write('0' * num_chars)
total_size = total_size - num_chars
else:
num_chars = size
f.write('0' * num_chars)
f.close()
return file_path
def create_json_file(filename, jsonData):
# creating the file path
file_path = os.path.join(test_directory_path, filename + ".json")
# if file already exists, then removing the file.
if os.path.isfile(file_path):
os.remove(file_path)
with open(file_path, 'w') as outfile:
json.dump(jsonData, outfile)
outfile.close()
return file_path
def create_new_list_of_files(filename, list):
# creating the file path
file_path = os.path.join(test_directory_path, filename + ".txt")
if os.path.isfile(file_path):
os.remove(file_path)
with open(file_path, 'w') as outfile:
outfile.writelines(list)
outfile.close()
return file_path
# creates the a test html file inside the test directory.
# returns the local file path.
def create_test_html_file(filename):
# creating the file path
file_path = os.path.join(test_directory_path, filename)
# if file already exists, then removing the file.
if os.path.isfile(file_path):
os.remove(file_path)
f = open(file_path, 'w')
message = """<html>
<head></head>
<body><p>Hello World!</p></body>
</html>"""
f.write(message)
f.close()
return file_path
# creates a dir with given inside test directory
def create_test_dir(dir_name):
# If the directory exists, remove it.
dir_path = os.path.join(test_directory_path, dir_name)
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
try:
os.mkdir(dir_path)
except:
raise Exception("error creating directory ", dir_path)
return dir_path
# create_test_n_files creates given number of files for given size
# inside directory inside test directory.
# returns the path of directory in which n files are created.
def create_test_n_files(size, n, dir_name):
# creating directory inside test directory.
dir_n_files_path = os.path.join(test_directory_path, dir_name)
try:
shutil.rmtree(dir_n_files_path)
os.mkdir(dir_n_files_path)
except:
os.mkdir(dir_n_files_path)
# creating file prefix
filesprefix = "test" + str(n) + str(size)
# creating n files.
for index in range(0, n):
filename = filesprefix + '_' + str(index) + ".txt"
# creating the file path
file_path = os.path.join(dir_n_files_path, filename)
# if file already exists, then removing the file.
if os.path.isfile(file_path):
os.remove(file_path)
f = open(file_path, 'w')
        # since the file can be very large, it is written in blocks of 1MB
        # rather than with a single write call.
if size > 1024 * 1024:
total_size = size
while total_size > 0:
num_chars = 1024 * 1024
if total_size < num_chars:
num_chars = total_size
f.write('0' * num_chars)
total_size = total_size - num_chars
else:
num_chars = size
f.write('0' * num_chars)
f.close()
return dir_n_files_path
# create_complete_sparse_file creates an empty sparse file of the given size,
# used to test the page blob operations of azcopy
def create_complete_sparse_file(filename, filesize):
file_path = os.path.join(test_directory_path, filename)
sparse = Path(file_path)
sparse.touch()
os.truncate(str(sparse), filesize)
return file_path
# create_partial_sparse_file create a sparse file in test directory
# of size multiple of 8MB. for each 8MB, first 4MB is '0'
# and next 4MB is '\0'.
# return the local file path of created file.
def create_partial_sparse_file(filename, filesize):
    file_path = os.path.join(test_directory_path, filename)
    # if file already exists, then removing the file.
    if os.path.isfile(file_path):
        os.remove(file_path)
    # if the file size is less than 8MB, or the given size is not a
    # multiple of 8MB, no file is created.
    if filesize < 8 * 1024 * 1024 or filesize % (8 * 1024 * 1024) != 0:
        return None
    with open(file_path, 'w') as f:
        total_size = filesize
        while total_size > 0:
            num_chars = 4 * 1024 * 1024
            f.write('0' * num_chars)
            total_size = total_size - num_chars
            if total_size <= 0:
                break
            f.write('\0' * num_chars)
            total_size = total_size - num_chars
    return file_path
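# Hedged usage sketch (not part of the original suite): create one 8 MiB
# partial sparse file and verify the alternating 4 MiB layout described above.
# Kept commented out so that importing this module stays side-effect free.
# _p = create_partial_sparse_file("sparse_layout_check.dat", 8 * 1024 * 1024)
# with open(_p, "rb") as _fh:
#     _data = _fh.read()
# assert _data[: 4 * 1024 * 1024] == b"0" * (4 * 1024 * 1024)
# assert _data[4 * 1024 * 1024 :] == b"\x00" * (4 * 1024 * 1024)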
# execute_azcopy_command executes the given azcopy command.
# returns true / false on success / failure of command.
def execute_azcopy_command(command):
# azcopy executable path location.
azspath = os.path.join(test_directory_path, azcopy_executable_name)
cmnd = azspath + " " + command
    try:
        # executing the command with the timeout set to 6 minutes / 360 sec.
        subprocess.check_output(
            cmnd, stderr=subprocess.STDOUT, shell=True, timeout=360,
            universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        # todo kill azcopy command in case of timeout
        print("command failed with error code ", exc.returncode, " and message " + exc.output)
        return False
else:
return True
# execute_azcopy_command_interactive executes the given azcopy command while
# streaming its console output to stdout.
# returns True / False on success / failure of the command.
def execute_azcopy_command_interactive(command):
    # azcopy executable path location.
    azspath = os.path.join(test_directory_path, azcopy_executable_name)
    cmnd = azspath + " " + command
if os.name == "nt":
process = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
process = subprocess.Popen(shlex.split(cmnd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(process.stdout.readline, b''):
print(line.decode('utf-8'))
process.wait()
if process.poll() == 0:
return True
else:
return False
# execute_azcopy_command_get_output executes the given azcopy command and
# returns its console output (the captured error output on failure).
def execute_azcopy_command_get_output(command):
    # azcopy executable path location.
azspath = os.path.join(test_directory_path, azcopy_executable_name)
cmnd = azspath + " " + command
output = ""
try:
# executing the command with timeout set to 6 minutes / 360 sec.
output = subprocess.check_output(
cmnd, stderr=subprocess.STDOUT, shell=True, timeout=360,
universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        # on failure, return the captured output so callers can inspect the error
        return exc.output
else:
return output
# verify_operation executes the validator command to verify the azcopy operations.
# return true / false on success / failure of command.
def verify_operation(command):
# testSuite executable local path inside the test directory.
test_suite_path = os.path.join(test_directory_path, test_suite_executable_name)
command = test_suite_path + " " + command
try:
# executing the command with timeout set to 6 minutes / 360 sec.
subprocess.check_output(
command, stderr=subprocess.STDOUT, shell=True, timeout=360,
universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        print("command failed with error code ", exc.returncode, " and message " + exc.output)
return False
else:
return True
# verify_operation_get_output executes the validator command and returns output.
def verify_operation_get_output(command):
# testSuite executable local path inside the test directory.
test_suite_path = os.path.join(test_directory_path, test_suite_executable_name)
command = test_suite_path + " " + command
try:
# executing the command with timeout set to 10 minutes / 600 sec.
output = subprocess.check_output(
command, stderr=subprocess.STDOUT, shell=True, timeout=600,
universal_newlines=True)
    except subprocess.CalledProcessError as exc:
        # on failure, return None so callers can distinguish it from valid output
        return None
else:
return output
def get_object_sas(url_with_sas, object_name):
# Splitting the container URL to add the uploaded blob name to the SAS
url_parts = url_with_sas.split("?")
# adding the blob name after the container name
if url_parts[0].endswith("/"):
resource_sas = url_parts[0] + object_name + '?' + url_parts[1]
else:
resource_sas = url_parts[0] + "/" + object_name + '?' + url_parts[1]
return resource_sas
def get_object_without_sas(url, object_name):
# Splitting the container URL to add the uploaded blob name to the SAS
url_parts = url.split("?")
# adding the blob name after the container name
if url_parts[0].endswith("/"):
resource_sas = url_parts[0] + object_name
else:
resource_sas = url_parts[0] + "/" + object_name
return resource_sas
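# Hedged illustration (hypothetical URL and token): the two helpers above
# splice an object name between the container path and the SAS query string.
# get_object_sas("https://acct.blob.core.windows.net/cont?sv=2020", "blob.txt")
#   -> "https://acct.blob.core.windows.net/cont/blob.txt?sv=2020"
# get_object_without_sas("https://acct.blob.core.windows.net/cont", "blob.txt")
#   -> "https://acct.blob.core.windows.net/cont/blob.txt"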
# get_resource_sas return the shared access signature for the given resource
# using the container url.
def get_resource_sas(resource_name):
# Splitting the container URL to add the uploaded blob name to the SAS
url_parts = test_container_url.split("?")
# adding the blob name after the container name
resource_sas = url_parts[0] + "/" + resource_name + '?' + url_parts[1]
return resource_sas
def get_resource_from_oauth_container_validate(resource_name):
# Splitting the container URL to add the uploaded blob name to the SAS
url_parts = test_oauth_container_validate_sas_url.split("?")
# adding the blob name after the container name
resource_sas = url_parts[0] + "/" + resource_name + '?' + url_parts[1]
return resource_sas
def get_resource_from_oauth_container(resource_name):
return test_oauth_container_url + resource_name
def append_text_path_resource_sas(resource_sas, text):
# Splitting the resource sas to add the text to the SAS
url_parts = resource_sas.split("?")
# adding the text to the blob name of the resource sas
if url_parts[0].endswith("/"):
# If there is a separator at the end of blob name
# no need to append "/" before the text after the blob name
resource_sas = url_parts[0] + text + '?' + url_parts[1]
else:
resource_sas = url_parts[0] + "/" + text + '?' + url_parts[1]
return resource_sas
# get_resource_sas_from_share return the shared access signature for the given resource
# based on the share url.
def get_resource_sas_from_share(resource_name):
# Splitting the share URL to add the file or directory name to the SAS
url_parts = test_share_url.split("?")
# adding the file or directory name after the share name
resource_sas = url_parts[0] + "/" + resource_name + '?' + url_parts[1]
return resource_sas
def get_resource_sas_from_bfs(resource_name):
# Splitting the share URL to add the file or directory name to the SAS
url_parts = test_bfs_sas_account_url.split("?")
# adding the file or directory name after the share name
resource_sas = url_parts[0] + "/" + resource_name + '?' + url_parts[1]
return resource_sas
# get_resource_sas return the shared access signature for the given resource
# using the premium storage account container url.
def get_resource_sas_from_premium_container_sas(resource_name):
# Splitting the container URL to add the uploaded blob name to the SAS
url_parts = test_premium_account_contaier_url.split("?")
# adding the blob name after the container name
resource_sas = url_parts[0] + "/" + resource_name + '?' + url_parts[1]
return resource_sas
# parseAzcopyOutput parses the Azcopy Output in JSON format to give the final Azcopy Output in JSON Format
# Final Azcopy Output is the last JobSummary for the Job
# Azcopy Output can have more than one Summary for the Job
# parseAzcopyOutput returns the final JobSummary in JSON format.
def parseAzcopyOutput(s):
count = 0
output = ""
final_output = ""
# Split the lines
lines = s.split('\n')
# Iterating through the output in reverse order since last summary has to be considered.
# Increment the count when line is "}"
# Reduce the count when line is "{"
# append the line to final output
# When the count is 0, it means the last Summary has been traversed
for line in reversed(lines):
# If the line is empty, then continue
if line == "":
continue
elif line == '}':
count = count + 1
elif line == "{":
count = count - 1
if count >= 0:
if len(output) > 0:
output = output + '\n' + line
else:
output = line
if count == 0:
break
lines = output.split('\n')
    # since the lines were iterated in reverse order, reverse them again and
    # concatenate the lines to get the final JobSummary
for line in reversed(lines):
if len(final_output) > 0:
final_output = final_output + '\n' + line
else:
final_output = line
x = json.loads(final_output, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
return x.MessageContent
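# Hedged usage sketch: given AzCopy output holding two pretty-printed JSON
# summaries (a hypothetical payload), parseAzcopyOutput returns the
# MessageContent of the last one -- "second" in this example. Kept commented
# out so the module has no import-time side effects.
# _sample = ('{\n"MessageType": "JobSummary",\n"MessageContent": "first"\n}\n'
#            '{\n"MessageType": "JobSummary",\n"MessageContent": "second"\n}')
# assert parseAzcopyOutput(_sample) == "second"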
def get_resource_name(prefix=''):
return prefix + str(uuid.uuid4()).replace('-', '')
def get_random_bytes(size):
rand = random.Random()
result = bytearray(size)
for i in range(size):
result[i] = int(rand.random()*255) # random() is consistent between python 2 and 3
return bytes(result)
def create_hidden_file(path, file_name, data):
FILE_ATTRIBUTE_HIDDEN = 0x02
os_type = platform.system()
os_type = os_type.upper()
# For *nix add a '.' prefix.
prefix = '.' if os_type != "WINDOWS" else ''
file_name = prefix + file_name
file_path = os.path.join(path, file_name)
# Write file.
with open(file_path, 'w') as f:
f.write(data)
# For windows set file attribute.
if os_type == "WINDOWS":
ret = ctypes.windll.kernel32.SetFileAttributesW(file_path,
FILE_ATTRIBUTE_HIDDEN)
if not ret: # There was an error.
raise ctypes.WinError()
def create_file_in_path(path, file_name, data):
file_path = os.path.join(path, file_name)
with open(file_path, 'w') as f:
f.write(data)
return file_path
--- record ---
repo_name: suragnair/alpha-zero-general | path: /test_all_games.py | filename: test_all_games.py | extension: py | length_bytes: 3405 | src_encoding: UTF-8 | language: Python
blob_id: 1333ef2df59fcf8799100dac8f68bdeecfc51092 | directory_id: 1f4f8d8cba7c6e458a96789008a2e5c9303ad530 | content_id: 0d3d15191534cc9bc71d8c76e41f02a60101d90b
snapshot_id: 45b0f59aa70c206e7617ab47499ca21b84a44283 | revision_id: ce020c8eebbabf0e22654279508a6887b4791015 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Jupyter Notebook
visit_date: 2023-09-03T12:53:13.958587 | revision_date: 2023-06-21T08:27:01 | committer_date: 2023-06-21T08:27:01 | gha_event_created_at: 2023-08-22T06:48:10 | gha_created_at: 2017-12-01T02:55:15
github_id: 112687645 | star_events_count: 3545 | fork_events_count: 1099 | is_vendor: false | is_generated: false
""""
This is a Regression Test Suite to automatically test all combinations of games and ML frameworks. Each test
plays two quick games using an untrained neural network (randomly initialized) against a random player.
In order for the entire test suite to run successfully, all the required libraries must be installed. They are:
Pytorch, Keras.
[ Games ] Pytorch Keras
----------- ------- -----
- Othello [Yes] [Yes]
- TicTacToe [Yes]
- TicTacToe3D [Yes]
- Connect4 [Yes]
- Gobang [Yes]
- Tafl [Yes] [Yes]
- Rts [Yes]
- DotsAndBoxes [Yes]
"""
import unittest
import Arena
from MCTS import MCTS
from othello.OthelloGame import OthelloGame
from othello.OthelloPlayers import RandomPlayer
from othello.pytorch.NNet import NNetWrapper as OthelloPytorchNNet
from othello.keras.NNet import NNetWrapper as OthelloKerasNNet
from tictactoe.TicTacToeGame import TicTacToeGame
from tictactoe.keras.NNet import NNetWrapper as TicTacToeKerasNNet
from tictactoe_3d.TicTacToeGame import TicTacToeGame as TicTacToe3DGame
from tictactoe_3d.keras.NNet import NNetWrapper as TicTacToe3DKerasNNet
from connect4.Connect4Game import Connect4Game
from connect4.keras.NNet import NNetWrapper as Connect4KerasNNet
from gobang.GobangGame import GobangGame
from gobang.keras.NNet import NNetWrapper as GobangKerasNNet
from tafl.TaflGame import TaflGame
from tafl.pytorch.NNet import NNetWrapper as TaflPytorchNNet
from tafl.keras.NNet import NNetWrapper as TaflKerasNNet
from rts.RTSGame import RTSGame
from rts.keras.NNet import NNetWrapper as RTSKerasNNet
from dotsandboxes.DotsAndBoxesGame import DotsAndBoxesGame
from dotsandboxes.keras.NNet import NNetWrapper as DotsAndBoxesKerasNNet
import numpy as np
from utils import *
class TestAllGames(unittest.TestCase):
@staticmethod
def execute_game_test(game, neural_net):
rp = RandomPlayer(game).play
args = dotdict({'numMCTSSims': 25, 'cpuct': 1.0})
mcts = MCTS(game, neural_net(game), args)
n1p = lambda x: np.argmax(mcts.getActionProb(x, temp=0))
arena = Arena.Arena(n1p, rp, game)
print(arena.playGames(2, verbose=False))
def test_othello_pytorch(self):
self.execute_game_test(OthelloGame(6), OthelloPytorchNNet)
def test_othello_keras(self):
self.execute_game_test(OthelloGame(6), OthelloKerasNNet)
def test_tictactoe_keras(self):
self.execute_game_test(TicTacToeGame(), TicTacToeKerasNNet)
def test_tictactoe3d_keras(self):
self.execute_game_test(TicTacToe3DGame(3), TicTacToe3DKerasNNet)
def test_gobang_keras(self):
self.execute_game_test(GobangGame(), GobangKerasNNet)
def test_tafl_pytorch(self):
self.execute_game_test(TaflGame(5), TaflPytorchNNet)
def test_tafl_keras(self):
self.execute_game_test(TaflGame(5), TaflKerasNNet)
def test_connect4_keras(self):
self.execute_game_test(Connect4Game(5), Connect4KerasNNet)
def test_rts_keras(self):
self.execute_game_test(RTSGame(), RTSKerasNNet)
def test_dotsandboxes_keras(self):
self.execute_game_test(DotsAndBoxesGame(3), DotsAndBoxesKerasNNet)
if __name__ == '__main__':
unittest.main()
--- record ---
repo_name: cornell-zhang/heterocl | path: /tests/polybench/jacobi_1d.py | filename: jacobi_1d.py | extension: py | length_bytes: 1223 | src_encoding: UTF-8 | language: Python
blob_id: 4c880ed290b91d53ede84d5cf1e37a998f9f31fe | directory_id: 6dd5027d9f02b2c40c96fdea9796a4fba6ee7e46 | content_id: 948fcdf016791afc28d6b14def1f6579a686810f
snapshot_id: fb4fd3c9cdbb7c7ccbdb2a8a09f47b436200c8f6 | revision_id: b794409e68e326cafa6c3eaec2e3560ff066e129 | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2023-07-22T16:33:57.900104 | revision_date: 2023-07-19T19:58:13 | committer_date: 2023-07-19T19:58:13 | gha_event_created_at: 2023-07-19T19:58:15 | gha_created_at: 2017-12-20T16:13:52
github_id: 114906951 | star_events_count: 312 | fork_events_count: 111 | is_vendor: false | is_generated: false
# Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import heterocl as hcl
import math as mt
import os
def top_jacobi_1d(N, TSTEPS, dtype=hcl.Int(), target=None):
hcl.init(dtype)
A = hcl.placeholder((N,), "A")
B = hcl.placeholder((N,), "B")
def kernel_jacobi_1d(A, B):
def update(A, B):
with hcl.for_(1, N - 1, name="L1") as i:
B[i] = 0.33333 * (A[i - 1] + A[i] + A[i + 1])
with hcl.for_(1, N - 1, name="L2") as i:
A[i] = 0.33333 * (B[i - 1] + B[i] + B[i + 1])
hcl.mutate((TSTEPS,), lambda m: update(A, B), "main_loop")
s = hcl.create_schedule([A, B], kernel_jacobi_1d)
#### Apply customizations ####
main_loop = kernel_jacobi_1d.main_loop
s[main_loop].unroll(main_loop.L1)
s[main_loop].unroll(main_loop.L2)
#### Apply customizations ####
return hcl.build(s, target=target)
import numpy as np
def jacobi_1d_golden(N, TSTEPS, A, B, DATA_TYPE):
for t in range(TSTEPS):
for i in range(1, N - 1):
B[i] = 0.33333 * (A[i - 1] + A[i] + A[i + 1])
for i in range(1, N - 1):
A[i] = 0.33333 * (B[i - 1] + B[i] + B[i + 1])
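# Hedged usage sketch (not in the original listing): build the HeteroCL kernel
# and check it against the NumPy golden model above. N, TSTEPS and the
# tolerances are illustrative choices, not values from the PolyBench suite.
if __name__ == "__main__":
    N, TSTEPS = 120, 10
    f = top_jacobi_1d(N, TSTEPS, dtype=hcl.Float())
    a_np = np.random.rand(N).astype(np.float32)
    b_np = np.random.rand(N).astype(np.float32)
    a_ref, b_ref = a_np.copy(), b_np.copy()
    a_hcl, b_hcl = hcl.asarray(a_np), hcl.asarray(b_np)
    # run the compiled kernel, then the reference model, and compare
    f(a_hcl, b_hcl)
    jacobi_1d_golden(N, TSTEPS, a_ref, b_ref, np.float32)
    assert np.allclose(a_hcl.asnumpy(), a_ref, rtol=1e-3, atol=1e-4)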
--- record ---
repo_name: DanielBok/copulae | path: /copulae/copula/__init__.py | filename: __init__.py | extension: py | length_bytes: 49 | src_encoding: UTF-8 | language: Python
blob_id: 74070b05bdc7a3d2cea71f4219b1020913166e00 | directory_id: c8d98c2101a2932c4449183c9e8bd6501c57345f | content_id: 3bbc310767c59e8d828e0b989bc9a718c3b2100d
snapshot_id: a9af8fa88a212a5436226a22d59799d671d78645 | revision_id: d48fbd064426605b8784684114844758e3ffc90d | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Jupyter Notebook
visit_date: 2023-07-08T09:52:31.815899 | revision_date: 2023-06-14T04:29:39 | committer_date: 2023-06-14T05:22:31 | gha_event_created_at: 2023-06-14T05:22:32 | gha_created_at: 2019-01-13T14:43:39
github_id: 165516660 | star_events_count: 131 | fork_events_count: 30 | is_vendor: false | is_generated: false
from .base import *
from .summary import Summary
--- record ---
repo_name: PaddlePaddle/Paddle | path: /python/paddle/distributed/io.py | filename: io.py | extension: py | length_bytes: 23608 | src_encoding: UTF-8 | language: Python
blob_id: fd45a9b924541646b41f8e3d3247efaf179e8089 | directory_id: 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | content_id: bc125cb242a29b2de69e89804ac620441ff2e262
snapshot_id: b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | revision_id: 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | branch_name: refs/heads/develop
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: C++
visit_date: 2023-08-17T21:27:30.568889 | revision_date: 2023-08-17T12:38:22 | committer_date: 2023-08-17T12:38:22 | gha_event_created_at: 2023-09-14T19:20:51 | gha_created_at: 2016-08-15T06:59:08
github_id: 65711522 | star_events_count: 20414 | fork_events_count: 5891 | is_vendor: false | is_generated: false
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
from paddle.fluid.framework import Program, static_only
from paddle.framework import core, dygraph_not_support
def _load_distributed_persistables(executor, dirname, main_program=None):
"""
customized load_persistables for distributed training.
it should be used on parameter server,
Args:
executor(Executor): The executor to run for saving parameters.
dirname(str): The load directory path.
main_program(Program): The program whose parameters will be
loaded. the main_program must be the pserver_program
get after transpiler.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
exe = fluid.Executor(fluid.CPUPlace())
param_path = "./my_paddle_model"
t = paddle.distributed.transpiler.DistributeTranspiler()
t.transpile(...)
pserver_prog = t.get_pserver_program(...)
_load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)
"""
    def __is_distributed_part_var(varname):
        trainer_idx = varname.find(".trainer_")
        block_idx = varname.find(".block")
        # str.find returns -1 (which is truthy) when the substring is absent,
        # so compare explicitly instead of relying on truthiness.
        return trainer_idx != -1 or block_idx != -1
def __load_persistable_vars(executor, dirname, need_load_vars):
load_prog = Program()
load_block = load_prog.global_block()
need_delete_vars = []
for param in need_load_vars:
origin_var = param.origin
slice_var = param.slice
is_slice = param.is_slice
offset = param.offset
if is_slice:
slice = load_block.create_var(
name=slice_var.name,
type=slice_var.type,
shape=slice_var.shape,
dtype=slice_var.dtype,
persistable=True,
)
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [slice]},
attrs={
'file_path': os.path.join(dirname, origin_var.name),
'seek': offset,
'shape': slice.shape,
},
)
else:
origin = load_block.create_var(
name=f"{origin_var.name}",
type=origin_var.type,
shape=origin_var.shape,
dtype=origin_var.dtype,
persistable=True,
)
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [origin]},
attrs={'file_path': os.path.join(dirname, origin_var.name)},
)
load_block.append_op(
type='delete_var',
inputs={'X': need_delete_vars},
)
executor.run(load_prog)
if not isinstance(main_program, Program):
raise TypeError("'main_program' should be an instance of Program.")
    if not main_program._is_distributed:
        raise ValueError(
            "'_load_distributed_persistables' is only designed for distributed training."
        )
    if not main_program._ps_endpoint:
        raise ValueError(
            "'_load_distributed_persistables' needs current_endpoint set in DistributeTranspiler.transpile"
        )
need_load_vars = (
main_program._parameters_on_pservers.get_distributed_vars_by_ep(
main_program._ps_endpoint
)
)
__load_persistable_vars(executor, dirname, need_load_vars)
@dygraph_not_support
def load_persistables(executor, dirname, main_program=None, filename=None):
"""
:api_attr: Static Graph
This API filters out all variables with ``persistable==True`` from the
given ``main_program`` and then tries to load these variables from the
directory ``dirname`` or the file ``filename``.
Use the ``dirname`` to specify the directory where persistable variables
(refer to :ref:`api_guide_model_save_reader_en`) were saved. If variables
were saved in separate files, set ``filename`` as None; if all variables
were saved in a single file, use ``filename`` to specify the file name.
Args:
executor(Executor): The executor used for loading persistable variables.
See :ref:`api_guide_executor_en` for more details about it.
dirname(str): The directory path.
main_program(Program, optional): The program whose persistable variables will
be loaded. If it is None, the ``default_main_program``
will be used automatically. See :ref:`api_guide_Program_en`
for more about ``Program``.
Default: None.
filename(str, optional): The file which saved all persistable variables. If variables
were saved in separated files, set it to None.
Default: None.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
exe = fluid.Executor(fluid.CPUPlace())
param_path = "./my_paddle_model"
prog = fluid.default_main_program()
paddle.distributed.io.load_persistables(executor=exe, dirname=param_path,
main_program=None)
"""
if main_program and main_program._is_distributed:
_load_distributed_persistables(
executor, dirname=dirname, main_program=main_program
)
else:
paddle.static.io.load_vars(
executor,
dirname=dirname,
main_program=main_program,
predicate=is_persistable,
filename=filename,
)
def _save_distributed_persistables(executor, dirname, main_program):
"""
save_persistables for distributed training.
the method will do things listed below:
1.save part of persistable variables on trainer.
2.receive "remote prefetch variables" from parameter servers and merge them.
3.save "distributed lookup table" on parameter servers.
4.receive "optimizer variables" from parameter servers and merge them.
Args:
executor(Executor): The executor to run for saving parameters.
dirname(str): The saving directory path.
main_program(Program): The program whose parameters will be
saved. the main_program must be the trainer_program
get after transpiler.
Returns:
None
Examples:
.. code-block:: python
            import paddle
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
param_path = "./my_paddle_model"
t = paddle.distributed.transpiler.DistributeTranspiler()
t.transpile(...)
train_program = t.get_trainer_program()
_save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)
"""
def __save_remote_params(executor, dirname, remote_params_map):
"""
        receive params from the pservers through rpc.
        if the params were sliced, concat them into one and then save it.
"""
if not remote_params_map:
return
prog = paddle.static.Program()
block = prog.global_block()
# recv optimize vars from pserver
for name, remote_params in remote_params_map.items():
origin = remote_params[0].origin
is_slice = remote_params[0].is_slice
slices = [None] * len(remote_params)
slice_varnames = [None] * len(remote_params)
remote_varnames = [None] * len(remote_params)
endpoints = [None] * len(remote_params)
for idx, optimizer in enumerate(remote_params):
block_id = optimizer.block_id
slice = optimizer.slice
endpoint = optimizer.endpoint
index = block_id if is_slice else idx
slices[index] = slice
slice_varnames[index] = f"{slice.name}.slice.{idx}"
remote_varnames[index] = slice.name
endpoints[index] = endpoint
slice_shapes = []
for slice in slices:
tmp = [str(dim) for dim in slice.shape]
slice_shapes.append(",".join(tmp))
block.append_op(
type='recv_save',
attrs={
"trainer_id": 0,
"shape": origin.shape,
"slice_shapes": slice_shapes,
"slice_varnames": slice_varnames,
"remote_varnames": remote_varnames,
"endpoints": endpoints,
"file_path": os.path.join(dirname, origin.name),
},
)
executor.run(prog)
def __save_distributed_lookup_tables(
executor, dirname, distributed_lookup_table, endpoints
):
"""
        because the distributed lookup table may be too huge to merge and save in one place,
        it is saved on each parameter server independently.
the save directory is dirname/"__lookup_table__".
"""
prog = paddle.static.Program()
block = prog.global_block()
# if there is lookup table, the trainer 0 will notify all pserver to save.
lookup_table_filename = os.path.join(dirname, "__lookup_table__")
attrs = {}
attrs['epmap'] = endpoints
attrs['dir'] = lookup_table_filename
attrs['lookup_table'] = distributed_lookup_table
block.append_op(
type='checkpoint_notify', inputs={}, outputs={}, attrs=attrs
)
executor.run(prog)
def __exclude_vars(exclude_var_names=[]):
def is_valid(var):
if var.name in exclude_var_names:
return False
if (
var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH
or var.desc.type() == core.VarDesc.VarType.FETCH_LIST
or var.desc.type() == core.VarDesc.VarType.READER
):
return False
return var.persistable
return is_valid
if not isinstance(main_program, Program):
raise TypeError("'main_program' should be an instance of Program.")
    if not main_program._is_distributed:
        raise ValueError(
            "'_save_distributed_persistables' is only designed for distributed training."
        )
remote_params_map = (
main_program._parameters_on_pservers.get_distributed_vars_by_vtypes(
["Optimizer", "RemotePrefetch"], groupby=True
)
)
exclude_var_names = []
if remote_params_map:
exclude_var_names.extend(remote_params_map.keys())
if main_program._distributed_lookup_table:
if isinstance(main_program._distributed_lookup_table, list):
exclude_var_names.extend(main_program._distributed_lookup_table)
else:
exclude_var_names.append(main_program._distributed_lookup_table)
local_vars = list(
filter(__exclude_vars(exclude_var_names), main_program.list_vars())
)
paddle.static.save_vars(
executor, main_program=main_program, dirname=dirname, vars=local_vars
)
if main_program._is_chief:
if remote_params_map:
__save_remote_params(executor, dirname, remote_params_map)
if main_program._distributed_lookup_table:
__save_distributed_lookup_tables(
executor,
dirname,
main_program._distributed_lookup_table,
main_program._endpoints,
)
def is_persistable(var):
"""
Check whether the given variable is persistable.
Args:
var(Variable): The variable to be checked.
Returns:
bool: True if the given `var` is persistable
False if not.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
param = fluid.default_main_program().global_block().var('fc.b')
res = fluid.io.is_persistable(param)
"""
if (
var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH
or var.desc.type() == core.VarDesc.VarType.FETCH_LIST
or var.desc.type() == core.VarDesc.VarType.READER
):
return False
return var.persistable
@dygraph_not_support
def save_persistables(executor, dirname, main_program=None, filename=None):
"""
    Save all persistable variables from :code:`main_program` to
    the folder :code:`dirname` or the file :code:`filename`. You can refer to
    :ref:`api_guide_model_save_reader_en` for more details.
The :code:`dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set :code:`filename` None; if you would like to save all variables in a
single file, use :code:`filename` to specify the file name.
Args:
executor(Executor): The executor to run for saving persistable variables.
You can refer to :ref:`api_guide_executor_en` for
more details.
dirname(str, optional): The saving directory path.
When you need to save the parameter to the memory, set it to None.
        main_program(Program, optional): The program whose persistable variables will
be saved. You can refer to
:ref:`api_guide_Program_en` for more details.
If it is None, the default main program will
be used.
Default: None.
filename(str, optional): The file to save all variables. If you prefer to
save variables in different files, set it to None.
Default: None.
Returns:
str: When saving parameters to a file, returns None.
When saving parameters to memory, returns a binary string containing parameters.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
dir_path = "./my_paddle_model"
file_name = "persistables"
            image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = paddle.static.DataFeeder(feed_list=[image, label], place=paddle.CPUPlace())
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(input=predict, label=label)
avg_loss = paddle.mean(loss)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
paddle.distributed.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
# The persistables variables weights and bias in the fc layer of the network
# are going to be saved in the same file named "persistables" in the path
# "./my_paddle_model"
"""
if main_program and main_program._is_distributed:
return _save_distributed_persistables(
executor, dirname=dirname, main_program=main_program
)
else:
return paddle.static.save_vars(
executor,
dirname=dirname,
main_program=main_program,
vars=None,
predicate=is_persistable,
filename=filename,
)
@static_only
def load_inference_model_distributed(
dirname,
executor,
model_filename=None,
params_filename=None,
pserver_endpoints=None,
):
"""
Load the inference model from a given directory. By this API, you can get the model
structure(Inference Program) and model parameters. If you just want to load
parameters of the pre-trained model, please use the :ref:`api_fluid_io_load_params` API.
You can refer to :ref:`api_guide_model_save_reader_en` for more details.
Args:
dirname(str): One of the following:
- The given directory path.
- Set to None when reading the model from memory.
executor(Executor): The executor to run for loading inference model.
See :ref:`api_guide_executor_en` for more details about it.
model_filename(str, optional): One of the following:
- The name of file to load the inference program.
- If it is None, the default filename ``__model__`` will be used.
- When ``dirname`` is ``None``, it must be set to a string containing model.
Default: ``None``.
params_filename(str, optional): It is only used for the case that all
parameters were saved in a single binary file. One of the following:
- The name of file to load all parameters.
- When ``dirname`` is ``None``, it must be set to a string containing all the parameters.
- If parameters were saved in separate files, set it as ``None``.
Default: ``None``.
pserver_endpoints(list, optional): It is only needed by the distributed inference.
If using a distributed look up table during the training,
this table is also needed by the inference process. Its value is
a list of pserver endpoints.
Returns:
list: The return of this API is a list with three elements:
(program, feed_target_names, fetch_targets). The `program` is a
``Program`` (refer to :ref:`api_guide_Program_en`), which is used for inference.
The `feed_target_names` is a list of ``str``, which contains names of variables
that need to feed data in the inference program. The `fetch_targets` is a list of
``Variable`` (refer to :ref:`api_guide_Program_en`). It contains variables from which
we can get inference results.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
# Build the model
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = fluid.layers.elementwise_add(hidden_w, b)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
# Save the inference model
path = "./infer_model"
fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
target_vars=[hidden_b], executor=exe, main_program=main_prog)
# Demo one. Not need to set the distributed look up table, because the
# training doesn't use a distributed look up table.
[inference_program, feed_target_names, fetch_targets] = (
paddle.distributed.io.load_inference_model_distributed(dirname=path, executor=exe))
tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# Demo two. If the training uses a distributed look up table, the pserver
# endpoints list should be supported when loading the inference model.
# The below is just an example.
endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
[dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
paddle.distributed.io.load_inference_model_distributed(dirname=path,
executor=exe,
pserver_endpoints=endpoints))
# In this example, the inference program was saved in the file
# "./infer_model/__model__" and parameters were saved in
# separate files under the directory "./infer_model".
# By the inference program, feed_target_names and
# fetch_targets, we can use an executor to run the inference
# program for getting the inference result.
"""
load_from_memory = False
if dirname is not None:
load_dirname = os.path.normpath(dirname)
if not os.path.isdir(load_dirname):
raise ValueError("There is no directory named '%s'" % dirname)
if model_filename is None:
model_filename = '__model__'
model_filename = os.path.join(
load_dirname, os.path.basename(model_filename)
)
if params_filename is not None:
params_filename = os.path.basename(params_filename)
with open(model_filename, "rb") as f:
program_desc_str = f.read()
else:
load_from_memory = True
if params_filename is None:
raise ValueError(
"The path of params cannot be None when the directory path is None."
)
        load_dirname = dirname
        program_desc_str = model_filename
program = Program.parse_from_string(program_desc_str)
if not core._is_program_version_supported(program._version()):
raise ValueError(
"Unsupported program version: %d\n" % program._version()
)
# Binary data also need versioning.
load_persistables(executor, load_dirname, program, params_filename)
feed_target_names = program.desc.get_feed_target_names()
fetch_target_names = program.desc.get_fetch_target_names()
fetch_targets = [
program.global_block().var(name) for name in fetch_target_names
]
return [program, feed_target_names, fetch_targets]
--- record ---
repo_name: epam/Indigo | path: /api/tests/integration/tests/rpe/properties.py | filename: properties.py | extension: py | length_bytes: 1505 | src_encoding: UTF-8 | language: Python
blob_id: 0f22caee1db0a402feeaab45cab0e73d54b8ca48 | directory_id: 182bbadb0ee7f59f1abd154d06484e555a30c6d8 | content_id: 5952a7ce2b71bbf80a074e76945608cfb518edbe
snapshot_id: 08559861adf474122366b6e2e499ed3aa56272d1 | revision_id: 8e473e69f393c3a57ff75b7728999c5fb4cbf1a3 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: C++
visit_date: 2023-09-02T10:14:46.843829 | revision_date: 2023-08-25T08:39:24 | committer_date: 2023-08-25T08:39:24 | gha_event_created_at: 2023-09-14T17:34:00 | gha_created_at: 2015-06-16T14:45:56
github_id: 37536320 | star_events_count: 265 | fork_events_count: 106 | is_vendor: false | is_generated: false
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import * # noqa
indigo = Indigo()
reaction = indigo.loadQueryReaction(
"Cl[C:1]([*:3])=O.[OH:2][*:4]>>[*:4][O:2][C:1]([*:3])=O"
)
print("=== Input Reaction ===\n{0}".format(reaction.canonicalSmiles()))
print("=== Input Monomers ===")
x = indigo.loadMolecule("CC(Cl)=O")
y = indigo.loadMolecule("OC1CCC(CC1)C(Cl)=O")
z = indigo.loadMolecule("O[C@H]1[C@H](O)[C@@H](O)[C@H](O)[C@@H](O)[C@@H]1O")
x.setProperty("name", "x")
y.setProperty("name", "y")
z.setProperty("name", "z")
for monomer in [x, y, z]:
    print(
        "{0} = {1}".format(
            monomer.getProperty("name"), monomer.canonicalSmiles()
        )
    )
monomers_table = indigo.createArray()
monomers_table.arrayAdd(indigo.createArray())
monomers_table.arrayAdd(indigo.createArray())
monomers_table.at(0).arrayAdd(x)
monomers_table.at(0).arrayAdd(y)
monomers_table.at(1).arrayAdd(z)
output_reactions = indigo.reactionProductEnumerate(reaction, monomers_table)
i = 1
for reaction in output_reactions.iterateArray():
print(
"=== Output Reaction #{0} ===\n{1}".format(
i, reaction.canonicalSmiles()
)
)
for monomer in reaction.iterateReactants():
print(
"\t{0} = {1}".format(
monomer.getProperty("name"), monomer.canonicalSmiles()
)
)
i = i + 1
--- record ---
repo_name: burakbayramli/books | path: /Data_Analysis_with_Open_Source_Tools/listings/ch16_lst1.py | filename: ch16_lst1.py | extension: py | length_bytes: 203 | src_encoding: UTF-8 | language: Python
blob_id: 68b05b4c4ee389b00656c6ed7c8b4efb3f9a0b12 | directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | content_id: 7bbdc7a181eb9503deeb4518719ea9888508a935
snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null | gha_language: Jupyter Notebook
visit_date: 2023-08-17T05:31:08.885134 | revision_date: 2023-08-14T10:05:37 | committer_date: 2023-08-14T10:05:37 | gha_event_created_at: 2022-10-24T12:15:06 | gha_created_at: 2016-10-31T17:24:00
github_id: 72460321 | star_events_count: 223 | fork_events_count: 174 | is_vendor: false | is_generated: false
import dbm

db = dbm.open("data.db", 'c')
db['abc'] = "123"
db['xyz'] = "Hello, World!"
db['42'] = "42"
print(db['abc'])   # dbm stores str as bytes, so this prints b'123'
del db['xyz']
for k in db.keys():
    print(db[k])
db.close()
--- record ---
repo_name: hail-is/hail | path: /batch/test/test_utils.py | filename: test_utils.py | extension: py | length_bytes: 1071 | src_encoding: UTF-8 | language: Python
blob_id: 24d3b41af68c192003024b03e0c94742dfd461bc | directory_id: 59f64b5cf799e31c97b11828dba4787afb8f3f17 | content_id: e381fba9848ecdc8ab0be4980a2dcdfe01eae337
snapshot_id: 2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1 | revision_id: 07a483ae0f46c66f3ed6fd265b48f48c06298f98 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-09-01T15:03:01.450365 | revision_date: 2023-09-01T02:46:35 | committer_date: 2023-09-01T02:46:35 | gha_event_created_at: 2023-09-14T21:53:32 | gha_created_at: 2015-10-27T20:55:42
github_id: 45069467 | star_events_count: 913 | fork_events_count: 262 | is_vendor: false | is_generated: false
from batch.cloud.resource_utils import adjust_cores_for_packability
from hailtop.batch_client.parse import parse_memory_in_bytes
def test_packability():
assert adjust_cores_for_packability(0) == 250
assert adjust_cores_for_packability(200) == 250
assert adjust_cores_for_packability(250) == 250
assert adjust_cores_for_packability(251) == 500
assert adjust_cores_for_packability(500) == 500
assert adjust_cores_for_packability(501) == 1000
assert adjust_cores_for_packability(1000) == 1000
assert adjust_cores_for_packability(1001) == 2000
assert adjust_cores_for_packability(2000) == 2000
assert adjust_cores_for_packability(2001) == 4000
assert adjust_cores_for_packability(3000) == 4000
assert adjust_cores_for_packability(4000) == 4000
assert adjust_cores_for_packability(4001) == 8000
assert adjust_cores_for_packability(8001) == 16000
def test_memory_str_to_bytes():
assert parse_memory_in_bytes('7') == 7
assert parse_memory_in_bytes('1K') == 1000
assert parse_memory_in_bytes('1Ki') == 1024
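# Hedged sketch (illustration only, not the Batch implementation): the
# assertions above are consistent with rounding a millicore request up to the
# nearest value of the form 250 * 2**k.
import math

def _illustrative_packable_cores(mcpu: int) -> int:
    # requests at or below a quarter core are rounded up to 250 mcpu;
    # everything else is rounded up to the next 250 * 2**k step.
    if mcpu <= 250:
        return 250
    return 250 * 2 ** math.ceil(math.log2(mcpu / 250))

def test_illustrative_packability_model_matches():
    for mcpu in (0, 200, 250, 251, 500, 501, 1000, 1001, 2000, 2001, 3000, 4000, 4001, 8001):
        assert _illustrative_packable_cores(mcpu) == adjust_cores_for_packability(mcpu)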
--- record ---
repo_name: ros-tooling/cross_compile | path: /test/test_sysroot_creator.py | filename: test_sysroot_creator.py | extension: py | length_bytes: 3621 | src_encoding: UTF-8 | language: Python
blob_id: b38781198828f76a6a294c4bf432462afa3f47bd | directory_id: af2d183c8281d5d3447b6ac799660fbb8684756c | content_id: 2e9d2b11575d5f99e1e1fcf9273b7c1befe5726c
snapshot_id: 4c8c441fd779b48f22448332a1199fb70a0a41a5 | revision_id: 1cedd079950bb184b8ae76198a1b92a8a8229912 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
visit_date: 2022-12-23T14:51:19.274643 | revision_date: 2022-12-14T16:51:57 | committer_date: 2022-12-14T16:51:57 | gha_event_created_at: 2022-12-14T16:51:58 | gha_created_at: 2018-11-30T14:09:07
github_id: 159832509 | star_events_count: 173 | fork_events_count: 51 | is_vendor: false | is_generated: false
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the `create_cc_sysroot.py` script."""
import os
from pathlib import Path
from platform import system
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from ros_cross_compile.platform import Platform
from ros_cross_compile.sysroot_creator import CreateSysrootStage
from ros_cross_compile.sysroot_creator import prepare_docker_build_environment
from ros_cross_compile.sysroot_creator import setup_emulator
from .utilities import default_pipeline_options
THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@patch('ros_cross_compile.sysroot_creator.py_platform.system', side_effect=lambda: 'Linux')
def test_emulator_not_installed(system_mock, tmpdir):
with pytest.raises(RuntimeError):
setup_emulator('not-an-arch', Path(str(tmpdir)))
@patch('ros_cross_compile.sysroot_creator.py_platform.system', side_effect=lambda: 'Darwin')
def test_emulator_touch(system_mock, tmpdir):
setup_emulator('aarch64', Path(str(tmpdir)))
def test_prepare_docker_build_basic(tmpdir):
platform = Platform('armhf', 'debian', 'melodic')
tmp = Path(str(tmpdir))
out_dir = prepare_docker_build_environment(platform, tmp, None, None)
if system() != 'Darwin':
assert (out_dir / 'bin' / 'qemu-arm-static').exists()
assert (out_dir / 'rosdep.Dockerfile').exists()
assert (out_dir / 'sysroot.Dockerfile').exists()
def test_run_twice(tmpdir):
# The test is that this doesn't throw an exception for already existing paths
platform = Platform('armhf', 'debian', 'noetic')
tmp = Path(str(tmpdir))
prepare_docker_build_environment(platform, tmp, None, None)
prepare_docker_build_environment(platform, tmp, None, None)
def test_prepare_docker_build_with_user_custom(tmpdir):
platform = Platform('aarch64', 'ubuntu', 'foxy')
tmp = Path(str(tmpdir))
this_dir = Path(__file__).parent
out_dir = prepare_docker_build_environment(
platform, tmp,
custom_data_dir=this_dir / 'data',
custom_setup_script=this_dir / 'user-custom-setup',
)
assert (out_dir / 'bin' / 'qemu-aarch64-static').exists()
assert (out_dir / 'rosdep_focal.Dockerfile').exists()
assert (out_dir / 'sysroot.Dockerfile').exists()
    assert (out_dir / 'custom-data' / 'arbitrary.txt').exists()
    assert (out_dir / 'user-custom-setup').exists()
def test_basic_sysroot_creation(tmpdir):
"""Very simple smoke test to validate that syntax is correct."""
# Very simple smoke test to validate that all internal syntax is correct
mock_docker_client = Mock()
mock_data_collector = Mock()
platform = Platform('aarch64', 'ubuntu', 'foxy')
stage = CreateSysrootStage()
stage(
platform,
mock_docker_client,
Path('dummy_path'),
default_pipeline_options(),
mock_data_collector)
assert mock_docker_client.build_image.call_count == 1
def test_create_sysroot_stage_creation():
temp_stage = CreateSysrootStage()
assert temp_stage
|
35480b0bd6c8e7c3a0af7ad880326a2ed16754af
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HeterogeneousCore/CUDATest/python/prod6FromCUDA_cfi.py
|
de894a5f326172948c64e85211db1342dd11a403
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
prod6FromCUDA_cfi.py
|
import FWCore.ParameterSet.Config as cms
from HeterogeneousCore.CUDATest.testCUDAProducerGPUtoCPU_cfi import testCUDAProducerGPUtoCPU as _testCUDAProducerGPUtoCPU
prod6FromCUDA = _testCUDAProducerGPUtoCPU.clone(src = "prod6CUDA")
--- record ---
repo_name: typeddjango/django-stubs | path: /django-stubs/contrib/gis/db/models/__init__.pyi | filename: __init__.pyi | extension: pyi | length_bytes: 847 | src_encoding: UTF-8 | language: Python
blob_id: 985328a088c03e9797dfb1d7bed8f8c44c641465 | directory_id: 2ad93a1cf25a580fe980482d2d17a657de3b2523 | content_id: 47d6738a8cbf0660ca3ccec24ca4ff03b6e8ac15
snapshot_id: f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32 | revision_id: 0117348c3c7713f25f96b46e53ebdeed7bdba544 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
visit_date: 2023-08-25T19:42:52.707151 | revision_date: 2023-08-23T15:13:25 | committer_date: 2023-08-23T15:13:25 | gha_event_created_at: 2023-09-13T19:05:06 | gha_created_at: 2018-07-29T17:08:50
github_id: 142779680 | star_events_count: 1133 | fork_events_count: 376 | is_vendor: false | is_generated: false
# noqa: F401
from django.contrib.gis.db.models.aggregates import *
from django.contrib.gis.db.models.fields import GeometryCollectionField as GeometryCollectionField
from django.contrib.gis.db.models.fields import GeometryField as GeometryField
from django.contrib.gis.db.models.fields import LineStringField as LineStringField
from django.contrib.gis.db.models.fields import MultiLineStringField as MultiLineStringField
from django.contrib.gis.db.models.fields import MultiPointField as MultiPointField
from django.contrib.gis.db.models.fields import MultiPolygonField as MultiPolygonField
from django.contrib.gis.db.models.fields import PointField as PointField
from django.contrib.gis.db.models.fields import PolygonField as PolygonField
from django.contrib.gis.db.models.fields import RasterField as RasterField
from django.db.models import *
--- record ---
repo_name: irods/irods | path: /scripts/irods/test/session.py | filename: session.py | extension: py | length_bytes: 13854 | src_encoding: UTF-8 | language: Python
blob_id: 28f8618e49c1d0af570e83ad7d89114b9b111e0b | directory_id: 78297bc868d588dd7a16cfea059ef7365ba18622 | content_id: d39bc34c218ea6a4410f7cf7bbd9efdfc21e92c8
snapshot_id: ab72a41fdf05a4a905c3e3a97bb7ba3c2a6ae52d | revision_id: f3ccaa842218e477395ebcf553639134433b63ee | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: C++
visit_date: 2023-09-01T20:12:33.322002 | revision_date: 2023-08-23T18:22:59 | committer_date: 2023-08-31T13:41:31 | gha_event_created_at: 2023-09-11T18:18:14 | gha_created_at: 2013-11-26T18:10:18
github_id: 14724975 | star_events_count: 381 | fork_events_count: 167 | is_vendor: false | is_generated: false
from __future__ import print_function
import contextlib
import datetime
import errno
import hashlib
import itertools
import json
import os
import shutil
import tempfile
import time
from .. import test
from .. import lib
from .. import paths
from . import settings
from ..configuration import IrodsConfig
from .command import assert_command, assert_command_fail
from .. import six
def make_session_for_existing_user(username, password, hostname, zone):
env_dict = lib.make_environment_dict(username, hostname, zone, use_ssl=test.settings.USE_SSL)
return IrodsSession(env_dict, password, False)
def make_session_for_existing_admin():
irods_config = IrodsConfig()
if irods_config.version_tuple < (4, 1, 0):
client_environment = open_and_load_pre410_env_file(os.path.join(irods_config.home_directory, '.irods', '.irodsEnv'))
else:
client_environment = irods_config.client_environment
username = client_environment['irods_user_name']
zone_name = client_environment['irods_zone_name']
env_dict = lib.make_environment_dict(
username, test.settings.ICAT_HOSTNAME, zone_name, use_ssl=test.settings.USE_SSL)
return IrodsSession(env_dict, test.settings.PREEXISTING_ADMIN_PASSWORD, False)
def mkuser_and_return_session(user_type, username, password, hostname):
irods_config = IrodsConfig()
if irods_config.version_tuple < (4, 1, 0):
client_environment = open_and_load_pre410_env_file(os.path.join(irods_config.home_directory, '.irods', '.irodsEnv'))
else:
client_environment = irods_config.client_environment
zone_name = client_environment['irods_zone_name']
with make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand(
['iadmin', 'mkuser', username, user_type])
if password is not None:
admin_session.assert_icommand(
['iadmin', 'moduser', username, 'password', password])
    manage_data = password is not None
env_dict = lib.make_environment_dict(username, hostname, zone_name, use_ssl=test.settings.USE_SSL)
return IrodsSession(env_dict, password, manage_data)
def mkgroup_and_add_users(group_name, usernames):
with make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand(['iadmin', 'mkgroup', group_name])
for username in usernames:
admin_session.assert_icommand(
['iadmin', 'atg', group_name, username])
def get_data_id(session, collection_name, data_name):
rc, out, err = session.assert_icommand(['iquest', "select DATA_ID where COLL_NAME = '{0}' and DATA_NAME = '{1}'".format(collection_name, data_name)], 'STDOUT_SINGLELINE', 'DATA_ID')
assert rc == 0, rc
assert err == '', err
lines = out.split()
assert len(lines) == 4, lines # make sure genquery only returned one result
return int(lines[2])
def make_sessions_mixin(rodsadmin_name_password_list, rodsuser_name_password_list):
class SessionsMixin(object):
def setUp(self):
with make_session_for_existing_admin() as admin_session:
self.admin_sessions = [mkuser_and_return_session('rodsadmin', name, password, lib.get_hostname())
for name, password in rodsadmin_name_password_list]
self.user_sessions = [mkuser_and_return_session('rodsuser', name, password, lib.get_hostname())
for name, password in rodsuser_name_password_list]
super(SessionsMixin, self).setUp()
def tearDown(self):
with make_session_for_existing_admin() as admin_session:
for session in itertools.chain(self.admin_sessions, self.user_sessions):
session.__exit__()
admin_session.assert_icommand(
['iadmin', 'rmuser', session.username])
super(SessionsMixin, self).tearDown()
return SessionsMixin
class IrodsSession(object):
def __init__(self, environment_file_contents, password, manage_irods_data):
self._environment_file_contents = environment_file_contents
self._password = password
self._manage_irods_data = manage_irods_data
self._environment_file_invalid = True
self._local_session_dir = tempfile.mkdtemp(prefix='irods-testing-')
self._environment_file_path = os.path.join(self._local_session_dir, 'irods_environment.json')
self._authentication_file_path = os.path.join(self._local_session_dir, 'irods_authentication')
self._session_id = datetime.datetime.utcnow().strftime('%Y-%m-%dZ%H:%M:%S--') + os.path.basename(self._local_session_dir)
if self._password is not None:
self.assert_icommand('iinit', 'STDOUT_SINGLELINE',
input=f'{self._password}\n')
if self._manage_irods_data:
self.assert_icommand(['imkdir', self.session_collection])
self.assert_icommand(['icd', self.session_collection])
@property
def environment_file_contents(self):
self._environment_file_invalid = True
return self._environment_file_contents
@environment_file_contents.setter
def environment_file_contents(self, value):
self._environment_file_invalid = True
self._environment_file_contents = value
@property
def zone_name(self):
return self._environment_file_contents['irods_zone_name']
@property
def username(self):
return self._environment_file_contents['irods_user_name']
@property
def qualified_username(self):
return self.username + '#' + self._environment_file_contents.get('irods_zone_name', '<undefined>')
@property
def password(self):
return self._password
@property
def default_resource(self):
return self._environment_file_contents['irods_default_resource']
@property
def local_session_dir(self):
return self._local_session_dir
@property
def home_collection(self):
return os.path.join('/', self.zone_name, 'home', self.username)
@property
def session_collection(self):
return os.path.join(self.home_collection, self._session_id)
@property
def session_collection_trash(self):
return self.session_collection.replace('/home/', '/trash/home/', 1)
def remote_home_collection(self, remote_zone_name):
return '/{0}/home/{1}#{2}'.format(remote_zone_name, self.username, self.zone_name)
def run_icommand(self, *args, **kwargs):
self._prepare_run_icommand(args[0], kwargs)
return lib.execute_command_permissive(*args, **kwargs)
def assert_icommand(self, *args, **kwargs):
self._prepare_run_icommand(args[0], kwargs)
return assert_command(*args, **kwargs)
def assert_icommand_fail(self, *args, **kwargs):
self._prepare_run_icommand(args[0], kwargs)
return assert_command_fail(*args, **kwargs)
def assert_irule(self, rule_contents, *args, **kwargs):
with contextlib.closing(tempfile.NamedTemporaryFile(mode='wt', suffix='.r', dir=self.local_session_dir)) as f:
print(rule_contents, end='', file=f)
f.flush()
self.assert_icommand("irule -F %s" % (f.name), *args, **kwargs)
def _prepare_run_icommand(self, arg, kwargs):
self._log_run_icommand(arg)
self._write_environment_file()
if 'env' not in kwargs:
kwargs['env'] = os.environ.copy()
if IrodsConfig().version_tuple < (4, 1, 0):
kwargs['env']['irodsEnvFile'] = self._environment_file_path
kwargs['env']['irodsAuthFileName'] = self._authentication_file_path
else:
kwargs['env']['IRODS_ENVIRONMENT_FILE'] = self._environment_file_path
kwargs['env']['IRODS_AUTHENTICATION_FILE'] = self._authentication_file_path
def _log_run_icommand(self, arg):
if isinstance(arg, six.string_types):
icommand = lib.safe_shlex_split_for_2_6(arg)[0]
log_string = arg
else:
icommand = arg[0]
log_string = ' '.join(arg)
message = ' --- IrodsSession: icommand executed by [{0}] [{1}] --- \n'.format(
self.qualified_username, log_string)
if IrodsConfig().version_tuple < (4, 2, 0):
server_log_dir = os.path.join(paths.irods_directory(), 'iRODS', 'server', 'log')
server_log_path = sorted([os.path.join(server_log_dir, name)
for name in os.listdir(server_log_dir)
if name.startswith('rodsLog')],
key=lambda path: os.path.getctime(path))[-1]
else:
server_log_path = paths.server_log_path()
lib.write_to_log(server_log_path, message)
print(message, end='')
def _write_environment_file(self):
if self._environment_file_invalid:
with open(self._environment_file_path, 'w') as f:
if IrodsConfig().version_tuple < (4, 1, 0):
for key, value in self._environment_file_contents.items():
if key in json_env_map:
env_line = '{setting} {value}\n'.format(setting=json_env_map[key], value=value)
f.write(env_line)
else:
json.dump(self._environment_file_contents, f)
self._environment_file_invalid = False
def __enter__(self):
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
if self._manage_irods_data:
self.assert_icommand('icd')
self.assert_icommand(['irm', '-rf', self.session_collection])
self.assert_icommand('irmtrash')
shutil.rmtree(self._local_session_dir)
def interrupt_icommand(self, fullcmd, filename, filesize):
''' Runs an icommand, but does not let it complete.
This function terminates the icommand once filename reaches (>=)
filesize in bytes.
Asserts that the icommand was successfully terminated early.
'''
filename = os.path.abspath(filename)
parameters = lib.safe_shlex_split_for_2_6(fullcmd)
print("\n")
print("INTERRUPTING iCMD")
print("running icommand: " + self.username + "[" + fullcmd + "]")
print(" filename set to: [" + filename + "]")
print(" filesize set to: [" + str(filesize) + "] bytes")
lib.write_to_log(
IrodsConfig().server_log_path, ' --- interrupt icommand [{0}] --- \n'.format(fullcmd))
env = os.environ.copy()
env['IRODS_ENVIRONMENT_FILE'] = self._environment_file_path
env['IRODS_AUTHENTICATION_FILE'] = self._authentication_file_path
self._write_environment_file()
timeout = 30
begin = time.time()
granularity = 0.005
p = lib.execute_command_nonblocking(parameters, env=env)
while time.time() - begin < timeout and (not os.path.exists(filename) or os.stat(filename).st_size < filesize):
time.sleep(granularity)
if (time.time() - begin) >= timeout:
            print(lib.execute_command(['ls', '-l', os.path.dirname(filename)])[1])
out, err = p.communicate()
print(out, err)
print(self.run_icommand(['ils', '-l'])[0])
assert False
elif p.poll() is None:
p.terminate()
else:
assert False
return 0
def get_entries_in_collection(self, collection):
out, _, _ = self.run_icommand(['ils', collection])
raw = out.strip().split('\n')
collection = raw[0]
entries = [entry.strip() for entry in raw[1:]]
return entries
def get_vault_path(self, resource='demoResc'):
out, err, rc = self.run_icommand(
['iquest', '%s', "select RESC_VAULT_PATH where RESC_NAME = '{0}'".format(resource)])
if err != '':
raise OSError(
err, 'iquest wrote to stderr when called from get_vault_path()')
return out.rstrip('\n')
def get_vault_session_path(self, resource='demoResc'):
return os.path.join(self.get_vault_path(resource),
"home",
self.username,
self._session_id)
# Two-way mapping of the new (json) and old iRODS environment setting names
json_env_map = {'irods_host': 'irodsHost',
'irods_port': 'irodsPort',
'irods_default_resource': 'irodsDefResource',
'irods_home': 'irodsHome',
'irods_cwd': 'irodsCwd',
'irods_user_name': 'irodsUserName',
'irods_zone_name': 'irodsZone',
'irods_client_server_negotiation': 'irodsClientServerNegotiation',
'irods_client_server_policy': 'irodsClientServerPolicy',
'irods_encryption_salt_size': 'irodsEncryptionSaltSize',
'irods_encryption_num_hash_rounds': 'irodsEncryptionNumHashRounds',
'irods_encryption_algorithm': 'irodsEncryptionAlgorithm',
'irods_default_hash_scheme': 'irodsDefaultHashScheme',
'irods_match_hash_policy': 'irodsMatchHashPolicy'}
json_env_map.update(dict([(val, key) for key, val in json_env_map.items()]))
def open_and_load_pre410_env_file(filename):
    # A very brittle parsing takes place here: each line of .irodsEnv is split
    # into tokens. If the first token matches a key in our old-new setting map,
    # we use the corresponding json setting name and the second token as the value.
irods_env = {}
with open(filename) as env_file:
for line in env_file.readlines():
tokens = line.strip().split()
if len(tokens) > 1 and tokens[0] in json_env_map:
irods_env[json_env_map[tokens[0]]] = tokens[1].strip("'")
return irods_env
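A minimal usage sketch of the parser above; the file contents here are invented for illustration, and the expected result simply mirrors what the two-way json_env_map produces:

# Sketch only: exercise open_and_load_pre410_env_file() against a made-up
# old-format .irodsEnv file. Old setting names are translated through
# json_env_map, single quotes are stripped, and every value stays a string.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.irodsEnv', delete=False) as f:
    f.write("irodsHost 'icat.example.org'\n")
    f.write("irodsPort 1247\n")
    f.write("irodsUserName 'alice'\n")

env = open_and_load_pre410_env_file(f.name)
assert env == {'irods_host': 'icat.example.org',
               'irods_port': '1247',
               'irods_user_name': 'alice'}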
|
35e77e440286df74748f640f5a06df324ce237ba | 52245910f830dbfb2b1432ad2a967df7321ee6de | /examples/apps/django_multi_apps/gbm/pn_app.py | bede5dc9316346195ac8cdb3fb945ec9c5657f99 | ["BSD-3-Clause"] | permissive | holoviz/panel | 92c19f979353d456512abbce5a027dff6ddb3a5c | 2c6e165e2bba96c0cb97947aa072d4429133cf7a | refs/heads/main | 2023-08-17T11:28:06.581979 | 2023-08-17T11:23:09 | 2023-08-17T11:23:09 | 145,848,899 | 2,544 | 373 | BSD-3-Clause | 2023-09-14T17:13:31 | 2018-08-23T12:14:24 | Python | UTF-8 | Python | false | false | 171 | py | pn_app.py
|
import panel as pn

from .pn_model import GBM


def app(doc):
    gbm = GBM()
    row = pn.Row(pn.Column(gbm.param, gbm.refresh), gbm.update_plot)
    row.server_doc(doc)
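Since app follows Bokeh's function-per-document convention (a callable taking a doc), one plausible way to mount it is on a plain Bokeh server. This launcher is a sketch, not part of the original module; the route and port are arbitrary:

# Hypothetical launcher; assumes it runs in a context where `app` above is
# importable. Bokeh wraps plain functions in a FunctionHandler.
from bokeh.server.server import Server

server = Server({'/gbm': app}, port=5006)
server.start()
server.io_loop.start()  # blocks; serves the app at http://localhost:5006/gbm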
|
3d377993ada7e7ad6988315c536b3435a6dc4a82 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/coinbase/diagnostics.py | 674ce9dca28ca995ab30f71fe2f6513665efe96d | ["Apache-2.0"] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 919 | py | diagnostics.py
|
"""Diagnostics support for Coinbase."""
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_API_TOKEN, CONF_ID
from homeassistant.core import HomeAssistant
from . import CoinbaseData
from .const import API_ACCOUNT_AMOUNT, API_RESOURCE_PATH, CONF_TITLE, DOMAIN
TO_REDACT = {
API_ACCOUNT_AMOUNT,
API_RESOURCE_PATH,
CONF_API_KEY,
CONF_API_TOKEN,
CONF_ID,
CONF_TITLE,
}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
instance: CoinbaseData = hass.data[DOMAIN][entry.entry_id]
return async_redact_data(
{
"entry": entry.as_dict(),
"accounts": instance.accounts,
},
TO_REDACT,
)
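For intuition, a small illustrative sketch of the redaction step; the sample data is invented, and my understanding is that the helper substitutes a "**REDACTED**" placeholder:

# Sketch: async_redact_data (a regular function despite the name) walks
# nested mappings and sequences and replaces the value of any key found in
# the redact set, leaving other keys untouched.
from homeassistant.components.diagnostics import async_redact_data

sample = {
    "entry": {"api_key": "abc123", "id": "account-1"},
    "accounts": [{"amount": "42", "currency": "USD"}],
}
redacted = async_redact_data(sample, {"api_key", "id", "amount"})
# api_key, id and amount are now placeholders; currency passes through.
print(redacted)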
|
5cff63f142708633848245305c5cefa7ea6eea82 | 7cec2737769c70a575aa8dbedf630276666ed199 | /elf/miasm_sandbox.py | fd34967bbfc1be1455cd1f6b908ff42062f70c27 | [] | no_license | Te-k/analyst-scripts | f73925d7cc4eba6ee30ac816f1f4999945dc2b22 | d8b3ac8260289e374618d2c07fcd1d9d71f43ad3 | refs/heads/master | 2023-07-05T20:54:11.598609 | 2023-06-29T16:33:59 | 2023-06-29T16:33:59 | 59,152,609 | 126 | 43 | null | 2019-10-07T22:26:44 | 2016-05-18T21:29:46 | Python | UTF-8 | Python | false | false | 500 | py | miasm_sandbox.py
|
import logging
from pdb import pm

from miasm.analysis.sandbox import Sandbox_Linux_x86_64
from miasm.jitter.jitload import log_func

# Insert here user defined methods

# Parse arguments
parser = Sandbox_Linux_x86_64.parser(description="ELF sandboxer")
parser.add_argument("filename", help="ELF Filename")
options = parser.parse_args()

# Create sandbox
sb = Sandbox_Linux_x86_64(options.filename, options, globals())
log_func.setLevel(logging.ERROR)

# Run
sb.run()

assert sb.jitter.run is False
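The "Insert here user defined methods" marker is where import stubs usually go. A sketch of one such stub, following miasm's xxx_<symbol> naming convention; the choice of puts and the System V calling-convention helpers are illustrative assumptions:

# Hypothetical stub for an unresolved libc import; since the sandbox is built
# with globals(), defining xxx_puts before sb.run() lets miasm route calls to
# puts() here.
def xxx_puts(jitter):
    ret_ad, args = jitter.func_args_systemv(['s'])   # one pointer argument
    print(jitter.get_c_str(args.s))                  # read the NUL-terminated string
    return jitter.func_ret_systemv(ret_ad, 1)        # return to caller with a result of 1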
|
6bb9276567d2ceaa8aa8a72e498bf5723bb3e293 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Intro_Solid_Mechanics_Adeeb/12.1.2.1.py | 517747ec899bb12757129b2237eea6ee9483b1c2 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,049 | py | 12.1.2.1.py
|
import sympy as sp
import numpy as np
from sympy import lambdify
from matplotlib import pyplot as plt

x = sp.symbols("x")
xL = np.arange(0, 1, 0.01)
c, L, EA = 1, 1, 1

# Exact Calculations
u_exact = c*L**2/2/EA*x - c/sp.Rational("6")/EA*x**3
s_exact = u_exact.diff(x)
print("u_exact(x) =", u_exact, "s_exact(x) =", s_exact)
F = lambdify(x, u_exact)
dF = lambdify(x, s_exact)
y_exact = F(xL)
sy_exact = dF(xL)

# Piecewise-linear (hat) shape functions and their derivatives. The slope of
# each descending branch is -4/L (the original wrote -4, which is numerically
# identical here only because L = 1).
def PN1(x):
    conds = [(x>=0)&(x<=L/4), (x>=L/4)&(x<L/2)]
    functions = [lambda x: 4*x/L, lambda x: 4/L*(L/2-x)]
    Dfunctions = [lambda x: 4/L, lambda x: -4/L]
    return np.piecewise(x, conds, functions), np.piecewise(x, conds, Dfunctions)

def PN2(x):
    conds = [(x>=L/4)&(x<=L/2), (x>=L/2)&(x<3*L/4)]
    functions = [lambda x: 4/L*(-L/4+x), lambda x: 4/L*(3*L/4-x)]
    Dfunctions = [lambda x: 4/L, lambda x: -4/L]
    return np.piecewise(x, conds, functions), np.piecewise(x, conds, Dfunctions)

def PN3(x):
    conds = [(x>=L/2)&(x<=3*L/4), (x>=3*L/4)&(x<=L)]
    functions = [lambda x: 4/L*(-L/2+x), lambda x: 4/L*(L-x)]
    Dfunctions = [lambda x: 4/L, lambda x: -4/L]
    return np.piecewise(x, conds, functions), np.piecewise(x, conds, Dfunctions)

def PN4(x):
    conds = [(x>=3*L/4)&(x<=L)]
    functions = [lambda x: 4/L*(-3*L/4+x)]
    Dfunctions = [lambda x: 4/L]
    return np.piecewise(x, conds, functions), np.piecewise(x, conds, Dfunctions)

N1, DN1 = PN1(xL)
N2, DN2 = PN2(xL)
N3, DN3 = PN3(xL)
N4, DN4 = PN4(xL)

# Nodal displacements from the FEA solution
u1 = c*L**3/EA*(47/384)
u2 = c*L**3/EA*(11/48)
u3 = c*L**3/EA*(39/128)
u4 = c*L**3/EA*(1/3)
u = u1*N1 + u2*N2 + u3*N3 + u4*N4
up = u1*DN1 + u2*DN2 + u3*DN3 + u4*DN4

fig, ax = plt.subplots(2, figsize=(6,8))
ax[0].set_title("")
ax[0].set_xlabel("x")
ax[0].set_ylabel("displacement")
ax[0].plot(xL, y_exact, label="(c*L^2*x)/(2*EA) - (c*x^3)/(6*EA)")
ax[0].plot(xL, u, label="u_FEA")
ax[1].plot(xL, sy_exact, label="sigma_exact")
ax[1].plot(xL, up, label="sigma_FEA")
ax[1].set_ylabel("sigma11")
for i in ax:
    i.grid(True, which='both')
    i.axhline(y=0, color='k')
    i.axvline(x=0, color='k')
    i.set_xlabel("x")
    i.legend()
plt.savefig('/tmp/out-12.1.2.1.jpg')
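As a sanity check on the closed-form field used above: the exact solution is consistent with a bar under the linearly varying axial load p(x) = c*x, fixed at x = 0 and traction-free at x = L. That boundary-value problem is inferred from the solution, not stated in the script, so the check below is a sketch under that assumption:

# Sketch: verify EA*u'' + c*x = 0, u(0) = 0 and u'(L) = 0 symbolically.
import sympy as sp

xs, cs, Ls, EAs = sp.symbols("x c L EA", positive=True)
u = cs*Ls**2/(2*EAs)*xs - cs/(6*EAs)*xs**3
assert sp.simplify(EAs*u.diff(xs, 2) + cs*xs) == 0   # equilibrium
assert u.subs(xs, 0) == 0                            # fixed end
assert sp.simplify(u.diff(xs).subs(xs, Ls)) == 0     # traction-free end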
|
8b0c7bcbb2a9acce3cfebd3e9606ac1fb22ee980 | 0bd8ef0ac54e82ba20e5f5e9cc554e7a26094daa | /{{cookiecutter.project_slug}}/src/{{cookiecutter.app_name}}/api/hello.py | 464913cb6ed248c40643fcadc72d736b913e17e1 | ["MIT"] | permissive | mdklatt/cookiecutter-python-app | ef40b77efad195aa4ebc21e39785e17fa1ef15a1 | 02c1c78bd69bd9d3ec30396ed0bc2455164d2a78 | refs/heads/main | 2023-07-05T22:35:50.054536 | 2023-07-02T16:30:50 | 2023-07-02T17:05:55 | 40,841,335 | 132 | 37 | MIT | 2023-04-21T15:10:07 | 2015-08-16T21:12:20 | Python | UTF-8 | Python | false | false | 260 | py | hello.py
|
""" Implement the hello command.
"""
from ..core.logger import logger
def main(name="World") -> str:
""" Execute the command.
:param name: name to use in greeting
"""
logger.debug("executing hello command")
return f"Hello, {name}!"
|