Column schema (type and observed value range for each field):

blob_id: string, length 40-40
directory_id: string, length 40-40
path: string, length 4-721
content_id: string, length 40-40
detected_licenses: list, length 0-57
license_type: string, 2 classes
repo_name: string, length 5-91
snapshot_id: string, length 40-40
revision_id: string, length 40-40
branch_name: string, 321 classes
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 classes
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable
gha_language: string, 147 classes
src_encoding: string, 26 classes
language: string, 2 classes
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 6 to 10.2M
extension: string, 115 classes
filename: string, length 3-113
content: string, length 6-10.2M
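Each row below pairs this metadata with the full file text in the `content` column, so a dump like this can be sliced with ordinary dataframe tooling. A minimal sketch, assuming the rows have been exported locally; the file name `code_rows.parquet` is a placeholder, not something provided by the dataset:

```python
import pandas as pd

# Hypothetical local export of rows with the schema above.
df = pd.read_parquet("code_rows.parquet")

# Keep small, human-written, permissively licensed Python files.
mask = (
    (df["language"] == "Python")
    & (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 10_000)
)
for _, row in df[mask].head(3).iterrows():
    print(row["repo_name"], row["path"], f'{row["length_bytes"]} bytes')
```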

blob_id: e234dbdcf2925dd2a6ff2202ba4a3e08319abbd2 | directory_id: fd9f28e38a133e8a52d80ccc898bdf3725cd317d | content_id: d6eb67f1222ee801a566284edeb1279f205bdcea
repo_name: glemmaPaul/django-taggit-serializer | path: /tests/models.py | filename: models.py | extension: py | length_bytes: 135
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
snapshot_id: f5f3d2ad6f98c67a4b69076e4cea7a29e661a700 | revision_id: 9cb3350a09091f80099b56bfa64cdeea5a0c2607 | branch_name: refs/heads/master
visit_date: 2023-05-29T09:27:09.851853 | revision_date: 2023-05-10T13:44:46 | committer_date: 2023-05-10T13:44:46 | gha_event_created_at: 2022-12-26T20:15:05 | gha_created_at: 2015-02-20T03:17:50
github_id: 31,049,243 | star_events_count: 208 | fork_events_count: 61 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.db import models
from taggit.managers import TaggableManager
class TestModel(models.Model):
tags = TaggableManager()

blob_id: e138ba3502239d7f6350ef8ee6ca3e746ab8e79b | directory_id: 0a23c93c0b61301081bd914754f88fbad29de00d | content_id: 52da0ef48aed89364dfacdf905b8388c31e64920
repo_name: NUAA-AL/ALiPy | path: /alipy/utils/misc.py | filename: misc.py | extension: py | length_bytes: 7,049
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
snapshot_id: 187d4f3aa5a3e04324f5f98da8dc197a386d1079 | revision_id: 1b2ee2e5acc2e8651fc64759aae332853ad9e437 | branch_name: refs/heads/master
visit_date: 2023-07-03T15:56:18.935587 | revision_date: 2022-09-17T11:16:05 | committer_date: 2022-09-17T11:16:05 | gha_event_created_at: 2022-09-17T11:16:06 | gha_created_at: 2018-09-19T07:54:37
github_id: 149,413,428 | star_events_count: 844 | fork_events_count: 127 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""
Misc functions to be settled
"""
from __future__ import division
import numpy as np
from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, \
rbf_kernel
from sklearn.utils.validation import check_array
from .interface import BaseCollection
__all__ = ['check_matrix',
'get_gaussian_kernel_mat',
'randperm',
'nlargestarg',
'nsmallestarg',
'calc_kernel_matrix',
'check_one_to_one_correspondence',
'unpack',
]
def check_matrix(matrix):
"""check if the given matrix is legal."""
matrix = check_array(matrix, accept_sparse='csr', ensure_2d=False, order='C')
if matrix.ndim != 2:
if matrix.ndim == 1 and len(matrix) == 1:
matrix = matrix.reshape(1, -1)
else:
raise TypeError("Matrix should be a 2D array with [n_samples, n_features] or [n_samples, n_classes].")
return matrix
def get_gaussian_kernel_mat(X, sigma=1.0, check_arr=True):
"""Calculate kernel matrix between X and X.
Parameters
----------
X: np.ndarray
data matrix with [n_samples, n_features]
sigma: float, optional (default=1.0)
the width in gaussian kernel.
check_arr: bool, optional (default=True)
whether to check the given feature matrix.
Returns
-------
K: np.ndarray
Kernel matrix between X and X.
"""
if check_arr:
X = check_array(X, accept_sparse='csr', ensure_2d=True, order='C')
else:
if not isinstance(X, np.ndarray):
X = np.asarray(X)
n = X.shape[0]
tmp = np.sum(X ** 2, axis=1).reshape(1, -1)
return np.exp((-tmp.T.dot(np.ones((1, n))) - np.ones((n, 1)).dot(tmp) + 2 * (X.dot(X.T))) / (2 * (sigma ** 2)))
def randperm(n, k=None):
"""Generate a random array which contains k elements range from (n[0]:n[1])
Parameters
----------
n: int or tuple
range from [n[0]:n[1]], include n[0] and n[1].
if an int is given, then n[0] = 0
k: int, optional (default=end - start + 1)
how many numbers will be generated. Should not be larger than n[1]-n[0]+1,
default=n[1] - n[0] + 1.
Returns
-------
perm: list
the generated array.
"""
if isinstance(n, np.generic):
# n = np.asscalar(n) # deprecated in numpy v1.16
n = n.item()
if isinstance(n, tuple):
if n[0] is not None:
start = n[0]
else:
start = 0
end = n[1]
elif isinstance(n, int):
start = 0
end = n
else:
raise TypeError("n must be tuple or int.")
if k is None:
k = end - start + 1
if not isinstance(k, int):
raise TypeError("k must be an int.")
if k > end - start + 1:
raise ValueError("k should not larger than n[1]-n[0]+1")
randarr = np.arange(start, end + 1)
np.random.shuffle(randarr)
return randarr[0:k]
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def nlargestarg(a, n):
"""Return n largest values' indexes of the given array a.
Parameters
----------
a: {list, np.ndarray}
Data array.
n: int
The number of returned args.
Returns
-------
nlargestarg: list
The n largest args in array a.
"""
assert (_is_arraylike(a))
assert (n > 0)
argret = np.argsort(a)
# ascent
return argret[argret.size - n:]
def nsmallestarg(a, n):
"""Return n smallest values' indexes of the given array a.
Parameters
----------
a: {list, np.ndarray}
Data array.
n: int
The number of returned args.
Returns
-------
nsmallestarg: list
The n smallest args in array a.
"""
assert (_is_arraylike(a))
assert (n > 0)
argret = np.argsort(a)
# ascent
return argret[0:n]
def calc_kernel_matrix(X, kernel, **kwargs):
"""calculate kernel matrix between X and X.
Parameters
----------
kernel : {'linear', 'poly', 'rbf', callable}, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', or a callable.
If a callable is given it is used to pre-compute the kernel matrix
from data matrices; that matrix should be an array of shape
``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=1.)
Kernel coefficient for 'rbf', 'poly'.
coef0 : float, optional (default=1.)
Independent term in kernel function.
It is only significant in 'poly'.
Returns
-------
kernel-matrix: array of shape (n_samples_1, n_samples_2)
kernel matrix between X and X.
"""
if kernel == 'rbf':
K = rbf_kernel(X=X, Y=X, gamma=kwargs.pop('gamma', 1.))
elif kernel == 'poly':
K = polynomial_kernel(X=X,
Y=X,
coef0=kwargs.pop('coef0', 1),
degree=kwargs.pop('degree', 3),
gamma=kwargs.pop('gamma', 1.))
elif kernel == 'linear':
K = linear_kernel(X=X, Y=X)
elif hasattr(kernel, '__call__'):
K = kernel(X=np.array(X), Y=np.array(X))
else:
raise NotImplementedError
return K
def check_one_to_one_correspondence(*args):
"""Check if the parameters are one-to-one correspondence.
Parameters
----------
args: object
The parameters to test.
Returns
-------
result: int
Whether the parameters are one-to-one correspondence.
1 : yes
0 : no
-1: some parameters have the length 1.
"""
first_not_none = True
result = True
for item in args:
# only check not none object
if item is not None:
if first_not_none:
# record item type
first_not_none = False
if_array = isinstance(item, (list, np.ndarray, BaseCollection))
if if_array:
itemlen = len(item)
else:
itemlen = 1
else:
if isinstance(item, (list, np.ndarray, BaseCollection)):
if len(item) != itemlen:
return False
else:
if itemlen != 1:
return False
return True
def unpack(*args):
"""Unpack the list with only one element.
"""
ret_args = []
for arg in args:
if isinstance(arg, (list, np.ndarray, BaseCollection)):
if len(arg) == 1:
ret_args.append(arg[0])
else:
ret_args.append(arg)
else:
ret_args.append(arg)
return tuple(ret_args)
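As a quick illustration of the helpers above (not part of the dataset row): the kernel, permutation, and arg-selection utilities can be exercised as below, assuming `alipy.utils.misc` is importable from an installed ALiPy, since the file itself uses a relative import (`from .interface import BaseCollection`) and will not run standalone.

```python
import numpy as np
from alipy.utils.misc import get_gaussian_kernel_mat, nlargestarg, randperm

X = np.random.rand(5, 3)
K = get_gaussian_kernel_mat(X, sigma=1.0)  # (5, 5) Gaussian kernel; diagonal entries are 1
scores = [0.1, 0.7, 0.3, 0.9]
print(nlargestarg(scores, 2))   # indices of the two largest values: [1 3]
print(randperm((0, 9), k=5))    # 5 distinct integers drawn from 0..9, in random order
```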

blob_id: f451847e811a3660d97a28a8d266ec5b5022c4a7 | directory_id: 5ef6c8d47864f471e26b9902d61f8c687e941f05 | content_id: b69b3a158b7d841f8315fec9f1fc4364aa1602a4
repo_name: CiscoTestAutomation/genieparser | path: /src/genie/libs/parser/iosxe/tests/ShowLicense/cli/equal/golden_output1_expected.py | filename: golden_output1_expected.py | extension: py | length_bytes: 2,949
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
snapshot_id: 169c196558f1c1a0f0d10650876096f993224917 | revision_id: b531eff760b2e44cd69d7a2716db6f866907c239 | branch_name: refs/heads/master
visit_date: 2023-09-03T08:56:18.831340 | revision_date: 2023-08-29T22:32:02 | committer_date: 2023-08-29T22:32:02 | gha_event_created_at: 2023-08-29T22:32:04 | gha_created_at: 2018-04-30T16:51:50
github_id: 131,621,824 | star_events_count: 247 | fork_events_count: 409 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
expected_output = {
"licenses": {
1: {
"feature": "appxk9",
"period_left": "Life time",
"license_type": "Permanent",
"license_state": "Active, In Use",
"count": "Non-Counted",
"license_priority": "Medium",
},
2: {
"feature": "uck9",
"period_left": "Not Activated",
"period_minutes": 0,
"period_seconds": 0,
"license_type": "EvalRightToUse",
"license_state": "Active, Not in Use, EULA not accepted",
"count": "Non-Counted",
"license_priority": "None",
},
3: {
"feature": "securityk9",
"period_left": "Life time",
"license_type": "Permanent",
"license_state": "Active, In Use",
"count": "Non-Counted",
"license_priority": "Medium",
},
4: {
"feature": "ipbasek9",
"period_left": "Life time",
"license_type": "Permanent",
"license_state": "Active, In Use",
"count": "Non-Counted",
"license_priority": "Medium",
},
5: {
"feature": "FoundationSuiteK9",
"period_left": "Not Activated",
"period_minutes": 0,
"period_seconds": 0,
"license_type": "EvalRightToUse",
"license_state": "Active, Not in Use, EULA not accepted",
"count": "Non-Counted",
"license_priority": "None",
},
6: {
"feature": "AdvUCSuiteK9",
"period_left": "Not Activated",
"period_minutes": 0,
"period_seconds": 0,
"license_type": "EvalRightToUse",
"license_state": "Active, Not in Use, EULA not accepted",
"count": "Non-Counted",
"license_priority": "None",
},
7: {
"feature": "cme-srst",
"period_left": "Not Activated",
"period_minutes": 0,
"period_seconds": 0,
"license_type": "EvalRightToUse",
"license_state": "Active, Not in Use, EULA not accepted",
"count_in_use": 0,
"count_violation": 0,
"license_priority": "None",
},
8: {
"feature": "hseck9",
"period_left": "Life time",
"license_type": "Permanent",
"license_state": "Active, In Use",
"count": "Non-Counted",
"license_priority": "Medium",
},
9: {
"feature": "throughput",
"period_left": "Not Activated",
"period_minutes": 0,
"period_seconds": 0,
"license_type": "EvalRightToUse",
"license_state": "Active, Not in Use, EULA not accepted",
"count": "Non-Counted",
"license_priority": "None",
},
}
}

blob_id: 01360aa5656ff8b8aeee8e3629ca843798f93595 | directory_id: 4b0a7c7cb4dd47e10e60381fe1fc0758d1659751 | content_id: 54a5c067a7ec7fa48d188855b591f53602f94cdd
repo_name: google-research/robustness_metrics | path: /robustness_metrics/models/random_imagenet_numpy.py | filename: random_imagenet_numpy.py | extension: py | length_bytes: 1,035
detected_licenses: ["MIT", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Jupyter Notebook
snapshot_id: c6e36e72f251b10af2522ecb87ad090ad45097f9 | revision_id: 3e714b3dcce386fa3f473dff8a0f19d7108c18fd | branch_name: refs/heads/master
visit_date: 2023-08-31T00:21:59.320508 | revision_date: 2023-08-21T18:45:14 | committer_date: 2023-08-21T18:45:42 | gha_event_created_at: 2023-08-21T18:45:47 | gha_created_at: 2020-09-04T08:24:27
github_id: 292,792,659 | star_events_count: 451 | fork_events_count: 28 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# coding=utf-8
# Copyright 2023 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example numpy model."""
import numpy as np
from scipy.special import softmax
class Model:
def __init__(self):
self.weights = np.random.randn(3, 1000)
def __call__(self, features):
images = features["image"].numpy().reshape((-1, 224, 224, 3))
means = np.mean(images, axis=(1, 2))
logits = np.matmul(means, self.weights)
return softmax(logits, axis=-1)
def create():
return Model(), None
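For illustration only (not part of the dataset row): the random baseline above can be poked at without the robustness_metrics pipeline. The `_ArrayWrapper` below is a hypothetical stand-in for the framework tensor whose `.numpy()` method the model expects, and `create()`/`Model` are assumed to be in scope from the file above.

```python
import numpy as np

class _ArrayWrapper:
    """Minimal stand-in for a tensor: only exposes .numpy(), which Model.__call__ requires."""
    def __init__(self, array):
        self._array = array
    def numpy(self):
        return self._array

model, _ = create()  # create() and Model come from the file above
batch = {"image": _ArrayWrapper(np.random.rand(2, 224, 224, 3))}
probs = model(batch)
print(probs.shape)          # (2, 1000)
print(probs.sum(axis=-1))   # each row sums to ~1.0 after the softmax
```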

blob_id: 3df8271225894130668d4ccca0cb255d042f20c0 | directory_id: e04a5b20f946c5033f24d4dd8acda395a98747c5 | content_id: 914d35b39675ea76a302fa9a8124cd88cb5f2bef
repo_name: dataiku/dataiku-contrib | path: /esri-geo-enrichment/python-lib/geocoder_common.py | filename: geocoder_common.py | extension: py | length_bytes: 7,769
detected_licenses: ["LicenseRef-scancode-free-unknown", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-other-permissive"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python
snapshot_id: 2a2f2fb420d7f2ab49b82d80659cc6f6ec1d8f61 | revision_id: 9a9f189e8a544a81c205d8a8b3779d4517b88653 | branch_name: refs/heads/master
visit_date: 2023-09-04T03:33:58.625093 | revision_date: 2023-04-26T08:17:34 | committer_date: 2023-04-26T08:17:34 | gha_event_created_at: 2023-06-08T21:29:07 | gha_created_at: 2015-10-27T22:41:00
github_id: 45,074,604 | star_events_count: 103 | fork_events_count: 94 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# -*- coding: utf-8 -*-
import dataiku
import pandas as pd, numpy as np
from dataiku import pandasutils as pdu
import requests
import urllib
import json
import time
from datetime import datetime
from dataiku.customrecipe import *
import dataiku_esri_utils
import common
from dataiku_esri_utils import recipe_config_get_str_or_none
def run_geocoding_recipe(is_detailed):
P_USERNAME, P_PASSWORD, P_TOKEN_EXPIRATION, P_BATCH_SIZE_UNIT, P_EPSG_OUT_SR, \
P_PAUSE, P_SAMPLE = common.read_common_params()
P_COLUMN_OBJECT_ID = get_recipe_config()['column_object_id']
P_COLUMN_ADDRESS = get_recipe_config()['column_adress']
# Optional parameters
P_COLUMN_COUNTRY = recipe_config_get_str_or_none('column_country')
P_ADRESS_CATEGORY = recipe_config_get_str_or_none('category')
# Only for detailed mode
if is_detailed:
P_COLUMN_CITY = get_recipe_config()['column_city']
P_COLUMN_POSTAL = recipe_config_get_str_or_none('column_postal')
P_COLUMN_REGION = recipe_config_get_str_or_none('column_region')
# Input and outputs
input_name = get_input_names_for_role('input')[0]
output_geocoding_results = get_output_names_for_role('results')[0]
result_dataset = dataiku.Dataset(output_geocoding_results)
log_api_dataset = None
if len(get_output_names_for_role('log')) > 0:
log_api_dataset = dataiku.Dataset( get_output_names_for_role('log')[0])
(app_token,_) = dataiku_esri_utils.get_token_from_login_password(P_USERNAME,P_PASSWORD,P_TOKEN_EXPIRATION)
output_df = {
"value" : pd.DataFrame(),
"log" : pd.DataFrame()
}
for i,df in enumerate(dataiku.Dataset(input_name).iter_dataframes(chunksize= P_BATCH_SIZE_UNIT )):
print 'Processing batch #%s' % (i)
if P_SAMPLE > 0:
df = df.head(P_SAMPLE)
#'https://developers.arcgis.com/rest/geocode/api-reference/geocoding-service-output.htm#ESRI_SECTION1_42D7D3D0231241E9B656C01438209440'
def return_geocoder_results(params_dict):
print "Requesting data as %s" % params_dict
return requests.post('https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/geocodeAddresses', data = params_dict)
def get_params_dict(attributes_dict_list, **kwargs):
params_dict = {
'addresses': str({"records" : attributes_dict_list})
,'token':app_token
,'outSR':P_EPSG_OUT_SR
,'f':'json'
,'forStorage': common.FOR_STORAGE
}
if P_ADRESS_CATEGORY is not None:
params_dict['category'] = P_ADRESS_CATEGORY
for (k, v) in kwargs.items():
params_dict[k] = v
return params_dict
def build_attributes(batch_record):
attrs = {
'OBJECTID': batch_record[P_COLUMN_OBJECT_ID],
'Address': batch_record[P_COLUMN_ADDRESS]
}
if is_detailed:
attrs["City"] = batch_record[P_COLUMN_CITY]
attrs["Postal"] = '' if P_COLUMN_POSTAL is None else batch_record[P_COLUMN_POSTAL]
attrs["Region"] = '' if P_COLUMN_REGION is None else batch_record[P_COLUMN_REGION]
return {'attributes': attrs }
def parse_results(api_result):
n = len(api_result[u'locations'])
for ii in range(0,n):
result_dict = api_result[u'locations'][ii][u'attributes']
UserObjectID = result_dict[u'ResultID']
result_dict['latestWkid'] = api_result[u'spatialReference']['latestWkid']
result_dict['wkid'] = api_result[u'spatialReference']['wkid']
result_dict['collected_at']= datetime.now().isoformat()
df_value_tmp = pd.DataFrame.from_dict(result_dict, orient='index').T
output_df["value"] = pd.concat((output_df["value"], df_value_tmp), axis=0)
if P_COLUMN_COUNTRY is not None:
print "Using per-country mode"
# Case 1: country is in the data
# We'll make a query for each country + a generic query for lines without country data
# Build a dataframe of the rows where country is filled
df_nn = df[df[P_COLUMN_COUNTRY].notnull()]
nb_records_df_nn = df_nn.shape[0]
print 'Processing data to geocode: %s adresses in this batch with a country value...' % (nb_records_df_nn)
df_nn = df_nn.sort_values([P_COLUMN_COUNTRY],ascending=[1])
dicz = {k: list(v) for k,v in df_nn.groupby(P_COLUMN_COUNTRY)[P_COLUMN_OBJECT_ID]}
# Iterate on all countries present in the chunk
for c in dicz:
dfsub_nn = df_nn[(df_nn[P_COLUMN_COUNTRY]==c)]
assert dfsub_nn.shape[0] > 0
attributes_dict_list = []
for batch_record in dfsub_nn.to_dict('records'):
attributes_dict_list.append(build_attributes(batch_record))
params_dict = get_params_dict(attributes_dict_list, sourceCountry = c)
# Send the query
query_at = datetime.now().isoformat()
api_resp = return_geocoder_results(params_dict)
if api_resp.status_code == 200 and not "error" in api_resp.json():
parse_results(api_resp.json())
output_df["log"] = common.log_api_message(output_df["log"], api_resp, i, params_dict, query_at,
"country-col-country", country=c)
# Make a final query with the lines without country specified
df_null = df[-df[P_COLUMN_COUNTRY].notnull()]
nb_records_df_null = df_null.shape[0]
print 'Processing data to geocode: %s adresses in this batch without a country value...' % (nb_records_df_null)
if nb_records_df_null > 0:
df_null = df_null.sort_values([P_COLUMN_COUNTRY],ascending=[1])
attributes_dict_list_null = []
for batch_record_null in df_null.to_dict('records'):
attributes_dict_list_null.append(build_attributes(batch_record_null))
params_dict_null = get_params_dict(attributes_dict_list_null)
# Send the query
query_at = datetime.now().isoformat()
api_resp = return_geocoder_results(params_dict_null)
if api_resp.status_code == 200 and not "error" in api_resp.json():
parse_results(api_resp.json())
output_df["log"] = common.log_api_message(output_df["log"], api_resp, i, params_dict_null, query_at,
"country-col-no-country")
else:
print "Using auto-country mode"
# Case 2: the country must be guessed
attributes_dict_list = []
for batch_record in df.to_dict('records'):
attributes_dict_list.append(build_attributes(batch_record))
params_dict = get_params_dict(attributes_dict_list)
# Send the query
query_at = datetime.now().isoformat()
api_resp = return_geocoder_results(params_dict)
if api_resp.status_code == 200 and not "error" in api_resp.json():
parse_results(api_resp.json())
output_df["log"] = common.log_api_message(output_df["log"], api_resp, i, params_dict, query_at,
"country-nocol")
# Wait before the next batch
time.sleep(P_PAUSE)
# Flush results
result_dataset.write_with_schema(output_df["value"])
if log_api_dataset is not None:
log_api_dataset.write_with_schema(output_df["log"])

blob_id: 13fef7a730bf880efe4807a06008fc4958fcade4 | directory_id: e76a79816ff5203be2c4061e263a09d31072c940 | content_id: 309cb96c69d96193797f6c3065058c6f91e56a93
repo_name: facebook/buck | path: /programs/buck_version.py | filename: buck_version.py | extension: py | length_bytes: 4,402
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Java
snapshot_id: ef3a833334499b1b44c586e9bc5e2eec8d930e09 | revision_id: 9c7c421e49f4d92d67321f18c6d1cd90974c77c4 | branch_name: refs/heads/main
visit_date: 2023-08-25T19:30:28.803205 | revision_date: 2023-04-19T11:32:59 | committer_date: 2023-04-19T11:32:59 | gha_event_created_at: 2023-05-04T22:13:59 | gha_created_at: 2013-04-17T18:12:18
github_id: 9,504,214 | star_events_count: 8,481 | fork_events_count: 1,338 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import subprocess
import sys
import tempfile
from subprocess import CalledProcessError, check_output
from programs.subprocutils import which
class EmptyTempFile(object):
def __init__(self, prefix=None, dir=None, closed=True):
self.file, self.name = tempfile.mkstemp(prefix=prefix, dir=dir)
if closed:
os.close(self.file)
self.closed = closed
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
os.remove(self.name)
def close(self):
if not self.closed:
os.close(self.file)
self.closed = True
def fileno(self):
return self.file
def is_vcs(dirpath): # type: (str) -> bool
dot_git = os.path.join(dirpath, ".git")
if which("git") and sys.platform != "cygwin":
if os.path.exists(dot_git) and os.path.isdir(dot_git):
return True
try:
with open(os.devnull, "w") as devnull:
output = check_output(
["git", "rev-parse", "--is-inside-work-tree"],
cwd=dirpath,
stderr=devnull,
).decode("utf-8")
return output.strip() == "true"
except CalledProcessError:
pass
return False
def is_dirty(dirpath): # type: (str) -> bool
# Ignore any changes under these paths for the purposes of forcing a rebuild
# of Buck itself.
IGNORE_PATHS = ["test"]
IGNORE_PATHS_RE_GROUP = "|".join([re.escape(e) for e in IGNORE_PATHS])
IGNORE_PATHS_RE = re.compile("^.. (?:" + IGNORE_PATHS_RE_GROUP + ")")
if not is_vcs(dirpath):
return False
output = check_output(["git", "status", "--porcelain"], cwd=dirpath).decode("utf-8")
output = "\n".join(
[line for line in output.splitlines() if not IGNORE_PATHS_RE.search(line)]
)
return bool(output.strip())
def get_vcs_revision(dirpath): # type: (str) -> str
output = check_output(["git", "rev-parse", "HEAD", "--"], cwd=dirpath).decode(
"utf-8"
)
return output.splitlines()[0].strip()
def get_vcs_revision_timestamp(dirpath): # type: (str) -> str
return (
check_output(
["git", "log", "--pretty=format:%ct", "-1", "HEAD", "--"], cwd=dirpath
)
.decode("utf-8")
.strip()
)
def get_clean_buck_version(dirpath, allow_dirty=False): # type: (str, bool) -> str
if not is_vcs(dirpath):
return "N/A"
if allow_dirty or not is_dirty(dirpath):
return get_vcs_revision(dirpath)
def get_dirty_buck_version(dirpath): # type: (str) -> str
git_tree_in = (
check_output(
["git", "log", "-n1", "--pretty=format:%T", "HEAD", "--"], cwd=dirpath
)
.decode("utf-8")
.strip()
)
with EmptyTempFile(prefix="buck-git-index") as index_file:
new_environ = os.environ.copy()
new_environ["GIT_INDEX_FILE"] = index_file.name
subprocess.check_call(
["git", "read-tree", git_tree_in], cwd=dirpath, env=new_environ
)
subprocess.check_call(["git", "add", "-A"], cwd=dirpath, env=new_environ)
git_tree_out = (
check_output(["git", "write-tree"], cwd=dirpath, env=new_environ)
.decode("utf-8")
.strip()
)
with EmptyTempFile(prefix="buck-version-uid-input", closed=False) as uid_input:
subprocess.check_call(
["git", "ls-tree", "--full-tree", git_tree_out],
cwd=dirpath,
stdout=uid_input,
)
return (
check_output(["git", "hash-object", uid_input.name], cwd=dirpath)
.decode("utf-8")
.strip()
)

blob_id: 747dd4acf6c15384730764b8851d888dc6dcb128 | directory_id: 32c1a6b75b73b2fe68d9e33a368754a5860f2f01 | content_id: 11f0c9b3f3d975fba976fdba46cc1280ae803108
repo_name: MolSSI/QCEngine | path: /qcengine/programs/turbomole/define.py | filename: define.py | extension: py | length_bytes: 7,659
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
snapshot_id: 44d6bbf9a2c8c2c6ea5a0e1ee011d512357d4346 | revision_id: cbec059bb52162c3db7a747d680871749d101f41 | branch_name: refs/heads/master
visit_date: 2023-08-20T15:05:17.353145 | revision_date: 2023-08-18T21:06:46 | committer_date: 2023-08-18T21:06:46 | gha_event_created_at: 2023-09-09T02:41:01 | gha_created_at: 2018-03-01T22:25:35
github_id: 123,499,436 | star_events_count: 139 | fork_events_count: 88 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import itertools as it
from subprocess import PIPE, Popen, TimeoutExpired
from typing import Any, Dict, Optional
from qcengine.exceptions import InputError
from .methods import KEYWORDS, METHODS
def decode_define(str_: str) -> str:
"""Decode define output.
Depending on the employed basis set the encoding may differ.
"""
try:
str_ = str_.decode("utf-8")
except UnicodeDecodeError:
# Some of the basis files (cbas, I'm looking at you ...) are saved
# in ISO-8859-15 but most of them are in UTF-8. Decoding will
# crash in the former cases so here we try the correct decoding.
str_ = str_.decode("latin-1")
return str_
def execute_define(stdin: str, cwd: Optional["Path"] = None) -> str:
"""Call define with the input define in stdin."""
# TODO: replace this with a call to the default execute provided by QCEngine
# if possible. May be difficult though, as we have to pipe in stdin and
# be careful with the encoding.
# We can't use universal_newlines=True or text=True in Popen as some of the
# data that define returns isn't proper UTF-8, so the decoding will crash.
# We will decode it later on manually.
with Popen("define", stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd) as proc:
try:
# stdout, _ = proc.communicate(str.encode(stdin), timeout=30)
stdout, _ = proc.communicate(str.encode(stdin), timeout=15)
stdout = decode_define(stdout)
except TimeoutExpired:
raise InputError(f"define call timed out!")
# TODO: How to get the stdout when define times out? Calling
# communiate may also result in an indefinite hang so I disabled it
# for now...
# # Retrieve output of timed out define call
# stdout, stderr = proc.communicate()
# stdout = decode_define(stdout)
# stderr = decode_define(stderr)
# # Attach stdout and stderr of proc to error, so they can be
# # accessed later on.
# error.stdout = stdout
# error.stderr = stdout
# raise error
proc.terminate()
return stdout
def prepare_stdin(
method: str, basis: str, keywords: Dict[str, Any], charge: int, mult: int, geoopt: Optional[str] = ""
) -> str:
"""Prepares a str that can be sent to define to produce the desired
input for Turbomole."""
# Load data from keywords
unrestricted = keywords.get("unrestricted", False)
grid = keywords.get("grid", "m3")
scf_conv = keywords.get("scf_conv", 8)
scf_iters = keywords.get("scf_iters", 150)
methods_flat = list(it.chain(*[m for m in METHODS.values()]))
if method not in methods_flat:
raise InputError(f"Method {method} not in supported methods " f"{methods_flat}!")
# This variable may contain substitutions that will be made to
# the control file after it was created from a define call, e.g.
# setting XC functionals that aren't hardcoded in define etc.
subs = None
def occ_num_mo_data(charge: int, mult: int, unrestricted: Optional[bool] = False) -> str:
"""Handles the 'Occupation Number & Molecular Orbital' section
of define. Sets appropriate charge and multiplicity in the
system and decides between restricted and unrestricted calculation.
RHF and UHF are supported. ROHF could be implemented later on
by using the 's' command to list the available MOs and then
close the appropriate number of MOs to doubly occupied MOs
by 'c' by comparing the number of total MOs and the desired
multiplicity."""
# Do unrestricted calculation if explicitly requested or mandatory
unrestricted = unrestricted or (mult != 1)
unpaired = mult - 1
charge = int(charge)
occ_num_mo_data_stdin = f"""eht
y
{charge}
y
"""
if unrestricted:
# Somehow Turbomole/define asks us if we want to write
# natural orbitals... we don't want to.
occ_num_mo_data_stdin = f"""eht
y
{charge}
n
u {unpaired}
*
n
"""
return occ_num_mo_data_stdin
def set_method(method, grid):
if method == "hf":
method_stdin = ""
elif method in METHODS["ricc2"]:
# Setting geoopt in $ricc2 will make the ricc2 module to produce
# a gradient.
# Drop the 'ri'-prefix of the method string.
geoopt_stdin = f"geoopt {method[2:]} ({geoopt})" if geoopt else ""
method_stdin = f"""cc
freeze
*
cbas
*
ricc2
{method}
list models
{geoopt_stdin}
list geoopt
*
*
"""
elif method in METHODS["dft_hardcoded"]:
method_stdin = f"""dft
on
func
{method}
grid
{grid}
"""
# TODO: Handle xcfuncs that aren't defined in define, e.g.
# new functionals introduced in 7.4 from libxc. ...
# Maybe the best idea would be to not set the functional here
# but just turn on DFT and add it to the control file later on.
elif method in METHODS["dft_libxc"]:
raise InputError("libxc functionals are not supported right now.")
return method_stdin
# Resolution of identity
def set_ri(keywords):
# TODO: senex/RIJCOSX?
ri_kws = {ri_kw: keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]}
ri_stdins = {"rijk": "rijk\non\n\n", "ri": "ri\non\n\n", "marij": "marij\n\n"}
ri_stdin = "\n".join([ri_stdins[ri_kw] for ri_kw, use in ri_kws.items() if use])
return ri_stdin
# ri_stdin = ""
# # Use either RIJK or RIJ if requested.
# if ri_kws["rijk"]:
# ri_stdin = """rijk
# on
# """
# elif ri_kws["rij"]:
# ri_stdin = """rij
# on
# """
# # MARIJ can be used additionally.
# if ri_kws["marij"]:
# ri_stdin += """marij
# """
# return ri_stdin
# Dispersion correction
def set_dsp(keywords):
# TODO: set_ri and set_dsp are basically the same function. Maybe
# we could abstract this somehow?
dsp_kws = {dsp_kw: keywords.get(dsp_kw, False) for dsp_kw in KEYWORDS["dsp"]}
dsp_stdins = {"d3": "dsp\non\n\n", "d3bj": "dsp\nbj\n\n"}
dsp_stdin = "\n".join([dsp_stdins[dsp_kw] for dsp_kw, use in dsp_kws.items() if use])
return dsp_stdin
kwargs = {
"init_guess": occ_num_mo_data(charge, mult, unrestricted),
"set_method": set_method(method, grid),
"ri": set_ri(keywords),
"dsp": set_dsp(keywords),
"title": "QCEngine Turbomole",
"scf_conv": scf_conv,
"scf_iters": scf_iters,
"basis": basis,
}
stdin = """
{title}
a coord
*
no
b
all {basis}
*
{init_guess}
{set_method}
{ri}
{dsp}
scf
conv
{scf_conv}
iter
{scf_iters}
*
""".format(
**kwargs
)
return stdin, subs

blob_id: ee7a35e3364cc45c4349e21fa1f9ae736540424a | directory_id: 6a017c87a1c3e016de5e1704d23d1d2034fab41c | content_id: 93436e2e4678c22a40a778631c8608d82a4fbd36
repo_name: CoffeaTeam/coffea | path: /docs/source/conf.py | filename: conf.py | extension: py | length_bytes: 6,466
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Jupyter Notebook
snapshot_id: 53997aefbccf583cc901718b5c639a4b4535dbcd | revision_id: a33fc173f3bf2be307bac6517e624fc6ce0c4c3e | branch_name: refs/heads/master
visit_date: 2023-08-10T12:36:49.238010 | revision_date: 2023-08-02T02:57:18 | committer_date: 2023-08-02T02:57:18 | gha_event_created_at: 2023-09-12T20:32:08 | gha_created_at: 2018-11-29T13:47:57
github_id: 159,673,139 | star_events_count: 116 | fork_events_count: 100 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
import importlib
import inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import subprocess
import sys
from functools import reduce
import coffea
print("sys.path:", sys.path)
print("coffea version:", coffea.__version__)
# -- Project information -----------------------------------------------------
project = "coffea"
copyright = "2019, Fermi National Accelerator Laboratory"
author = "L. Gray, N. Smith, et al. (The Coffea Team)"
version = coffea.__version__.rsplit(".", 1)[0]
release = coffea.__version__
githash = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode("ascii")
language = None
# -- General configuration ---------------------------------------------------
source_suffix = ".rst"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"nbsphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"sphinx.ext.napoleon",
"sphinx_automodapi.automodapi",
"sphinx_automodapi.smart_resolver",
"sphinx_copybutton",
# 'IPython.sphinxext.ipython_console_highlighting',
]
# sphinx-copybutton configuration
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
copybutton_here_doc_delimiter = "EOF"
numpydoc_show_class_members = False
nbsphinx_execute = "never"
autosummary_generate = True
def linkcode_resolve(domain, info):
if domain != "py":
return None
if not info["module"]:
return None
mod = importlib.import_module(info["module"])
modpath = [p for p in sys.path if mod.__file__.startswith(p)]
if len(modpath) < 1:
raise RuntimeError("Cannot deduce module path")
modpath = modpath[0]
obj = reduce(getattr, [mod] + info["fullname"].split("."))
try:
path = inspect.getsourcefile(obj)
relpath = path[len(modpath) + 1 :]
_, lineno = inspect.getsourcelines(obj)
except TypeError:
# skip property or other type that inspect doesn't like
return None
return "http://github.com/CoffeaTeam/coffea/blob/{}/{}#L{}".format(
githash, relpath, lineno
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("http://docs.scipy.org/doc/numpy", None),
}
# The master toctree document.
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
default_role = "any"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
todo_include_todos = False
htmlhelp_basename = "coffeadoc"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "coffea.tex", "Coffea Documentation", "The Coffea Team", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "coffea", "Coffea Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Coffea",
"Coffea Documentation",
author,
"Coffea",
"Efficient columnar HEP analysis in python.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False

blob_id: dcac74144ea941530698c5291d4da564fbed520b | directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda | content_id: 481993e4be015143fd0be5207bd5ae7dbc9300ff
repo_name: chromium/chromium | path: /tools/android/asan/third_party/with_asan.py | filename: with_asan.py | extension: py | length_bytes: 3,873
detected_licenses: ["Apache-2.0", "BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: null
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | branch_name: refs/heads/main
visit_date: 2023-08-24T00:35:12.585945 | revision_date: 2023-08-23T22:01:11 | committer_date: 2023-08-23T22:01:11 | gha_event_created_at: 2023-09-10T23:44:27 | gha_created_at: 2018-02-05T20:55:32
github_id: 120,360,765 | star_events_count: 17,408 | fork_events_count: 7,102 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env vpython3
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import logging
import os
import subprocess
import sys
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(_SRC_ROOT, 'third_party', 'catapult', 'devil'))
from devil import base_error
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.android.sdk import version_codes
from devil.utils import logging_common
sys.path.append(os.path.join(_SRC_ROOT, 'build', 'android'))
import devil_chromium
_SCRIPT_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'asan_device_setup.sh'))
@contextlib.contextmanager
def _LogDevicesOnFailure(msg):
try:
yield
except base_error.BaseError:
logging.exception(msg)
logging.error('Devices visible to adb:')
for entry in adb_wrapper.AdbWrapper.Devices(desired_state=None,
long_list=True):
logging.error(' %s: %s',
entry[0].GetDeviceSerial(),
' '.join(entry[1:]))
raise
@contextlib.contextmanager
def Asan(args):
env = os.environ.copy()
env['ADB'] = args.adb
try:
with _LogDevicesOnFailure('Failed to set up the device.'):
device = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.device)[0]
disable_verity = device.build_version_sdk >= version_codes.MARSHMALLOW
if disable_verity:
device.EnableRoot()
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.DisableVerity()
if verity_output:
logging.info('disable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
# Call EnableRoot prior to asan_device_setup.sh to ensure it doesn't
# get tripped up by the root timeout.
device.EnableRoot()
setup_cmd = [_SCRIPT_PATH, '--lib', args.lib]
if args.device:
setup_cmd += ['--device', args.device]
subprocess.check_call(setup_cmd, env=env)
yield
finally:
with _LogDevicesOnFailure('Failed to tear down the device.'):
device.EnableRoot()
teardown_cmd = [_SCRIPT_PATH, '--revert']
if args.device:
teardown_cmd += ['--device', args.device]
subprocess.check_call(teardown_cmd, env=env)
if disable_verity:
# TODO(crbug.com/790202): Stop logging output after diagnosing
# issues on android-asan.
verity_output = device.adb.EnableVerity()
if verity_output:
logging.info('enable-verity output:')
for line in verity_output.splitlines():
logging.info(' %s', line)
device.Reboot()
def main(raw_args):
parser = argparse.ArgumentParser()
logging_common.AddLoggingArguments(parser)
parser.add_argument(
'--adb', type=os.path.realpath, required=True,
help='Path to adb binary.')
parser.add_argument(
'--device',
help='Device serial.')
parser.add_argument(
'--lib', type=os.path.realpath, required=True,
help='Path to asan library.')
parser.add_argument(
'command', nargs='*',
help='Command to run with ASAN installed.')
args = parser.parse_args()
# TODO(crbug.com/790202): Remove this after diagnosing issues
# with android-asan.
if not args.quiet:
args.verbose += 1
logging_common.InitializeLogging(args)
devil_chromium.Initialize(adb_path=args.adb)
with Asan(args):
if args.command:
return subprocess.call(args.command)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

blob_id: 7ffcaae187177345260515b1821540d90802cf21 | directory_id: a902290fb3b911676358ae4d93f83061a6c2bd0f | content_id: 28b51835c80c1504a2b1db5bbc967b5aa6c3327a
repo_name: inventree/InvenTree | path: /InvenTree/plugin/samples/integration/test_api_caller.py | filename: test_api_caller.py | extension: py | length_bytes: 598
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
snapshot_id: a15e54182c9bfafdf5348cc9a66da1004e23e760 | revision_id: e88a8e99a5f0b201c67a95cba097c729f090d5e2 | branch_name: refs/heads/master
visit_date: 2023-09-03T19:32:35.438375 | revision_date: 2023-08-30T00:25:40 | committer_date: 2023-08-30T00:25:40 | gha_event_created_at: 2023-09-14T14:21:01 | gha_created_at: 2017-03-23T01:44:10
github_id: 85,894,461 | star_events_count: 3,077 | fork_events_count: 549 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""Unit tests for action caller sample."""
from django.test import TestCase
from plugin import registry
class SampleApiCallerPluginTests(TestCase):
"""Tests for SampleApiCallerPluginTests."""
def test_return(self):
"""Check if the external api call works."""
# The plugin should be defined
self.assertIn('sample-api-caller', registry.plugins)
plg = registry.plugins['sample-api-caller']
self.assertTrue(plg)
# do an api call
result = plg.get_external_url()
self.assertTrue(result)
self.assertIn('data', result,)

blob_id: 32fd851ab39d9ff98dd9f8f3547d52e5ffb7306b | directory_id: fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be | content_id: 3da1f7c1106335718c4eec6a0a03465de99c253c
repo_name: alldatacenter/alldata | path: /dts/airbyte/airbyte-integrations/connectors/source-prestashop/source_prestashop/source.py | filename: source.py | extension: py | length_bytes: 1,980
detected_licenses: ["Apache-2.0", "BSD-3-Clause", "MIT", "Elastic-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: null
snapshot_id: 7bc7713c9f1d56ad6b8e59ea03206d1073b7e047 | revision_id: 8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6 | branch_name: refs/heads/master
visit_date: 2023-08-05T07:32:25.442740 | revision_date: 2023-08-03T13:17:24 | committer_date: 2023-08-03T13:17:24 | gha_event_created_at: 2023-09-06T17:35:32 | gha_created_at: 2019-10-07T07:36:18
github_id: 213,321,771 | star_events_count: 774 | fork_events_count: 250 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
"""
This file provides the necessary constructs to interpret a provided declarative YAML configuration file into
source connector.
WARNING: Do not modify this file.
"""
import logging
from typing import Any, Iterator, List, Mapping, MutableMapping, Union
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteStateMessage,
ConfiguredAirbyteCatalog,
Status,
)
from airbyte_cdk.sources.declarative.yaml_declarative_source import YamlDeclarativeSource
class ConfigException(Exception):
pass
# Declarative Source
class SourcePrestashop(YamlDeclarativeSource):
def __init__(self):
super().__init__(**{"path_to_yaml": "manifest.yaml"})
def _validate_and_transform(self, config: Mapping[str, Any]):
if not config.get("_allow_http"):
if not config["url"].lower().startswith("https://"):
raise ConfigException(f"Invalid url: {config['url']}, only https scheme is allowed")
return config
def discover(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteCatalog:
config = self._validate_and_transform(config)
return super().discover(logger, config)
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
try:
config = self._validate_and_transform(config)
except ConfigException as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=str(e))
return super().check(logger, config)
def read(
self,
logger: logging.Logger,
config: Mapping[str, Any],
catalog: ConfiguredAirbyteCatalog,
state: Union[List[AirbyteStateMessage], MutableMapping[str, Any]] = None,
) -> Iterator[AirbyteMessage]:
config = self._validate_and_transform(config)
return super().read(logger, config, catalog, state)

blob_id: 59285bc4abf625f332850bc740484a76be46ba84 | directory_id: c531778b6b568e5924fcf438dce274067b6e1d31 | content_id: 968aead9eb53bc022a2d5111a3efdaabe65f1269
repo_name: CastagnaIT/plugin.video.netflix | path: /packages/httpcore/_backends/sync.py | filename: sync.py | extension: py | length_bytes: 5,621
detected_licenses: ["LicenseRef-scancode-warranty-disclaimer", "MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python
snapshot_id: a5180fbbaea244a490f750a2dd417b4e7303321a | revision_id: ece10d24449faaccd7d65a4093c6b5679ee0b383 | branch_name: refs/heads/master
visit_date: 2023-07-01T23:32:20.442923 | revision_date: 2023-06-27T06:42:18 | committer_date: 2023-06-27T06:42:18 | gha_event_created_at: 2023-09-13T13:34:06 | gha_created_at: 2019-01-06T14:27:56
github_id: 164,314,803 | star_events_count: 2,019 | fork_events_count: 456 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
|
import socket
import threading
import time
from ssl import SSLContext
from types import TracebackType
from typing import Optional, Type
from .._exceptions import (
ConnectError,
ConnectTimeout,
ReadError,
ReadTimeout,
WriteError,
WriteTimeout,
map_exceptions,
)
from .tcp_keep_alive import enable_tcp_keep_alive
from .._types import TimeoutDict
from .._utils import is_socket_readable
class SyncSocketStream:
"""
A socket stream with read/write operations. Abstracts away any asyncio-specific
interfaces into a more generic base class, that we can use with alternate
backends, or for stand-alone test cases.
"""
def __init__(self, sock: socket.socket) -> None:
self.sock = sock
self.read_lock = threading.Lock()
self.write_lock = threading.Lock()
def get_http_version(self) -> str:
selected_alpn_protocol = getattr(self.sock, "selected_alpn_protocol", None)
if selected_alpn_protocol is not None:
ident = selected_alpn_protocol()
return "HTTP/2" if ident == "h2" else "HTTP/1.1"
return "HTTP/1.1"
def start_tls(
self, hostname: bytes, ssl_context: SSLContext, timeout: TimeoutDict
) -> "SyncSocketStream":
connect_timeout = timeout.get("connect")
exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
with map_exceptions(exc_map):
self.sock.settimeout(connect_timeout)
wrapped = ssl_context.wrap_socket(
self.sock, server_hostname=hostname.decode("ascii")
)
return SyncSocketStream(wrapped)
def read(self, n: int, timeout: TimeoutDict) -> bytes:
read_timeout = timeout.get("read")
exc_map = {socket.timeout: ReadTimeout, socket.error: ReadError}
with self.read_lock:
with map_exceptions(exc_map):
self.sock.settimeout(read_timeout)
return self.sock.recv(n)
def write(self, data: bytes, timeout: TimeoutDict) -> None:
write_timeout = timeout.get("write")
exc_map = {socket.timeout: WriteTimeout, socket.error: WriteError}
with self.write_lock:
with map_exceptions(exc_map):
while data:
self.sock.settimeout(write_timeout)
n = self.sock.send(data)
data = data[n:]
def close(self) -> None:
with self.write_lock:
try:
self.sock.close()
except socket.error:
pass
def is_readable(self) -> bool:
return is_socket_readable(self.sock)
class SyncLock:
def __init__(self) -> None:
self._lock = threading.Lock()
def __enter__(self) -> None:
self.acquire()
def __exit__(
self,
exc_type: Type[BaseException] = None,
exc_value: BaseException = None,
traceback: TracebackType = None,
) -> None:
self.release()
def release(self) -> None:
self._lock.release()
def acquire(self) -> None:
self._lock.acquire()
class SyncSemaphore:
def __init__(self, max_value: int, exc_class: type) -> None:
self.max_value = max_value
self.exc_class = exc_class
self._semaphore = threading.Semaphore(max_value)
def acquire(self, timeout: float = None) -> None:
if not self._semaphore.acquire(timeout=timeout): # type: ignore
raise self.exc_class()
def release(self) -> None:
self._semaphore.release()
class SyncBackend:
def open_tcp_stream(
self,
hostname: bytes,
port: int,
ssl_context: Optional[SSLContext],
timeout: TimeoutDict,
*,
local_address: Optional[str],
) -> SyncSocketStream:
address = (hostname.decode("ascii"), port)
connect_timeout = timeout.get("connect")
source_address = None if local_address is None else (local_address, 0)
exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
with map_exceptions(exc_map):
sock = socket.create_connection(
address, connect_timeout, source_address=source_address # type: ignore
)
# Enable TCP Keep-Alive
enable_tcp_keep_alive(sock)
if ssl_context is not None:
sock = ssl_context.wrap_socket(
sock, server_hostname=hostname.decode("ascii")
)
return SyncSocketStream(sock=sock)
def open_uds_stream(
self,
path: str,
hostname: bytes,
ssl_context: Optional[SSLContext],
timeout: TimeoutDict,
) -> SyncSocketStream:
connect_timeout = timeout.get("connect")
exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError}
with map_exceptions(exc_map):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(connect_timeout)
sock.connect(path)
if ssl_context is not None:
sock = ssl_context.wrap_socket(
sock, server_hostname=hostname.decode("ascii")
)
return SyncSocketStream(sock=sock)
def create_lock(self) -> SyncLock:
return SyncLock()
def create_semaphore(self, max_value: int, exc_class: type) -> SyncSemaphore:
return SyncSemaphore(max_value, exc_class=exc_class)
def time(self) -> float:
return time.monotonic()
def sleep(self, seconds: float) -> None:
time.sleep(seconds)
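A rough usage sketch of the backend above (not part of the dataset row), assuming the vendored `httpcore` package is importable so the module's relative imports resolve; the host `example.org` and the plain-HTTP request are placeholders.

```python
backend = SyncBackend()
timeout = {"connect": 10.0, "read": 10.0, "write": 10.0}
stream = backend.open_tcp_stream(
    hostname=b"example.org", port=80, ssl_context=None,
    timeout=timeout, local_address=None,
)
try:
    # Write a minimal HTTP/1.1 request and read the start of the raw response.
    stream.write(b"GET / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n", timeout)
    print(stream.read(4096, timeout)[:80])
finally:
    stream.close()
```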

blob_id: f969b3ce84a73785da5f9ec8992f0e8058387121 | directory_id: 8475af3849ca19094d3aa95c9108d7f8ef3e2536 | content_id: 89520e48f6aa13222804db1ac863b05ef183188c
repo_name: scrapinghub/shub | path: /shub/image/push.py | filename: push.py | extension: py | length_bytes: 7,653
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python
snapshot_id: 9b1b9df4caad48861707aca3685fe8b2544e433b | revision_id: 519809d7eb16212f2644f6e2bf1672b1e86a78c2 | branch_name: refs/heads/master
visit_date: 2023-09-03T01:43:35.014440 | revision_date: 2023-04-14T16:02:25 | committer_date: 2023-04-14T16:02:25 | gha_event_created_at: 2023-04-14T14:43:21 | gha_created_at: 2014-06-16T15:38:11
github_id: 20,890,335 | star_events_count: 124 | fork_events_count: 84 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from collections import OrderedDict
import click
from shub import exceptions as shub_exceptions
from shub.config import load_shub_config, list_targets_callback
from shub.image import utils
from shub.image.test import test_cmd
from shub.image.utils import get_image_registry
SHORT_HELP = 'Push an image to a specified docker registry'
HELP = """
A command to push your image to specified docker registry.
The command is a simple wrapper for `docker push` command and uses docker
daemon on your system to build an image. The only differences are that it
can generate correct image version and provide easy registry login logic.
The optional params are mostly related with registry authorization.
By default, the tool tries to call the registry in insecure manner,
otherwise you have to enter your credentials (at least username/password).
"""
LOGIN_ERROR_MSG = 'Please authorize with docker login'
@click.command(help=HELP, short_help=SHORT_HELP)
@click.argument("target", required=False, default="default")
@click.option("-l", "--list-targets", is_flag=True, is_eager=True,
expose_value=False, callback=list_targets_callback,
help="List available project names defined in your config")
@click.option("-d", "--debug", help="debug mode", is_flag=True,
callback=utils.deprecate_debug_parameter)
@click.option("-v", "--verbose", is_flag=True,
help="stream push logs to console")
@click.option("-V", "--version", help="release version")
@click.option("--username", help="docker registry name")
@click.option("--password", help="docker registry password")
@click.option("--email", help="docker registry email")
@click.option("--apikey", help="SH apikey to use built-in registry")
@click.option("--insecure", is_flag=True, help="use insecure registry")
@click.option("-S", "--skip-tests", help="skip testing image", is_flag=True)
@click.option("-R", "--reauth", is_flag=True,
help="re-authenticate to registry")
def cli(target, debug, verbose, version, username, password, email, apikey,
insecure, skip_tests, reauth):
push_cmd(target, version, username, password, email, apikey, insecure,
skip_tests, reauth)
def push_cmd(target, version, username, password, email, apikey, insecure,
skip_tests, reauth):
# Test the image content after building it
if not skip_tests:
test_cmd(target, version)
client = utils.get_docker_client()
config = load_shub_config()
image = config.get_image(target)
username, password = utils.get_credentials(
username=username, password=password, insecure=insecure,
apikey=apikey, target_apikey=config.get_apikey(target))
if username:
_execute_push_login(
client, image, username, password, email, reauth)
image_name = utils.format_image_name(image, version)
click.echo("Pushing {} to the registry.".format(image_name))
events = client.push(image_name, stream=True, decode=True)
if utils.is_verbose():
push_progress_cls = _LoggedPushProgress
else:
push_progress_cls = _PushProgress
push_progress = push_progress_cls(events)
push_progress.show()
click.echo("The image {} pushed successfully.".format(image_name))
def _execute_push_login(client, image, username, password, email, reauth):
"""Login if there're provided credentials for the registry"""
registry = get_image_registry(image)
resp = client.login(username=username, password=password,
email=email, registry=registry, reauth=reauth)
if not (isinstance(resp, dict) and 'username' in resp or
('Status' in resp and resp['Status'] == 'Login Succeeded')):
raise shub_exceptions.RemoteErrorException(
"Docker registry login error.")
click.echo("Login to {} succeeded.".format(registry))
class _LoggedPushProgress(utils.BaseProgress):
"""Visualize push progress in verbose mode.
Output all the events received from the docker daemon.
"""
def handle_event(self, event):
if 'error' in event and LOGIN_ERROR_MSG in event['error']:
click.echo(
"Something went wrong when trying to authenticate to Docker "
"registry when pushing the image. Please ensure your "
"credentials are correct and try again with --reauth flag.")
raise shub_exceptions.RemoteErrorException(
"Docker registry authentication error")
super(_LoggedPushProgress, self).handle_event(event)
if 'status' in event:
self.handle_status_event(event)
def handle_status_event(self, event):
msg = "Logs:{} {}".format(event['status'], event.get('progress'))
utils.debug_log(msg)
class _PushProgress(_LoggedPushProgress):
"""Visualize push progress in non-verbose mode.
Show total progress bar and separate bar for each pushed layer.
"""
def __init__(self, push_events):
super(_PushProgress, self).__init__(push_events)
# Total bar represents total progress in terms of the number of layers.
self.total_bar = self._create_total_bar()
self.layers = set()
# XXX: has to be OrderedDict to make tqdm.write/click.echo work as expected.
# Otherwise it writes at random position, usually in the middle of the progress bars.
self.layers_bars = OrderedDict()
def handle_status_event(self, event):
layer_id = event.get('id')
status = event.get('status')
progress = event.get('progressDetail')
# `preparing` events are correlated with amount of layers to push
if status in ('Preparing', 'Waiting'):
self._add_layer(layer_id)
# the events are final and used to update total bar once per layer
elif status in ('Layer already exists', 'Pushed'):
self._add_layer(layer_id)
self.total_bar.update()
# `pushing` events represents actual push process per layer
elif event.get('status') == 'Pushing' and progress:
progress_current = progress.get('current', 0)
progress_total = max(progress.get('total', 0), progress_current)
if layer_id not in self.layers_bars:
if not progress_total:
return
# create a progress bar per pushed layer
self.layers_bars[layer_id] = self._create_bar_per_layer(
layer_id, progress_total, progress_current)
bar = self.layers_bars[layer_id]
bar.total = max(bar.total, progress_total)
bar.update(max(progress_current - bar.n, 0))
def _add_layer(self, layer_id):
self.layers.add(layer_id)
self.total_bar.total = max(self.total_bar.total, len(self.layers))
self.total_bar.refresh()
def show(self):
super(_PushProgress, self).show()
self.total_bar.close()
for bar in self.layers_bars.values():
bar.close()
def _create_total_bar(self):
return utils.create_progress_bar(
total=1,
desc='Layers',
# don't need rate here, let's simplify the bar
bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'
)
def _create_bar_per_layer(self, layer_id, total, initial):
return utils.create_progress_bar(
desc=layer_id,
total=total,
initial=initial,
unit='B',
unit_scale=True,
# don't need estimates here, keep only rate
bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{rate_fmt}]',
)

blob_id: d4de3577bd6ed7f02710e460837c301551af81dc | directory_id: 620323fc090cebaf7aca456ff3f7fbbe1e210394 | content_id: 2e18d3d73fff9adbe169e9a5cd109f37527fb2bd
repo_name: gil9red/SimplePyScripts | path: /unicode_escape_decode.py | filename: unicode_escape_decode.py | extension: py | length_bytes: 425
detected_licenses: ["CC-BY-4.0"] | license_type: permissive | gha_license_id: null | gha_language: Python
snapshot_id: bd2733372728bf9b9f00570e90316fa12116516b | revision_id: 773c2c9724edd8827a1dbd91694d780e03fcb05a | branch_name: refs/heads/master
visit_date: 2023-08-31T04:26:09.120173 | revision_date: 2023-08-30T17:22:59 | committer_date: 2023-08-30T17:22:59 | gha_event_created_at: 2023-09-08T17:51:33 | gha_created_at: 2014-08-05T16:19:52
github_id: 22,650,442 | star_events_count: 157 | fork_events_count: 46 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import codecs
text = "\u041e\u0442\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u0435\u0442 \u043e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u044b\u0439 \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440 (username)"
print(codecs.unicode_escape_decode(text)[0])
# Отсутствует обязательный параметр (username)
|
d2fe7c1917a34afb5721fc09f04ae8e4755cc3ea
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/ddtrace/profiling/collector/_task.pyi
|
f26c5d69b7d7bab0998e7713c85557dae8ffe9e7
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 241
|
pyi
|
_task.pyi
|
import types
import typing
def get_task(
thread_id: int,
) -> typing.Tuple[typing.Optional[int], typing.Optional[str], typing.Optional[types.FrameType]]: ...
def list_tasks() -> typing.List[typing.Tuple[int, str, types.FrameType]]: ...
|
188ca556f7462648b208c4c4e7c4023eabee0e99
|
353730afc44b31cf4efded67a4e2835d19c75922
|
/tensorly/decomposition/_nn_cp.py
|
ab9c0a88906fd6b1e58412af2d7d5eaa625718fb
|
[
"BSD-3-Clause"
] |
permissive
|
tensorly/tensorly
|
605529bf5206f1977c6067f96f47bec439355246
|
de05e178850eb2abe43ec1a40f80624ca606807d
|
refs/heads/main
| 2023-08-31T14:01:45.527525
| 2023-08-20T18:28:25
| 2023-08-20T18:28:25
| 71,603,727
| 1,533
| 334
|
NOASSERTION
| 2023-09-08T18:10:37
| 2016-10-21T23:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 24,486
|
py
|
_nn_cp.py
|
import warnings
import tensorly as tl
from ._base_decomposition import DecompositionMixin
from ._cp import initialize_cp
from ..tenalg.proximal import hals_nnls
from ..cp_tensor import (
CPTensor,
unfolding_dot_khatri_rao,
cp_norm,
cp_normalize,
validate_cp_rank,
)
from ..tenalg.svd import svd_interface
# Authors: Jean Kossaifi <jean.kossaifi+tensors@gmail.com>
# Chris Swierczewski <csw@amazon.com>
# Sam Schneider <samjohnschneider@gmail.com>
# Aaron Meurer <asmeurer@gmail.com>
# Aaron Meyer <tensorly@ameyer.me>
# Jeremy Cohen <jeremy.cohen@irisa.fr>
# Axel Marmoret <axel.marmoret@inria.fr>
# Caglayan TUna <caglayantun@gmail.com>
# License: BSD 3 clause
def non_negative_parafac(
tensor,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=10e-7,
random_state=None,
verbose=0,
normalize_factors=False,
return_errors=False,
mask=None,
cvg_criterion="abs_rec_error",
fixed_modes=None,
):
"""
Non-negative CP decomposition
Uses multiplicative updates, see [2]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
random_state : {None, int, np.random.RandomState}
verbose : int, optional
level of verbosity
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
fixed_modes : list, default is None
A list of modes for which the initial value is not modified.
The last mode cannot be fixed due to error computation.
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
References
----------
.. [2] Amnon Shashua and Tamir Hazan,
"Non-negative tensor factorization with applications to statistics and computer vision",
In Proceedings of the International Conference on Machine Learning (ICML),
pp 792-799, ICML, 2005
"""
epsilon = tl.eps(tensor.dtype)
rank = validate_cp_rank(tl.shape(tensor), rank=rank)
weights, factors = initialize_cp(
tensor,
rank,
init=init,
svd=svd,
non_negative=True,
mask=mask,
random_state=random_state,
normalize_factors=normalize_factors,
)
rec_errors = []
norm_tensor = tl.norm(tensor, 2)
if fixed_modes is None:
fixed_modes = []
if tl.ndim(tensor) - 1 in fixed_modes:
warnings.warn(
"You asked for fixing the last mode, which is not supported while tol is fixed.\n The last mode will not be fixed. Consider using tl.moveaxis()"
)
fixed_modes.remove(tl.ndim(tensor) - 1)
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
for iteration in range(n_iter_max):
if verbose > 1:
print("Starting iteration", iteration + 1)
for mode in modes_list:
if verbose > 1:
print("Mode", mode, "of", tl.ndim(tensor))
accum = 1
# khatri_rao(factors).tl.dot(khatri_rao(factors))
# simplifies to multiplications
sub_indices = [i for i in range(len(factors)) if i != mode]
for i, e in enumerate(sub_indices):
if i:
accum *= tl.dot(tl.transpose(factors[e]), factors[e])
else:
accum = tl.dot(tl.transpose(factors[e]), factors[e])
accum = tl.reshape(weights, (-1, 1)) * accum * tl.reshape(weights, (1, -1))
if mask is not None:
tensor = tensor * mask + tl.cp_to_tensor(
(weights, factors), mask=1 - mask
)
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
numerator = tl.clip(mttkrp, a_min=epsilon, a_max=None)
denominator = tl.dot(factors[mode], accum)
denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
factor = factors[mode] * numerator / denominator
factors[mode] = factor
if normalize_factors and mode != modes_list[-1]:
weights, factors = cp_normalize((weights, factors))
if tol:
# ||tensor - rec||^2 = ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
factors_norm = cp_norm((weights, factors))
# mttkrp and factor for the last mode. This is equivalent to the
# inner product <tensor, factorization>
iprod = tl.sum(tl.sum(mttkrp * factor, axis=0))
rec_error = (
tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2 * iprod))
/ norm_tensor
)
rec_errors.append(rec_error)
if iteration >= 1:
rec_error_decrease = rec_errors[-2] - rec_errors[-1]
if verbose:
print(
f"iteration {iteration}, reconstraction error: {rec_error}, decrease = {rec_error_decrease}"
)
if cvg_criterion == "abs_rec_error":
stop_flag = abs(rec_error_decrease) < tol
elif cvg_criterion == "rec_error":
stop_flag = rec_error_decrease < tol
else:
raise TypeError("Unknown convergence criterion")
if stop_flag:
if verbose:
print(f"PARAFAC converged after {iteration} iterations")
break
else:
if verbose:
print(f"reconstruction error={rec_errors[-1]}")
if normalize_factors:
weights, factors = cp_normalize((weights, factors))
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
def non_negative_parafac_hals(
tensor,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=10e-8,
random_state=None,
sparsity_coefficients=None,
fixed_modes=None,
nn_modes="all",
exact=False,
normalize_factors=False,
verbose=False,
return_errors=False,
cvg_criterion="abs_rec_error",
):
"""
Non-negative CP decomposition via HALS
Uses Hierarchical ALS (Alternating Least Squares)
which updates each factor column-wise (one column at a time while keeping all other columns fixed), see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
random_state : {None, int, np.random.RandomState}
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: None,
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: None
nn_modes: None, 'all' or array of integers (between 0 and the number of modes)
Used to specify which modes to impose non-negativity constraints on.
If 'all', then non-negativity is imposed on all modes.
Default: 'all'
    exact: If True, the algorithm gives a result with high precision, but at a higher computational cost.
        If False, the algorithm gives an approximate solution.
Default: False
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
cvg_criterion : {'abs_rec_error', 'rec_error'}, optional
Stopping criterion for ALS, works if `tol` is not None.
If 'rec_error', ALS stops at current iteration if ``(previous rec_error - current rec_error) < tol``.
If 'abs_rec_error', ALS terminates when `|previous rec_error - current rec_error| < tol`.
sparsity : float or int
random_state : {None, int, np.random.RandomState}
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
References
----------
.. [1] N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_cp(
tensor,
rank,
init=init,
svd=svd,
non_negative=True,
random_state=random_state,
normalize_factors=normalize_factors,
)
norm_tensor = tl.norm(tensor, 2)
n_modes = tl.ndim(tensor)
if sparsity_coefficients is None or isinstance(sparsity_coefficients, float):
sparsity_coefficients = [sparsity_coefficients] * n_modes
if fixed_modes is None:
fixed_modes = []
if nn_modes == "all":
nn_modes = set(range(n_modes))
elif nn_modes is None:
nn_modes = set()
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
for mode in range(n_modes):
if sparsity_coefficients[mode] is not None:
warnings.warn("Sparsity coefficient is ignored in unconstrained modes.")
# Generating the mode update sequence
modes = [mode for mode in range(n_modes) if mode not in fixed_modes]
    # initialisation - declare local variables
rec_errors = []
    # Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse * tl.dot(
tl.transpose(factor), factor
)
pseudo_inverse = (
tl.reshape(weights, (-1, 1))
* pseudo_inverse
* tl.reshape(weights, (1, -1))
)
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
if mode in nn_modes:
# Call the hals resolution with nnls, optimizing the current mode
nn_factor, _, _, _ = hals_nnls(
tl.transpose(mttkrp),
pseudo_inverse,
tl.transpose(factors[mode]),
n_iter_max=100,
sparsity_coefficient=sparsity_coefficients[mode],
exact=exact,
)
factors[mode] = tl.transpose(nn_factor)
else:
factor = tl.solve(tl.transpose(pseudo_inverse), tl.transpose(mttkrp))
factors[mode] = tl.transpose(factor)
if normalize_factors and mode != modes[-1]:
weights, factors = cp_normalize((weights, factors))
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp * factors[-1], axis=0))
rec_error = (
tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2 * iprod))
/ norm_tensor
)
rec_errors.append(rec_error)
if iteration >= 1:
rec_error_decrease = rec_errors[-2] - rec_errors[-1]
if verbose:
print(
f"iteration {iteration}, reconstruction error: {rec_error}, decrease = {rec_error_decrease}"
)
if cvg_criterion == "abs_rec_error":
stop_flag = abs(rec_error_decrease) < tol
elif cvg_criterion == "rec_error":
stop_flag = rec_error_decrease < tol
else:
raise TypeError("Unknown convergence criterion")
if stop_flag:
if verbose:
print(f"PARAFAC converged after {iteration} iterations")
break
else:
if verbose:
print(f"reconstruction error={rec_errors[-1]}")
if normalize_factors:
weights, factors = cp_normalize((weights, factors))
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
class CP_NN(DecompositionMixin):
"""
Non-Negative Candecomp-Parafac decomposition via Alternating-Least Square
Computes a rank-`rank` decomposition of `tensor` [1]_ such that,
``tensor = [|weights; factors[0], ..., factors[-1] |]``.
Parameters
----------
tensor : ndarray
rank : int
Number of components.
n_iter_max : int
Maximum number of iteration
init : {'svd', 'random'}, optional
Type of factor matrix initialization. See `initialize_factors`.
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
tol : float, optional
(Default: 1e-6) Relative reconstruction error tolerance. The
algorithm is considered to have found the global minimum when the
reconstruction error is less than `tol`.
random_state : {None, int, np.random.RandomState}
verbose : int, optional
Level of verbosity
return_errors : bool, optional
Activate return of iteration errors
mask : ndarray
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True). Allows for missing values [2]_
cvg_criterion : {'abs_rec_error', 'rec_error'}, optional
Stopping criterion for ALS, works if `tol` is not None.
If 'rec_error', ALS stops at current iteration if (previous rec_error - current rec_error) < tol.
If 'abs_rec_error', ALS terminates when |previous rec_error - current rec_error| < tol.
sparsity : float or int
If `sparsity` is not None, we approximate tensor as a sum of low_rank_component and sparse_component, where low_rank_component = cp_to_tensor((weights, factors)). `sparsity` denotes desired fraction or number of non-zero elements in the sparse_component of the `tensor`.
fixed_modes : list, default is None
A list of modes for which the initial value is not modified.
The last mode cannot be fixed due to error computation.
svd_mask_repeats: int
If using a tensor with masked values, this initializes using SVD multiple times to
remove the effect of these missing values on the initialization.
Returns
-------
CPTensor : (weight, factors)
* weights : 1D array of shape (rank, )
all ones if normalize_factors is False (default),
weights of the (normalized) factors otherwise
* factors : List of factors of the CP decomposition element `i` is of shape
(tensor.shape[i], rank)
* sparse_component : nD array of shape tensor.shape. Returns only if `sparsity` is not None.
errors : list
A list of reconstruction errors at each iteration of the algorithms.
References
----------
.. [1] T.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
.. [2] Tomasi, Giorgio, and Rasmus Bro. "PARAFAC and missing values."
Chemometrics and Intelligent Laboratory Systems 75.2 (2005): 163-180.
.. [3] R. Bro, "Multi-Way Analysis in the Food Industry: Models, Algorithms, and
Applications", PhD., University of Amsterdam, 1998
"""
def __init__(
self,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=10e-7,
random_state=None,
verbose=0,
normalize_factors=False,
mask=None,
cvg_criterion="abs_rec_error",
fixed_modes=None,
):
        self.rank = rank
        self.n_iter_max = n_iter_max
self.init = init
self.svd = svd
self.tol = tol
self.random_state = random_state
self.verbose = verbose
self.normalize_factors = normalize_factors
self.mask = mask
self.cvg_criterion = cvg_criterion
self.fixed_modes = fixed_modes
def fit_transform(self, tensor):
"""Decompose an input tensor
Parameters
----------
tensor : tensorly tensor
input tensor to decompose
Returns
-------
CPTensor
decomposed tensor
"""
cp_tensor, errors = non_negative_parafac(
tensor,
            rank=self.rank,
            n_iter_max=self.n_iter_max,
init=self.init,
svd=self.svd,
tol=self.tol,
random_state=self.random_state,
verbose=self.verbose,
normalize_factors=self.normalize_factors,
mask=self.mask,
cvg_criterion=self.cvg_criterion,
fixed_modes=self.fixed_modes,
return_errors=True,
)
self.decomposition_ = cp_tensor
self.errors_ = errors
return self.decomposition_
def __repr__(self):
return f"Rank-{self.rank} Non-Negative CP decomposition."
class CP_NN_HALS(DecompositionMixin):
"""
Non-Negative Candecomp-Parafac decomposition via Alternating-Least Square
Computes a rank-`rank` decomposition of `tensor` [1]_ such that::
``tensor = [|weights; factors[0], ..., factors[-1] |]``.
Parameters
----------
tensor : ndarray
rank : int
Number of components.
n_iter_max : int
Maximum number of iteration
init : {'svd', 'random'}, optional
Type of factor matrix initialization. See `initialize_factors`.
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
normalize_factors : if True, aggregate the weights of each factor in a 1D-tensor
of shape (rank, ), which will contain the norms of the factors
tol : float, optional
(Default: 1e-6) Relative reconstruction error tolerance. The
algorithm is considered to have found the global minimum when the
reconstruction error is less than `tol`.
random_state : {None, int, np.random.RandomState}
verbose : int, optional
Level of verbosity
return_errors : bool, optional
Activate return of iteration errors
mask : ndarray
array of booleans with the same shape as ``tensor`` should be 0 where
the values are missing and 1 everywhere else. Note: if tensor is
sparse, then mask should also be sparse with a fill value of 1 (or
True). Allows for missing values [2]_
cvg_criterion : {'abs_rec_error', 'rec_error'}, optional
Stopping criterion for ALS, works if `tol` is not None.
If 'rec_error', ALS stops at current iteration if (previous rec_error - current rec_error) < tol.
If 'abs_rec_error', ALS terminates when ``|previous rec_error - current rec_error| < tol``.
sparsity : float or int
If `sparsity` is not None, we approximate tensor as a sum of low_rank_component and sparse_component, where low_rank_component = cp_to_tensor((weights, factors)). `sparsity` denotes desired fraction or number of non-zero elements in the sparse_component of the `tensor`.
fixed_modes : list, default is None
A list of modes for which the initial value is not modified.
The last mode cannot be fixed due to error computation.
svd_mask_repeats: int
If using a tensor with masked values, this initializes using SVD multiple times to
remove the effect of these missing values on the initialization.
Returns
-------
CPTensor : (weight, factors)
* weights : 1D array of shape (rank, )
all ones if normalize_factors is False (default),
weights of the (normalized) factors otherwise
* factors : List of factors of the CP decomposition element `i` is of shape
(tensor.shape[i], rank)
* sparse_component : nD array of shape tensor.shape. Returns only if `sparsity` is not None.
errors : list
A list of reconstruction errors at each iteration of the algorithms.
References
----------
.. [1] T.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
.. [2] Tomasi, Giorgio, and Rasmus Bro. "PARAFAC and missing values."
Chemometrics and Intelligent Laboratory Systems 75.2 (2005): 163-180.
.. [3] R. Bro, "Multi-Way Analysis in the Food Industry: Models, Algorithms, and
Applications", PhD., University of Amsterdam, 1998
"""
def __init__(
self,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=10e-8,
sparsity_coefficients=None,
fixed_modes=None,
nn_modes="all",
exact=False,
verbose=False,
normalize_factors=False,
cvg_criterion="abs_rec_error",
random_state=None,
):
self.rank = rank
self.n_iter_max = n_iter_max
self.init = init
self.svd = svd
self.tol = tol
self.sparsity_coefficients = sparsity_coefficients
self.random_state = random_state
self.fixed_modes = fixed_modes
self.nn_modes = nn_modes
self.exact = exact
self.verbose = verbose
self.normalize_factors = normalize_factors
self.cvg_criterion = cvg_criterion
self.random_state = random_state
def fit_transform(self, tensor):
"""Decompose an input tensor
Parameters
----------
tensor : tensorly tensor
input tensor to decompose
Returns
-------
CPTensor
decomposed tensor
"""
cp_tensor, errors = non_negative_parafac_hals(
tensor,
rank=self.rank,
n_iter_max=self.n_iter_max,
init=self.init,
svd=self.svd,
tol=self.tol,
random_state=self.random_state,
sparsity_coefficients=self.sparsity_coefficients,
fixed_modes=self.fixed_modes,
nn_modes=self.nn_modes,
exact=self.exact,
verbose=self.verbose,
normalize_factors=self.normalize_factors,
return_errors=True,
cvg_criterion=self.cvg_criterion,
)
self.decomposition_ = cp_tensor
self.errors_ = errors
return self.decomposition_
def __repr__(self):
return f"Rank-{self.rank} Non-Negative CP decomposition."
|
7870cb715cb792b139fcc8092b0c35e8cbc008f4
|
8d77f3b72dc52b85ee0c4ef6ba06f63a6920841f
|
/python/aitemplate/frontend/nn/__init__.py
|
16e597a8486d5be952f42266065cd8a8c93bc948
|
[
"Apache-2.0"
] |
permissive
|
facebookincubator/AITemplate
|
b643c217e1d15f7f17dab1eb1cc6855eab664b97
|
c60dc19788217556ba12ea378c02b9fd0aea9ffe
|
refs/heads/main
| 2023-08-28T18:22:15.828008
| 2023-08-28T14:43:41
| 2023-08-28T14:43:41
| 514,321,895
| 4,065
| 334
|
Apache-2.0
| 2023-09-14T04:53:57
| 2022-07-15T15:40:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
from aitemplate.frontend.nn.container import ModuleDict, ModuleList, Sequential
from aitemplate.frontend.nn.embedding import BertEmbeddings, Embedding
from aitemplate.frontend.nn.module import Module
from aitemplate.frontend.nn.conv1d import *
from aitemplate.frontend.nn.conv2d import *
from aitemplate.frontend.nn.conv3d import *
from aitemplate.frontend.nn.linear import *
from aitemplate.frontend.nn.padding import *
from aitemplate.frontend.nn.pool2d import *
from aitemplate.frontend.nn.fpn_proposal import FPNProposal
from aitemplate.frontend.nn.proposal import Proposal
from aitemplate.frontend.nn.roi_ops import *
from aitemplate.frontend.nn.upsample import *
from aitemplate.frontend.nn.view_ops import *
from aitemplate.frontend.nn.attention import (
CrossAttention,
FlashAttention,
MultiheadAttention,
ScaledDotProductAttention,
)
from aitemplate.frontend.nn.identity import Identity
from aitemplate.frontend.nn.multiscale_attention import MultiScaleBlock
from aitemplate.frontend.nn.vanilla_attention import (
vanilla_attention,
VanillaCrossAttention,
VanillaMultiheadAttention,
)
from aitemplate.frontend.nn.dropout import *
from aitemplate.frontend.nn.layer_norm import *
from aitemplate.frontend.nn.group_norm import *
from aitemplate.frontend.nn.dual_gemm import T5DenseGatedGeluDense
|
5462eba4ae8a7807c382a34b82c9e07c3c2089ca
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/secrets/test_runner.py
|
041ab6e6b82520871600b1a7ab9e19f3a2f49b38
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 19,174
|
py
|
test_runner.py
|
import unittest
import os
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.bridgecrew.integration_features.features.policy_metadata_integration import integration as metadata_integration
from checkov.common.bridgecrew.severities import BcSeverities, Severities
from checkov.secrets.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestRunnerValid(unittest.TestCase):
def setUp(self) -> None:
self.orig_metadata = metadata_integration.check_metadata
def test_runner_failing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
self.assertEqual(len(report.failed_checks), 2)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_sanity_check_secrets(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/sanity/secrets"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_6']))
self.assertEqual(len(report.failed_checks), 6)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_fp_sanity_check_secrets_non_iac(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/sanity/iac_fp"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_6'], enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 0)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_fp_sanity_check_secrets_iac(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/sanity/non_iac_fp"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_6'], enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 0)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_sanity_check_non_secrets(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/sanity/non_secrets"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_6']))
self.assertEqual(len(report.failed_checks), 0)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_honors_enforcement_rules(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
filter = RunnerFilter(framework=['secrets'], use_enforcement_rules=True)
# this is not quite a true test, because the checks don't have severities. However, this shows that the check registry
# passes the report type properly to RunnerFilter.should_run_check, and we have tests for that method
filter.enforcement_rule_configs = {CheckType.SECRETS: Severities[BcSeverities.OFF]}
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=filter)
self.assertEqual(len(report.failed_checks), 0)
self.assertEqual(len(report.parsing_errors), 0)
self.assertEqual(len(report.passed_checks), 0)
self.assertEqual(len(report.skipped_checks), 0)
report.print_console()
def test_runner_passing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/terraform"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
self.assertEqual(len(report.passed_checks), 0)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.failed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_tf_failing_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/terraform_failed"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
self.assertEqual(2, len(report.failed_checks))
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(report.skipped_checks, [])
report.print_console()
def test_runner_tf_skip_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/terraform_skip"
report = Runner().run(
root_folder=valid_dir_path,
external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'])
)
self.assertEqual(len(report.skipped_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(len(report.skipped_checks), 1)
report.print_console()
def test_runner_specific_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_2']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 1)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_runner_wildcard_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET*']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 2)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_runner_skip_check(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], skip_checks=['CKV_SECRET_2']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 1)
self.assertEqual(report.failed_checks[0].check_id, 'CKV_SECRET_6')
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_record_has_severity(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
metadata_integration.check_metadata = {
'CKV_SECRET_2': {
'severity': Severities[BcSeverities.LOW]
}
}
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['CKV_SECRET_2']))
self.assertEqual(report.failed_checks[0].severity, Severities[BcSeverities.LOW])
def test_runner_check_severity(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
metadata_integration.check_metadata = {
'CKV_SECRET_2': {
'severity': Severities[BcSeverities.LOW]
},
'CKV_SECRET_6': {
'severity': Severities[BcSeverities.HIGH]
}
}
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], checks=['MEDIUM']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 1)
self.assertEqual(report.failed_checks[0].check_id, 'CKV_SECRET_6')
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_runner_skip_check_severity(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
metadata_integration.check_metadata = {
'CKV_SECRET_2': {
'severity': Severities[BcSeverities.LOW]
},
'CKV_SECRET_6': {
'severity': Severities[BcSeverities.HIGH]
}
}
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], skip_checks=['MEDIUM']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 1)
self.assertEqual(report.failed_checks[0].check_id, 'CKV_SECRET_6')
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_runner_skip_check_wildcard(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], skip_checks=['CKV_SECRET*']))
self.assertEqual(len(report.skipped_checks), 0)
self.assertEqual(len(report.failed_checks), 0)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
def test_runner_multiple_files(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
self.assertEqual(9, len(report.failed_checks))
self.assertEqual(report.parsing_errors, [])
self.assertEqual(report.passed_checks, [])
self.assertEqual(len(report.skipped_checks), 1)
def test_runner_bc_ids(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
# the other tests will implicitly test this value being None
metadata_integration.check_metadata = {
'CKV_SECRET_2': {
'id': 'BC_GIT_2'
}
}
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
for fc in report.failed_checks:
if fc.check_id == 'CKV_SECRET_2':
self.assertEqual(fc.bc_check_id, 'BC_GIT_2')
else:
self.assertIsNone(fc.bc_check_id)
def tearDown(self) -> None:
metadata_integration.check_metadata = self.orig_metadata
def test_runner_requested_file_type_only_ts(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path,
external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'],
block_list_secret_scan=['.py', 'Dockerfile', '.tf', '.yml'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 2)
def test_runner_requested_file_type_only_py(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], block_list_secret_scan=['.ts', 'Dockerfile', '.tf', '.yml'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 2)
def test_runner_requested_file_type_only_yml(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], block_list_secret_scan=['.py', 'Dockerfile', '.tf', '.ts'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 2)
def test_runner_requested_file_type_only_tf(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'],
block_list_secret_scan=['.py', 'Dockerfile', '.ts', '.yml'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 3)
def test_runner_requested_file_type_only_tf_yml(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], block_list_secret_scan=['.py', 'Dockerfile', '.ts'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 5)
def test_runner_requested_file_type_all(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 13)
def test_runner_requested_file_only_dockerfile(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], block_list_secret_scan=['.py', '.tf', '.ts', '.yml'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 4)
def test_runner_no_requested_file(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets']))
self.assertEqual(len(report.failed_checks), 9)
def test_true_positive_py(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_file_path = current_dir + "/resources/file_type/test.py"
runner = Runner()
report = runner.run(root_folder=None, files=[valid_file_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'], enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 2)
def test_no_false_positive_yml_2(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/resources/cfn"
valid_file_path = valid_dir_path + "/secret-no-false-positive.yml"
runner = Runner()
report = runner.run(root_folder=None, files=[valid_file_path], external_checks_dir=None,
runner_filter=RunnerFilter(framework=['secrets'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 0)
def test_runner_entropy_source_files(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_dir_path = current_dir + "/test_entropy_source_files"
runner = Runner()
report = runner.run(root_folder=valid_dir_path, runner_filter=RunnerFilter(framework=['secrets'],
enable_secret_scan_all_files=True))
self.assertEqual(len(report.failed_checks), 2)
for failed in report.failed_checks:
if failed.check_id == 'CKV_SECRET_6':
self.assertEqual(failed.file_line_range, [4, 5])
elif failed.check_id == 'CKV_SECRET_4':
self.assertEqual(failed.file_line_range, [6, 7])
else:
self.fail(f'Got a bad result: {failed}')
if __name__ == '__main__':
unittest.main()
|
da7f8ff00e612614f33d204070a2ed46a86ad6fc
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/numpy_ravel_flatten_timeit.py
|
8f843e3161cecd6ebffc9b447d677826a71f8997
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 786
|
py
|
numpy_ravel_flatten_timeit.py
|
import numpy as np
a = np.arange(12).reshape(3, 4)
print(a)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
%%timeit
a.ravel()
# 242 ns ± 2.78 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
%%timeit
a.flatten()
# 725 ns ± 45.2 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
%%timeit
a.reshape(-1)
# 851 ns ± 13.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
a_large = np.arange(1000000).reshape(100, 100, 100)
%%timeit
a_large.ravel()
# 242 ns ± 3.6 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
%%timeit
a_large.flatten()
# 2.03 ms ± 8.63 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
%%timeit
a_large.reshape(-1)
# 899 ns ± 52 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
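# Why ravel() stays fast even on the large array (illustrative addition): ravel()
# returns a view of the original data whenever the memory layout allows it, while
# flatten() always allocates a copy. np.shares_memory() makes the difference visible.
print(np.shares_memory(a_large, a_large.ravel()))    # True  -> view, no data copied
print(np.shares_memory(a_large, a_large.flatten()))  # False -> independent copy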
|
c7b401919d5058221032c931ae73d79f675b0210
|
6436d1e6c23f9f43a8025889dc4414a3ad66acf2
|
/Assets/Python/BUG/BugDll.py
|
168ffe9db1dbf40857346e450ffa2fa415fb3970
|
[
"MIT"
] |
permissive
|
dguenms/Dawn-of-Civilization
|
b710195c4f46fe11d9229182c3b1e07b77f42637
|
a305e7846d085d6edf1e9c472e8dfceee1c07dd4
|
refs/heads/develop
| 2023-09-04T04:57:00.086384
| 2023-09-01T15:24:28
| 2023-09-01T15:24:28
| 45,362,597
| 116
| 121
|
MIT
| 2023-02-08T00:18:53
| 2015-11-01T23:52:28
|
C++
|
UTF-8
|
Python
| false
| false
| 6,042
|
py
|
BugDll.py
|
## BugDll
##
## Collection of utility functions for dealing with the optional BUG DLL.
##
## General
##
## - isPresent()
## Returns True if the BUG DLL is present.
##
## - getVersion()
## Returns the version number of the BUG DLL if it's present, zero (0) otherwise.
##
## - isVersion(version)
## Returns True if the BUG DLL is present and is version <version> or later.
##
## Widgets
##
## - widget(bugWidget, bugData1=None, bugData2=None, *args)
## Returns a tuple of arguments for passing to a function that takes a WidgetTypes and parameters.
## Chooses between two sets of widgets and parameters based on the presence of the BUG DLL.
##
## - widgetVersion(version, bugWidget, bugData1=None, bugData2=None, *args)
## Same as widget() but also checks the BUG DLL version as per isVersion().
##
## - isWidget(widget, bugWidget)
## Returns True if <bugWidget> exists and matches <widget>
##
## - isWidgetVersion(version, widget, bugWidget)
## Same as isWidget() but also checks the BUG DLL version as per isVersion().
##
## Notes
## - Must be initialized externally by calling init()
##
## Copyright (c) 2009 The BUG Mod.
##
## Author: EmperorFool
from CvPythonExtensions import *
import BugOptions
import BugUtil
gc = CyGlobalContext()
IS_PRESENT = False
VERSION = -1
## General and Versions
def isBug():
return True
def isPresent():
return IS_PRESENT
def getVersion():
return VERSION
def isVersion(version):
if version > 0:
return IS_PRESENT and VERSION >= version
else:
return IS_PRESENT
def decode(value, noneIsZero=False):
if value:
if isinstance(value, int):
if value >= 1:
return value
else:
try:
version = int(value)
if version >= 1:
return version
except ValueError:
pass
if noneIsZero:
return 0
else:
return None
## Widgets
def widget(bugWidget, bugData1=None, bugData2=None, *args):
return widgetVersion(VERSION, bugWidget, bugData1, bugData2, *args)
def widgetVersion(version, bugWidget, bugData1=None, bugData2=None, *args):
"""
Picks one of two WidgetTypes and parameters to return based on presence of BUG DLL and <version>.
The bugWidget must be the name of the desired widget, e.g. "WIDGET_SET_PERCENT";
the other widget should be the WidgetTypes constant, e.g. WidgetTypes.WIDGET_CHANGE_PERCENT.
The default widget values are WidgetTypes.WIDGET_GENERAL, -1, -1. To specify a different set,
pass them in as the 4th, 5th and 6th arguments.
To return zero or one data values, pass None for the BUG values you want not to have returned;
the same ones won't be returned if the BUG DLL isn't present. When overriding the non-BUG widget
type, you must always pass in three values: the WidgetTypes and its two data values. The matching
BUG ones that are None will not be returned, regardless of their own values.
Any arguments after the widget arguments are added at the end of the returned tuple; use this
when the function you are passing the arguments to takes more arguments after the widget values.
Make sure to use a * to unpack the returned tuple when passing the arguments to your function.
Example:
screen.setButtonGFC( "MinCommerceRate", u"", "", 130, 50, 20, 20,
*BugDll.widget("WIDGET_SET_PERCENT", eCommerce, 0,
WidgetTypes.WIDGET_CHANGE_PERCENT, eCommerce, -100,
ButtonStyles.BUTTON_STYLE_CITY_MINUS) )
"""
realArgs = []
if len(args) >= 3 and isinstance(args[0], WidgetTypes):
normalWidget, normalData1, normalData2 = args[:3]
args = args[3:]
else:
normalWidget=WidgetTypes.WIDGET_GENERAL
normalData1=-1
normalData2=-1
handled = False
if isVersion(version):
try:
if isinstance(bugWidget, WidgetTypes):
realArgs.append(bugWidget)
else:
realArgs.append(getattr(WidgetTypes, bugWidget))
if bugData1 is not None:
realArgs.append(bugData1)
if bugData2 is not None:
realArgs.append(bugData2)
handled = True
except AttributeError:
BugUtil.warn("WidgetTypes.%s not found", bugWidget)
if not handled:
realArgs.append(normalWidget)
if bugData1 is not None:
realArgs.append(normalData1)
if bugData2 is not None:
realArgs.append(normalData2)
if args:
realArgs.extend(args)
return realArgs
def isWidget(widget, bugWidget):
return isWidgetVersion(VERSION, widget, bugWidget)
def isWidgetVersion(version, widget, bugWidget):
"""
Returns True if <widget> has the same value as <bugWidget>, False otherwise.
If the BUG DLL isn't present, doesn't have the correct version, or the widget
doesn't exist, False is safely returned.
"""
if isVersion(version):
try:
return widget == getattr(WidgetTypes, bugWidget)
except:
pass
return False
## Accessing Options
def getOptionBOOL(argsList):
return castOptionValue(bool, *argsList)
def getOptionINT(argsList):
return castOptionValue(int, *argsList)
def getOptionFLOAT(argsList):
return castOptionValue(float, *argsList)
def getOptionSTRING(argsList):
return castOptionValue(str, *argsList)
def castOptionValue(func, id, default):
try:
return func(BugOptions.getOption(id).getValue())
except:
return default
def init():
"""
Checks for the presence of the BUG DLL and grabs its Python API version if found.
"""
try:
if gc.isBull():
global IS_PRESENT, VERSION
IS_PRESENT = True
VERSION = gc.getBullApiVersion()
BugUtil.info("BugDll - %s %s, API version %d", gc.getBullName(), gc.getBullVersion(), VERSION)
if hasattr(CyGlobalContext, "setIsBug"):
import BugInit
BugInit.addInit("setIsBug", setIsBug)
else:
BugUtil.debug("BugDll - setIsBug() not found")
except:
BugUtil.debug("BugDll - BULL not present")
def setIsBug():
"""
Tells BULL that BUG is ready to receive queries for options.
"""
BugUtil.debug("BugDll - calling setIsBug()")
gc.setIsBug(True)
|
be07245500341137e3d783b9391c3159baac8704
|
fad7958b93f330e365835576712a2caeec87a669
|
/tests/test_base.py
|
6cdf8fc5757f17f91efbeafe77215e05ffddf773
|
[
"MIT"
] |
permissive
|
JackMcKew/pandas_alive
|
1a8e8ec82a230b0d92b3593c6a968a6c840c625a
|
1caa67677226b3ecdd700f7878ddc0d09bed4c17
|
refs/heads/main
| 2023-08-29T21:01:11.181482
| 2022-03-10T04:05:09
| 2022-03-10T04:05:09
| 260,785,184
| 559
| 109
|
MIT
| 2023-02-10T23:09:08
| 2020-05-02T22:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
test_base.py
|
import os
import sys
import pandas_alive
import pytest
# myPath = os.path.dirname(os.path.abspath(__file__))
# sys.path.insert(0, "...")
# @pytest.fixture(scope="function")
def test_load_dataset():
# Load default dataset:
covid_df = pandas_alive.load_dataset()
return covid_df
|
16230f050a8300db13b3ee5e46560705dd4e3cad
|
c618bbf2719431999b1007461df0865bab60c883
|
/docs/examples/use_cases/tensorflow/efficientdet/model/utils/optimizers.py
|
bcb81e4a26eb5242addd28d850b10b70677a5119
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DALI
|
3d0d061135d19e092647e6522046b2ff23d4ef03
|
92ebbe5c20e460050abd985acb590e6c27199517
|
refs/heads/main
| 2023-09-04T01:53:59.033608
| 2023-09-01T13:45:03
| 2023-09-01T13:45:03
| 135,768,037
| 4,851
| 648
|
Apache-2.0
| 2023-09-12T18:00:22
| 2018-06-01T22:18:01
|
C++
|
UTF-8
|
Python
| false
| false
| 7,764
|
py
|
optimizers.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras efficientdet optimizers."""
from absl import logging
import numpy as np
import tensorflow as tf
_DEFAULT_BATCH_SIZE = 64
def get_optimizer(params, *args):
"""Get optimizer."""
learning_rate = learning_rate_schedule(params, *args)
if params["optimizer"].lower() == "sgd":
logging.info("Use SGD optimizer")
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate, momentum=params["momentum"])
elif params["optimizer"].lower() == "adam":
logging.info("Use Adam optimizer")
optimizer = tf.keras.optimizers.legacy.Adam(learning_rate)
else:
raise ValueError("optimizers should be adam or sgd")
return optimizer
def update_learning_rate_schedule_parameters(
params, epochs, global_batch_size, steps_per_epoch
):
"""Updates params that are related to the learning rate schedule."""
# Learning rate is proportional to the batch size
params["adjusted_learning_rate"] = (
params["learning_rate"] * global_batch_size / _DEFAULT_BATCH_SIZE
)
if "lr_warmup_init" in params:
params["adjusted_lr_warmup_init"] = (
params["lr_warmup_init"] * global_batch_size / _DEFAULT_BATCH_SIZE
)
params["lr_warmup_step"] = int(params["lr_warmup_epoch"] * steps_per_epoch)
params["first_lr_drop_step"] = int(params["first_lr_drop_epoch"] * steps_per_epoch)
params["second_lr_drop_step"] = int(
params["second_lr_drop_epoch"] * steps_per_epoch
)
params["total_steps"] = epochs * steps_per_epoch
def learning_rate_schedule(params, *args):
"""Learning rate schedule based on global step."""
update_learning_rate_schedule_parameters(params, *args)
lr_decay_method = params["lr_decay_method"]
if lr_decay_method == "stepwise":
return StepwiseLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["first_lr_drop_step"],
params["second_lr_drop_step"],
)
if lr_decay_method == "cosine":
return CosineLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["total_steps"],
)
if lr_decay_method == "polynomial":
return PolynomialLrSchedule(
params["adjusted_learning_rate"],
params["adjusted_lr_warmup_init"],
params["lr_warmup_step"],
params["poly_lr_power"],
params["total_steps"],
)
if lr_decay_method == "constant":
return params["adjusted_learning_rate"]
raise ValueError("unknown lr_decay_method: {}".format(lr_decay_method))
class StepwiseLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Stepwise learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
first_lr_drop_step: int,
second_lr_drop_step: int,
):
"""Build a StepwiseLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
first_lr_drop_step: `int`, First lr decay step.
second_lr_drop_step: `int`, Second lr decay step.
"""
super().__init__()
logging.info("LR schedule method: stepwise")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.first_lr_drop_step = first_lr_drop_step
self.second_lr_drop_step = second_lr_drop_step
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
learning_rate = tf.where(
step < self.lr_warmup_step, linear_warmup, self.adjusted_lr
)
lr_schedule = [
[1.0, self.lr_warmup_step],
[0.1, self.first_lr_drop_step],
[0.01, self.second_lr_drop_step],
]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(
step < start_global_step, learning_rate, self.adjusted_lr * mult
)
return learning_rate
class CosineLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Cosine learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
total_steps: int,
):
"""Build a CosineLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
total_steps: `int`, Total train steps.
"""
super().__init__()
logging.info("LR schedule method: cosine")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
cosine_lr = (
0.5
* self.adjusted_lr
* (1 + tf.cos(np.pi * tf.cast(step, tf.float32) / self.decay_steps))
)
return tf.where(step < self.lr_warmup_step, linear_warmup, cosine_lr)
class PolynomialLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
"""Polynomial learning rate schedule."""
def __init__(
self,
adjusted_lr: float,
lr_warmup_init: float,
lr_warmup_step: int,
power: float,
total_steps: int,
):
"""Build a PolynomialLrSchedule.
Args:
adjusted_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
power: `float`, power.
total_steps: `int`, Total train steps.
"""
super().__init__()
logging.info("LR schedule method: polynomial")
self.adjusted_lr = adjusted_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.power = power
self.total_steps = total_steps
def __call__(self, step):
linear_warmup = self.lr_warmup_init + (
tf.cast(step, dtype=tf.float32)
/ self.lr_warmup_step
* (self.adjusted_lr - self.lr_warmup_init)
)
polynomial_lr = self.adjusted_lr * tf.pow(
1 - (tf.cast(step, dtype=tf.float32) / self.total_steps), self.power
)
return tf.where(step < self.lr_warmup_step, linear_warmup, polynomial_lr)
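# --- Usage sketch (added for illustration; not part of the original file). ---
# The dict keys below mirror exactly what update_learning_rate_schedule_parameters()
# and get_optimizer() read above; the numeric values are arbitrary example settings.
if __name__ == "__main__":
    example_params = {
        "optimizer": "sgd",
        "momentum": 0.9,
        "learning_rate": 0.08,
        "lr_warmup_init": 0.008,
        "lr_warmup_epoch": 1.0,
        "first_lr_drop_epoch": 200.0,
        "second_lr_drop_epoch": 250.0,
        "lr_decay_method": "cosine",
    }
    # The extra positional args are epochs, global batch size and steps per epoch.
    schedule = learning_rate_schedule(example_params, 300, 64, 100)
    print("lr at step 0:", float(schedule(0)))
    print("lr at step 5000:", float(schedule(5000)))
    optimizer = get_optimizer(example_params, 300, 64, 100)
    print("optimizer:", type(optimizer).__name__)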
|
0eaf05513b0108d1b67e4ece34612524bd59eeca
|
6a40d044498ecaf41d6dd93f4fcd53fd65b51ae2
|
/recommends/storages/djangoorm/managers.py
|
f1ef4f4ae5fb613c22b4293f924af21943135047
|
[
"MIT"
] |
permissive
|
fcurella/django-recommends
|
f1d565bc148fe1b4ec2546f17d3bda7fe02efc2e
|
414436f83b8a0fc5a0c45eb585f9db7a4ed4e70a
|
refs/heads/master
| 2023-04-07T12:31:34.728654
| 2023-01-09T16:45:59
| 2023-01-09T16:45:59
| 2,696,671
| 162
| 43
|
MIT
| 2023-03-31T14:39:20
| 2011-11-02T17:32:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
managers.py
|
from django.conf import settings
from django.db import models
from recommends.managers import CachedContentTypesMixin
class RecommendsManager(models.Manager, CachedContentTypesMixin):
def filter_for_model(self, model):
ctype_id = self.get_ctype_id_for_obj(model)
return self.filter(object_ctype=ctype_id)
def filter_for_object(self, obj):
return self.filter_for_model(obj).filter(object_id=obj.id)
class SimilarityManager(RecommendsManager):
def filter_for_related_model(self, related_model):
ctype_id = self.get_ctype_id_for_obj(related_model)
return self.filter(related_object_ctype=ctype_id)
def filter_for_related_object(self, related_obj):
return self.filter_for_related_model(related_obj).filter(related_object_id=related_obj.id)
def filter_by_couple(self, target_object, related_obj):
related_ctype_id = self.get_ctype_id_for_obj(related_obj)
return self.filter_for_object(target_object).filter(
related_object_ctype=related_ctype_id,
related_object_id=related_obj.id
)
def get_queryset(self):
return super(SimilarityManager, self).get_queryset().filter(score__isnull=False)
def get_or_create_for_objects(self, object_target, object_target_site, object_related, object_related_site):
object_ctype_id = self.get_ctype_id_for_obj(object_target)
object_id = object_target.id
related_object_ctype_id = self.get_ctype_id_for_obj(object_related)
related_object_id = object_related.id
return self.get_or_create(
object_ctype_id=object_ctype_id,
object_id=object_id,
object_site=object_target_site.id,
related_object_ctype_id=related_object_ctype_id,
related_object_id=related_object_id,
related_object_site=object_related_site.id
)
def set_score_for_objects(self, object_target, object_target_site, object_related, object_related_site, score):
if score == 0:
self.filter_by_couple(object_target, object_related).filter(
object_site=object_target_site.id,
related_object_site=object_related_site.id
).delete()
return None
result, created = self.get_or_create_for_objects(object_target, object_target_site, object_related, object_related_site)
result.score = score
result.save()
return result
def similar_to(self, obj, site=None, **kwargs):
if site is None and 'related_object_site' not in kwargs:
kwargs['related_object_site'] = settings.SITE_ID
return self.filter_for_object(obj).filter(**kwargs)
class RecommendationManager(RecommendsManager):
def get_queryset(self):
return super(RecommendationManager, self).get_queryset().filter(score__isnull=False)
def get_or_create_for_object(self, user, object_recommended, object_site):
object_ctype_id = self.get_ctype_id_for_obj(object_recommended)
object_id = object_recommended.id
return self.get_or_create(
object_ctype_id=object_ctype_id,
object_id=object_id,
object_site=object_site.id,
user=user.id
)
def set_score_for_object(self, user, object_recommended, object_site, score):
if score == 0:
self.filter_for_object(object_recommended).filter(user=user.id).delete()
return None
result, created = self.get_or_create_for_object(user, object_recommended, object_site)
result.score = score
result.save()
return result
|
d26842a7efabe7f6e866ffd922df175475538c69
|
fbdc48c28e54fb33ae4842ef95ff63893902c99a
|
/scripts/examples/02-Image-Processing/01-Image-Filters/midpoint_filter.py
|
f6a0ebcf77089077a05374d8af1069ed2659df0f
|
[
"MIT"
] |
permissive
|
openmv/openmv
|
44d4b79fc8693950a2e330e5e0fd95b5c36e230f
|
8a90e070a88b7fc14c87a00351b9c4a213278419
|
refs/heads/master
| 2023-08-30T20:59:57.227603
| 2023-08-23T16:50:55
| 2023-08-23T16:50:55
| 14,360,940
| 2,150
| 1,226
|
MIT
| 2023-09-14T07:18:15
| 2013-11-13T10:23:44
|
C
|
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
midpoint_filter.py
|
# Midpoint Filter Example
#
# This example shows off midpoint filtering. Midpoint filtering replaces each
# pixel by the average of the min and max pixel values for a NxN neighborhood.
import sensor
import time
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock() # Tracks FPS.
while True:
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
# shouldn't ever need to use a value bigger than 2. The "bias" argument
# lets you select between min and max blending. 0.5 == midpoint filter,
# 0.0 == min filter, and 1.0 == max filter. Note that the min filter
# makes images darker while the max filter makes images lighter.
img.midpoint(1, bias=0.5)
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
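# Illustrative variants of the call above (added for clarity; the kernel sizes and bias
# values are just examples of the parameters described in the comments):
#   img.midpoint(1, bias=0.0)   # 3x3 min filter (darkens the image)
#   img.midpoint(1, bias=1.0)   # 3x3 max filter (lightens the image)
#   img.midpoint(2, bias=0.5)   # 5x5 midpoint filter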
|
e1c976c8dbb25fcd3f27f7a5fba0a16f2e336a47
|
5eb640122da4460b38ecb0a5add0204f3fa3e7d2
|
/test/unit/data/model/mapping/test_annotation_associations.py
|
e1cdcfbd8325515d5b0859493fa3bc8044c60e7d
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
d12132439843085e5fdbd3977e77590acb5e4802
|
refs/heads/master
| 2023-08-28T22:35:51.248138
| 2023-08-26T07:59:46
| 2023-08-26T07:59:46
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,932
|
py
|
test_annotation_associations.py
|
import pytest
from galaxy import model
from galaxy.model.unittest_utils.model_testing_utils import ( # noqa: F401
create_and_drop_database,
dbcleanup,
disposing_engine,
get_stored_obj,
url,
)
pytest_plugins = ["galaxy.model.unittest_utils.gxy_model_fixtures"]
@pytest.fixture(scope="module")
def engine(url): # noqa: F811
with create_and_drop_database(url):
with disposing_engine(url) as engine:
yield engine
def test_history_annotation_associations(session, history_annotation_association, user, history):
_annotation_assoc_helper(
session, history_annotation_association, user, history, "history", model.HistoryAnnotationAssociation
)
def test_history_dataset_association_annotation_associations(
session, history_dataset_association_annotation_association, user, history_dataset_association
):
_annotation_assoc_helper(
session,
history_dataset_association_annotation_association,
user,
history_dataset_association,
"hda",
model.HistoryDatasetAssociationAnnotationAssociation,
)
def test_stored_workflow_annotation_associations(
session,
stored_workflow_annotation_association,
user,
stored_workflow,
):
_annotation_assoc_helper(
session,
stored_workflow_annotation_association,
user,
stored_workflow,
"stored_workflow",
model.StoredWorkflowAnnotationAssociation,
)
def test_workflow_step_annotation_associations(session, workflow_step_annotation_association, user, workflow_step):
_annotation_assoc_helper(
session,
workflow_step_annotation_association,
user,
workflow_step,
"workflow_step",
model.WorkflowStepAnnotationAssociation,
)
def test_page_annotation_associations(session, page_annotation_association, user, page):
_annotation_assoc_helper(session, page_annotation_association, user, page, "page", model.PageAnnotationAssociation)
def test_visualization_annotation_associations(session, visualization_annotation_association, user, visualization):
_annotation_assoc_helper(
session,
visualization_annotation_association,
user,
visualization,
"visualization",
model.VisualizationAnnotationAssociation,
)
def test_history_dataset_collection_association_annotation_associations(
session, history_dataset_collection_annotation_association, user, history_dataset_collection_association
):
_annotation_assoc_helper(
session,
history_dataset_collection_annotation_association,
user,
history_dataset_collection_association,
"history_dataset_collection",
model.HistoryDatasetCollectionAssociationAnnotationAssociation,
)
def test_library_dataset_collection_annotation_associations(
session, library_dataset_collection_annotation_association, user, library_dataset_collection_association
):
_annotation_assoc_helper(
session,
library_dataset_collection_annotation_association,
user,
library_dataset_collection_association,
"dataset_collection",
model.LibraryDatasetCollectionAnnotationAssociation,
)
def _annotation_assoc_helper(session, association, user, assoc_object, assoc_object_name, cls_):
annotation = "a"
setattr(association, assoc_object_name, assoc_object) # FooAnnotAssoc.foo
association.annotation = annotation # FooAnnotAssoc.annotation
association.user = user # FooAnnotAssoc.user
with dbcleanup(session, association) as id:
stored_obj = get_stored_obj(session, cls_, id)
assert stored_obj.id == id
_stored_assoc_object = getattr(stored_obj, assoc_object_name)
assert _stored_assoc_object.id == assoc_object.id
assert stored_obj.annotation == annotation
assert stored_obj.user_id == user.id
|
3edba58fcb89cf8394f2ec054d91af2aff61e053
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/z3/hidato.py
|
518802a4d16286f49d2ce426d9595a31bea0f1ce
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,884
|
py
|
hidato.py
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Hidato puzzle in Z3
# http://www.shockwave.com/gamelanding/hidato.jsp
# http://www.hidato.com/
# '''
# Puzzles start semi-filled with numbered tiles.
# The first and last numbers are circled.
# Connect the numbers together to win. Consecutive
# number must touch horizontally, vertically, or
# diagonally.
# '''
#
# Time to first solution:
#
# puzzle1 : 0.0412900447845459
# puzzle2 : 3.2280960083007812
# puzzle3 : 0.31029653549194336
# puzzle4 : 0.26546549797058105
# puzzle5 : 1.6768722534179688
# puzzle6 : 11.532785654067993
# puzzle7 : 86.42020177841187
# puzzle8 : 356.0383880138397
#
# Time to prove unicity:
# puzzle1 : 0.042975664138793945
# puzzle2 : 3.936161994934082
# puzzle3 : 0.340954065322876
# puzzle4 : 0.26323676109313965
# puzzle5 : 1.8561887741088867
# puzzle6 : 13.294347524642944
# puzzle7 : 92.71803569793701
# puzzle8 : 414.0651047229767
#
#
#
# Note: This model is quite slow.
# See hidato_table.py and hidato_function.py for faster programs.
#
# See hidato_function.py for a comparison of the Hidato solvers.
#
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from __future__ import print_function
import time
from hidato_instances import instances
from z3_utils_hakank import *
def hidato(puzzle,num_sols=0):
sol = SimpleSolver()
r = len(puzzle)
c = len(puzzle[0])
print_game(puzzle, r, c)
#
# declare variables
#
x = {}
for i in range(r):
for j in range(c):
x[(i, j)] = makeIntVar(sol, "x(%i,%i)" % (i, j), 1, r * c)
x_flat = [x[(i, j)] for i in range(r) for j in range(c)]
#
# constraints
#
sol.add(Distinct(x_flat))
#
# Fill in the clues
#
for i in range(r):
for j in range(c):
if puzzle[i][j] > 0:
sol.add(x[(i, j)] == puzzle[i][j])
# From the numbers k = 1 to r*c-1, find this position,
# and then the position of k+1
cc = 0
for k in range(1, r * c):
i = makeIntVar(sol,"i_tmp_%i_%i" % (k,cc), 0, r-1)
j = makeIntVar(sol,"j_tmp_%i_%i" % (k,cc), 0, c-1)
a = makeIntVar(sol,"a_tmp_%i_%i" % (k,cc), -1, 1)
b = makeIntVar(sol,"b_tmp_%i_%i" % (k,cc), -1, 1)
cc += 1
# 1) First: fix "this" k
# sol.add(k == x[(i,j)])
element(sol,i * c + j,x_flat,k,r*c)
# 2) and then find the position of the next value (k+1)
# solver.add(k + 1 == x[(i+a,j+b)])
element(sol,(i + a) * c + (j + b),x_flat, k + 1,r*c)
sol.add(i + a >= 0)
sol.add(j + b >= 0)
sol.add(i + a < r)
sol.add(j + b < c)
sol.add(Or(a != 0, b != 0))
#
# solution and search
#
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
xx_flat = [mod.eval(x_flat[i*c+j]) for i in range(r) for j in range(c)]
print("\nSolution:", num_solutions)
print_board(mod, x, r, c)
print()
if num_sols > 0 and num_solutions >= num_sols:
break
sol.add(Or([xx_flat[i*c+j] != x_flat[i*c+j] for i in range(r) for j in range(c) ]))
print("num_solutions:", num_solutions)
def print_board(mod, x, rows, cols):
for i in range(rows):
for j in range(cols):
print("% 3s" % mod.eval(x[i,j]), end=' ')
print("")
def print_game(game, rows, cols):
for i in range(rows):
for j in range(cols):
print("% 3s" % game[i][j], end=' ')
print("")
def test_all(num_sols=0):
times = {}
for puzzle in instances:
print()
print(f"----- Solving problem {puzzle} -----")
print()
t0 = time.time()
hidato(instances[puzzle],num_sols)
t1 = time.time()
print("Time:", t1-t0)
times[puzzle] = t1-t0
print()
print("Times:")
for puzzle in times:
print(puzzle, ":", times[puzzle])
print("\nTime to first solution:")
num_sols = 1
test_all(num_sols)
print("Time to prove unicity:")
num_sols = 0
test_all(num_sols)
|
6acadda8e0aa4cc4d8a8ecd35682ebf98cb95070
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-AuthenticationServices/PyObjCTest/test_ascredentialidentitystorestate.py
|
e76776271f9afd78cccf78e11883e615dad722b7
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
test_ascredentialidentitystorestate.py
|
import AuthenticationServices
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestASCredentialIdentityStoreState(TestCase):
@min_os_level("11.0")
def test_methods11_0(self):
self.assertResultIsBOOL(
AuthenticationServices.ASCredentialIdentityStoreState.isEnabled
)
self.assertResultIsBOOL(
AuthenticationServices.ASCredentialIdentityStoreState.supportsIncrementalUpdates
)
|
a002f85ab0a041e3bb1c2845abb8407c938f5e7e
|
c2bcf42e04a1e2146b41b250ff14e62fddcdf589
|
/skl2onnx/shape_calculators/multioutput.py
|
fe275dfb4de21c2f2b0b18b9f6675a9b9b8d62fd
|
[
"Apache-2.0"
] |
permissive
|
onnx/sklearn-onnx
|
0f958e1c090572fbe11e15f95bec975d1780cf8d
|
895c3a76a315c7a6567a1a07a96dc658994ec16a
|
refs/heads/main
| 2023-08-18T18:49:25.164433
| 2023-08-17T09:52:31
| 2023-08-17T09:52:31
| 162,340,939
| 455
| 92
|
Apache-2.0
| 2023-08-31T16:04:13
| 2018-12-18T20:18:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
multioutput.py
|
# SPDX-License-Identifier: Apache-2.0
from ..common._registration import register_shape_calculator
from ..common.utils import check_input_and_output_numbers
from ..common.data_types import SequenceType
_stack = []
def multioutput_regressor_shape_calculator(operator):
"""Shape calculator for MultiOutputRegressor"""
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
i = operator.inputs[0]
o = operator.outputs[0]
N = i.get_first_dimension()
C = len(operator.raw_operator.estimators_)
o.type = o.type.__class__([N, C])
def multioutput_classifier_shape_calculator(operator):
"""Shape calculator for MultiOutputClassifier"""
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=2)
if not isinstance(operator.outputs[1].type, SequenceType):
        raise RuntimeError(
            "Probabilities should be a sequence, not %r." % operator.outputs[1].type
        )
i = operator.inputs[0]
outputs = operator.outputs
N = i.get_first_dimension()
C = len(operator.raw_operator.estimators_)
outputs[0].type.shape = [N, C]
register_shape_calculator(
"SklearnMultiOutputRegressor", multioutput_regressor_shape_calculator
)
register_shape_calculator(
"SklearnMultiOutputClassifier", multioutput_classifier_shape_calculator
)
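# Worked example (added for clarity; the numbers are illustrative, not from this file):
# for a fitted MultiOutputRegressor wrapping 3 estimators and an input typed as [N, 4],
# multioutput_regressor_shape_calculator rewrites the output type to [N, 3] -- one column
# per wrapped estimator. The classifier variant similarly sets the label output shape to
# [N, 3] and requires the per-class probabilities to remain a sequence output.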
|
f7a273d9c56d00c7b13eba59989148da3fa0664b
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowLispRegistrationHistory/cli/equal/golden_output2_expected.py
|
7fbbd8c7b78ede41244663a7f42a52dfd774a866
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,370
|
py
|
golden_output2_expected.py
|
expected_output = {
'lisp_id': {
0: {
'eid_address': {
'0.0.0.0/0': [{
'time': 'Dec 1 09:34:15.025',
'instance_id': 16777214,
'protocol': 'UDP',
'roam': 'No',
'wlc': 'No',
'source': '12.1.1.6',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
}, {
'time': 'Dec 1 09:34:17.164',
'instance_id': 16777214,
'protocol': 'UDP',
'roam': 'Yes',
'wlc': 'No',
'source': '11.1.2.2',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
},{
'time': 'Dec 1 09:34:17.164',
'instance_id': 16777214,
'protocol': 'UDP',
'roam': 'Yes',
'wlc': 'No',
'source': '10.1.1.1',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
},{
'time': 'Dec 1 09:34:17.168',
'instance_id': 16777214,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
},{
'time': 'Dec 1 09:34:20.769',
'instance_id': 16777214,
'protocol': 'TCP',
'roam': 'Yes',
'wlc': 'No',
'source': '66.66.66.66',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
}, {
'time': 'Dec 1 09:34:21.286',
'instance_id': 16777214,
'protocol': 'TCP',
'roam': 'Yes',
'wlc': 'No',
'source': '22.22.22.22',
'reg_type': '+',
'eid': '0.0.0.0',
'mask': 0
}],
'aabb.cc00.c901/48': [{
'time': 'Dec 1 09:34:17.164',
'instance_id': 101,
'protocol': 'UDP',
'roam': 'No',
'wlc': 'No',
'source': '10.1.1.1',
'reg_type': '+',
'eid': 'aabb.cc00.c901',
'mask': 48
}, {
'time': 'Dec 1 09:34:17.168',
'instance_id': 101,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': 'aabb.cc00.c901',
'mask': 48
}],
'192.168.1.71/32': [{
'time': 'Dec 1 09:34:17.164',
'instance_id': 4100,
'protocol': 'UDP',
'roam': 'No',
'wlc': 'No',
'source': '10.1.1.1',
'reg_type': '+',
'eid': '192.168.1.71',
'mask': 32
},{
'time': 'Dec 1 09:34:17.168',
'instance_id': 4100,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': '192.168.1.71',
'mask': 32
}, {
'time': 'Dec 1 09:34:17.168',
'instance_id': 101,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': '192.168.1.71',
'mask': 32
}],
'2001:192:168:1::71/128': [{
'time': 'Dec 1 09:34:17.164',
'instance_id': 4100,
'protocol': 'UDP',
'roam': 'No',
'wlc': 'No',
'source': '10.1.1.1',
'reg_type': '+',
'eid': '2001:192:168:1::71',
'mask': 128
},{
'time': 'Dec 1 09:34:17.168',
'instance_id': 4100,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': '2001:192:168:1::71',
'mask': 128
},{
'time': 'Dec 1 09:34:17.168',
'instance_id': 101,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': '2001:192:168:1::71',
'mask': 128
}],
'FE80::A8BB:CCFF:FE00:C901/128': [{
'time': 'Dec 1 09:34:17.168',
'instance_id': 101,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '11.11.11.11',
'reg_type': '+',
'eid': 'FE80::A8BB:CCFF:FE00:C901',
'mask': 128
}],
'194.168.1.72/32': [{
'time': 'Dec 1 09:42:03.972',
'instance_id': 4100,
'protocol': 'TCP',
'roam': 'No',
'wlc': 'No',
'source': '22.22.22.22',
'reg_type': '+',
'eid': '194.168.1.72',
'mask': 32
}]
}
}
}
}
|
12755759edfda56e91cdc52220e93b6b48ae5d1d
|
9cdd1751bc27310f486427aaaae901ca06b79003
|
/tests/custom_cluster/test_breakpad.py
|
38c8275889423162dde2bea549b79a902a2d367e
|
[
"Apache-2.0",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-google-patent-license-webrtc",
"PSF-2.0",
"BSD-3-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-mit-modification-obligations",
"Minpack",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/impala
|
bb9970c64a43824950ec5b69f2ef4b50158a1e8a
|
b718d63860356a04814e07d91711c3c748b3e769
|
refs/heads/master
| 2023-09-03T04:29:12.639452
| 2023-06-07T23:51:15
| 2023-08-30T04:56:51
| 56,128,733
| 985
| 475
|
Apache-2.0
| 2023-08-31T14:15:44
| 2016-04-13T07:00:08
|
C++
|
UTF-8
|
Python
| false
| false
| 21,154
|
py
|
test_breakpad.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from builtins import range
import glob
import os
import psutil
import pytest
import shutil
import tempfile
import time
from resource import setrlimit, RLIMIT_CORE, RLIM_INFINITY
from signal import SIGSEGV, SIGKILL, SIGUSR1, SIGTERM
from subprocess import CalledProcessError
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import SkipIfBuildType
DAEMONS = ['impalad', 'statestored', 'catalogd']
DAEMON_ARGS = ['impalad_args', 'state_store_args', 'catalogd_args']
class TestBreakpadBase(CustomClusterTestSuite):
"""Base class with utility methods for all breakpad tests."""
@classmethod
def get_workload(cls):
return 'functional-query'
def setup_method(self, method):
# Override parent
# The temporary directory gets removed in teardown_method() after each test.
self.tmp_dir = tempfile.mkdtemp()
def teardown_method(self, method):
# Override parent
# Stop the cluster to prevent future accesses to self.tmp_dir.
self.kill_cluster(SIGKILL)
assert self.tmp_dir
shutil.rmtree(self.tmp_dir)
@classmethod
def setup_class(cls):
super(TestBreakpadBase, cls).setup_class()
# Disable core dumps for this test
setrlimit(RLIMIT_CORE, (0, RLIM_INFINITY))
@classmethod
def teardown_class(cls):
# Re-enable core dumps
setrlimit(RLIMIT_CORE, (RLIM_INFINITY, RLIM_INFINITY))
def start_cluster_with_args(self, **kwargs):
cluster_options = []
for daemon_arg in DAEMON_ARGS:
daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.items())
cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
self._start_impala_cluster(cluster_options)
def start_cluster(self):
self.start_cluster_with_args(minidump_path=self.tmp_dir)
def kill_cluster(self, signal):
self.cluster.refresh()
processes = self.cluster.impalads + [self.cluster.catalogd, self.cluster.statestored]
processes = [_f for _f in processes if _f]
self.kill_processes(processes, signal)
    if signal is not SIGUSR1:
      self.assert_all_processes_killed()
def kill_processes(self, processes, signal):
for process in processes:
process.kill(signal)
    if signal is not SIGUSR1:
      self.wait_for_all_processes_dead(processes)
def wait_for_all_processes_dead(self, processes, timeout=300):
for process in processes:
try:
# For every process in the list we might see the original Impala process plus a
# forked off child that is writing the minidump. We need to catch both.
for pid in process.get_pids():
print("Checking pid %s" % pid)
psutil_process = psutil.Process(pid)
psutil_process.wait(timeout)
except psutil.NoSuchProcess:
# Process has exited in the meantime
pass
except psutil.TimeoutExpired:
raise RuntimeError("Unable to kill %s (pid %d) after %d seconds." %
(psutil_process.name, psutil_process.pid, timeout))
def get_num_processes(self, daemon):
self.cluster.refresh()
if daemon == 'impalad':
return len(self.cluster.impalads)
elif daemon == 'catalogd':
      return 1 if self.cluster.catalogd else 0
elif daemon == 'statestored':
      return 1 if self.cluster.statestored else 0
raise RuntimeError("Unknown daemon name: %s" % daemon)
def wait_for_num_processes(self, daemon, num_expected, timeout=30):
end = time.time() + timeout
self.cluster.refresh()
num_processes = self.get_num_processes(daemon)
while num_processes != num_expected and time.time() <= end:
time.sleep(1)
num_processes = self.get_num_processes(daemon)
return num_processes
def assert_all_processes_killed(self):
self.cluster.refresh()
assert not self.cluster.impalads
assert not self.cluster.statestored
assert not self.cluster.catalogd
def count_minidumps(self, daemon, base_dir=None):
base_dir = base_dir or self.tmp_dir
path = os.path.join(base_dir, daemon)
return len(glob.glob("%s/*.dmp" % path))
def count_all_minidumps(self, base_dir=None):
return sum((self.count_minidumps(daemon, base_dir) for daemon in DAEMONS))
def assert_num_minidumps_for_all_daemons(self, cluster_size, base_dir=None):
self.assert_num_logfile_entries(1)
assert self.count_minidumps('impalad', base_dir) == cluster_size
assert self.count_minidumps('statestored', base_dir) == 1
assert self.count_minidumps('catalogd', base_dir) == 1
def assert_num_logfile_entries(self, expected_count):
self.assert_impalad_log_contains('INFO', 'Wrote minidump to ',
expected_count=expected_count)
self.assert_impalad_log_contains('ERROR', 'Wrote minidump to ',
expected_count=expected_count)
self.assert_impalad_log_contains('INFO', 'Minidump with no thread info available.',
expected_count=expected_count)
self.assert_impalad_log_contains('ERROR', 'Minidump with no thread info available.',
expected_count=expected_count)
class TestBreakpadCore(TestBreakpadBase):
"""Core tests to check that the breakpad integration into the daemons works as
  expected. This includes writing a minidump when the daemons call abort(). Add tests here
that depend on functionality of Impala other than the breakpad integration itself.
"""
@pytest.mark.execute_serially
def test_abort_writes_minidump(self):
"""Check that abort() (e.g. hitting a DCHECK macro) writes a minidump."""
assert self.count_all_minidumps() == 0
failed_to_start = False
try:
# Calling with an unresolvable hostname will abort.
self.start_cluster_with_args(minidump_path=self.tmp_dir,
hostname="jhzvlthd")
except CalledProcessError:
failed_to_start = True
assert failed_to_start
# Don't check for minidumps until all processes have gone away so that
# the state of the cluster is not in flux.
self.wait_for_num_processes('impalad', 0)
assert self.count_minidumps('impalad') > 0
class TestBreakpadExhaustive(TestBreakpadBase):
"""Exhaustive tests to check that the breakpad integration into the daemons works as
expected. This includes writing minidump files on unhandled signals and rotating old
minidumps on startup.
"""
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('These breakpad tests only run in exhaustive')
super(TestBreakpadExhaustive, cls).setup_class()
@pytest.mark.execute_serially
def test_minidump_creation(self):
"""Check that when a daemon crashes, it writes a minidump file."""
assert self.count_all_minidumps() == 0
self.start_cluster()
assert self.count_all_minidumps() == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_minidumps_for_all_daemons(cluster_size)
@pytest.mark.execute_serially
def test_sigusr1_writes_minidump(self):
"""Check that when a daemon receives SIGUSR1, it writes a minidump file."""
assert self.count_all_minidumps() == 0
self.start_cluster()
assert self.count_all_minidumps() == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGUSR1)
# Breakpad forks to write its minidump files, wait for all the clones to terminate.
assert self.wait_for_num_processes('impalad', cluster_size) == cluster_size
assert self.wait_for_num_processes('catalogd', 1) == 1
assert self.wait_for_num_processes('statestored', 1) == 1
# Make sure impalad still answers queries.
client = self.create_impala_client()
self.execute_query_expect_success(client, "SELECT COUNT(*) FROM functional.alltypes")
# Kill the cluster. Sending SIGKILL will not trigger minidumps to be written.
self.kill_cluster(SIGKILL)
self.assert_num_minidumps_for_all_daemons(cluster_size)
@pytest.mark.execute_serially
def test_sigusr1_doesnt_kill(self):
"""Check that when minidumps are disabled and a daemon receives SIGUSR1, it does not
die.
"""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(enable_minidumps=False)
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGUSR1)
# Check that no minidumps have been written.
self.assert_num_logfile_entries(0)
assert self.count_all_minidumps() == 0
# Check that all daemons are still alive.
assert self.get_num_processes('impalad') == cluster_size
assert self.get_num_processes('catalogd') == 1
assert self.get_num_processes('statestored') == 1
@pytest.mark.execute_serially
def test_sigterm_no_minidumps(self):
"""Check that when a SIGTERM is caught, no minidump file is written.
After receiving SIGTERM there should be no impalad/catalogd/statestored
running.
"""
assert self.count_all_minidumps() == 0
self.start_cluster()
cluster_size = self.get_num_processes('impalad')
assert self.count_all_minidumps() == 0
# impalad/catalogd/statestored should be running.
assert cluster_size > 0
assert self.get_num_processes('catalogd') == 1
assert self.get_num_processes('statestored') == 1
# There should be no SIGTERM message in the log
# when the system starts.
self.assert_impalad_log_contains('INFO', 'Caught signal: SIGTERM. Daemon will exit',
expected_count=0)
self.kill_cluster(SIGTERM)
# There should be no impalad/catalogd/statestored running.
# There should be no minidump generated.
assert self.get_num_processes('impalad') == 0
assert self.get_num_processes('catalogd') == 0
assert self.get_num_processes('statestored') == 0
assert self.count_all_minidumps() == 0
uid = os.getuid()
# There should be a SIGTERM message in the log now
# since we raised one above.
log_str = 'Caught signal: SIGTERM. Daemon will exit.'
self.assert_impalad_log_contains('INFO', log_str, expected_count=1)
@pytest.mark.execute_serially
def test_minidump_relative_path(self):
"""Check that setting 'minidump_path' to a relative value results in minidump files
written to 'log_dir'.
"""
minidump_base_dir = os.path.join(os.environ.get('LOG_DIR', '/tmp'), 'minidumps')
shutil.rmtree(minidump_base_dir, ignore_errors=True)
# Omitting minidump_path as a parameter to the cluster will choose the default
# configuration, which is a FLAGS_log_dir/minidumps.
self.start_cluster_with_args()
assert self.count_all_minidumps(minidump_base_dir) == 0
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_minidumps_for_all_daemons(cluster_size, minidump_base_dir)
shutil.rmtree(minidump_base_dir)
@pytest.mark.execute_serially
def test_minidump_cleanup(self):
"""Check that a limited number of minidumps is preserved during startup."""
assert self.count_all_minidumps() == 0
self.start_cluster()
cluster_size = self.get_num_processes('impalad')
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(1)
# Maximum number of minidumps that the impalads should keep for this test.
max_minidumps = 2
self.start_cluster_with_args(minidump_path=self.tmp_dir,
max_minidumps=max_minidumps,
logbufsecs=1)
# Wait for log maintenance thread to clean up minidumps asynchronously.
start = time.time()
expected_impalad_minidumps = min(cluster_size, max_minidumps)
while (self.count_minidumps('impalad') != expected_impalad_minidumps
and time.time() - start < 10):
time.sleep(0.1)
assert self.count_minidumps('impalad') == expected_impalad_minidumps
assert self.count_minidumps('statestored') == 1
assert self.count_minidumps('catalogd') == 1
@pytest.mark.execute_serially
def test_minidump_cleanup_thread(self):
"""Check that periodic rotation preserves a limited number of minidumps."""
assert self.count_all_minidumps() == 0
# Maximum number of minidumps that the impalads should keep for this test.
max_minidumps = 2
# Sleep interval for the log rotation thread.
rotation_interval = 1
self.start_cluster_with_args(minidump_path=self.tmp_dir,
max_minidumps=max_minidumps,
logbufsecs=rotation_interval)
cluster_size = self.get_num_processes('impalad')
# We trigger several rounds of minidump creation to make sure that all daemons wrote
# enough files to trigger rotation.
for i in range(max_minidumps + 1):
self.kill_cluster(SIGUSR1)
# Breakpad forks to write its minidump files, sleep briefly to allow the forked
# processes to start.
time.sleep(1)
# Wait for all the clones to terminate.
assert self.wait_for_num_processes('impalad', cluster_size) == cluster_size
assert self.wait_for_num_processes('catalogd', 1) == 1
assert self.wait_for_num_processes('statestored', 1) == 1
self.assert_num_logfile_entries(i + 1)
# Sleep long enough for log cleaning to take effect.
time.sleep(rotation_interval + 1)
assert self.count_minidumps('impalad') == min(cluster_size, max_minidumps)
assert self.count_minidumps('statestored') == max_minidumps
assert self.count_minidumps('catalogd') == max_minidumps
@pytest.mark.execute_serially
def test_disable_minidumps(self):
"""Check that setting enable_minidumps to false disables minidump creation."""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(enable_minidumps=False)
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(0)
@pytest.mark.execute_serially
def test_empty_minidump_path_disables_breakpad(self):
"""Check that setting the minidump_path to an empty value disables minidump creation.
"""
assert self.count_all_minidumps() == 0
self.start_cluster_with_args(minidump_path='')
self.kill_cluster(SIGSEGV)
self.assert_num_logfile_entries(0)
def trigger_single_minidump_and_get_size(self):
"""Kill a single impalad with SIGSEGV to make it write a minidump. Kill the rest of
the cluster. Clean up the single minidump file and return its size.
"""
self.cluster.refresh()
assert self.get_num_processes('impalad') > 0
# Make one impalad write a minidump.
self.kill_processes(self.cluster.impalads[:1], SIGSEGV)
# Kill the rest of the cluster.
self.kill_cluster(SIGKILL)
assert self.count_minidumps('impalad') == 1
    # Get file size of that minidump.
path = os.path.join(self.tmp_dir, 'impalad')
minidump_file = glob.glob("%s/*.dmp" % path)[0]
minidump_size = os.path.getsize(minidump_file)
os.remove(minidump_file)
assert self.count_all_minidumps() == 0
return minidump_size
@pytest.mark.execute_serially
def test_limit_minidump_size(self):
"""Check that setting the 'minidump_size_limit_hint_kb' to a small value will reduce
the minidump file size.
"""
assert self.count_all_minidumps() == 0
# Generate minidump with default settings.
self.start_cluster()
full_minidump_size = self.trigger_single_minidump_and_get_size()
# Start cluster with limited minidump file size, we use a very small value, to ensure
# the resulting minidump will be as small as possible.
self.start_cluster_with_args(minidump_path=self.tmp_dir,
minidump_size_limit_hint_kb=1)
reduced_minidump_size = self.trigger_single_minidump_and_get_size()
# Check that the minidump file size has been reduced.
assert reduced_minidump_size < full_minidump_size
class TestLogging(TestBreakpadBase):
"""Exhaustive tests to check that impala log is rolled periodically, obeying
max_log_size and max_log_files, even in the presence of heavy stderr writing.
"""
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('These logging tests only run in exhaustive')
super(TestLogging, cls).setup_class()
def start_cluster_with_args(self, cluster_size, log_dir, **kwargs):
cluster_options = []
for daemon_arg in DAEMON_ARGS:
daemon_options = " ".join("-{0}={1}".format(k, v) for k, v in kwargs.items())
cluster_options.append("--{0}={1}".format(daemon_arg, daemon_options))
self._start_impala_cluster(cluster_options, cluster_size=cluster_size,
expected_num_impalads=cluster_size, impala_log_dir=log_dir)
def assert_logs(self, daemon, max_count, max_bytes):
"""Assert that there are at most 'max_count' of INFO + ERROR log files for the
specified daemon and the individual file size does not exceed 'max_bytes'.
Also assert that stdout/stderr are redirected to correct file on each rotation."""
log_dir = self.tmp_dir
log_paths = glob.glob("%s/%s*log.ERROR.*" % (log_dir, daemon)) \
+ glob.glob("%s/%s*log.INFO.*" % (log_dir, daemon))
assert len(log_paths) <= max_count
# group log_paths by pid and kind
log_group = {}
for path in sorted(log_paths):
tok = path.split('.')
key = tok[-1] + '.' + tok[-3] # pid + kind
if key in log_group:
log_group[key].append(path)
else:
log_group[key] = [path]
for key, paths in log_group.items():
for i in range(0, len(paths)):
try:
curr_path = paths[i]
# check log size
log_size = os.path.getsize(curr_path)
          assert log_size <= max_bytes, "{} exceeds {} bytes".format(curr_path, max_bytes)
if i < len(paths) - 1:
# check that we print the next_path in last line of this log file
next_path = paths[i + 1]
with open(curr_path, 'rb') as f:
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode()
assert next_path in last_line
except OSError:
# The daemon might delete the log in the middle of assertion.
# In that case, do nothing and move on.
pass
def silent_remove(self, filename):
try:
os.remove(filename)
except OSError:
pass
  def start_excessive_cerr_cluster(self, test_cluster_size=1, remove_symlink=False):
    """Check that the impalad log keeps being rotated when most writing activity comes
    from the stderr stream.
Along with LogFaultInjectionThread in init.cc, this test will fill impalad error logs
with approximately 128kb error messages per second."""
test_logbufsecs = 3
test_max_log_files = 2
test_max_log_size = 1 # 1 MB
test_error_msg = ('123456789abcde_' * 64) # 1 KB error message
test_debug_actions = 'LOG_MAINTENANCE_STDERR:FAIL@1.0@' + test_error_msg
daemon = 'impalad'
os.chmod(self.tmp_dir, 0o744)
expected_log_max_bytes = int(1.2 * 1024**2) # 1.2 MB
self.assert_logs(daemon, 0, expected_log_max_bytes)
self.start_cluster_with_args(test_cluster_size, self.tmp_dir,
logbufsecs=test_logbufsecs,
max_log_files=test_max_log_files,
max_log_size=test_max_log_size,
debug_actions=test_debug_actions)
self.wait_for_num_processes(daemon, test_cluster_size, 30)
# Count both INFO and ERROR logs
expected_log_max_count = test_max_log_files * test_cluster_size * 2
# Wait for log maintenance thread to flush and rotate the logs asynchronously.
start = time.time()
while (time.time() - start < 40):
time.sleep(1)
self.assert_logs(daemon, expected_log_max_count, expected_log_max_bytes)
if (remove_symlink):
pattern = self.tmp_dir + '/' + daemon + '*'
symlinks = glob.glob(pattern + '.INFO') + glob.glob(pattern + '.ERROR')
for symlink in symlinks:
self.silent_remove(symlink)
@pytest.mark.execute_serially
def test_excessive_cerr(self):
"""Test excessive cerr activity with single node cluster."""
self.start_excessive_cerr_cluster()
@pytest.mark.execute_serially
def test_excessive_cerr_no_symlink(self):
"""Test excessive cerr activity with two node cluster and missing log symlinks."""
self.start_excessive_cerr_cluster(2, True)
|
dda3e65053667f2ee342be907bbed69ccf07869c
|
10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8
|
/tests/unit/hook_packages/terraform/hooks/prepare/test_types.py
|
16de4bd21e2bf89aa5b7645695350fb17efa2b0b
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] |
permissive
|
aws/aws-sam-cli
|
6d4411aacf7f861e75e5cf4882a32858797a276d
|
b297ff015f2b69d7c74059c2d42ece1c29ea73ee
|
refs/heads/develop
| 2023-08-30T23:28:36.179932
| 2023-08-30T21:58:26
| 2023-08-30T21:58:26
| 92,205,085
| 1,402
| 470
|
Apache-2.0
| 2023-09-14T21:14:23
| 2017-05-23T18:16:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
test_types.py
|
"""Test types"""
from unittest import TestCase
from unittest.mock import patch, Mock
from samcli.hook_packages.terraform.hooks.prepare.types import (
ResourceProperties,
TFResource,
ResourceTranslationProperties,
)
class TestLambdaLayerVersionProperties(TestCase):
@patch("samcli.hook_packages.terraform.hooks.prepare.types.get_configuration_address")
def test_collect(self, mock_get_configuration_address):
class TestingProperties(ResourceProperties):
def __init__(self):
super(TestingProperties, self).__init__()
module_mock = Mock()
mock_get_configuration_address.side_effect = ["address_a", "address_a", "address_b"]
testing_resource_a_tf_config = {
"address": "address_b",
"mode": "managed",
"type": "testing_type",
"values": {
"prop": "value",
},
}
testing_resource_b_tf_config = {
"address": "address_b",
"mode": "managed",
"type": "testing_type",
"values": {
"prop": "value",
},
}
config_a = TFResource(address="address_a", type="testing_type", module=module_mock, attributes={})
dummy_properties = ResourceTranslationProperties(
resource=testing_resource_a_tf_config,
translated_resource={"cfn_resource": "a"},
config_resource=config_a,
logical_id="my_testing_resource_a",
resource_full_address=Mock(),
)
testing_properties = TestingProperties()
testing_properties.collect(dummy_properties)
self.assertEqual(testing_properties.terraform_config["address_a"], config_a)
dummy_properties = ResourceTranslationProperties(
resource=testing_resource_b_tf_config,
translated_resource={"cfn_resource": "b"},
config_resource=config_a,
logical_id="my_testing_resource_b",
resource_full_address=Mock(),
)
testing_properties.collect(dummy_properties)
config_b = TFResource(address="address_b", type="testing_type", module=module_mock, attributes={})
dummy_properties = ResourceTranslationProperties(
resource=testing_resource_b_tf_config,
translated_resource={"cfn_resource": "c"},
config_resource=config_b,
logical_id="my_testing_resource_c",
resource_full_address=Mock(),
)
testing_properties.collect(dummy_properties)
self.assertEqual(testing_properties.terraform_config["address_a"], config_a)
self.assertEqual(testing_properties.terraform_config["address_b"], config_b)
self.assertEqual(testing_properties.cfn_resources["address_a"], [{"cfn_resource": "a"}, {"cfn_resource": "b"}])
self.assertEqual(testing_properties.cfn_resources["address_b"], [{"cfn_resource": "c"}])
self.assertEqual(
testing_properties.terraform_resources,
{
"my_testing_resource_a": testing_resource_a_tf_config,
"my_testing_resource_b": testing_resource_b_tf_config,
"my_testing_resource_c": testing_resource_b_tf_config,
},
)
|
2bf537e0a7a4ab9339a894c9ea56b7573a8266a7
|
8b7d9d684b0dcf38626ec63d0d809327fce6196e
|
/tests/test_reporter_promethues.py
|
ba2cdf589f97f286999612dad692fdfa9d6b65fa
|
[
"MIT"
] |
permissive
|
run-ai/runai
|
917b274446ef38710e39b5d527a71c5b18cca3af
|
3bab5e23730d76d655ae3d9143d8e78a2b5cb0c7
|
refs/heads/master
| 2022-12-06T03:27:32.915727
| 2022-11-22T09:48:02
| 2022-11-22T09:48:02
| 205,650,825
| 122
| 17
|
MIT
| 2021-02-02T12:10:12
| 2019-09-01T08:42:13
|
Python
|
UTF-8
|
Python
| false
| false
| 5,807
|
py
|
test_reporter_promethues.py
|
import os
import string
import unittest
from runai.utils import Hook, random
from runai.reporter import report_promethues, Reporter
def random_args():
reporter_name = random.string(chars=string.ascii_letters + string.digits)
reporter_value = random.number()
report_type = random.choice([_ for _ in report_promethues.ReportType])
return (reporter_name, reporter_value, report_type)
def pid_exists(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
class Mock(Hook):
def __init__(self, error=None):
super(Mock, self).__init__(report_promethues, 'pushadd_to_gateway')
self._error = error
self.count = 0
def __hook__(self, *args, **kwargs):
self.count += 1
if self._error:
raise self._error()
class ReporterPromethuesBaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ["podUUID"] = random.string()
os.environ["reporterGatewayURL"] = random.string(chars=string.ascii_letters)
@classmethod
def tearDownClass(cls):
del os.environ["podUUID"]
del os.environ["reporterGatewayURL"]
def tearDown(self):
report_promethues.push.failures = 0
class ReporterPromethuesPushTest(ReporterPromethuesBaseTest):
def _push(self):
args = random_args()
report_promethues.push(*args)
def testInitialization(self):
self.assertEqual(report_promethues.push.failures, 0)
def testSanity(self):
with Mock() as mock:
for i in range(random.number(2, 20)):
self.assertEqual(mock.count, i)
self._push()
self.assertEqual(mock.count, i + 1)
def testIOError(self):
with Mock(IOError):
try:
self._push()
except:
self.fail('IOError was not excepted')
def testNonIOError(self):
for error in [ImportError, IndexError, KeyError, ValueError]:
with Mock(error):
with self.assertRaises(error):
self._push()
def testErrorCached(self):
with Mock(IOError) as mock:
for i in range(report_promethues.RETRIES):
self.assertEqual(mock.count, i)
self._push()
self.assertEqual(mock.count, i + 1)
for _ in range(random.number(2, 20)):
self._push()
self.assertEqual(mock.count, report_promethues.RETRIES)
class ReporterPromethuesReporterTest(ReporterPromethuesBaseTest):
def testCreationManual(self):
for daemon in [True, False]:
reporter = Reporter()
reporter.start(daemon=daemon)
self.assertTrue(pid_exists(reporter.pid))
reporter.finish()
self.assertFalse(pid_exists(reporter.pid))
def testCreationScope(self):
with Reporter() as reporter:
self.assertTrue(pid_exists(reporter.pid))
self.assertFalse(pid_exists(reporter.pid))
def testReportMetric(self):
with Mock():
with Reporter() as reporter:
reporter.reportMetric(random.string(chars=string.ascii_letters), random.number())
def testReportParameter(self):
with Mock():
with Reporter() as reporter:
reporter.reportParameter(random.string(chars=string.ascii_letters), random.number())
def testSend(self):
with Mock():
with Reporter() as reporter:
reporter.send(random_args())
def testSendIOError(self):
with Mock(IOError):
with Reporter() as reporter:
for _ in range(random.number(2, 20)):
reporter.send(random_args())
self.assertTrue(pid_exists(reporter.pid))
def testSendError(self):
for error in [IOError, ImportError, IndexError, KeyError, ValueError]:
with Mock(error):
reporter = Reporter()
reporter.start(daemon=False)
for _ in range(random.number(2, 20)):
reporter.send(random_args())
self.assertTrue(pid_exists(reporter.pid))
# `finish` should not fail even if the reporter failed
try:
reporter.finish()
except:
self.fail('`finish` was not expected to raise an error')
class ReporterPromethuesModuleTest(ReporterPromethuesBaseTest):
def tearDown(self):
super(ReporterPromethuesModuleTest, self).tearDown()
report_promethues.REPORTER = None
def testInitialization(self):
self.assertIsNone(report_promethues.REPORTER)
def testSanity(self):
with Mock():
self.assertIsNone(report_promethues.REPORTER)
report_promethues.send(*random_args())
self.assertIsNotNone(report_promethues.REPORTER)
self.assertTrue(report_promethues.REPORTER.daemon)
def testFinish(self):
for error in [None, ImportError, IndexError, KeyError, ValueError]:
with Mock(error):
for _ in range(random.number(2, 5)):
report_promethues.send(*random_args())
pid = report_promethues.REPORTER.pid
self.assertTrue(pid_exists(pid))
# `finish` should not fail even if the worker failed
try:
report_promethues.finish()
except:
self.fail('`finish` was not expected to raise an error')
self.assertFalse(pid_exists(pid))
self.assertIsNone(report_promethues.REPORTER)
if __name__ == '__main__':
unittest.main()
|
142b63d50c1670970673eabc9214edee17022ed4
|
dab10c721000fd9eb38676d6b2730f155eedd54e
|
/recirq/hfvqe/util_test.py
|
27f8841f843932411d478e45bf2a9d3bc0849361
|
[
"Apache-2.0"
] |
permissive
|
quantumlib/ReCirq
|
f45e55e432f2e29fb8f2fe35a3d436a629219e86
|
d021621a3837693ae9c5fdc5c05058de20fba314
|
refs/heads/master
| 2023-09-03T19:35:55.281836
| 2023-09-01T01:12:40
| 2023-09-01T01:12:40
| 246,951,354
| 260
| 116
|
Apache-2.0
| 2023-09-09T00:41:35
| 2020-03-12T23:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,496
|
py
|
util_test.py
|
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy as sp
from recirq.hfvqe.util import (generate_permutations, swap_forward,
generate_fswap_pairs, generate_fswap_unitaries)
def test_swap_forward():
list_to_swap = list(range(6))
test_swapped_list = swap_forward(list_to_swap, starting_index=0)
assert test_swapped_list == [1, 0, 3, 2, 5, 4]
test_swapped_list = swap_forward(list_to_swap, starting_index=1)
assert test_swapped_list == [0, 2, 1, 4, 3, 5]
def test_generate_fswap_pairs():
swap_set = generate_fswap_pairs(2, 6)
assert swap_set[0] == [(0, 1), (2, 3), (4, 5)]
assert swap_set[1] == [(1, 2), (3, 4)]
swap_set = generate_fswap_pairs(1, 4)
assert swap_set[0] == [(0, 1), (2, 3)]
def test_gen_fswap_unitaries():
fswapu = generate_fswap_unitaries([((0, 1), (2, 3))], 4)
true_generator = np.zeros((4, 4), dtype=np.complex128)
true_generator[0, 0], true_generator[1, 1] = -1, -1
true_generator[0, 1], true_generator[1, 0] = 1, 1
true_generator[2, 2], true_generator[3, 3] = -1, -1
true_generator[2, 3], true_generator[3, 2] = 1, 1
true_u = sp.linalg.expm(-1j * np.pi * true_generator / 2)
assert np.allclose(true_u, fswapu[0])
def test_permutation_generator():
perms = generate_permutations(3)
assert len(perms) == 2 # N//2+1 circuits
assert perms[0] == [0, 1, 2]
assert perms[1] == [1, 2, 0]
perms = generate_permutations(4)
assert len(perms) == 2 # N/2 circuits
assert perms[0] == [0, 1, 2, 3]
assert perms[1] == [1, 3, 0, 2]
perms = generate_permutations(6)
assert len(perms) == 3 # N/2 circuits
assert perms[0] == [0, 1, 2, 3, 4, 5]
assert perms[1] == [1, 3, 0, 5, 2, 4]
assert perms[2] == [3, 5, 1, 4, 0, 2]
perms = generate_permutations(4, no_truncation=True)
assert len(perms) == 5
assert perms[1] == [1, 0, 3, 2]
assert perms[3] == [3, 1, 2, 0]
|
952f560e350aa080e43ce275bb2827b09876a687
|
bfc42c114f652012b6cfd14e7cccf52cb6b9ac7e
|
/tests/spdx/parser/tagvalue/test_file_parser.py
|
aedf197b57c488860775061c08ef2cc380f5a76f
|
[
"Apache-2.0",
"GPL-2.0-only"
] |
permissive
|
spdx/tools-python
|
05a952501af2ac608678cb1737f7c661f6091fa2
|
777bd274dd06cb24342738df7da5ab285d652350
|
refs/heads/main
| 2023-08-31T09:39:52.930063
| 2023-08-24T06:39:48
| 2023-08-24T10:22:33
| 32,761,058
| 147
| 136
|
Apache-2.0
| 2023-09-14T15:50:59
| 2015-03-23T21:54:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
test_file_parser.py
|
# SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
import pytest
from spdx_tools.common.spdx_licensing import spdx_licensing
from spdx_tools.spdx.model import FileType, SpdxNoAssertion
from spdx_tools.spdx.parser.error import SPDXParsingError
from spdx_tools.spdx.parser.tagvalue.parser import Parser
from tests.spdx.parser.tagvalue.test_creation_info_parser import DOCUMENT_STR
def test_parse_file():
parser = Parser()
file_str = "\n".join(
[
"FileName: testfile.java",
"SPDXID: SPDXRef-File",
"FileType: SOURCE",
"FileType: TEXT",
"FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
"LicenseConcluded: Apache-2.0",
"LicenseInfoInFile: Apache-2.0",
"LicenseInfoInFile: NOASSERTION",
"FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>",
"FileComment: <text>Very long file</text>",
"FileAttributionText: <text>Acknowledgements that might be required to be communicated in some contexts."
"</text>",
]
)
document = parser.parse("\n".join([DOCUMENT_STR, file_str]))
assert document is not None
assert len(document.files) == 1
spdx_file = document.files[0]
assert spdx_file.name == "testfile.java"
assert spdx_file.spdx_id == "SPDXRef-File"
assert spdx_file.file_types == [FileType.SOURCE, FileType.TEXT]
assert spdx_file.comment == "Very long file"
assert spdx_file.attribution_texts == [
"Acknowledgements that might be required to be communicated in some contexts."
]
assert spdx_file.license_info_in_file == [spdx_licensing.parse("Apache-2.0"), SpdxNoAssertion()]
assert spdx_file.license_concluded == spdx_licensing.parse("Apache-2.0")
def test_parse_invalid_file():
parser = Parser()
file_str = "\n".join(
[
"FileName: testfile.java",
"SPDXID: SPDXRef-File",
"FileType: SOUCE",
"FileType: TEXT",
"FileChecksum: SHA3: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
"LicenseConcluded: Apache-2.0",
"LicenseInfoInFile: Apache-2.0",
"FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>",
"FileComment: <text>Very long file</text>",
"FileAttributionText: <text>Acknowledgements that might be required to be communicated in some contexts."
"</text>",
]
)
with pytest.raises(SPDXParsingError) as err:
parser.parse(file_str)
assert err.value.get_messages() == [
"Error while parsing File: ['Invalid FileType: SOUCE. Line 3', 'Error while "
"parsing FileChecksum: Token did not match specified grammar rule. Line: 5']"
]
|
872267c65a58b76c7a9f1dd6f56db73c611e5969
|
5d3f7da858b8bace0b047474aaa9b7f089544219
|
/scripts/helpers/manifest.py
|
d5cb9896173242377e77c4afb686a77b7fe41b72
|
[
"MIT"
] |
permissive
|
hacs/default
|
7e5225bab7c0f5dc19dc33559dc62ecbd81ddf3c
|
156c1ed27ec7106d76be9ea722c3baca92b85682
|
refs/heads/master
| 2023-08-31T00:13:49.869594
| 2023-08-27T09:26:47
| 2023-08-27T09:26:47
| 216,391,224
| 298
| 1,322
|
MIT
| 2023-09-14T10:39:52
| 2019-10-20T16:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 293
|
py
|
manifest.py
|
import json
import os
from glob import glob
from scripts.helpers.integration_path import get_integration_path
def get_manifest():
    manifest_path = f"{get_integration_path()}/manifest.json"
    with open(manifest_path, "r") as mf:
        manifest = json.loads(mf.read())
return manifest or {}
|
164b09cc48f459ef92bb708c84ed25499c1cdb51
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/mobile_auth/utils.py
|
c777d175ff5b15cfafa15a56a1590f6e1b13eb93
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
utils.py
|
import base64
import os
from datetime import datetime, timedelta
from django.utils.translation import gettext as _
from corehq.apps.mobile_auth.xml import AuthKeys, KeyRecord, OpenRosaResponse
def generate_aes_key():
# get 32 byte key
bin_key = os.urandom(32)
return base64.b64encode(bin_key)
def new_key_record(domain, user_id, now=None, valid=None):
"""
return initialized but unsaved MobileAuthKeyRecord
"""
from corehq.apps.mobile_auth.models import MobileAuthKeyRecord
now = now or datetime.utcnow()
valid = valid or now
record = MobileAuthKeyRecord(
domain=domain,
user_id=user_id,
valid=valid,
)
bump_expiry(record, now=now)
return record
def bump_expiry(record, now=None):
"""
initialize or extend expiry to after now
    in 30-day increments
"""
now = now or datetime.utcnow()
record.expires = record.expires or now
while record.expires <= now:
record.expires += timedelta(days=30)
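# Worked example for bump_expiry (added for clarity; the dates are illustrative):
# with record.expires == 2023-01-01 and now == 2023-02-15, the loop bumps expires to
# 2023-01-31 (still <= now) and then to 2023-03-02, the first 30-day step strictly
# after now.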
def get_mobile_auth_payload(key_records, domain, issued=None, now=None):
"""
formats a list of key record documents in the xml format outlined in
https://github.com/dimagi/commcare/wiki/CentralAuthAPI
makes sure to set xml object properties in a standard order
for ease of testing
"""
now = now or datetime.utcnow()
issued = issued or now
def _OpenRosaResponse():
x = OpenRosaResponse()
x.auth_keys = _auth_keys()
x.message = _('Here are your keys!')
return x
def _auth_keys():
x = AuthKeys(
key_records=list(_key_record())
)
x.domain = domain
x.issued = issued
return x
def _key_record():
for key_record in key_records:
x = KeyRecord()
for attr in ['valid', 'expires', 'uuid', 'type', 'key']:
setattr(x, attr, getattr(key_record, attr))
yield x
return _OpenRosaResponse().serializeDocument(pretty=True)
|
2f382fb3d6d7573ac4feb3ceddbcb293e1d579b9
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/tests/python/unittest/test_target_codegen_aarch64.py
|
e873bce52bdfac13eace870629ca3daf7d838ff7
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 16,454
|
py
|
test_target_codegen_aarch64.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as TIR
import re
import os
import ctypes
import pytest
from tvm.target.codegen import llvm_version_major
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_mul(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] * B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and mul instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"mul\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
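# For reference, assembly lines the patterns above are meant to match look roughly
# like the following (illustrative only; register numbers and predication vary):
#   ld1w { z0.s }, p0/z, [x1, x8, lsl #2]
#   mul     z1.s, p0/m, z1.s, z0.s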
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_add(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] + B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and add instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"add\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_sub(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] - B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and sub instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"sub\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_muladd(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.placeholder(m, dtype=type, name="C")
D = te.compute((m), lambda i: A[i] * B[i] + C[i], name="D")
s = te.create_schedule([D.op])
f = tvm.build(s, [A, B, C, D], target)
# Verify we see SVE load instructions and either mad or mla instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"mad|mla\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_max(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: tvm.te.max(A[i], B[i]))
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and cmgt + sel instructions or a max instruction, all using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
compare = re.findall(
r"cmgt\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
select = re.findall("sel\tz[0-9].[shdb], p[0-9], z[0-9].[shdb], z[0-9].[shdb]", assembly)
max = re.findall(
r"max\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert (len(compare) > 1 and len(select) == len(compare)) or len(max) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_min(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: tvm.te.min(A[i], B[i]))
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and cmgt + sel instructions or a min instruction, all using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
compare = re.findall(
r"cmgt\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
select = re.findall("sel\tz[0-9].[shdb], p[0-9], z[0-9].[shdb], z[0-9].[shdb]", assembly)
min = re.findall(
r"min\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert (len(compare) > 1 and len(select) == len(compare)) or len(min) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_div(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: tvm.te.div(A[i], B[i]))
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and div instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"div\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_mod(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: tvm.te.floormod(A[i], B[i]), name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and mls instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"mls\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 0
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_eq(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] == B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and cmpeq or cmeq instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"cm(p)?eq\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_neq(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] != B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and cmpgt, cmgt, cmpne or cmne instructions, all using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"cm(p)?(gt|ne)\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_or(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] | B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and orr instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"orr\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_and(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.compute((m), lambda i: A[i] & B[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see SVE load instructions and and instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"and\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_not(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
C = te.compute((m), lambda i: ~A[i], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, C], target)
# Verify we see SVE load instructions and eor instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"eor\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.xfail(
reason="Awaiting llvm support for gathered loads",
strict=True,
)
@pytest.mark.parametrize(
"dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_memcpy(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype="int32", name="B")
C = te.compute((m), lambda i: A[B[i]], name="C")
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], target)
# Verify we see gather instructions in the assembly
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
assert len(loads) > 0
check_correct_assembly(type=dtype)
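# The SVE tests above all follow one shape: build a small 1-D elementwise compute
# for the aarch64+SVE target, compile it, and grep the emitted assembly. A shared
# helper could look like the sketch below (illustrative only, not part of the
# upstream suite):
#
#   def sve_asm(compute_fn, dtype):
#       m = te.var("m")
#       A = te.placeholder(m, dtype=dtype, name="A")
#       B = te.placeholder(m, dtype=dtype, name="B")
#       C = te.compute((m), lambda i: compute_fn(A[i], B[i]), name="C")
#       f = tvm.build(te.create_schedule([C.op]), [A, B, C],
#                     "llvm -mtriple=aarch64-linux-gnu -mattr=+sve")
#       return f.get_source("asm")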
if __name__ == "__main__":
tvm.testing.main()
|
5b4f909d6c10e3a7d017edb51b1e9ffa01b9ee1e
|
248bdd698605a8b2b623fe82899eec15bc80b889
|
/third_party/python/python-hglib/tests/test-diff.py
|
4f4fcc87e0f183e870d8034800ebf33e530500b9
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Feodor2/Mypal68
|
64a6f8055cb22ae6183a3a018e1487a44e20886e
|
dc92ce6bcc8032b5311ffc4f9f0cca38411637b1
|
refs/heads/main
| 2023-08-31T00:31:47.840415
| 2023-08-26T10:26:15
| 2023-08-26T10:26:15
| 478,824,817
| 393
| 39
|
NOASSERTION
| 2023-06-23T04:53:57
| 2022-04-07T04:21:39
| null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
test-diff.py
|
from tests import common
from hglib.util import b
class test_diff(common.basetest):
def test_basic(self):
self.append('a', 'a\n')
self.client.add(b('a'))
diff1 = b("""diff -r 000000000000 a
--- /dev/null
+++ b/a
@@ -0,0 +1,1 @@
+a
""")
self.assertEquals(diff1, self.client.diff(nodates=True))
self.assertEquals(diff1, self.client.diff([b('a')], nodates=True))
rev0, node0 = self.client.commit(b('first'))
diff2 = b("""diff -r 000000000000 -r """) + node0[:12] + b(""" a
--- /dev/null
+++ b/a
@@ -0,0 +1,1 @@
+a
""")
self.assertEquals(diff2, self.client.diff(change=rev0, nodates=True))
self.append('a', 'a\n')
rev1, node1 = self.client.commit(b('second'))
diff3 = b("""diff -r """) + node0[:12] + b(""" a
--- a/a
+++ b/a
@@ -1,1 +1,2 @@
a
+a
""")
self.assertEquals(diff3, self.client.diff(revs=[rev0], nodates=True))
diff4 = b("""diff -r """) + node0[:12] + b(" -r ") + node1[:12] + b(
""" a
--- a/a
+++ b/a
@@ -1,1 +1,2 @@
a
+a
""")
self.assertEquals(diff4, self.client.diff(revs=[rev0, rev1],
nodates=True))
def test_basic_plain(self):
open('.hg/hgrc', 'a').write('[defaults]\ndiff=--git\n')
self.test_basic()
|
49ba31c5564f5eada973dd3dbe81581c43b471d7
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/logbook/common.py
|
9fe6c2b60a8a1d76f39d2968a99426fecf492e39
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,729
|
py
|
common.py
|
"""Tests for the logbook component."""
from __future__ import annotations
import json
from typing import Any
from homeassistant.components import logbook
from homeassistant.components.logbook import processor
from homeassistant.components.logbook.models import LogbookConfig
from homeassistant.components.recorder.models import (
process_timestamp_to_utc_isoformat,
ulid_to_bytes_or_none,
uuid_hex_to_bytes_or_none,
)
from homeassistant.core import Context
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.json import JSONEncoder
import homeassistant.util.dt as dt_util
class MockRow:
"""Minimal row mock."""
def __init__(
self,
event_type: str,
data: dict[str, Any] | None = None,
context: Context | None = None,
):
"""Init the fake row."""
self.event_type = event_type
self.event_data = json.dumps(data, cls=JSONEncoder)
self.data = data
self.time_fired = dt_util.utcnow()
self.time_fired_ts = dt_util.utc_to_timestamp(self.time_fired)
self.context_parent_id_bin = (
ulid_to_bytes_or_none(context.parent_id) if context else None
)
self.context_user_id_bin = (
uuid_hex_to_bytes_or_none(context.user_id) if context else None
)
self.context_id_bin = ulid_to_bytes_or_none(context.id) if context else None
self.state = None
self.entity_id = None
self.row_id = None
self.shared_attrs = None
self.attributes = None
self.context_only = False
@property
def time_fired_minute(self):
"""Minute the event was fired."""
return self.time_fired.minute
@property
def time_fired_isoformat(self):
"""Time event was fired in utc isoformat."""
return process_timestamp_to_utc_isoformat(self.time_fired)
def mock_humanify(hass_, rows):
"""Wrap humanify with mocked logbook objects."""
entity_name_cache = processor.EntityNameCache(hass_)
ent_reg = er.async_get(hass_)
event_cache = processor.EventCache({})
context_lookup = {}
logbook_config = hass_.data.get(logbook.DOMAIN, LogbookConfig({}, None, None))
external_events = logbook_config.external_events
logbook_run = processor.LogbookRun(
context_lookup,
external_events,
event_cache,
entity_name_cache,
include_entity_name=True,
format_time=processor._row_time_fired_isoformat,
)
context_augmenter = processor.ContextAugmenter(logbook_run)
return list(
processor._humanify(
rows,
ent_reg,
logbook_run,
context_augmenter,
),
)
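# Illustrative use in a test body (names are placeholders): wrap fake rows with
# MockRow and run them through the real logbook processor.
#   rows = [MockRow("some_custom_event", {"device_id": "abc123"})]
#   humanified = mock_humanify(hass, rows)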
|
632df301a1d467f290394e21c9279d5e60976675
|
9aae726a537662311d1126895b01286968654031
|
/setup.py
|
bfb8475c99a8534d6d986ffa423416864b824567
|
[
"MIT"
] |
permissive
|
pwnfoo/NTLMRecon
|
5e6047838a3ed4da729165ce629f24f8bf80577b
|
da150d6292e2a6bea298fda4621311e7347eefc6
|
refs/heads/master
| 2022-09-02T13:24:54.848843
| 2022-04-15T19:55:16
| 2022-04-15T19:55:16
| 225,114,697
| 180
| 37
|
MIT
| 2021-04-19T02:24:51
| 2019-12-01T06:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
setup.py
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ntlmrecon', # Required
version='0.4b0', # Required
description='A tool to enumerate information from NTLM authentication enabled web endpoints', # Optional
license='MIT',
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/sachinkamath/ntlmrecon', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Sachin S Kamath (@sachinkamath)', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='mail@skamath.me', # Optional
keywords='security recon redteam cybersecurity ntlm ntlmrecon', # Optional
package_dir={'': 'src'},
packages=find_packages(where='src'), # Required
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
install_requires=['requests', 'colorama', 'termcolor', 'iptools'], # TODO
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'ntlmrecon=ntlmrecon:main',
],
},
project_urls={ # Optional
'Bug Reports': 'https://github.com/sachinkamath/ntlmrecon/issues',
'Source': 'https://github.com/sachinkamath/ntlmrecon/',
},
)
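# Note (illustrative): once the package is installed (e.g. `pip install .`), the
# console_scripts entry point above exposes an `ntlmrecon` command that dispatches
# to ntlmrecon.main().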
|
da040705d6cf51f0eabcfc1887e0f323b0146635
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/tools/accuracy_checker/tests/test_segmentation_representation.py
|
00fcaeebb184a09492632a85caf8277a898f550d
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 12,376
|
py
|
test_segmentation_representation.py
|
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pytest
from .common import make_segmentation_representation, make_instance_segmentation_representation
from openvino.tools.accuracy_checker.utils import UnsupportedPackage
try:
import pycocotools.mask as maskUtils
except ImportError as import_error:
maskUtils = UnsupportedPackage("pycocotools", import_error.msg)
def no_available_pycocotools():
return isinstance(maskUtils, UnsupportedPackage)
def encode_mask(mask):
raw_mask = []
for elem in mask:
raw_mask.append(maskUtils.encode(np.asfortranarray(np.uint8(elem))))
return raw_mask
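# encode_mask wraps pycocotools run-length encoding; e.g. (illustrative, the
# exact counts bytes depend on the mask contents):
#   encode_mask([np.eye(2)]) -> [{'size': [2, 2], 'counts': b'...'}]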
class TestSegmentationRepresentation:
def test_to_polygon_annotation(self):
annotation = make_segmentation_representation(np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]), True)[0]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_annotation_with_colors_in_arg(self):
annotation = make_segmentation_representation(np.array(
[[[128, 128, 128], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [128, 128, 128], [0, 0, 0]]]), True)[0]
segmentation_colors = [[0, 0, 0], [128, 128, 128]]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = annotation.to_polygon(segmentation_colors)
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_annotation_with_colors_in_meta(self):
annotation = make_segmentation_representation(np.array(
[[[128, 128, 128], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [128, 128, 128], [0, 0, 0]]]), True)[0]
dataset_meta = {'segmentation_colors': [[0, 0, 0], [128, 128, 128]]}
annotation.metadata.update({
'dataset_meta': dataset_meta
})
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_annotation_with_colors_on_converted_annotation(self):
annotation = make_segmentation_representation(np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]), True)[0]
dataset_meta = {'segmentation_colors': [[0, 0, 0], [128, 128, 128]]}
annotation.metadata.update({
'dataset_meta': dataset_meta
})
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_annotation_without_colors(self):
annotation = make_segmentation_representation(np.array(
[[[128, 128, 128], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [0, 0, 0], [0, 0, 0]],
[[128, 128, 128], [128, 128, 128], [128, 128, 128], [0, 0, 0]]]), True)[0]
with pytest.raises(ValueError):
annotation.to_polygon()
def test_to_polygon_annotation_with_empty_mask(self):
annotation = make_segmentation_representation(np.array([]), True)[0]
with pytest.warns(Warning):
assert len(annotation.to_polygon()) == 0
def test_to_polygon_annotation_with_label_map_containing_not_all_classes(self):
annotation = make_segmentation_representation(np.array(
[[1, 0, 0, 0, 2], [1, 1, 0, 0, 2], [1, 1, 1, 0, 2]]), True)[0]
dataset_meta = {'label_map': {0: "background", 1: "triangle"}}
annotation.metadata.update({
'dataset_meta': dataset_meta
})
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
assert actual.get(2) is None
def test_to_polygon_prediction(self):
prediction = make_segmentation_representation(np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]), False)[0]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = prediction.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_prediction_with_argmax(self):
prediction = make_segmentation_representation(np.array(
[[[0.01, 0.99, 0.99, 0.99], [0.01, 0.01, 0.99, 0.99], [0.01, 0.01, 0.01, 0.99]],
[[0.99, 0.01, 0.01, 0.01], [0.99, 0.99, 0.01, 0.01], [0.99, 0.99, 0.99, 0.01]]]), False)[0]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = prediction.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_prediction_with_1_in_shape_channels_last(self):
prediction = make_segmentation_representation(np.array(
[[[1], [0], [0], [0]], [[1], [1], [0], [0]], [[1], [1], [1], [0]]]), False)[0]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = prediction.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_prediction_with_1_in_shape_channels_first(self):
prediction = make_segmentation_representation(np.array(
[[[1], [0], [0], [0]], [[1], [1], [0], [0]], [[1], [1], [1], [0]]]).reshape(1, 3, 4), False)[0]
expected = {
0: [np.array([[1, 0], [3, 0], [3, 2]])],
1: [np.array([[0, 0], [0, 2], [2, 2]])]}
actual = prediction.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
                assert np.array_equal(np.sort(actual_arr, axis=0), np.sort(expected_arr, axis=0))
def test_to_polygon_prediction_with_None_mask(self):
prediction = make_segmentation_representation(None, False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
def test_to_polygon_prediction_with_empty_mask(self):
prediction = make_segmentation_representation(np.array([]), False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
@pytest.mark.skipif(no_available_pycocotools(), reason='no installed pycocotools in the system')
class TestCoCoInstanceSegmentationRepresentation:
def test_to_polygon_annotation_mask_rle(self):
mask = [np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]),
np.array([[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1]])]
raw_mask = encode_mask(mask)
labels = [0, 1]
annotation = make_instance_segmentation_representation(raw_mask, labels, True)[0]
expected = {
1: [np.array([[[1, 0], [3, 0], [3, 2]]])],
0: [np.array([[[0, 0], [0, 2], [2, 2]]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
actual_arr = np.sort(actual_arr, axis=1)
expected_arr = np.sort(expected_arr, axis=1)
assert np.array_equal(actual_arr, expected_arr)
def test_to_polygon_annotation_mask_polygon(self):
mask = [np.array([[[0, 0], [0, 2], [2, 2]]]),
np.array([[[1, 0], [3, 0], [3, 2]]])]
labels = [0, 1]
annotation = make_instance_segmentation_representation(mask, labels, True)[0]
expected = {
1: [np.array([[[1, 0], [3, 0], [3, 2]]])],
0: [np.array([[[0, 0], [0, 2], [2, 2]]])]}
actual = annotation.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
actual_arr = np.sort(actual_arr, axis=1)
expected_arr = np.sort(expected_arr, axis=1)
assert np.array_equal(actual_arr, expected_arr)
def test_to_polygon_prediction(self):
mask = [np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]),
np.array([[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1]])]
raw_mask = encode_mask(mask)
labels = [0, 1]
prediction = make_instance_segmentation_representation(raw_mask, labels, False)[0]
expected = {
1: [np.array([[[1, 0], [3, 0], [3, 2]]])],
0: [np.array([[[0, 0], [0, 2], [2, 2]]])]}
actual = prediction.to_polygon()
for key in expected.keys():
assert actual[key]
for actual_arr, expected_arr in zip(actual[key], expected[key]):
actual_arr = np.sort(actual_arr, axis=1)
expected_arr = np.sort(expected_arr, axis=1)
assert np.array_equal(actual_arr, expected_arr)
def test_to_polygon_with_None_mask(self):
labels = [0, 1]
prediction = make_instance_segmentation_representation(None, labels, False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
def test_to_polygon_with_empty_mask(self):
labels = [0, 1]
prediction = make_instance_segmentation_representation([], labels, False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
def test_to_polygon_with_None_labels(self):
prediction = make_instance_segmentation_representation([np.array([[1, 0], [0, 1]])], None, False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
def test_to_polygon_with_empty_labels(self):
prediction = make_instance_segmentation_representation([np.array([[1, 0], [0, 1]])], [], False)[0]
with pytest.warns(Warning):
assert len(prediction.to_polygon()) == 0
|
0b158d37dff88a2c10d16d2243b8c008c7760f4b
|
98128b69fa7b3b663c32c817aa2da41cbf1b882b
|
/localization/pipeline/run_tests.py
|
cf5c3b04098411e8f55a2a4ceae4f86d452f6650
|
[
"Apache-2.0"
] |
permissive
|
NASA-AMMOS/MMGIS
|
7fb3cd47f5467127add14e6ea2ae11d2b9e65438
|
dc4d7ff352c5e1ebc14b4c902ca4ba69dc11f87d
|
refs/heads/master
| 2023-09-01T08:45:18.109634
| 2023-02-22T00:07:35
| 2023-02-22T00:07:35
| 178,263,007
| 103
| 29
|
Apache-2.0
| 2023-09-08T01:05:19
| 2019-03-28T18:42:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
run_tests.py
|
# discover and run all unit tests in project
import subprocess, sys
subprocess.check_call([sys.executable, "-m", "unittest", "discover", "-v"])
|
08be17a05377a6133b6af1c80f4d9552416e0237
|
89420cda57f03791a5448ed4eeb967d06a4aade3
|
/arviz/plots/compareplot.py
|
31953a3cee69dfdc7b4586c0adbbd3a6f551f7cb
|
[
"Apache-2.0"
] |
permissive
|
arviz-devs/arviz
|
fa2423e28f7a8c1b22986dbef317579c00744f75
|
24c260a0390d030e106943f21811652ea82aebc7
|
refs/heads/main
| 2023-09-03T12:22:12.075948
| 2023-07-18T22:29:35
| 2023-07-18T22:29:35
| 39,890,704
| 1,421
| 413
|
Apache-2.0
| 2023-09-13T15:53:19
| 2015-07-29T11:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,067
|
py
|
compareplot.py
|
"""Summary plot for model comparison."""
import numpy as np
from ..labels import BaseLabeller
from ..rcparams import rcParams
from .plot_utils import get_plotting_function
def plot_compare(
comp_df,
insample_dev=False,
plot_standard_error=True,
plot_ic_diff=True,
order_by_rank=True,
legend=True,
title=True,
figsize=None,
textsize=None,
labeller=None,
plot_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
r"""Summary plot for model comparison.
Models are compared based on their expected log pointwise predictive density (ELPD).
    This plot is in the style of the one used in [2]_ (chapter 6 in the first
    edition, chapter 7 in the second).
Notes
-----
The ELPD is estimated either by Pareto smoothed importance sampling leave-one-out
cross-validation (LOO) or using the widely applicable information criterion (WAIC).
We recommend LOO in line with the work presented by [1]_.
Parameters
----------
comp_df : pandas.DataFrame
Result of the :func:`arviz.compare` method.
insample_dev : bool, default False
Plot in-sample ELPD, that is the value of the information criteria without the
penalization given by the effective number of parameters (p_loo or p_waic).
plot_standard_error : bool, default True
Plot the standard error of the ELPD.
plot_ic_diff : bool, default True
Plot standard error of the difference in ELPD between each model
and the top-ranked model.
order_by_rank : bool, default True
        If True, ensure the best model is used as the reference.
legend : bool, default True
Add legend to figure.
figsize : (float, float), optional
If `None`, size is (6, num of models) inches.
title : bool, default True
        Show a title with a description of how to interpret the plot.
textsize : float, optional
Text size scaling factor for labels, titles and lines. If `None` it will be autoscaled based
on `figsize`.
labeller : Labeller, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
plot_kwargs : dict, optional
Optional arguments for plot elements. Currently accepts 'color_ic',
'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',
'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'
ax : matplotlib_axes or bokeh_figure, optional
Matplotlib axes or bokeh figure.
backend : {"matplotlib", "bokeh"}, default "matplotlib"
Select plotting backend.
backend_kwargs : bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :class:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib_axes or bokeh_figure
See Also
--------
plot_elpd : Plot pointwise elpd differences between two or more models.
compare : Compare models based on PSIS-LOO loo or WAIC waic cross-validation.
loo : Compute Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO-CV).
waic : Compute the widely applicable information criterion.
References
----------
.. [1] Vehtari et al. (2016). Practical Bayesian model evaluation using leave-one-out
cross-validation and WAIC https://arxiv.org/abs/1507.04544
.. [2] McElreath R. (2022). Statistical Rethinking A Bayesian Course with Examples in
R and Stan, Second edition, CRC Press.
Examples
--------
Show default compare plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'),
>>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')})
>>> az.plot_compare(model_compare)
    Include the in-sample ELPD
.. plot::
:context: close-figs
>>> az.plot_compare(model_compare, insample_dev=True)
"""
if plot_kwargs is None:
plot_kwargs = {}
if labeller is None:
labeller = BaseLabeller()
yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1, retstep=True)
yticks_pos[1::2] = yticks_pos[1::2] + step / 2
labels = [labeller.model_name_to_str(model_name) for model_name in comp_df.index]
if plot_ic_diff:
yticks_labels = [""] * len(yticks_pos)
yticks_labels[0] = labels[0]
yticks_labels[2::2] = labels[1:]
else:
yticks_labels = labels
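    # Illustrative layout: with three models and plot_ic_diff=True, yticks_pos has
    # five entries; even slots carry the model rows and odd slots (shifted by
    # step / 2) carry the ELPD-difference rows, so yticks_labels ends up like
    # ["model_a", "", "model_b", "", "model_c"] (model names are placeholders).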
_information_criterion = ["elpd_loo", "elpd_waic"]
column_index = [c.lower() for c in comp_df.columns]
for information_criterion in _information_criterion:
if information_criterion in column_index:
break
else:
raise ValueError(
"comp_df must contain one of the following "
f"information criterion: {_information_criterion}"
)
if order_by_rank:
comp_df.sort_values(by="rank", inplace=True)
compareplot_kwargs = dict(
ax=ax,
comp_df=comp_df,
legend=legend,
title=title,
figsize=figsize,
plot_ic_diff=plot_ic_diff,
plot_standard_error=plot_standard_error,
insample_dev=insample_dev,
yticks_pos=yticks_pos,
yticks_labels=yticks_labels,
plot_kwargs=plot_kwargs,
information_criterion=information_criterion,
textsize=textsize,
step=step,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_compare", "compareplot", backend)
ax = plot(**compareplot_kwargs)
return ax
|
782e4f9c891add4a96af3e4ecad13e37d9051a1b
|
6b97499b67fa3de10eb9449f4805100cfca0bbbd
|
/account/tests/test_decorators.py
|
06c622405c91621f0ed37b26bd8d594107909dfa
|
[
"MIT"
] |
permissive
|
pinax/django-user-accounts
|
c9b9b74bf14c1f117766b89829055c35cf794345
|
a69832facfb511ee9347fea7bc4303a8729c97a7
|
refs/heads/master
| 2023-08-17T06:51:55.779202
| 2023-02-09T09:22:00
| 2023-02-09T09:22:00
| 3,682,622
| 976
| 334
|
MIT
| 2023-09-14T18:07:35
| 2012-03-10T21:41:14
|
Python
|
UTF-8
|
Python
| false
| false
| 745
|
py
|
test_decorators.py
|
from unittest import mock
from django.http import HttpResponse
from django.test import TestCase
from account.decorators import login_required
@login_required
def mock_view(request, *args, **kwargs):
return HttpResponse("OK", status=200)
class LoginRequiredDecoratorTestCase(TestCase):
def test_authenticated_user_is_allowed(self):
request = mock.MagicMock()
request.user.is_authenticated = True
response = mock_view(request)
self.assertEqual(response.status_code, 200)
def test_unauthenticated_user_gets_redirected(self):
request = mock.MagicMock()
request.user.is_authenticated = False
response = mock_view(request)
self.assertEqual(response.status_code, 302)
|
8bb35de643af145428c4e3ab16e720137a20c333
|
993f18c21402d7a4ff21ddb7ff2ec6c80e466f20
|
/onnx/__init__.py
|
2f17b1ba805fb8a5bea4a023af6f352d3ea6dd7c
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnx
|
10d3916803c7babff89ec0fa9045127bcccad376
|
8a475b34cb3875df311a46f57571646498f5bda7
|
refs/heads/main
| 2023-08-18T18:50:03.388353
| 2023-08-16T22:18:46
| 2023-08-16T22:18:46
| 102,692,863
| 16,164
| 4,150
|
Apache-2.0
| 2023-09-14T17:10:38
| 2017-09-07T04:53:45
|
Python
|
UTF-8
|
Python
| false
| false
| 11,753
|
py
|
__init__.py
|
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
__all__ = [
# Constants
"ONNX_ML",
"IR_VERSION",
"IR_VERSION_2017_10_10",
"IR_VERSION_2017_10_30",
"IR_VERSION_2017_11_3",
"IR_VERSION_2019_1_22",
"IR_VERSION_2019_3_18",
"IR_VERSION_2019_9_19",
"IR_VERSION_2020_5_8",
"IR_VERSION_2021_7_30",
"EXPERIMENTAL",
"STABLE",
# Modules
"checker",
"compose",
"defs",
"gen_proto",
"helper",
"hub",
"mapping",
"numpy_helper",
"parser",
"printer",
"shape_inference",
"utils",
"version_converter",
# Proto classes
"AttributeProto",
"FunctionProto",
"GraphProto",
"MapProto",
"ModelProto",
"NodeProto",
"OperatorProto",
"OperatorSetIdProto",
"OperatorSetProto",
"OperatorStatus",
"OptionalProto",
"SequenceProto",
"SparseTensorProto",
"StringStringEntryProto",
"TensorAnnotation",
"TensorProto",
"TensorShapeProto",
"TrainingInfoProto",
"TypeProto",
"ValueInfoProto",
"Version",
# Utility functions
"convert_model_to_external_data",
"load_external_data_for_model",
"load_model_from_string",
"load_model",
"load_tensor_from_string",
"load_tensor",
"save_model",
"save_tensor",
"write_external_data_tensors",
]
# isort:skip_file
import os
import typing
from typing import IO, Literal, Union
from onnx import serialization
from onnx.onnx_cpp2py_export import ONNX_ML
from onnx.external_data_helper import (
load_external_data_for_model,
write_external_data_tensors,
convert_model_to_external_data,
)
from onnx.onnx_pb import (
AttributeProto,
EXPERIMENTAL,
FunctionProto,
GraphProto,
IR_VERSION,
IR_VERSION_2017_10_10,
IR_VERSION_2017_10_30,
IR_VERSION_2017_11_3,
IR_VERSION_2019_1_22,
IR_VERSION_2019_3_18,
IR_VERSION_2019_9_19,
IR_VERSION_2020_5_8,
IR_VERSION_2021_7_30,
ModelProto,
NodeProto,
OperatorSetIdProto,
OperatorStatus,
STABLE,
SparseTensorProto,
StringStringEntryProto,
TensorAnnotation,
TensorProto,
TensorShapeProto,
TrainingInfoProto,
TypeProto,
ValueInfoProto,
Version,
)
from onnx.onnx_operators_pb import OperatorProto, OperatorSetProto
from onnx.onnx_data_pb import MapProto, OptionalProto, SequenceProto
from onnx.version import version as __version__
# Import common subpackages so they're available when you 'import onnx'
from onnx import (
checker,
compose,
defs,
gen_proto,
helper,
hub,
mapping,
numpy_helper,
parser,
printer,
shape_inference,
utils,
version_converter,
)
# Supported model formats that can be loaded from and saved to
# The literals are formats with built-in support. But we also allow users to
# register their own formats. So we allow str as well.
_SupportedFormat = Union[Literal["protobuf", "textproto"], str]
# Default serialization format
_DEFAULT_FORMAT = "protobuf"
def _load_bytes(f: IO[bytes] | str | os.PathLike) -> bytes:
if hasattr(f, "read") and callable(typing.cast(IO[bytes], f).read):
content = typing.cast(IO[bytes], f).read()
else:
f = typing.cast(Union[str, os.PathLike], f)
with open(f, "rb") as readable:
content = readable.read()
return content
def _save_bytes(content: bytes, f: IO[bytes] | str | os.PathLike) -> None:
if hasattr(f, "write") and callable(typing.cast(IO[bytes], f).write):
typing.cast(IO[bytes], f).write(content)
else:
f = typing.cast(Union[str, os.PathLike], f)
with open(f, "wb") as writable:
writable.write(content)
def _get_file_path(f: IO[bytes] | str | os.PathLike | None) -> str | None:
if isinstance(f, (str, os.PathLike)):
return os.path.abspath(f)
if hasattr(f, "name"):
assert f is not None
return os.path.abspath(f.name)
return None
def _get_serializer(
fmt: _SupportedFormat | None, f: str | os.PathLike | IO[bytes] | None = None
) -> serialization.ProtoSerializer:
"""Get the serializer for the given path and format from the serialization registry."""
# Use fmt if it is specified
if fmt is not None:
return serialization.registry.get(fmt)
if (file_path := _get_file_path(f)) is not None:
_, ext = os.path.splitext(file_path)
fmt = serialization.registry.get_format_from_file_extension(ext)
# Failed to resolve format if fmt is None. Use protobuf as default
fmt = fmt or _DEFAULT_FORMAT
assert fmt is not None
return serialization.registry.get(fmt)
def load_model(
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # pylint: disable=redefined-builtin
load_external_data: bool = True,
) -> ModelProto:
"""Loads a serialized ModelProto into memory.
Args:
f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
load_external_data: Whether to load the external data.
Set to True if the data is under the same directory of the model.
If not, users need to call :func:`load_external_data_for_model`
with directory to load external data from.
Returns:
Loaded in-memory ModelProto.
"""
model = _get_serializer(format, f).deserialize_proto(_load_bytes(f), ModelProto())
if load_external_data:
model_filepath = _get_file_path(f)
if model_filepath:
base_dir = os.path.dirname(model_filepath)
load_external_data_for_model(model, base_dir)
return model
def load_tensor(
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # pylint: disable=redefined-builtin
) -> TensorProto:
"""Loads a serialized TensorProto into memory.
Args:
f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory TensorProto.
"""
return _get_serializer(format, f).deserialize_proto(_load_bytes(f), TensorProto())
def load_model_from_string(
s: bytes | str,
format: _SupportedFormat = _DEFAULT_FORMAT, # pylint: disable=redefined-builtin
) -> ModelProto:
"""Loads a binary string (bytes) that contains serialized ModelProto.
Args:
s: a string, which contains serialized ModelProto
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory ModelProto.
"""
return _get_serializer(format).deserialize_proto(s, ModelProto())
def load_tensor_from_string(
s: bytes,
format: _SupportedFormat = _DEFAULT_FORMAT, # pylint: disable=redefined-builtin
) -> TensorProto:
"""Loads a binary string (bytes) that contains serialized TensorProto.
Args:
s: a string, which contains serialized TensorProto
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory TensorProto.
"""
return _get_serializer(format).deserialize_proto(s, TensorProto())
def save_model(
proto: ModelProto | bytes,
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # pylint: disable=redefined-builtin
*,
save_as_external_data: bool = False,
all_tensors_to_one_file: bool = True,
location: str | None = None,
size_threshold: int = 1024,
convert_attribute: bool = False,
) -> None:
"""
    Saves the ModelProto to the specified path and, optionally, serializes tensors with raw data as external data before saving.
Args:
        proto: should be an in-memory ModelProto
f: can be a file-like object (has "write" function) or a string containing
a file name or a pathlike object
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
save_as_external_data: If true, save tensors to external file(s).
all_tensors_to_one_file: Effective only if save_as_external_data is True.
If true, save all tensors to one external file specified by location.
If false, save each tensor to a file named with the tensor name.
location: Effective only if save_as_external_data is true.
Specify the external file that all tensors to save to.
If not specified, will use the model name.
size_threshold: Effective only if save_as_external_data is True.
            Threshold for the size of data. Only tensors whose data is >= the size_threshold will be
            converted to external data. To convert every tensor with raw data to external data, set size_threshold=0.
convert_attribute: Effective only if save_as_external_data is True.
If true, convert all tensors to external data
If false, convert only non-attribute tensors to external data
"""
if isinstance(proto, bytes):
proto = _get_serializer(_DEFAULT_FORMAT).deserialize_proto(proto, ModelProto())
if save_as_external_data:
convert_model_to_external_data(
proto, all_tensors_to_one_file, location, size_threshold, convert_attribute
)
model_filepath = _get_file_path(f)
if model_filepath is not None:
basepath = os.path.dirname(model_filepath)
proto = write_external_data_tensors(proto, basepath)
serialized = _get_serializer(format, model_filepath).serialize_proto(proto)
_save_bytes(serialized, f)
def save_tensor(
proto: TensorProto,
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # pylint: disable=redefined-builtin
) -> None:
"""
Saves the TensorProto to the specified path.
Args:
        proto: should be an in-memory TensorProto
f: can be a file-like object (has "write" function) or a string
containing a file name or a pathlike object.
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
"""
serialized = _get_serializer(format, f).serialize_proto(proto)
_save_bytes(serialized, f)
# For backward compatibility
load = load_model
load_from_string = load_model_from_string
save = save_model
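# Minimal round-trip sketch (file names are placeholders):
#   model = load_model("model.onnx")   # format resolved from the path, protobuf by default
#   save_model(model, "model.textproto", format="textproto")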
|
cb7885601c4bb3f28a291d4c502ae6c8ee0b1d47
|
8f421001634923dbfb032389ecd094d4880e958a
|
/modules/bgsegm/samples/viz.py
|
fdcd1c585555d61a19f94979cd53b5b2d1c91c55
|
[
"Apache-2.0"
] |
permissive
|
opencv/opencv_contrib
|
ccf47a2a97022e20d936eb556aa9bc63bc9bdb90
|
9e134699310c81ea470445b4888fce5c9de6abc7
|
refs/heads/4.x
| 2023-08-22T05:58:21.266673
| 2023-08-11T16:28:20
| 2023-08-11T16:28:20
| 12,756,992
| 8,611
| 6,099
|
Apache-2.0
| 2023-09-14T17:35:22
| 2013-09-11T13:28:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
viz.py
|
import numpy as np
import cv2 as cv
import argparse
import os
def main():
    argparser = argparse.ArgumentParser(description='Visualization of the LSBP/GSOC background subtraction algorithm.')
argparser.add_argument('-g', '--gt', help='Directory with ground-truth frames', required=True)
argparser.add_argument('-f', '--frames', help='Directory with input frames', required=True)
argparser.add_argument('-l', '--lsbp', help='Display LSBP instead of GSOC', default=False)
args = argparser.parse_args()
    gt = sorted(os.path.join(args.gt, x) for x in os.listdir(args.gt))
    f = sorted(os.path.join(args.frames, x) for x in os.listdir(args.frames))
    gt = np.uint8([cv.imread(x, cv.IMREAD_GRAYSCALE) for x in gt])
    f = np.uint8([cv.imread(x, cv.IMREAD_COLOR) for x in f])
if not args.lsbp:
bgs = cv.bgsegm.createBackgroundSubtractorGSOC()
else:
bgs = cv.bgsegm.createBackgroundSubtractorLSBP()
    for i in range(f.shape[0]):
cv.imshow('Frame', f[i])
cv.imshow('Ground-truth', gt[i])
mask = bgs.apply(f[i])
bg = bgs.getBackgroundImage()
cv.imshow('BG', bg)
cv.imshow('Output mask', mask)
k = cv.waitKey(0)
if k == 27:
break
if __name__ == '__main__':
main()
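# Example invocation (directory paths are placeholders):
#   python viz.py --gt ./groundtruth --frames ./frames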
|
4671e19c80c1942c8d35a73367bc6b0787fc0f42
|
9d38a5326520387d37f27ad397690f3cb15bc6ae
|
/python/graph/rgcn.py
|
9c4014db491f321e64fd28927df0d21fd54139be
|
[] |
no_license
|
Angel-ML/PyTorch-On-Angel
|
11d4aa0b60fc6c79ddf3e7246eb8bc354bc9aef1
|
0532accb42ce7698117e107feb37eebf215dfde8
|
refs/heads/master
| 2023-08-30T21:08:30.424122
| 2022-11-24T09:46:26
| 2022-11-24T09:46:26
| 187,611,545
| 164
| 53
| null | 2022-11-25T01:42:35
| 2019-05-20T09:38:41
|
Scala
|
UTF-8
|
Python
| false
| false
| 5,209
|
py
|
rgcn.py
|
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/Apache-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# !/usr/bin/env python
import argparse
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from nn.conv import RGCNConv
class RelationGCN(torch.jit.ScriptModule):
def __init__(self, input_dim, hidden_dim, n_relations, n_bases, n_class,
task_type, class_weights=""):
super(RelationGCN, self).__init__()
# loss func for multi label classification
self.loss_fn = torch.nn.BCELoss()
self.task_type = task_type
self.class_weights = class_weights
if len(class_weights) > 2:
self.class_weights = \
torch.tensor(list(map(float, class_weights.split(",")))).to(torch.float)
else:
self.class_weights = None
self.conv1 = RGCNConv(input_dim, hidden_dim, n_relations, n_bases)
self.conv2 = RGCNConv(hidden_dim, hidden_dim, n_relations, n_bases)
self.weight = Parameter(torch.zeros(hidden_dim, n_class))
self.bias = Parameter(torch.zeros(n_class))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight)
if self.bias.dim() > 1:
torch.nn.init.xavier_uniform_(self.bias)
@torch.jit.script_method
def forward_(self, x, first_edge_index, second_edge_index, first_edge_type, second_edge_type):
# type: (Optional[Tensor], Tensor, Tensor, Tensor, Tensor) -> Tensor
x = self.embedding_(x, first_edge_index, second_edge_index, first_edge_type, second_edge_type)
x = torch.matmul(x, self.weight)
x = x + self.bias
if self.task_type == "classification":
return F.log_softmax(x, dim=1)
else:
return F.sigmoid(x)
@torch.jit.script_method
def loss(self, y_pred, y_true):
if self.task_type == "classification":
y_true = y_true.view(-1).to(torch.long)
if self.class_weights is None:
return F.nll_loss(y_pred, y_true)
else:
return F.nll_loss(y_pred, y_true, weight=self.class_weights)
else:
u_true = y_true.reshape(y_pred.size())
return self.loss_fn(y_pred, u_true)
@torch.jit.script_method
def predict_(self, x, first_edge_index, second_edge_index, first_edge_type, second_edge_type):
# type: (Optional[Tensor], Tensor, Tensor, Tensor, Tensor) -> Tensor
output = self.forward_(x, first_edge_index, second_edge_index, first_edge_type, second_edge_type)
if self.task_type == "classification":
return output.max(1)[1]
else:
return output
@torch.jit.script_method
def embedding_(self, x, first_edge_index, second_edge_index, first_edge_type, second_edge_type):
# type: (Optional[Tensor], Tensor, Tensor, Tensor, Tensor) -> Tensor
x = F.relu(self.conv1(x, second_edge_index, second_edge_type, None))
x = self.conv2(x, first_edge_index, first_edge_type, None)
return x
FLAGS = None
def main():
rgcn = RelationGCN(FLAGS.input_dim, FLAGS.hidden_dim, FLAGS.n_relations,
FLAGS.n_bases, FLAGS.n_class, FLAGS.task_type,
FLAGS.class_weights)
rgcn.save(FLAGS.output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dim",
type=int,
default=-1,
help="input dimension of node features")
parser.add_argument(
"--hidden_dim",
type=int,
default=32,
help="hidden dimension of rgcn convolution layer")
parser.add_argument(
"--n_class",
type=int,
default=2,
help="the number of classes")
parser.add_argument(
"--output_file",
type=str,
default="rgcn.pt",
help="output file name")
parser.add_argument(
"--n_relations",
type=int,
default=1,
help="the number types of relations for edges")
parser.add_argument(
"--n_bases",
type=int,
default=30,
help="the number of bases in rgcn model")
parser.add_argument(
"--task_type",
type=str,
default="classification",
help="classification or multi-label-classification")
parser.add_argument(
"--class_weights",
type=str,
default="",
help="class weights, in order to balance class, such as: 0.1,0.9")
FLAGS, unparsed = parser.parse_known_args()
main()
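# Example export invocation (argument values are placeholders):
#   python rgcn.py --input_dim 64 --hidden_dim 32 --n_relations 3 \
#       --n_bases 10 --n_class 2 --output_file rgcn.pt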
|
30ac0b0717caa1eba7591c0b2217f85152e0b5f8
|
05b0c763ab92086e69a8d00ae6465009c596f6bc
|
/setup.py
|
a8fed674d85c55c18a17c7b9921d4dfb31b76b0a
|
[
"Apache-2.0"
] |
permissive
|
intel/intel-extension-for-pytorch
|
60ce2af2ec3a1dacae0d0db13dd51a5b44512e61
|
7f9266789de7ca9d8bcf55606f3204f1a3640640
|
refs/heads/master
| 2023-09-01T09:13:16.866410
| 2023-08-31T08:00:37
| 2023-08-31T08:00:37
| 256,061,008
| 991
| 144
|
Apache-2.0
| 2023-08-13T13:56:07
| 2020-04-15T23:35:29
|
Python
|
UTF-8
|
Python
| false
| false
| 38,908
|
py
|
setup.py
|
# This Python file uses the following encoding: utf-8
# !/usr/bin/env python
# Welcome to the Intel Extension for PyTorch setup.py.
#
# Environment variables you are probably interested in:
#
# DEBUG
# build with -O0 and -g (debug symbols)
#
# RELEASE
# build with optimization level -O2
#
# REL_WITH_DEB_INFO
# build with optimization level -O2 and -g (debug symbols)
#
# CFLAGS
# flags to apply to both C and C++ files to be compiled (a quirk of setup.py
# which we have faithfully adhered to in our build system is that CFLAGS
# also applies to C++ files (unless CXXFLAGS is set), in contrast to the
# default behavior of autogoo and cmake build systems.)
#
# CC
# the C/C++ compiler to use
#
# MKLROOT
# specify MKL library path.
# ONLY NEEDED if you have a specific MKL version you want to link against.
# Make sure this directory contains include and lib directories.
# By default, the MKL library installed with pip/conda is used.
#
# Environment variables we respect (these environment variables are
# conventional and are often understood/set by other software.)
#
# TORCH_VERSION
# specify the PyTorch version to depend on
#
# IPEX_VERSION
# specify the extension version literal
#
# MAX_JOBS
# number of processes for parallel compilation, must be an integer
#
# VERBOSE
# more verbose output when compiling
#
# IPEX_VERSIONED_BUILD
# build wheel files versioned with a git commit number
#
##############################################################
# XPU Build options:
# USE_ONEMKL - to use oneMKL in operators
# USE_CHANNELS_LAST_1D - to use channels last 1d feature
# USE_PERSIST_STREAM - to use persistent oneDNN stream
# USE_PRIMITIVE_CACHE - to cache oneDNN primitives by framework
# USE_QUEUE_BARRIER - to use queue submit_barrier API
# USE_SCRATCHPAD_MODE - to turn on oneDNN scratchpad user mode
# USE_MULTI_CONTEXT - to create DPC++ runtime context per device
# USE_AOT_DEVLIST - to set the device list for the AOT build option, for example: bdw,tgl,ats,...
# USE_SYCL_ASSERT - to enable assert in sycl kernel
# USE_ITT_ANNOTATION - to enable ITT annotation in sycl kernel
# BUILD_STATIC_ONEMKL - to link static oneMKL libraries
# BUILD_STATS - to count statistics for each component during build process
# BUILD_BY_PER_KERNEL - to build by DPC++ per_kernel option (exclusive with USE_AOT_DEVLIST)
# BUILD_STRIPPED_BIN - to strip all symbols after build
# BUILD_SEPARATE_OPS - to build each operator in separate library
# BUILD_SIMPLE_TRACE - to build simple trace for each registered operator
# BUILD_OPT_LEVEL - to add build option -Ox, accepts values: 0/1
# BUILD_NO_CLANGFORMAT - to build without force clang-format
# BUILD_INTERNAL_DEBUG - to build internal debug code path
#
##############################################################
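# A minimal usage sketch (the variable values below are illustrative
# assumptions, not project defaults):
#   MAX_JOBS=8 USE_NINJA=1 DEBUG=0 python setup.py bdist_wheel
#   BUILD_WITH_XPU=ON USE_AOT_DEVLIST=ats python setup.py develop
# Every BUILD_*/USE_*/CMAKE_* variable present in the environment is picked up
# by the build driver below and forwarded to CMake as -D<NAME>=<VALUE>.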
from __future__ import print_function
from distutils.command.build_py import build_py
from distutils.command.install import install
from distutils.version import LooseVersion
from functools import lru_cache
from subprocess import check_call, check_output
from setuptools.command.build_clib import build_clib
from setuptools.command.egg_info import egg_info
from setuptools import setup, distutils
from pathlib import Path
from typing import Any, Optional
import sysconfig
import distutils.ccompiler
import distutils.command.clean
import os
import glob
import platform
import shutil
import subprocess
import sys
import errno
# FIXME: always set BUILD_WITH_XPU = ON in XPU repo
os.environ["BUILD_WITH_XPU"] = "OFF"
# Define env values
ON_ENV_VAL = ["ON", "YES", "1", "Y"]
OFF_ENV_VAL = ["OFF", "NO", "0", "N"]
FULL_ENV_VAL = ON_ENV_VAL + OFF_ENV_VAL
# initialize variables for compilation
IS_LINUX = platform.system() == "Linux"
IS_DARWIN = platform.system() == "Darwin"
IS_WINDOWS = platform.system() == "Windows"
@lru_cache(maxsize=128)
def _get_build_target():
build_target = ""
if len(sys.argv) > 1:
if sys.argv[1] in ["build_clib", "bdist_cppsdk"]:
build_target = "cppsdk"
elif sys.argv[1] in ["clean"]:
build_target = "clean"
elif sys.argv[1] in ["develop"]:
build_target = "develop"
else:
build_target = "python"
return build_target
torch_install_prefix = None
if _get_build_target() == "cppsdk":
torch_install_prefix = os.environ.get("LIBTORCH_PATH", None)
if torch_install_prefix is None or not os.path.exists(torch_install_prefix):
raise RuntimeError("Can not find libtorch from env LIBTORCH_PATH!")
torch_install_prefix = os.path.abspath(torch_install_prefix)
elif _get_build_target() in ["develop", "python"]:
try:
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension
except ImportError as e:
raise RuntimeError("Fail to import torch!")
def _check_env_flag(name, default=""):
return os.getenv(name, default).upper() in ON_ENV_VAL
def get_build_type():
return (
"RelWithDebInfo"
if _check_env_flag("REL_WITH_DEB_INFO")
else "Debug"
if _check_env_flag("DEBUG")
else "Release"
)
def create_if_not_exist(path_dir):
if not os.path.exists(path_dir):
try:
Path(path_dir).mkdir(parents=True, exist_ok=True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise RuntimeError("Fail to create path {}".format(path_dir))
def get_version_num():
versions = {}
version_file = "version.txt"
version_lines = open(version_file, "r").readlines()
for line in version_lines:
key, value = line.strip().split(" ")
versions[key] = value
for v in ("VERSION_MAJOR", "VERSION_MINOR", "VERSION_PATCH"):
if v not in versions:
print("ERROR:", v, "is not found in", version_file)
sys.exit(1)
version = (
versions["VERSION_MAJOR"]
+ "."
+ versions["VERSION_MINOR"]
+ "."
+ versions["VERSION_PATCH"]
)
return version
PACKAGE_NAME = "intel_extension_for_pytorch"
PYTHON_VERSION = sys.version_info
def get_pytorch_install_dir():
if _get_build_target() == "clean":
return None
if _get_build_target() == "cppsdk":
return torch_install_prefix
else:
return os.path.dirname(os.path.abspath(torch.__file__))
pytorch_install_dir = get_pytorch_install_dir()
def _build_installation_dependency():
install_requires = []
install_requires.append("psutil")
install_requires.append("numpy")
return install_requires
def which(thefile: str) -> Optional[str]:
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for d in path:
fname = os.path.join(d, thefile)
fnames = [fname]
if sys.platform == "win32":
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
fnames += [fname + ext for ext in exts]
for name in fnames:
if os.access(name, os.F_OK | os.X_OK) and not os.path.isdir(name):
return name
return None
def get_cmake_command():
if IS_WINDOWS:
return "cmake"
def _get_version(cmd: Optional[str]) -> Any:
"Returns cmake version."
if cmd is None:
return None
for line in check_output([cmd, "--version"]).decode("utf-8").split("\n"):
if "version" in line:
return LooseVersion(line.strip().split(" ")[2])
raise RuntimeError("no version found")
cmake3_version = _get_version(which("cmake3"))
cmake_version = _get_version(which("cmake"))
_cmake_min_version = LooseVersion("3.13.0")
if all(
(
ver is None or ver < _cmake_min_version
for ver in [cmake_version, cmake3_version]
)
):
raise RuntimeError("Require cmake or cmake3 3.13.0 or higher but not found")
if cmake3_version is None:
cmake_command = "cmake"
elif cmake_version is None:
cmake_command = "cmake3"
else:
if cmake3_version >= cmake_version:
cmake_command = "cmake3"
else:
cmake_command = "cmake"
return cmake_command
def get_cpack_command():
if IS_WINDOWS:
return "cpack"
if shutil.which("cpack3") is not None:
return "cpack3"
if shutil.which("cpack") is not None:
return "cpack"
else:
raise RuntimeError("no cpack or cpack3 found")
def get_ipex_git_head_sha(base_dir):
ipex_git_sha = (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], cwd=base_dir)
.decode("ascii")
.strip()
)
return ipex_git_sha
def get_torch_git_head_sha():
if _get_build_target() == "clean":
return None
if _get_build_target() == "cppsdk":
libtorch_hash_file = os.path.join(torch_install_prefix, "build-hash")
if not os.path.exists(libtorch_hash_file):
raise RuntimeError(
"can not find build-hash at {}".format(libtorch_hash_file)
)
with open(libtorch_hash_file, "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if line.isalnum():
return line
raise RuntimeError("can not get libtorch hash in {}".format(libtorch_hash_file))
else:
torch_git_sha = torch.version.git_version
return torch_git_sha
def get_submodule_commit(base_dir, submodule_dir):
if not os.path.isdir(submodule_dir):
return ""
return (
subprocess.check_output(
["git", "submodule", "status", submodule_dir], cwd=base_dir
)
.decode("ascii")
.strip()
.split()[0]
)
def get_build_version(ipex_git_sha):
ipex_version = os.getenv("IPEX_VERSION", get_version_num())
if "IPEX_VERSION" not in os.environ:
if _check_env_flag("IPEX_VERSIONED_BUILD", default="1"):
try:
ipex_version += "+git" + ipex_git_sha[:7]
except Exception:
pass
else:
pkg_type = "xpu" if _check_env_flag("BUILD_WITH_XPU") else "cpu"
ipex_version += "+" + pkg_type
return ipex_version
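# For illustration (hypothetical numbers and sha): with the default
# IPEX_VERSIONED_BUILD=1 this yields something like "2.1.0+git1a2b3c4";
# with IPEX_VERSIONED_BUILD=0 it falls back to "2.1.0+cpu" or "2.1.0+xpu"
# depending on BUILD_WITH_XPU, and an explicit IPEX_VERSION is used verbatim.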
def write_buffer_to_file(file_path, buffer):
create_if_not_exist(os.path.dirname(file_path))
with open(file_path, "w") as f:
f.write(buffer)
f.close()
def get_code_fingerprint(ipex_build_version, ipex_git_sha, torch_git_sha, build_type):
fingerprint = "{}_{}_{}_{}".format(
ipex_build_version, ipex_git_sha, torch_git_sha, build_type
)
return fingerprint
def check_code_fingerprint_in_file(file_path, fingerprint):
b_exist = os.path.isfile(file_path)
if b_exist is False:
return False
with open(file_path) as file:
# read all content of a file
content = file.read()
# check if string present in a file
if fingerprint in content:
return True
else:
return False
def create_version_files(
base_dir,
ipex_build_version,
ipex_git_sha,
torch_git_sha,
gpu_onednn_sha,
cpu_ideep_sha,
):
print(
"Building Intel Extension for PyTorch. Version: {}".format(ipex_build_version)
)
py_version_path = os.path.join(base_dir, PACKAGE_NAME, "_version.py")
cpp_version_path = os.path.join(
base_dir, PACKAGE_NAME, "..", "csrc", "utils", "version.h"
)
build_type_str = get_build_type()
    # Check the code fingerprint to avoid rebuilding when nothing has changed.
current_code_fingerprint = get_code_fingerprint(
ipex_build_version, ipex_git_sha, torch_git_sha, build_type_str
)
b_same_fingerprint = check_code_fingerprint_in_file(
py_version_path, current_code_fingerprint
)
if b_same_fingerprint is False:
py_buffer = "# Autogenerated file, do not edit!\n"
py_buffer += "# code fingerprint:\n"
py_buffer += "# {}\n\n".format(current_code_fingerprint)
py_buffer += '__version__ = "{}"\n'.format(ipex_build_version)
py_buffer += '__ipex_gitrev__ = "{}"\n'.format(ipex_git_sha)
py_buffer += '__torch_gitrev__ = "{}"\n'.format(
"" if build_type_str == "Release" else torch_git_sha
)
py_buffer += '__gpu_onednn_gitrev__ = "{}"\n'.format(gpu_onednn_sha)
py_buffer += '__cpu_ideep_gitrev__ = "{}"\n'.format(cpu_ideep_sha)
py_buffer += '__build_type__ = "{}"\n'.format(build_type_str)
write_buffer_to_file(py_version_path, py_buffer)
b_same_fingerprint = check_code_fingerprint_in_file(
cpp_version_path, current_code_fingerprint
)
if b_same_fingerprint is False:
c_buffer = "// Autogenerated file, do not edit!\n"
c_buffer += "// clang-format off\n"
c_buffer += "// code fingerprint: {}\n".format(current_code_fingerprint)
c_buffer += "// clang-format on\n\n"
c_buffer += "#pragma once\n"
c_buffer += "#include <string>\n\n"
c_buffer += "namespace torch_ipex {\n\n"
c_buffer += "const std::string __version__()\n"
c_buffer += '{{ return "{}"; }}\n\n'.format(ipex_build_version)
c_buffer += "const std::string __gitrev__()\n"
c_buffer += '{{ return "{}"; }}\n\n'.format(ipex_git_sha)
c_buffer += "const std::string __torch_gitrev__()\n"
c_buffer += '{{ return "{}"; }}\n\n'.format(torch_git_sha)
c_buffer += "const std::string __build_type__()\n"
c_buffer += '{{ return "{}"; }}\n\n'.format(build_type_str)
c_buffer += "} // namespace torch_ipex\n"
write_buffer_to_file(cpp_version_path, c_buffer)
def get_project_dir():
project_root_dir = os.path.dirname(__file__)
return os.path.abspath(project_root_dir)
def get_build_dir():
return os.path.join(get_project_dir(), "build")
def get_build_type_dir():
build_type_dir = os.path.join(get_build_dir(), get_build_type())
create_if_not_exist(build_type_dir)
return build_type_dir
def get_package_base_dir():
return os.path.join(get_build_type_dir(), "packages")
def get_package_dir():
return os.path.join(get_package_base_dir(), PACKAGE_NAME)
def get_package_lib_dir():
package_lib_dir = os.path.join(get_package_dir(), "lib")
create_if_not_exist(package_lib_dir)
return package_lib_dir
def get_ipex_cpu_dir():
cpu_root_dir = os.path.join(get_project_dir(), "csrc", "cpu")
return os.path.abspath(cpu_root_dir)
def get_ipex_cpu_build_dir():
cpu_build_dir = os.path.join(get_build_type_dir(), "csrc", "cpu")
create_if_not_exist(cpu_build_dir)
return cpu_build_dir
def get_xpu_project_dir():
project_root_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(project_root_dir)
def get_xpu_project_build_dir():
xpu_build_dir = os.path.join(get_build_type_dir(), "csrc", "gpu")
create_if_not_exist(xpu_build_dir)
return xpu_build_dir
def get_xpu_compliers():
if shutil.which("icx") is None or shutil.which("icpx") is None:
raise RuntimeError("Failed to find compiler path from OS PATH")
if IS_WINDOWS:
return "icx", "icx"
else:
return "icx", "icpx"
def get_ipex_python_dir():
project_root_dir = os.path.dirname(__file__)
python_root_dir = os.path.join(project_root_dir, PACKAGE_NAME, "csrc")
return os.path.abspath(python_root_dir)
def get_ipex_python_build_dir():
python_build_dir = os.path.join(get_build_type_dir(), PACKAGE_NAME, "csrc")
create_if_not_exist(python_build_dir)
return python_build_dir
def get_ipex_cppsdk_build_dir():
cppsdk_build_dir = os.path.join(get_build_type_dir(), "csrc", "cppsdk")
create_if_not_exist(cppsdk_build_dir)
return cppsdk_build_dir
base_dir = os.path.dirname(os.path.abspath(__file__))
# Generate version info (ipex.__version__)
torch_git_sha = get_torch_git_head_sha()
ipex_git_sha = get_ipex_git_head_sha(base_dir)
ipex_build_version = get_build_version(ipex_git_sha)
ipex_gpu_onednn_git_sha = get_submodule_commit(base_dir, "third_party/oneDNN")
ipex_cpu_ideep_git_sha = get_submodule_commit(base_dir, "third_party/ideep")
create_version_files(
base_dir,
ipex_build_version,
ipex_git_sha,
torch_git_sha,
ipex_gpu_onednn_git_sha,
ipex_cpu_ideep_git_sha,
)
# global setup modules
class IPEXClean(distutils.command.clean.clean, object):
def run(self):
import glob
import re
with open(".gitignore", "r") as f:
ignores = f.read()
pat = re.compile(r"^#( BEGIN NOT-CLEAN-FILES )?")
for wildcard in filter(None, ignores.split("\n")):
match = pat.match(wildcard)
if match:
if match.group(1):
# Marker is found and stop reading .gitignore.
break
# Ignore lines which begin with '#'.
else:
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
def get_cpp_test_dir():
project_root_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(project_root_dir, "tests", "cpu", "cpp")
def get_cpp_test_build_dir():
cpp_test_build_dir = os.path.join(get_build_type_dir(), "tests", "cpu", "cpp")
create_if_not_exist(cpp_test_build_dir)
return cpp_test_build_dir
def get_pybind11_abi_compiler_flags():
pybind11_abi_flags = []
for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
pval = getattr(torch._C, f"_PYBIND11_{pname}")
if pval is not None:
pybind11_abi_flags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
cl_flags = ""
for flag in pybind11_abi_flags:
cl_flags += flag + " "
return cl_flags
def _gen_build_cfg_from_cmake(
cmake_exec, project_root_dir, cmake_args, build_dir, build_env, use_ninja = False):
if IS_WINDOWS:
if use_ninja:
check_call([cmake_exec, project_root_dir, '-GNinja'] + cmake_args, cwd=build_dir, env=build_env)
else:
# using MSVC generator
check_call([cmake_exec, project_root_dir, '-G Visual Studio 17 2022', '-T Intel C++ Compiler 2023'] + cmake_args, cwd=build_dir, env=build_env)
else:
# Linux
check_call([cmake_exec, project_root_dir] + cmake_args, cwd=build_dir, env=build_env)
def _build_project(build_args, build_dir, build_env, use_ninja=False):
if IS_WINDOWS:
if use_ninja:
check_call(['ninja'] + build_args, cwd=build_dir, env=build_env)
else:
# Current Windows MSVC needs manual build
pass
else:
# Linux
if use_ninja:
check_call(['ninja'] + build_args, cwd=build_dir, env=build_env)
else:
check_call(['make'] + build_args, cwd=build_dir, env=build_env)
def define_build_options(args, **kwargs):
for key, value in sorted(kwargs.items()):
if value is not None:
args.append("-D{}={}".format(key, value))
class IPEXCPPLibBuild(build_clib, object):
def run(self):
self.build_lib = os.path.relpath(get_package_dir())
self.build_temp = os.path.relpath(get_build_type_dir())
cmake_exec = get_cmake_command()
if cmake_exec is None:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
self.cmake = cmake_exec
project_root_dir = get_project_dir()
build_type_dir = get_build_type_dir()
ipex_python_dir = get_ipex_python_dir()
ipex_python_build_dir = get_ipex_python_build_dir()
ipex_cpu_dir = get_ipex_cpu_dir()
ipex_cpu_build_dir = get_ipex_cpu_build_dir()
ipex_xpu_dir = get_xpu_project_dir()
ipex_xpu_build_dir = get_xpu_project_build_dir()
ipex_cppsdk_build_dir = get_ipex_cppsdk_build_dir()
cpack_out_file = os.path.abspath(
os.path.join(build_type_dir, "IPEXCPackConfig.cmake")
)
self_extract_script = "gen_self_extract.sh"
if _get_build_target() == "cppsdk":
cmake_prefix_path = torch_install_prefix
else:
cmake_prefix_path = torch.utils.cmake_prefix_path
build_option_common = {
"CMAKE_BUILD_TYPE": get_build_type(),
"CMAKE_INSTALL_LIBDIR": "lib",
"CMAKE_PREFIX_PATH": cmake_prefix_path,
"CMAKE_INSTALL_PREFIX": os.path.abspath(get_package_dir()),
"CMAKE_PROJECT_VERSION": get_version_num(),
"PYTHON_PLATFORM_INFO": platform.platform(),
"PYTHON_INCLUDE_DIR": sysconfig.get_paths()["include"],
"PYTHON_EXECUTABLE": sys.executable,
"IPEX_PROJ_NAME": PACKAGE_NAME,
"LIBIPEX_GITREV": ipex_git_sha,
"LIBIPEX_VERSION": ipex_build_version,
}
build_with_cpu = True # Default ON
build_with_xpu = False # Default OFF
use_ninja = False
# Windows uses Ninja as default generator
if IS_WINDOWS:
use_ninja = True
sequential_build = False
cmake_common_args = []
my_env = os.environ.copy()
for var, val in my_env.items():
if var.startswith(("BUILD_", "USE_", "CMAKE_")):
if var == "CMAKE_PREFIX_PATH":
# XXX: Do NOT overwrite CMAKE_PREFIX_PATH. Append into the list, instead!
build_option_common[var] = ";".join(
[build_option_common[var], val.replace(":", ";")]
)
continue
if var == "USE_NINJA" and val.upper() in ON_ENV_VAL:
use_ninja = True
cmake_common_args.append("-GNinja")
continue
if IS_WINDOWS and var == 'USE_MSVC' and val.upper() in ON_ENV_VAL:
use_ninja = False
continue
if var == "BUILD_STATS" and val.upper() in ON_ENV_VAL:
sequential_build = True
# fall through
if var == "BUILD_WITH_XPU" and val.upper() in ON_ENV_VAL:
build_with_xpu = True
# fall through
if var == "BUILD_WITH_CPU" and val.upper() in OFF_ENV_VAL:
build_with_cpu = False
# fall through
build_option_common[var] = val
define_build_options(cmake_common_args, **build_option_common)
nproc = min(int(os.environ.get("MAX_JOBS", os.cpu_count())), os.cpu_count())
if sequential_build:
nproc = 1
            print("WARNING: Building sequentially with a single process!")
build_args = ["-j", str(nproc), "install"]
if _check_env_flag("VERBOSE") and use_ninja:
build_args.append("-v")
if build_with_xpu:
# Generate cmake for XPU module:
if os.path.isdir(ipex_xpu_dir) is False:
                raise RuntimeError(
                    "This may be a CPU-only branch; it does not contain XPU code."
                )
gpu_cc, gpu_cxx = get_xpu_compliers()
build_option_gpu = {
**build_option_common,
"BUILD_MODULE_TYPE": "GPU",
"CMAKE_C_COMPILER": gpu_cc,
"CMAKE_CXX_COMPILER": gpu_cxx,
}
if get_build_type() == "Debug":
build_option_gpu = {
**build_option_gpu,
"BUILD_SEPARATE_OPS": "ON",
"USE_SYCL_ASSERT": "ON",
"USE_ITT_ANNOTATION": "ON",
}
cmake_args_gpu = []
define_build_options(cmake_args_gpu, **build_option_gpu)
_gen_build_cfg_from_cmake(
cmake_exec, project_root_dir, cmake_args_gpu, ipex_xpu_build_dir, my_env, use_ninja
)
if build_with_cpu:
# Generate cmake for CPU module:
build_option_cpu = {**build_option_common, "BUILD_MODULE_TYPE": "CPU"}
cmake_args_cpu = []
define_build_options(cmake_args_cpu, **build_option_cpu)
_gen_build_cfg_from_cmake(
cmake_exec, project_root_dir, cmake_args_cpu, ipex_cpu_build_dir, my_env, use_ninja
)
# Generate cmake for the CPP UT
build_option_cpp_test = {
**build_option_common,
"PROJECT_DIR": project_root_dir,
"PYTORCH_INSTALL_DIR": pytorch_install_dir,
"CPP_TEST_BUILD_DIR": get_cpp_test_build_dir(),
}
cmake_args_cpp_test = []
define_build_options(cmake_args_cpp_test, **build_option_cpp_test)
_gen_build_cfg_from_cmake(
cmake_exec,
get_cpp_test_dir(),
cmake_args_cpp_test,
get_cpp_test_build_dir(),
my_env,
use_ninja
)
if _get_build_target() in ["develop", "python"]:
# Generate cmake for common python module:
build_option_python = {
**build_option_common,
"BUILD_MODULE_TYPE": "PYTHON",
"PYBIND11_CL_FLAGS": get_pybind11_abi_compiler_flags(),
}
cmake_args_python = []
define_build_options(cmake_args_python, **build_option_python)
_gen_build_cfg_from_cmake(
cmake_exec,
project_root_dir,
cmake_args_python,
ipex_python_build_dir,
my_env,
)
elif _get_build_target() == "cppsdk":
# Generate cmake for CPPSDK package:
build_option_cppsdk = {
**build_option_common,
"BUILD_MODULE_TYPE": "CPPSDK",
"CPACK_CONFIG_FILE": cpack_out_file,
"CPACK_OUTPUT_DIR": build_type_dir,
"LIBIPEX_GEN_SCRIPT": self_extract_script,
}
cmake_args_cppsdk = []
define_build_options(cmake_args_cppsdk, **build_option_cppsdk)
_gen_build_cfg_from_cmake(
cmake_exec,
project_root_dir,
cmake_args_cppsdk,
ipex_cppsdk_build_dir,
my_env,
)
if build_with_xpu:
# Build XPU module:
_build_project(build_args, ipex_xpu_build_dir, my_env, use_ninja)
if build_with_cpu:
# Build CPU module:
_build_project(build_args, ipex_cpu_build_dir, my_env, use_ninja)
# Build the CPP UT
_build_project(build_args, get_cpp_test_build_dir(), my_env, use_ninja)
if _get_build_target() in ["develop", "python"]:
# Build common python module:
_build_project(build_args, ipex_python_build_dir, my_env, use_ninja)
elif _get_build_target() == "cppsdk":
# Build CPPSDK package:
_build_project(build_args, ipex_cppsdk_build_dir, my_env, use_ninja)
cpack_exec = get_cpack_command()
check_call([cpack_exec, "--config", cpack_out_file])
gen_script_path = os.path.abspath(
os.path.join(build_type_dir, self_extract_script)
)
            if not os.path.isfile(gen_script_path):
                raise RuntimeError(
                    "Cannot find script to generate self-extract package "
                    "in {}".format(gen_script_path)
                )
check_call(gen_script_path, shell=True)
# Copy the export library, header and cmake file to root/intel_extension_for_pytorch dir.
# It is only copied in "develop" mode, which can save disk space in "install" mode.
if _get_build_target() == "develop":
ret = get_src_lib_and_dst()
for src, dst in ret:
self.copy_file(src, dst)
def get_src_lib_and_dst():
ret = []
generated_cpp_files = glob.glob(
os.path.join(get_package_base_dir(), PACKAGE_NAME, "lib", "**/*.so"),
recursive=True,
)
generated_cpp_files.extend(
glob.glob(
os.path.join(get_package_base_dir(), PACKAGE_NAME, "bin", "**/*.dll"),
recursive=True,
)
)
generated_cpp_files.extend(
glob.glob(
os.path.join(get_package_base_dir(), PACKAGE_NAME, "lib", "**/*.lib"),
recursive=True,
)
)
generated_cpp_files.extend(
glob.glob(
os.path.join(get_package_base_dir(), PACKAGE_NAME, "include", "**/*.h"),
recursive=True,
)
)
generated_cpp_files.extend(
glob.glob(
os.path.join(get_package_base_dir(), PACKAGE_NAME, "share", "**/*.cmake"),
recursive=True,
)
)
for src in generated_cpp_files:
dst = os.path.join(
get_project_dir(),
PACKAGE_NAME,
os.path.relpath(src, os.path.join(get_package_base_dir(), PACKAGE_NAME)),
)
dst_path = Path(dst)
if not dst_path.parent.exists():
Path(dst_path.parent).mkdir(parents=True, exist_ok=True)
ret.append((src, dst))
return ret
def get_src_py_and_dst():
ret = []
generated_python_files = glob.glob(
os.path.join(get_project_dir(), PACKAGE_NAME, "**/*.py"), recursive=True
)
for src in generated_python_files:
dst = os.path.join(
get_package_base_dir(),
PACKAGE_NAME,
os.path.relpath(src, os.path.join(get_project_dir(), PACKAGE_NAME)),
)
dst_path = Path(dst)
if not dst_path.parent.exists():
Path(dst_path.parent).mkdir(parents=True, exist_ok=True)
ret.append((src, dst))
return ret
# python specific setup modules
class IPEXEggInfoBuild(egg_info, object):
def finalize_options(self):
super(IPEXEggInfoBuild, self).finalize_options()
class IPEXInstallCmd(install, object):
def finalize_options(self):
self.build_lib = os.path.relpath(get_package_base_dir())
return super(IPEXInstallCmd, self).finalize_options()
class IPEXPythonPackageBuild(build_py, object):
def run(self) -> None:
ret = get_src_py_and_dst()
for src, dst in ret:
self.copy_file(src, dst)
super(IPEXPythonPackageBuild, self).finalize_options()
def make_relative_rpath(path):
if IS_DARWIN:
return "-Wl,-rpath,@loader_path/" + path
elif IS_WINDOWS:
return path
else:
return "-Wl,-rpath,$ORIGIN/" + path
def pyi_module():
main_libraries = ["intel-ext-pt-python"]
main_sources = [os.path.join(PACKAGE_NAME, "csrc", "_C.cpp")]
include_dirs = [
os.path.realpath("."),
os.path.realpath(os.path.join(PACKAGE_NAME, "csrc")),
os.path.join(pytorch_install_dir, "include"),
os.path.join(pytorch_install_dir, "include", "torch", "csrc", "api", "include"),
]
library_dirs = ["lib", os.path.join(pytorch_install_dir, "lib")]
if not IS_WINDOWS:
library_dirs = [
"lib",
os.path.join(pytorch_install_dir, "lib")
]
extra_compile_args = [
'-Wall',
'-Wextra',
'-Wno-strict-overflow',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-write-strings',
'-Wno-unknown-pragmas',
# This is required for Python 2 declarations that are deprecated in 3.
'-Wno-deprecated-declarations',
# Python 2.6 requires -fno-strict-aliasing, see
# http://legacy.python.org/dev/peps/pep-3123/
# We also depend on it in our code (even Python 3).
'-fno-strict-aliasing',
# Clang has an unfixed bug leading to spurious missing
# braces warnings, see
# https://bugs.llvm.org/show_bug.cgi?id=21629
'-Wno-missing-braces']
C_ext = CppExtension(
"{}._C".format(PACKAGE_NAME),
libraries=main_libraries,
sources=main_sources,
language='c++',
extra_compile_args=extra_compile_args,
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_link_args=[make_relative_rpath('lib')])
else:
library_dirs = [
"bin",
os.path.join(pytorch_install_dir, "lib")
]
extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB']
# /MD links against DLL runtime
# and matches the flags set for protobuf and ONNX
# /EHsc is about standard C++ exception handling
# /DNOMINMAX removes builtin min/max functions
# /wdXXXX disables warning no. XXXX
extra_compile_args = ['/MD', '/EHsc', '/DNOMINMAX',
'/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838',
'/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996',
'/wd4275']
C_ext = CppExtension(
"{}._C".format(PACKAGE_NAME),
libraries=main_libraries,
sources=main_sources,
language='c++',
extra_compile_args=extra_compile_args,
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_link_args=extra_link_args)
return C_ext
def pyi_isa_help_module():
main_libraries = []
main_sources = [
os.path.join(PACKAGE_NAME, "csrc", "_isa_help_main.cpp"),
os.path.join(PACKAGE_NAME, "csrc", "cpu", "isa_help", "isa_help.cpp"),
os.path.join("csrc", "cpu", "isa", "cpu_feature.cpp"),
]
include_dirs = [
os.path.realpath("."),
os.path.realpath(os.path.join("csrc", "cpu", "isa")),
os.path.realpath(os.path.join(PACKAGE_NAME, "csrc")),
os.path.join(pytorch_install_dir, "include"),
os.path.join(pytorch_install_dir, "include", "torch", "csrc", "api", "include"),
]
if not IS_WINDOWS:
library_dirs = [
"lib",
os.path.join(pytorch_install_dir, "lib")
]
extra_compile_args = [
'-Wall',
'-Wextra',
'-Wno-strict-overflow',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-write-strings',
'-Wno-unknown-pragmas',
# This is required for Python 2 declarations that are deprecated in 3.
'-Wno-deprecated-declarations',
# Python 2.6 requires -fno-strict-aliasing, see
# http://legacy.python.org/dev/peps/pep-3123/
# We also depend on it in our code (even Python 3).
'-fno-strict-aliasing',
# Clang has an unfixed bug leading to spurious missing
# braces warnings, see
# https://bugs.llvm.org/show_bug.cgi?id=21629
'-Wno-missing-braces']
C_ext = CppExtension(
"{}._isa_help".format(PACKAGE_NAME),
libraries=main_libraries,
sources=main_sources,
language='c++',
extra_compile_args=extra_compile_args,
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_link_args=[make_relative_rpath('lib')])
else:
library_dirs = [
"bin",
os.path.join(pytorch_install_dir, "lib")
]
extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB']
# /MD links against DLL runtime
# and matches the flags set for protobuf and ONNX
# /EHsc is about standard C++ exception handling
# /DNOMINMAX removes builtin min/max functions
# /wdXXXX disables warning no. XXXX
extra_compile_args = ['/MD', '/EHsc', '/DNOMINMAX',
'/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838',
'/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996',
'/wd4275']
C_ext = CppExtension(
"{}._isa_help".format(PACKAGE_NAME),
libraries=main_libraries,
sources=main_sources,
language='c++',
extra_compile_args=extra_compile_args,
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_link_args=extra_link_args)
return C_ext
ext_modules = []
cmdclass = {
"build_clib": IPEXCPPLibBuild,
"bdist_cppsdk": IPEXCPPLibBuild,
"clean": IPEXClean,
}
def fill_python_target_cmd(cmdclass, ext_modules):
class IPEXExtBuild(BuildExtension):
def run(self):
self.run_command("build_clib")
self.build_lib = os.path.relpath(get_package_base_dir())
self.build_temp = os.path.relpath(get_build_type_dir())
self.library_dirs.append(os.path.relpath(get_package_lib_dir()))
super(IPEXExtBuild, self).run()
cmdclass["build_ext"] = IPEXExtBuild
cmdclass["build_py"] = IPEXPythonPackageBuild
cmdclass["egg_info"] = IPEXEggInfoBuild
cmdclass["install"] = IPEXInstallCmd
ext_modules.append(pyi_module())
ext_modules.append(pyi_isa_help_module())
if _get_build_target() in ["develop", "python"]:
fill_python_target_cmd(cmdclass, ext_modules)
long_description = ""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
entry_points = {
"console_scripts": [
"ipexrun = {}.launcher:main".format(PACKAGE_NAME),
]
}
setup(
name=PACKAGE_NAME,
version=ipex_build_version,
description="Intel® Extension for PyTorch*",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/intel/intel-extension-for-pytorch",
author="Intel Corp.",
install_requires=_build_installation_dependency(),
packages=[PACKAGE_NAME],
package_data={PACKAGE_NAME: ["*.so", "lib/*.so", "bin/*.dll", "lib/*.lib"]},
zip_safe=False,
ext_modules=ext_modules,
cmdclass=cmdclass,
entry_points=entry_points,
license="https://www.apache.org/licenses/LICENSE-2.0",
classifiers=[
"License :: OSI Approved :: Apache Software License",
],
)
|
6b9dfcf7bbda37c18bf3d1803afa84849780eecf
|
8c1221c2912ef2bf110d37f682e89d9285ed6f5e
|
/src/webdb/hale/views.py
|
8ad77b00e6ff02336fde6c4658176ac3ea5562fa
|
[] |
no_license
|
pjlantz/Hale
|
3da378804dae5bfaa0cffa897277caca55827464
|
c3cda1173907ad0797c205b61206a71ce0eaa842
|
refs/heads/master
| 2022-05-28T16:18:22.415456
| 2022-05-23T06:51:38
| 2022-05-23T06:51:38
| 699,015
| 178
| 70
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,602
|
py
|
views.py
|
################################################################################
# (c) 2011, The Honeynet Project
# Author: Patrik Lantz patrik@pjlantz.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
import os, mimetypes, base64, datetime
from django.conf import settings
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from webdb.hale.models import Botnet, Log, Module, File, RelatedIPs
from django.template import Context, loader
from django.contrib.auth import logout
from django.shortcuts import render_to_response
from django.template import RequestContext
from piston import forms
@login_required
def index(request):
"""
    Loads database entries for the overview page
    and renders the response
"""
botnets = Botnet.objects.all()
logs = Log.objects.all()
ips = RelatedIPs.objects.all()
files = File.objects.all()
t = loader.get_template('index.html')
c = Context({'botnets': botnets, 'logs': logs, 'ips': ips, 'files':files,})
return HttpResponse(t.render(c))
def request_token_ready(request, token):
error = request.GET.get('error', '')
ctx = RequestContext(request, {
'error' : error,
'token' : token})
return render_to_response('api/oauth/oauth_auth_done.html', context_instance = ctx)
def oauth_auth_view(request, token, callback, params):
print "Auth view"
form = forms.OAuthAuthenticationForm(initial={
'oauth_token': token.key,
'oauth_callback': token.get_callback_url() or callback,
})
return render_to_response('api/oauth/authorize_token.html', {'form':form,}, RequestContext(request))
def logoff(request):
"""
Renders logout page
"""
logout(request)
t = loader.get_template('logout.html')
c = Context({})
return HttpResponse(t.render(c))
@login_required
def download(request, filename):
"""
Render response for module download
"""
filePath = settings.MEDIA_ROOT + "/" + str(filename)
file = None
try:
file = open(filePath)
except IOError:
filePath = settings.MEDIA_ROOT + "/modules/" + str(filename)
file = open(filePath)
ctype, encoding = mimetypes.guess_type(filePath)
response = HttpResponse(file, mimetype=ctype)
response['Content-Disposition'] = 'attachment; filename='+str(filename)
return response
@login_required
def file(request, hashvalue):
"""
Render response for malware download
"""
file = File.objects.get(hash=hashvalue)
content = file.content
content = base64.b64decode(content)
ctype, encoding = mimetypes.guess_type(file.filename)
response = HttpResponse(content, mimetype=ctype)
response['Content-Disposition'] = 'attachment; filename='+file.filename
return response
@login_required
def log(request, log_id):
"""
    Loads database entries for the botnet info page
    and renders the response
"""
botnet = Botnet.objects.get(id=log_id)
logs = Log.objects.filter(botnet=botnet.id)
diff = botnet.lastseen - botnet.firstseen
uptime = diff.days
diff = datetime.datetime.now() - botnet.lastseen
lastActivity = diff.days
files = File.objects.filter(botnet=log_id)
ips = RelatedIPs.objects.filter(botnet=log_id)
t = loader.get_template('logs.html')
c = Context({'logs': logs, 'botnet': botnet, 'files':files, 'ips':ips, 'uptime':uptime, 'lastActivity': lastActivity,})
return HttpResponse(t.render(c))
@login_required
def modules(request):
"""
    Loads database entries for the module page
    and renders the response
"""
modules = Module.objects.all()
botnets = Botnet.objects.all()
t = loader.get_template('modules.html')
c = Context({'modules':modules, 'botnets':botnets,})
return HttpResponse(t.render(c))
|
3253c056bd04788830abab415dc215407d27dcf6
|
3cdf7892f13886e8d97c31c665b9a1800b097403
|
/quarry/types/chunk.py
|
548a9998f7ec01ee36b4375222368f3ab99f1850
|
[
"MIT-feh"
] |
permissive
|
barneygale/quarry
|
0a4985316a1eaecb87e0b5ce4542011d08e2702b
|
da88e64e4d841e9c3d23b3614dade041d6613e2a
|
refs/heads/master
| 2023-06-23T06:16:22.840153
| 2023-06-13T22:28:06
| 2023-06-13T22:28:06
| 21,437,069
| 526
| 123
|
NOASSERTION
| 2023-06-13T22:28:07
| 2014-07-02T19:10:31
|
Python
|
UTF-8
|
Python
| false
| false
| 14,122
|
py
|
chunk.py
|
from collections.abc import Sequence, MutableSequence
from bitstring import BitArray, Bits
import math
def get_width(length, full_width):
"""
Returns the number of bits used by Minecraft to represent indices into a
list of the given length.
"""
width = int(math.ceil(math.log(length, 2)))
if width < 4:
return 4
elif width > 8:
return full_width
else:
return width
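# For example: get_width(20, 14) == 5, since ceil(log2(20)) == 5 falls inside
# the 4..8 bit window; get_width(2, 14) == 4 (clamped up to the minimum) and
# get_width(600, 14) == 14 (over 8 bits, so the full width is used).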
class PackedArray(Sequence):
"""
This class provides support for an array where values are tightly packed
into a number of bits (such as 4 bits for light or 9 bits for height).
All operations associated with fixed-size mutable sequences are supported,
such as slicing.
You may need to adjust the *length* and *value_width* of packed arrays from
NBT data, where these fields are not conveyed.
Several constructors are available for specific uses of packed arrays:
    - Light data uses 4-bit values and 8-bit sectors
- Height data uses 9-bit values and 64-bit sectors
- Block data uses 64-bit sectors
"""
#: The ``bitstring.BitArray`` object used for storage.
storage = None
#: The number of entries in the array
length = None
#: The width in bits of sectors. Used in (de)serialization.
sector_width = None
#: The width in bits of values.
value_width = None
def __repr__(self):
return "<PackedArray length=%d sector_width=%d value_width=%d>" \
% (self.length,
self.sector_width,
self.value_width)
# Constructors ------------------------------------------------------------
def __init__(self, storage, length, sector_width, value_width):
self.storage = storage
self.length = length
self.sector_width = sector_width
self.value_width = value_width
@classmethod
def empty(cls, length, sector_width, value_width):
"""
Creates an empty array.
"""
obj = cls(BitArray(), length, sector_width, value_width)
obj.purge()
return obj
@classmethod
def empty_light(cls):
"""
Creates an empty array suitable for storing light data.
"""
return cls.empty(4096, 8, 4)
@classmethod
def empty_block(cls):
"""
Creates an empty array suitable for storing block data.
"""
return cls.empty(4096, 64, 4)
@classmethod
def empty_height(cls):
"""
Creates an empty array suitable for storing height data.
"""
return cls.empty(256, 64, 9)
@classmethod
def from_bytes(cls, bytes, length, sector_width, value_width):
"""
Deserialize a packed array from the given bytes.
"""
storage = BitArray(bytes=bytes)
return cls(storage, length, sector_width, value_width)
@classmethod
def from_light_bytes(cls, bytes):
"""
Deserialize a packed array from the given light data bytes.
"""
return cls.from_bytes(bytes, 4096, 8, 4)
@classmethod
def from_block_bytes(cls, bytes, value_width):
"""
Deserialize a packed array from the given block data bytes.
"""
return cls.from_bytes(bytes, 4096, 64, value_width)
@classmethod
def from_height_bytes(cls, bytes):
"""
Deserialize a packed array from the given height data bytes.
"""
return cls.from_bytes(bytes, 256, 64, 9)
# Instance methods --------------------------------------------------------
def to_bytes(self):
"""
Serialize this packed array to bytes.
"""
return self.storage.bytes
def purge(self):
"""
Initializes the storage.
You should not need to call this method.
"""
values_per_sector = self.sector_width // self.value_width
sector_count = 1 + (self.length - 1) // values_per_sector
self.storage.clear()
self.storage.append(self.sector_width * sector_count)
def pos(self, idx):
"""
Returns the bit position of the value at the given index.
You should not need to call this method.
"""
sector, value = divmod(idx, self.sector_width // self.value_width)
pos = (1 + sector) * self.sector_width - \
(1 + value ) * self.value_width
return pos, pos + self.value_width
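    # Worked example of the formula above: with sector_width=64 and
    # value_width=9, pos(0) == (55, 64) and pos(6) == (1, 10); values fill each
    # 64-bit sector from the high bits downwards, leaving one low padding bit.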
def is_empty(self):
"""
Returns true if this packed array is entirely zeros.
"""
return not self.storage.any(True)
# Sequence methods --------------------------------------------------------
def __len__(self):
return self.length
def __iter__(self):
for i in range(self.length):
yield self.storage._slice(*self.pos(i)).uint
def __getitem__(self, item):
if isinstance(item, slice):
return [self.storage._slice(*self.pos(idx)).uint
for idx in range(*item.indices(len(self)))]
else:
if not 0 <= item < len(self):
raise IndexError(item)
return self.storage._slice(*self.pos(item)).uint
def __setitem__(self, item, value):
if isinstance(item, slice):
for idx, value in zip(range(*item.indices(len(self))), value):
self.storage._overwrite(
bs=Bits(uint=value, length=self.value_width),
pos=self.pos(idx)[0])
else:
self.storage._overwrite(
bs=Bits(uint=value, length=self.value_width),
pos=self.pos(item)[0])
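# A short usage sketch (not taken from the project's test suite): height maps
# store 256 values of 9 bits each inside 64-bit sectors.
#
#   heights = PackedArray.empty_height()
#   heights[0] = 68
#   heights[0:3]                              # -> [68, 0, 0]
#   data = heights.to_bytes()
#   PackedArray.from_height_bytes(data)[0]    # -> 68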
class BlockArray(Sequence):
"""
This class provides support for block arrays. It wraps a
:class:`PackedArray` object and implements block encoding/decoding,
palettes, and counting of non-air blocks for lighting purposes. It stores
precisely 4096 (16x16x16) values.
All operations associated with fixed-size mutable sequences are supported,
such as slicing.
A palette is used when there are fewer than 256 unique values; the value
width varies from 4 to 8 bits depending on the size of the palette, and is
automatically adjusted upwards as necessary. Use :meth:`~BlockArray.repack`
to reclaim space by eliminating unused entries.
When 256 or more unique values are present, the palette is unused and
values are stored directly.
"""
#: The :class:`PackedArray` object used for storage.
storage = None
#: List of encoded block values. Empty when palette is not used.
palette = None
#: The `Registry` object used to encode/decode blocks
registry = None
#: The number of non-air blocks
non_air = None
def __repr__(self):
return "<BlockArray palette=%d storage=%r>" \
% (len(self.palette), self.storage)
# Constructors ------------------------------------------------------------
def __init__(self, storage, palette, registry, non_air=-1):
self.storage = storage
self.palette = palette
self.registry = registry
self._non_air = non_air
@classmethod
def empty(cls, registry, non_air=-1):
"""
Creates an empty block array.
"""
storage = PackedArray.empty(4096, 64, 4)
palette = [0]
return cls(storage, palette, registry, non_air)
@classmethod
def from_bytes(cls, bytes, value_width, registry, palette, non_air=-1):
"""
Deserialize a block array from the given bytes.
"""
storage = PackedArray.from_block_bytes(bytes, value_width)
return cls(storage, palette, registry, non_air)
@classmethod
def from_nbt(cls, section, registry, non_air=-1):
"""
Creates a block array that uses the given NBT section tag as storage
for block data and the palette. Minecraft 1.13+ only.
"""
nbt_palette = section.value['Palette']
if isinstance(nbt_palette.value, _NBTPaletteProxy):
proxy = nbt_palette.value
else:
proxy = _NBTPaletteProxy(registry)
for entry in nbt_palette.value:
proxy.append(entry)
nbt_palette.value = proxy
storage = section.value["BlockStates"].value
palette = proxy.palette
storage.length = 4096
storage.value_width = get_width(len(proxy), registry.max_bits)
return cls(storage, palette, registry, non_air)
# Instance methods --------------------------------------------------------
def to_bytes(self):
"""
Serialize this block array to bytes.
"""
return self.storage.to_bytes()
def is_empty(self):
"""
Returns true if this block array is entirely air.
"""
if self.palette == [0]:
return True
else:
return self.non_air == 0
@property
def non_air(self):
if self._non_air == -1:
self._non_air = [
self.registry.is_air_block(obj) for obj in self].count(False)
return self._non_air
def repack(self, reserve=None):
"""
Re-packs internal data to use the smallest possible bits-per-block by
eliminating unused palette entries. This operation is slow as it walks
all blocks to determine the new palette.
"""
# If no reserve is given, we re-compute the palette by walking blocks
if reserve is None:
palette = sorted(set(self))
palette_len = len(palette)
# Otherwise we just ensure we have enough space to store new entries.
elif self.palette:
palette = self.palette[:]
palette_len = len(palette) + reserve
# Reserving space in an unpaletted array is a no-op.
else:
return
# Compute new value width
value_width = get_width(palette_len, self.registry.max_bits)
# Exit if there's no change in value width needed
if value_width == self.storage.value_width:
return
# Switch to unpaletted operation if necessary
if value_width > 8:
palette = []
# Save contents
values = self[:]
# Update internals
self.storage.value_width = value_width
self.storage.purge()
self.palette[:] = palette
# Load contents
self[:] = values
# Sequence methods --------------------------------------------------------
def __len__(self):
return 4096
def __getitem__(self, item):
if isinstance(item, slice):
values = []
for value in self.storage[item.start:item.stop:item.step]:
if self.palette:
value = self.palette[value]
value = self.registry.decode_block(value)
values.append(value)
return values
else:
value = self.storage[item]
if self.palette:
value = self.palette[value]
value = self.registry.decode_block(value)
return value
def __setitem__(self, item, value):
if isinstance(item, slice):
for idx in range(*item.indices(4096)):
self[idx] = value[idx]
return
if self._non_air != -1:
self._non_air += int(self.registry.is_air_block(self[item])) - \
int(self.registry.is_air_block(value))
value = self.registry.encode_block(value)
if self.palette:
try:
value = self.palette.index(value)
except ValueError:
self.repack(reserve=1)
if self.palette:
self.palette.append(value)
value = len(self.palette) - 1
self.storage[item] = value
def __iter__(self):
for value in self.storage:
if self.palette:
value = self.palette[value]
value = self.registry.decode_block(value)
yield value
def __contains__(self, value):
if self.palette:
if self.registry.encode_block(value) not in self.palette:
return False
return super(BlockArray, self).__contains__(value)
def index(self, value, start=0, stop=None):
if self.palette:
if self.registry.encode_block(value) not in self.palette:
raise ValueError
return super(BlockArray, self).index(value, start, stop)
def count(self, value):
if self.palette:
if self.registry.encode_block(value) not in self.palette:
return 0
return super(BlockArray, self).count(value)
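# A rough usage sketch (the registry object and the block-dict shape are
# assumptions inferred from the encode/decode calls above, not verified API):
#
#   blocks = BlockArray.empty(registry)
#   blocks[0] = {'name': 'minecraft:stone'}
#   blocks.count({'name': 'minecraft:air'})   # roughly 4095 if entry 0 is air
#   blocks.repack()                           # shrink palette / value width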
class _NBTPaletteProxy(MutableSequence):
def __init__(self, registry):
self.registry = registry
self.palette = []
def insert(self, idx, value):
# FIXME: NBT chunk sections are *always* paletted, and so the format
# diverges for palettes longer than 255 entries.
if len(self.palette) >= 255:
raise ValueError("Can't add more than 255 entries to NBT palette "
"proxy.")
self.palette.insert(idx, None)
self[idx] = value
def __len__(self):
return len(self.palette)
def __delitem__(self, idx):
del self.palette[idx]
def __getitem__(self, idx):
from quarry.types import nbt
block = self.registry.decode_block(self.palette[idx])
entry = nbt.TagCompound({'Name': nbt.TagString(block['name'])})
if len(block) > 1:
entry.value['Properties'] = nbt.TagCompound({
key: nbt.TagString(value)
for key, value in block.items()
if key != "name"})
return entry
def __setitem__(self, idx, tag):
block = {'name': tag.value['Name'].value}
properties = tag.value.get('Properties')
if properties:
block.update(properties.to_obj())
self.palette[idx] = self.registry.encode_block(block)
|
f013a8b06f21b2b421cadfecebb2f680a23e4146
|
a5a36aa7200b0be6ea11ad669ba0534ee1b896a6
|
/tests/getattr_test.py
|
6deeeb020fada2a8f5ea41f41592177afc0fbb11
|
[
"MIT"
] |
permissive
|
vaexio/vaex
|
ec42919f272a723f884fece3c83975112e7a6f30
|
15245cf4332d4423ac58bd737aee27d911a1b252
|
refs/heads/master
| 2023-08-11T08:03:33.248943
| 2023-07-21T10:40:58
| 2023-07-21T10:40:58
| 24,528,468
| 7,892
| 686
|
MIT
| 2023-09-04T05:07:11
| 2014-09-27T09:44:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,825
|
py
|
getattr_test.py
|
from common import *
def test_column_subset(ds_local):
ds = ds_local
dss = ds[['x', 'y']]
assert dss.get_column_names() == ['x', 'y']
np.array(dss) # test if all columns can be put in arrays
def test_column_subset_virtual(ds_local):
ds = ds_local
ds['r'] = ds.x + ds.y
dss = ds[['r']]
assert dss.get_column_names() == ['r']
assert set(dss.get_column_names(hidden=True)) == set(['__x', '__y', 'r'])
np.array(dss) # test if all columns can be put in arrays
def test_column_subset_virtual_recursive(df_local_non_arrow):
df = df_local_non_arrow
df['r'] = df.x + df.y
df['q'] = df.r/2
dfs = df[['q']]
assert dfs.get_column_names() == ['q']
all_columns = set(dfs.get_column_names(hidden=True))
assert all_columns == set(['__x', '__y', '__r', 'q'])
np.array(dfs) # test if all columns can be put in arrays
def test_column_subset_virtual_filtered(ds_filtered):
ds = ds_filtered
dss = ds[['y']]
assert dss.get_column_names() == ['y']
all_columns = set(dss.get_column_names(hidden=True))
assert all_columns == set(['__x', 'y'])
np.array(dss) # test if all columns can be put in arrays, with the possible filter copied as hidden
# 'nested' filter
ds = ds[ds.y > 2]
dss = ds[['m']]
assert dss.get_column_names() == ['m']
assert set(dss.get_column_names(hidden=True)) == set(['__x', '__y', 'm'])
def test_column_order(ds_local):
ds = ds_local
dss = ds[['x', 'y']]
assert dss.get_column_names() == ['x', 'y']
assert np.array(dss).T.tolist() == [ds.x.values.tolist(), ds.y.values.tolist()]
dss = ds[['y', 'x']]
assert dss.get_column_names() == ['y', 'x']
assert np.array(dss).T.tolist() == [ds.y.values.tolist(), ds.x.values.tolist()]
def test_column_order_virtual(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
ds['r'] = ds.y + 10
ds = ds_local
dss = ds[['x', 'r']]
assert dss.get_column_names() == ['x', 'r']
assert np.array(dss).T.tolist() == [ds.x.values.tolist(), ds.r.values.tolist()]
dss = ds[['r', 'x']]
assert dss.get_column_names() == ['r', 'x']
assert np.array(dss).T.tolist() == [ds.r.values.tolist(), ds.x.values.tolist()]
def test_expression(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
dss = ds[['y/10', 'x/5']]
assert 'y' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.y/10).values.tolist(), (ds.x/5).values.tolist()]
@pytest.mark.skip(reason="Not implemented yet, should work, might need refactoring of copy")
def test_expression_virtual(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
ds['r'] = ds.y + 10
dss = ds[['r/10', 'x/5']]
assert 'r' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.r/10).values.tolist(), (ds.x/5).values.tolist()]
dss = ds[['x/5', 'r/10']]
assert 'r' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.x/5).values.tolist(), (ds.r/10).values.tolist()]
def test_access_data_after_virtual_column_creation(ds_local):
ds = ds_local
# we can access the x column
assert ds[['x']].values[:,0].tolist() == ds.x.values.tolist()
ds['virtual'] = ds.x * 2
# it should also work after we added a virtual column
assert ds[['x']].values[:,0].tolist() == ds.x.values.tolist()
def test_non_existing_column(df_local):
df = df_local
with pytest.raises(NameError, match='.*Did you.*'):
df['x_']
def test_alias(df_local):
df = df_local
df2 = df[['123456']]
assert '123456' in df2
|
5c876bc7eb202c927052553e4b712a99febd893e
|
2a76ca8c01e7abe6ef64d030ecbb65e88641b278
|
/glumpy/transforms/log_scale.py
|
da8909bbc55a59d1a6bc433bf7f0c96d5989f55f
|
[] |
permissive
|
glumpy/glumpy
|
18bfc2d76b7a5fc126fbebddf2970d95238fc66b
|
75408635bd46e48ff10939e308a71eafdaff35e8
|
refs/heads/master
| 2023-09-03T11:48:52.087002
| 2023-04-20T15:23:59
| 2023-04-20T15:23:59
| 23,520,171
| 1,228
| 225
|
BSD-3-Clause
| 2023-07-07T07:25:18
| 2014-08-31T18:30:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,815
|
py
|
log_scale.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import library
from . transform import Transform
from . quantitative_scale import QuantitativeScale
class LogScale(QuantitativeScale):
"""
Log scales are similar to linear scales, except there's a logarithmic
transform that is applied to the input domain value before the output range
value is computed. The mapping to the output range value y can be expressed
as a function of the input domain value x: y = m log(x) + b.
As log(0) is negative infinity, a log scale must have either an
exclusively-positive or exclusively-negative domain; the domain must not
include or cross zero. A log scale with a positive domain has a
well-defined behavior for positive values, and a log scale with a negative
domain has a well-defined behavior for negative values (the input value is
multiplied by -1, and the resulting output value is also multiplied by
-1). The behavior of the scale is undefined if you pass a negative value to
a log scale with a positive domain or vice versa.
:param 2-tuple domain: Input domains. Default is (-1,+1).
:param 2-tuple range: Output range. Default is (-1,+1).
:param float base: Log base. Default is 10.
:param bool clamp: Clamping test. Default is False.
:param bool discard: Discard test. Default is True.
"""
aliases = { "domain" : "log_scale_domain",
"range" : "log_scale_range",
"clamp" : "log_scale_clamp",
"base" : "log_scale_base",
"discard" : "log_scale_discard" }
def __init__(self, *args, **kwargs):
"""
Initialize the transform
"""
self._base = float(Transform._get_kwarg("base", kwargs) or 10.0)
kwargs["domain"] = kwargs.get("domain", (1,10))
code = library.get("transforms/log-scale.glsl")
QuantitativeScale.__init__(self, code, *args, **kwargs)
@property
def base(self):
""" Input base """
return self._base
@base.setter
def base(self, value):
""" Input base """
self._base = np.abs(float(value))
if self.is_attached:
self["base"] = self._base
self["domain"] = self._process_domain()
def on_attach(self, program):
QuantitativeScale.on_attach(self, program)
self["base"] = self._base
def _scale(self,index):
domain = self._domain
base = self._base
return np.copysign(1.0,domain) * np.log(np.abs(domain))/np.log(base)
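# A small numeric sketch of the docstring formula y = m*log(x) + b: with
# domain (1, 10), range (-1, +1) and base 10, x=1 maps to -1, x=10 maps to +1,
# and x=sqrt(10) maps to 0 (the midpoint of the log-transformed domain).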
|
16553d8bc62ef2fd5f073c71e7112bd34259f879
|
dee696adb52739dd7d4021878bcbade43b359b0c
|
/sonora/wsgi.py
|
7f433cc44fdb2f353145ea34d0d9025d99584bf9
|
[
"Apache-2.0"
] |
permissive
|
public/sonora
|
bbd5f3440301799ed53a7c5659efcefbaf572d9a
|
f5c7b3dd1826cad5c1252455e14a27bdb17eb2b6
|
refs/heads/master
| 2023-08-21T05:53:23.711907
| 2023-08-18T16:56:54
| 2023-08-18T16:56:54
| 182,774,837
| 230
| 14
|
Apache-2.0
| 2023-08-23T14:43:07
| 2019-04-22T11:54:12
|
Python
|
UTF-8
|
Python
| false
| false
| 11,928
|
py
|
wsgi.py
|
import base64
from collections import namedtuple
import time
from urllib.parse import quote
import grpc
from sonora import protocol
_HandlerCallDetails = namedtuple(
"_HandlerCallDetails", ("method", "invocation_metadata")
)
class grpcWSGI(grpc.Server):
"""
WSGI Application Object that understands gRPC-Web.
This is called by the WSGI server that's handling our actual HTTP
connections. That means we can't use the normal gRPC I/O loop etc.
"""
def __init__(self, application=None, enable_cors=True):
self._application = application
self._handlers = []
self._enable_cors = enable_cors
def add_generic_rpc_handlers(self, handlers):
self._handlers.extend(handlers)
def add_insecure_port(self, port):
raise NotImplementedError()
def add_secure_port(self, port):
raise NotImplementedError()
def start(self):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
def _get_rpc_handler(self, environ):
path = environ["PATH_INFO"]
handler_call_details = _HandlerCallDetails(path, None)
rpc_handler = None
for handler in self._handlers:
rpc_handler = handler.service(handler_call_details)
if rpc_handler:
return rpc_handler
return None
def _create_context(self, environ):
try:
timeout = protocol.parse_timeout(environ["HTTP_GRPC_TIMEOUT"])
except KeyError:
timeout = None
metadata = []
for key, value in environ.items():
if key.startswith("HTTP_"):
header = key[5:].lower().replace("_", "-")
if header.endswith("-bin"):
value = base64.b64decode(value)
metadata.append((header, value))
return ServicerContext(timeout, metadata)
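    # For instance (hypothetical header): the WSGI environ key
    # "HTTP_X_TRACE_BIN" becomes the metadata pair
    # ("x-trace-bin", <base64-decoded bytes>).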
def _do_grpc_request(self, rpc_method, environ, start_response):
request_data = self._read_request(environ)
context = self._create_context(environ)
if environ["CONTENT_TYPE"] == "application/grpc-web-text":
_, _, message = protocol.b64_unwrap_message(request_data)
else:
_, _, message = protocol.unwrap_message(request_data)
request_proto = rpc_method.request_deserializer(message)
resp = None
try:
if not rpc_method.request_streaming and not rpc_method.response_streaming:
resp = rpc_method.unary_unary(request_proto, context)
elif not rpc_method.request_streaming and rpc_method.response_streaming:
resp = rpc_method.unary_stream(request_proto, context)
if context.time_remaining() is not None:
resp = _timeout_generator(context, resp)
else:
raise NotImplementedError()
except grpc.RpcError:
pass
except NotImplementedError:
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
response_content_type = (
environ.get("HTTP_ACCEPT", "application/grpc-web+proto")
.split(",")[0]
.strip()
)
headers = [
("Content-Type", response_content_type),
]
if self._enable_cors:
headers.append((
"Access-Control-Allow-Origin",
environ.get("HTTP_HOST") or environ["SERVER_NAME"],
))
headers.append(("Access-Control-Expose-Headers", "*"))
if response_content_type == "application/grpc-web-text":
wrap_message = protocol.b64_wrap_message
else:
wrap_message = protocol.wrap_message
if rpc_method.response_streaming:
yield from self._do_streaming_response(
rpc_method, start_response, wrap_message, context, headers, resp
)
else:
yield from self._do_unary_response(
rpc_method, start_response, wrap_message, context, headers, resp
)
def _do_streaming_response(
self, rpc_method, start_response, wrap_message, context, headers, resp
):
try:
first_message = next(resp)
except (grpc.RpcError, StopIteration):
first_message = None
if context._initial_metadata:
headers.extend(context._initial_metadata)
start_response("200 OK", headers)
if first_message is not None:
yield wrap_message(False, False, rpc_method.response_serializer(first_message))
try:
for message in resp:
yield wrap_message(
False, False, rpc_method.response_serializer(message)
)
except grpc.RpcError:
pass
trailers = [("grpc-status", str(context.code.value[0]))]
if context.details:
trailers.append(("grpc-message", quote(context.details.encode("utf8"))))
if context._trailing_metadata:
trailers.extend(context._trailing_metadata)
trailer_message = protocol.pack_trailers(trailers)
yield wrap_message(True, False, trailer_message)
def _do_unary_response(
self, rpc_method, start_response, wrap_message, context, headers, resp
):
if resp:
message_data = wrap_message(
False, False, rpc_method.response_serializer(resp)
)
else:
message_data = b""
if context._initial_metadata:
headers.extend(context._initial_metadata)
trailers = [("grpc-status", str(context.code.value[0]))]
if context.details:
trailers.append(("grpc-message", quote(context.details.encode("utf8"))))
if context._trailing_metadata:
trailers.extend(context._trailing_metadata)
trailer_message = protocol.pack_trailers(trailers)
trailer_data = wrap_message(True, False, trailer_message)
content_length = len(message_data) + len(trailer_data)
headers.append(("content-length", str(content_length)))
start_response("200 OK", headers)
yield message_data
yield trailer_data
def _do_cors_preflight(self, environ, start_response):
headers = [
("Content-Type", "text/plain"),
("Content-Length", "0"),
]
if self._enable_cors:
headers += [
("Access-Control-Allow-Methods", "POST, OPTIONS"),
("Access-Control-Allow-Headers", "*"),
(
"Access-Control-Allow-Origin",
environ.get("HTTP_HOST") or environ["SERVER_NAME"],
),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Expose-Headers", "*"),
]
start_response("204 No Content", headers)
return []
def __call__(self, environ, start_response):
"""
Our actual WSGI request handler. Will execute the request
if it matches a configured gRPC service path or fall through
to the next application.
"""
rpc_method = self._get_rpc_handler(environ)
request_method = environ["REQUEST_METHOD"]
if rpc_method:
if request_method == "POST":
return self._do_grpc_request(rpc_method, environ, start_response)
elif request_method == "OPTIONS":
return self._do_cors_preflight(environ, start_response)
else:
start_response("400 Bad Request", [])
return []
if self._application:
return self._application(environ, start_response)
else:
start_response("404 Not Found", [])
return []
def _read_request(self, environ):
try:
content_length = environ.get("CONTENT_LENGTH")
if content_length:
content_length = int(content_length)
else:
content_length = None
except ValueError:
content_length = None
stream = environ["wsgi.input"]
transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING")
if transfer_encoding == "chunked":
buffer = []
line = stream.readline()
while line:
if not line:
break
size = line.split(b";", 1)[0]
                if size == b"\r\n":
break
chunk_size = int(size, 16)
if chunk_size == 0:
break
buffer.append(stream.read(chunk_size + 2)[:-2])
line = stream.readline()
return b"".join(buffer)
else:
return stream.read(content_length or 5)
class ServicerContext(grpc.ServicerContext):
def __init__(self, timeout=None, metadata=None):
self.code = grpc.StatusCode.OK
self.details = None
self._timeout = timeout
if timeout is not None:
self._deadline = time.monotonic() + timeout
else:
self._deadline = None
self._invocation_metadata = metadata or tuple()
self._initial_metadata = None
self._trailing_metadata = None
def set_code(self, code):
if isinstance(code, grpc.StatusCode):
self.code = code
elif isinstance(code, int):
for status_code in grpc.StatusCode:
if status_code.value[0] == code:
self.code = status_code
break
else:
raise ValueError(f"Unknown StatusCode: {code}")
else:
raise NotImplementedError(
f"Unsupported status code type: {type(code)} with value {code}"
)
def set_details(self, details):
self.details = details
def abort(self, code, details):
if code == grpc.StatusCode.OK:
raise ValueError()
self.set_code(code)
self.set_details(details)
raise grpc.RpcError()
def abort_with_status(self, status):
if status == grpc.StatusCode.OK:
raise ValueError()
self.set_code(status)
raise grpc.RpcError()
def time_remaining(self):
if self._deadline is not None:
return max(self._deadline - time.monotonic(), 0)
else:
return None
def invocation_metadata(self):
return self._invocation_metadata
def send_initial_metadata(self, initial_metadata):
self._initial_metadata = protocol.encode_headers(initial_metadata)
def set_trailing_metadata(self, trailing_metadata):
self._trailing_metadata = protocol.encode_headers(trailing_metadata)
def peer(self):
raise NotImplementedError()
def peer_identities(self):
raise NotImplementedError()
def peer_identity_key(self):
raise NotImplementedError()
def auth_context(self):
raise NotImplementedError()
def add_callback(self):
raise NotImplementedError()
def cancel(self):
raise NotImplementedError()
def is_active(self):
raise NotImplementedError()
def _timeout_generator(context, gen):
while 1:
if context.time_remaining() > 0:
yield next(gen)
else:
context.code = grpc.StatusCode.DEADLINE_EXCEEDED
context.details = "request timed out at the server"
raise grpc.RpcError()
|
e34590513f584f347882c6f1efa7d24a99590300
|
9e9858f53eef684140d7274987606a44d3077eaa
|
/examples/auto_test/SConstruct
|
71d1a7cc235f11311e59cd50ac30d5186c8d4d0e
|
[
"Apache-2.0"
] |
permissive
|
majianjia/nnom
|
1b8d11bd0d9c9c179ac0a9106879f0af7235c648
|
f5dae9307bf8dd4644210117ff0b2b34b34ac777
|
refs/heads/master
| 2023-08-11T14:23:34.514766
| 2023-05-16T01:52:33
| 2023-05-16T01:52:33
| 166,869,630
| 767
| 216
|
Apache-2.0
| 2023-05-16T01:52:35
| 2019-01-21T19:38:30
|
C
|
UTF-8
|
Python
| false
| false
| 1,145
|
SConstruct
|
import os
#if(not os.path.exists('CMSIS_5')):
# os.system('git clone https://github.com/ARM-software/CMSIS_5.git')
ROOT=os.path.abspath('../..')
env = Environment()
env.Replace(
ARCOMSTR = 'AR $SOURCE',
ASCOMSTR = 'AS $SOURCE',
ASPPCOMSTR = 'AS $SOURCE',
CCCOMSTR = 'CC $SOURCE',
CXXCOMSTR = 'CXX $SOURCE',
LINKCOMSTR = 'LINK $TARGET'
)
objs = []
#objs += Glob('CMSIS_5/CMSIS/NN/Source/*/*.c')
#objs += Glob('CMSIS_5/CMSIS/DSP/Source/BasicMathFunctions/arm_*.c')
objs += Glob('main.c')
#env.Append(CPPPATH=['CMSIS_5/CMSIS/NN/Include',
# 'CMSIS_5/CMSIS/DSP/Include',
# 'CMSIS_5/CMSIS/Core/Include'])
#env.Append(CPPDEFINES=['__ARM_ARCH_8M_BASE__'])
#env.Append(CCFLAGS=['-g','-O0','-std=gnu99'])
env.Append(CCFLAGS=['-std=c99'])
objs +=Glob('%s/src/core/*.c'%(ROOT))
objs +=Glob('%s/src/layers/*.c'%(ROOT))
objs +=Glob('%s/src/backends/*.c'%(ROOT))
env.Append(CPPPATH=['%s/inc'%(ROOT),'%s/port'%(ROOT)])
#env.Append(CPPDEFINES=['USE_NNOM_OUTPUT_SAVE'])
#if(os.getenv('USE_CMSIS_NN') == 'YES'):
# env.Append(CPPDEFINES=['NNOM_USING_CMSIS_NN'])
env.Program('mnist',objs)
|
|
d5bac1b5f53d3bebe127b9b0ff52a2eb3d9cb90d
|
71ba544346ae560c10a6d1b3ea6422ca614dab87
|
/examples/sample/sample_relations.py
|
963a2af5b31136a57f1bbbf4689801e7d654bfc1
|
[
"MIT"
] |
permissive
|
HKUST-KnowComp/ASER
|
f78f64c46ecf2637a2c138b677728a7efd3af044
|
ecf19896ba34dfc8ec06115baccb2cd7e937b662
|
refs/heads/master
| 2023-08-08T02:32:18.154665
| 2023-07-25T07:23:52
| 2023-07-25T07:23:52
| 184,387,399
| 299
| 34
|
MIT
| 2022-11-24T05:23:34
| 2019-05-01T08:14:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
sample_relations.py
|
import numpy as np
import pickle
import json
import os
from collections import defaultdict
from aser.database.kg_connection import ASERKGConnection
from aser.extract.parsed_reader import ParsedReader
from aser.extract.aser_extractor import DiscourseASERExtractor
if __name__ == "__main__":
processed_path = "/home/xliucr/ASER/data"
db_path = "/home/xliucr/ASER/database/core_2.0/all/KG.db"
rid2sids_path = "/home/xliucr/ASER/database/core_2.0/all/rid2sids.pkl"
sampled_relations_path = "/home/xliucr/ASER/database/core_2.0/all/sampled_relations.json"
N = 100
seed = 0
np.random.seed(seed)
kg_conn = ASERKGConnection(db_path, mode="memory")
parsed_reader = ParsedReader()
aser_extractor = DiscourseASERExtractor()
with open(rid2sids_path, "rb") as f:
rid2sids = pickle.load(f)
relation2rids = defaultdict(list)
for rid, relation in kg_conn.rid2relation_cache.items():
for sense in relation.relations:
relation2rids[sense].append(rid)
sampled_relations = dict()
for sense, rids in relation2rids.items():
relations = list()
np.random.shuffle(rids)
for rid in rids:
relation = kg_conn.get_exact_match_relation(rid)
hid, tid = relation.hid, relation.tid
sids = list(rid2sids[rid])
np.random.shuffle(sids)
for (sid1, sid2) in sids:
sentence1 = parsed_reader.get_parsed_sentence_and_context(os.path.join(processed_path, sid1))["sentence"]
if sid2 != sid1:
sentence2 = parsed_reader.get_parsed_sentence_and_context(os.path.join(processed_path, sid2))["sentence"]
extracted_eventualities, extracted_relations = aser_extractor.extract_from_parsed_result([sentence1, sentence2], in_order=False)
else:
sentence2 = sentence1
extracted_eventualities, extracted_relations = aser_extractor.extract_from_parsed_result([sentence1], in_order=False)
if len(extracted_relations) > 0 and not sense in set.union(*[set(r.relations.keys()) for r in extracted_relations]):
continue
else:
relations.append({
"sids": (sid1, sid2),
"sentences": [sentence1] if sid1 == sid2 else [sentence1, sentence2],
"hid": hid,
"tid": tid,
"rid": rid,
"eventualities": [e.encode(encoding="utf-8").decode("utf-8") for e in extracted_eventualities],
"relations": [r.encode(encoding="utf-8").decode("utf-8") for r in extracted_relations]
})
break
if len(relations) >= N:
break
sampled_relations[sense] = relations
print(sense, len(relations))
with open(sampled_relations_path, "w") as f:
json.dump(sampled_relations, f)
kg_conn.close()
parsed_reader.close()
aser_extractor.close()
|
5bcbee82d51a6316dbde87b4421d2ad6f71b2716
|
8fa191cd4a67431a04eff62d35122ee83cc7b0af
|
/bookwyrm/tests/models/test_group.py
|
86cafaa394c79d8957a062f6cc4f0c35f2a62018
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
bookwyrm-social/bookwyrm
|
24678676a7a58dba96641194dfae3fffbf01574d
|
0f8da5b738047f3c34d60d93f59bdedd8f797224
|
refs/heads/main
| 2023-08-20T21:45:30.957277
| 2023-08-19T23:41:50
| 2023-08-19T23:41:50
| 236,415,735
| 1,398
| 216
|
NOASSERTION
| 2023-09-08T20:43:06
| 2020-01-27T03:51:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,657
|
py
|
test_group.py
|
""" testing models """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import models
@patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async")
class Group(TestCase):
"""some activitypub oddness ahead"""
def setUp(self):
"""Set up for tests"""
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.owner_user = models.User.objects.create_user(
"mouse", "mouse@mouse.mouse", "mouseword", local=True, localname="mouse"
)
self.rat = models.User.objects.create_user(
"rat", "rat@rat.rat", "ratword", local=True, localname="rat"
)
self.badger = models.User.objects.create_user(
"badger",
"badger@badger.badger",
"badgerword",
local=True,
localname="badger",
)
self.capybara = models.User.objects.create_user(
"capybara",
"capybara@capybara.capybara",
"capybaraword",
local=True,
localname="capybara",
)
self.public_group = models.Group.objects.create(
name="Public Group",
description="Initial description",
user=self.owner_user,
privacy="public",
)
self.private_group = models.Group.objects.create(
name="Private Group",
description="Top secret",
user=self.owner_user,
privacy="direct",
)
self.followers_only_group = models.Group.objects.create(
name="Followers Group",
description="No strangers",
user=self.owner_user,
privacy="followers",
)
models.GroupMember.objects.create(group=self.private_group, user=self.badger)
models.GroupMember.objects.create(
group=self.followers_only_group, user=self.badger
)
models.GroupMember.objects.create(group=self.public_group, user=self.capybara)
def test_group_members_can_see_private_groups(self, _):
"""direct privacy group should not be excluded from group listings for group
members viewing"""
rat_groups = models.Group.privacy_filter(self.rat).all()
badger_groups = models.Group.privacy_filter(self.badger).all()
self.assertFalse(self.private_group in rat_groups)
self.assertTrue(self.private_group in badger_groups)
def test_group_members_can_see_followers_only_lists(self, _):
"""follower-only group booklists should not be excluded from group booklist
listing for group members who do not follower list owner"""
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
), patch("bookwyrm.lists_stream.remove_list_task.delay"):
followers_list = models.List.objects.create(
name="Followers List",
curation="group",
privacy="followers",
group=self.public_group,
user=self.owner_user,
)
rat_lists = models.List.privacy_filter(self.rat).all()
badger_lists = models.List.privacy_filter(self.badger).all()
capybara_lists = models.List.privacy_filter(self.capybara).all()
self.assertFalse(followers_list in rat_lists)
self.assertFalse(followers_list in badger_lists)
self.assertTrue(followers_list in capybara_lists)
def test_group_members_can_see_private_lists(self, _):
"""private group booklists should not be excluded from group booklist listing
for group members"""
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
), patch("bookwyrm.lists_stream.remove_list_task.delay"):
private_list = models.List.objects.create(
name="Private List",
privacy="direct",
curation="group",
group=self.public_group,
user=self.owner_user,
)
rat_lists = models.List.privacy_filter(self.rat).all()
badger_lists = models.List.privacy_filter(self.badger).all()
capybara_lists = models.List.privacy_filter(self.capybara).all()
self.assertFalse(private_list in rat_lists)
self.assertFalse(private_list in badger_lists)
self.assertTrue(private_list in capybara_lists)
|
69094e52ac12b3129acb03ad12b1d341e3b2dd9b
|
744c3b66611b08782fcdd9d66261c4d55b00d426
|
/examples/pybullet/gym/pybullet_envs/minitaur/vision/imagery_utils_test.py
|
5b5493f64f0c42fbf30cb0967718d7661b2f95a7
|
[
"Zlib"
] |
permissive
|
erwincoumans/bullet3
|
4ff9e0aa64b641c65b57b26f415dd69dbfb12256
|
6d181d78a5c7be8714c74055cddcf63d5ccef70a
|
refs/heads/master
| 2023-03-10T14:58:18.072562
| 2023-02-24T18:32:53
| 2023-02-24T18:32:53
| 31,621,748
| 103
| 29
|
NOASSERTION
| 2019-02-25T17:31:00
| 2015-03-03T21:15:54
|
C++
|
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
imagery_utils_test.py
|
"""Tests for imagery_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import numpy as np
from pybullet_envs.minitaur.vision import imagery_pb2
from pybullet_envs.minitaur.vision import imagery_utils
from google3.testing.pybase import googletest
class ImageryUtilsTest(googletest.TestCase):
def test_convert_bgra_images(self):
image = imagery_pb2.Image(
height_px=2,
width_px=2,
image_format=imagery_pb2.Image.IMAGE_FORMAT_BGRA_HWC_8U,
content=b'ABCDABCDABCDABCD',
)
image_array = imagery_utils.convert_image_to_array(image)
self.assertEqual(image_array.dtype, np.uint8)
self.assertEqual(image_array.shape, (image.height_px, image.width_px, 4))
self.assertEqual(image_array[0, 0, 0], ord('A'))
self.assertEqual(image_array[1, 0, 3], ord('D'))
def test_convert_rgb_images(self):
image = imagery_pb2.Image(
height_px=2,
width_px=2,
image_format=imagery_pb2.Image.IMAGE_FORMAT_RGB_HWC_8U,
content=b'ABCABCABCABC',
)
image_array = imagery_utils.convert_image_to_array(image)
self.assertEqual(image_array.dtype, np.uint8)
self.assertEqual(image_array.shape, (image.height_px, image.width_px, 3))
self.assertEqual(image_array[0, 0, 0], ord('A'))
self.assertEqual(image_array[1, 1, 2], ord('C'))
def test_convert_gray_32bit_images(self):
image = imagery_pb2.Image(
height_px=2,
width_px=3,
image_format=imagery_pb2.Image.IMAGE_FORMAT_GRAY_HW_32F,
content=b'AAAABBBBCCCCAAAABBBBCCCC',
)
image_array = imagery_utils.convert_image_to_array(image)
self.assertEqual(image_array.dtype, np.float32)
self.assertEqual(image_array.shape, (image.height_px, image.width_px))
self.assertEqual(image_array[0, 2], struct.unpack(b'<f', b'CCCC'))
self.assertEqual(image_array[1, 1], struct.unpack(b'<f', b'BBBB'))
def test_convert_gray_16bit_images(self):
image = imagery_pb2.Image(
height_px=3,
width_px=2,
image_format=imagery_pb2.Image.IMAGE_FORMAT_GRAY_HW_16U,
content=b'AABBCCAABBCC',
)
image_array = imagery_utils.convert_image_to_array(image)
self.assertEqual(image_array.dtype, np.uint16)
self.assertEqual(image_array.shape, (image.height_px, image.width_px))
self.assertEqual(image_array[0, 1], struct.unpack(b'<H', b'BB'))
self.assertEqual(image_array[2, 1], struct.unpack(b'<H', b'CC'))
def test_unspecified_image_format(self):
image = imagery_pb2.Image()
with self.assertRaises(ValueError):
imagery_utils.convert_image_to_array(image)
if __name__ == '__main__':
googletest.main()
|
8af0283915b6c3028b2a4c6918b95de481969953
|
3ab8f9d09a406eda9188adc13b81110519eb5591
|
/src/twisted/python/test/strategies.py
|
d11fbf74dbf9a248c827e74a699c549705e67684
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twisted/twisted
|
b38039ef9ded2cc35390c280bd8f218b64917dea
|
cbcb1f7a2d52be937ad2e9ebea941611c4d65516
|
refs/heads/trunk
| 2023-09-03T21:08:14.581198
| 2023-09-03T20:51:47
| 2023-09-03T20:51:47
| 1,985,358
| 5,319
| 1,363
|
NOASSERTION
| 2023-09-13T20:30:22
| 2011-07-01T20:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
strategies.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Hypothesis strategies for values related to L{twisted.python}.
"""
from hypothesis.strategies import SearchStrategy, characters, text
def systemdDescriptorNames() -> SearchStrategy[str]:
"""
Build strings that are legal values for the systemd
I{FileDescriptorName} field.
"""
# systemd.socket(5) says:
#
# > Names may contain any ASCII character, but must exclude control
# > characters and ":", and must be at most 255 characters in length.
return text(
# The docs don't say there is a min size so I'm guessing...
min_size=1,
max_size=255,
alphabet=characters(
# These constraints restrict us to ASCII.
min_codepoint=0,
max_codepoint=127,
# This one excludes control characters.
blacklist_categories=("Cc",),
# And this excludes the separator.
blacklist_characters=(":",),
),
)
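
# Hedged usage note (not part of the original module): as a Hypothesis strategy this is meant
# to be consumed by ``@given``, for example::
#
#     @given(name=systemdDescriptorNames())
#     def test_descriptor_name(name: str) -> None:
#         assert ":" not in name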
|
84812ba7905134c1c5115248097a28965f482c34
|
017090be7ab186cb6b47f49e1066ac5cfec3a542
|
/src/neptune/internal/utils/json_file_splitter.py
|
b0b91853a4bf2a08eaf8b68d80d9701426ab5eec
|
[
"Apache-2.0"
] |
permissive
|
neptune-ai/neptune-client
|
9a79f9d93c84b3a20114e6e49a80652930399ece
|
9b697ce548634c30dbc5881d4a0b223c8987515d
|
refs/heads/master
| 2023-08-18T01:48:22.634432
| 2023-08-17T11:55:57
| 2023-08-17T11:55:57
| 170,117,229
| 408
| 55
|
Apache-2.0
| 2023-09-13T12:51:03
| 2019-02-11T11:25:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,053
|
py
|
json_file_splitter.py
|
#
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["JsonFileSplitter"]
import json
from collections import deque
from io import StringIO
from json import JSONDecodeError
from typing import (
Optional,
Tuple,
)
class JsonFileSplitter:
BUFFER_SIZE = 64 * 1024
MAX_PART_READ = 8 * 1024
def __init__(self, file_path: str):
self._file = open(file_path, "r")
self._decoder = json.JSONDecoder(strict=False)
self._part_buffer = StringIO()
self._parsed_queue = deque()
self._start_pos = 0
def close(self) -> None:
self._file.close()
self._part_buffer.close()
def get(self) -> Optional[dict]:
return (self.get_with_size() or (None, None))[0]
def get_with_size(self) -> Tuple[Optional[dict], int]:
if self._parsed_queue:
return self._parsed_queue.popleft()
self._read_data()
if self._parsed_queue:
return self._parsed_queue.popleft()
return None, 0
def _read_data(self):
if self._part_buffer.tell() < self.MAX_PART_READ:
data = self._file.read(self.BUFFER_SIZE)
if not data:
return
if self._part_buffer.tell() > 0:
data = self._reset_part_buffer() + data
self._decode(data)
if not self._parsed_queue:
data = self._file.read(self.BUFFER_SIZE)
while data:
self._part_buffer.write(data)
data = self._file.read(self.BUFFER_SIZE)
data = self._reset_part_buffer()
self._decode(data)
def _decode(self, data: str):
start = self._json_start(data)
while start is not None:
try:
json_data, new_start = self._decoder.raw_decode(data, start)
size = new_start - start
start = new_start
except JSONDecodeError:
self._part_buffer.write(data[start:])
break
else:
self._parsed_queue.append((json_data, size))
start = self._json_start(data, start)
@staticmethod
def _json_start(data: str, start: int = 0) -> Optional[int]:
try:
return data.index("{", start)
except ValueError:
return None
def _reset_part_buffer(self) -> str:
data = self._part_buffer.getvalue()
self._part_buffer.close()
self._part_buffer = StringIO()
return data
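
# Hedged usage sketch (not part of the original file): the splitter yields consecutive JSON
# objects from a file containing several concatenated documents, e.g.
#
#     splitter = JsonFileSplitter("operations.json")  # hypothetical path
#     obj, size = splitter.get_with_size()            # returns (None, 0) once the file is exhausted
#     splitter.close()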
|
3c29f1c81ff9b22cd3e5a031d0a0aedbcf2dd273
|
38d86234ef4ba4ed5ac3bf585bcff8615004d2a6
|
/ssseg/modules/models/segmentors/fcn/__init__.py
|
6e65d2ed36ee6ab8fd41132f6ec0e1c082b98b0d
|
[
"Apache-2.0"
] |
permissive
|
SegmentationBLWX/sssegmentation
|
e57e7a071b03214c55248c4b1e64c85796744bf1
|
fe3d0dac83055b728fe3c5df964507fc7cc4948c
|
refs/heads/main
| 2023-08-05T02:49:57.370911
| 2023-08-01T13:49:17
| 2023-08-01T13:49:17
| 306,540,019
| 725
| 97
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
__init__.py
|
'''initialize'''
from .fcn import FCN, DepthwiseSeparableFCN
|
74bec013619aeacbb12468ed53dfa62472be4197
|
915df559af9529a529d999194806a0673a12f1fa
|
/plotting.py
|
0dc7114033e259156f9ce1fb54c4e5b8d48e6170
|
[
"MIT"
] |
permissive
|
ratschlab/RGAN
|
2b61f62aa25d611fae6f4ec6adc0b2e52f016937
|
f41731b965348259dcd94b0dcb1374d3e1c4ca7d
|
refs/heads/master
| 2022-08-09T09:42:10.890869
| 2018-02-09T18:09:22
| 2018-02-09T18:09:22
| 93,741,544
| 673
| 204
|
MIT
| 2018-10-07T20:42:34
| 2017-06-08T11:26:13
|
Python
|
UTF-8
|
Python
| false
| false
| 37,712
|
py
|
plotting.py
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import pdb
from time import time
from matplotlib.colors import hsv_to_rgb
from pandas import read_table, read_hdf
from scipy.stats import mode
import paths
from data_utils import scale_data
def visualise_at_epoch(vis_sample, data, predict_labels, one_hot, epoch,
identifier, num_epochs, resample_rate_in_min, multivariate_mnist,
seq_length, labels):
# TODO: what's with all these arguments
if data == 'mnist':
if predict_labels:
n_labels = 1
if one_hot:
n_labels = 6
lab_votes = np.argmax(vis_sample[:, :, -n_labels:], axis=2)
else:
lab_votes = vis_sample[:, :, -n_labels:]
labs, _ = mode(lab_votes, axis=1)
samps = vis_sample[:, :, :-n_labels]
else:
labs = labels
samps = vis_sample
if multivariate_mnist:
save_mnist_plot_sample(samps.reshape(-1, seq_length**2, 1), epoch, identifier, n_samples=6, labels=labs)
else:
save_mnist_plot_sample(samps, epoch, identifier, n_samples=6, labels=labs)
elif 'eICU' in data:
vis_eICU_patients_downsampled(vis_sample[:6, :, :],
resample_rate_in_min,
identifier=identifier,
idx=epoch)
else:
save_plot_sample(vis_sample, epoch, identifier, n_samples=6,
num_epochs=num_epochs)
return True
def save_plot_sample(samples, idx, identifier, n_samples=6, num_epochs=None, ncol=2):
assert n_samples <= samples.shape[0]
assert n_samples % ncol == 0
sample_length = samples.shape[1]
if not num_epochs is None:
col = hsv_to_rgb((1, 1.0*(idx)/num_epochs, 0.8))
else:
col = 'grey'
x_points = np.arange(sample_length)
nrow = int(n_samples/ncol)
fig, axarr = plt.subplots(nrow, ncol, sharex=True, figsize=(6, 6))
for m in range(nrow):
for n in range(ncol):
# first column
sample = samples[n*nrow + m, :, 0]
axarr[m, n].plot(x_points, sample, color=col)
axarr[m, n].set_ylim(-1, 1)
for n in range(ncol):
axarr[-1, n].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))
fig.suptitle(idx)
fig.subplots_adjust(hspace = 0.15)
fig.savefig("./experiments/plots/" + identifier + "_epoch" + str(idx).zfill(4) + ".png")
plt.clf()
plt.close()
return
def save_plot_interpolate(input_samples, samples, idx, identifier, num_epochs=None, distances=None, sigma=1):
""" very boilerplate, unsure how to make nicer """
n_samples = samples.shape[0]
sample_length = samples.shape[1]
if not num_epochs is None:
col = hsv_to_rgb((1, 1.0*(idx)/num_epochs, 0.8))
else:
col = 'grey'
x_points = np.arange(sample_length)
if distances is None:
nrow = n_samples
else:
nrow = n_samples + 1
ncol = 1
fig, axarr = plt.subplots(nrow, ncol, figsize=(3, 9))
if distances is None:
startat = 0
else:
startat = 1
axarr[0].plot(distances.dA, color='green', label='distance from A', linestyle='--', marker='o', markersize=4)
axarr[0].plot(distances.dB, color='orange', label='distance from B', linestyle='dotted', marker='o', markersize=4)
axarr[0].get_xaxis().set_visible(False)
axarr[0].set_title('distance from endpoints')
for m in range(startat, nrow):
sample = samples[m-startat, :, 0]
axarr[m].plot(x_points, sample, color=col)
for m in range(startat, nrow):
axarr[m].set_ylim(-1.1, 1.1)
axarr[m].set_xlim(0, sample_length)
axarr[m].spines["top"].set_visible(False)
axarr[m].spines["bottom"].set_visible(False)
axarr[m].spines["right"].set_visible(False)
axarr[m].spines["left"].set_visible(False)
axarr[m].tick_params(bottom='off', left='off')
axarr[m].get_xaxis().set_visible(False)
axarr[m].get_yaxis().set_visible(False)
axarr[m].set_facecolor((0.96, 0.96, 0.96))
if not input_samples is None:
# now do the real samples
axarr[startat].plot(x_points, input_samples[0], color='green', linestyle='--')
axarr[-1].plot(x_points, input_samples[1], color='green', linestyle='--')
axarr[-1].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))
fig.suptitle(idx)
fig.subplots_adjust(hspace = 0.2)
fig.savefig("./experiments/plots/" + identifier + "_interpolate.png")
fig.savefig("./experiments/plots/" + identifier + "_interpolate.pdf")
plt.clf()
plt.close()
return
def reconstruction_errors(identifier, train_errors, vali_errors,
generated_errors, random_errors):
"""
Plot two histogram of the reconstruction errors.
"""
print(identifier)
fig, axarr = plt.subplots(4, 1, sharex=True, figsize=(4, 8))
axarr[0].hist(train_errors, normed=1, color='green', bins=50)
axarr[0].set_title("train reconstruction errors")
axarr[1].hist(vali_errors, normed=1, color='blue', bins=50)
axarr[1].set_title('vali reconstruction errors')
axarr[2].hist(generated_errors, normed=1, color='pink', bins=50)
axarr[2].set_title('generated reconstruction errors')
axarr[3].hist(random_errors, normed=1, color='grey', bins=50)
axarr[3].set_title('random reconstruction errors')
for ax in axarr:
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.tick_params(bottom='off', left='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
axarr[3].set_xlim(0, 0.05)
plt.tight_layout()
plt.savefig('./experiments/plots/' + identifier + '_reconstruction_errors.png')
return True
def save_plot_reconstruct(real_samples, model_samples, identifier):
assert real_samples.shape == model_samples.shape
sample_length = real_samples.shape[1]
x_points = np.arange(sample_length)
nrow = real_samples.shape[0]
ncol = 2
fig, axarr = plt.subplots(nrow, ncol, sharex=True, figsize=(6, 6))
for m in range(nrow):
real_sample = real_samples[m, :, 0]
model_sample = model_samples[m, :, 0]
axarr[m, 0].plot(x_points, real_sample, color='green')
axarr[m, 1].plot(x_points, model_sample, color='red')
axarr[-1, 0].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))
axarr[-1, 1].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))
axarr[0, 0].set_title('real')
axarr[0, 1].set_title('reconstructed')
fig.subplots_adjust(hspace = 0.15)
fig.savefig("./experiments/plots/" + identifier + "_reconstruct.png")
plt.clf()
plt.close()
return
def save_plot_vary_dimension(samples_list, idx, identifier, n_dim):
"""
"""
assert len(samples_list) == n_dim
sample_length = samples_list[0].shape[1]
x_points = np.arange(sample_length)
nrow = samples_list[0].shape[0]
sidelength = n_dim*1.5
fig, axarr = plt.subplots(nrow, n_dim, sharex=True, sharey=True, figsize=(sidelength, sidelength))
for dim in range(n_dim):
sample_dim = samples_list[dim]
axarr[0, dim].set_title(dim)
h = dim*1.0/n_dim # hue
for n in range(nrow):
sample = sample_dim[n, :, 0]
axarr[n, dim].plot(x_points, sample, color='black')
axarr[n, dim].spines["top"].set_visible(False)
axarr[n, dim].spines["bottom"].set_visible(False)
axarr[n, dim].spines["right"].set_visible(False)
axarr[n, dim].spines["left"].set_visible(False)
axarr[n, dim].tick_params(bottom='off', left='off')
axarr[n, dim].get_xaxis().set_visible(False)
axarr[n, dim].set_facecolor(hsv_to_rgb((h, 0 + 0.25*n/nrow, 0.96)))
axarr[-1, dim].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))
fig.suptitle(idx)
fig.subplots_adjust(hspace = 0.11, wspace=0.11)
fig.savefig("./experiments/plots/" + identifier + "_epoch" + str(idx).zfill(4) + ".png")
plt.clf()
plt.close()
return True
def interpolate(sampleA, sampleB=None, n_steps=6):
"""
Plot the linear interpolation between two latent space points.
"""
weights = np.linspace(0, 1, n_steps)
if sampleB is None:
# do it "close by"
sampleB = sampleA + np.random.normal(size=sampleA.shape, scale=0.05)
samples = np.array([w*sampleB + (1-w)*sampleA for w in weights])
return samples
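
# Hedged usage note (not in the original file): interpolate(zA, zB, n_steps=6) returns an array
# of shape (n_steps,) + zA.shape that walks linearly from zA to zB; when sampleB is None the
# endpoint is sampleA plus a small Gaussian perturbation.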
def vary_latent_dimension(sample, dimension, n_steps=6):
"""
"""
assert dimension <= sample.shape[1]
scale = np.mean(np.abs(sample[:, dimension]))
deviations = np.linspace(0, 2*scale, n_steps)
samples = np.array([sample[:, :]]*n_steps)
for n in range(n_steps):
samples[n, :, dimension] += deviations[n]
return samples
def plot_sine_evaluation(real_samples, fake_samples, idx, identifier):
"""
Create histogram of fake (generated) samples frequency, amplitude distribution.
Also for real samples.
"""
### frequency
seq_length = len(real_samples[0]) # assumes samples are all the same length
frate = seq_length
freqs_hz = np.fft.rfftfreq(seq_length)*frate # this is for labelling the plot
# TODO, just taking axis 0 for now...
w_real = np.mean(np.abs(np.fft.rfft(real_samples[:, :, 0])), axis=0)
w_fake = np.mean(np.abs(np.fft.rfft(fake_samples[:, :, 0])), axis=0)
### amplitude
A_real = np.max(np.abs(real_samples[:, :, 0]), axis=1)
A_fake = np.max(np.abs(fake_samples[:, :, 0]), axis=1)
### now plot
nrow = 2
ncol = 2
fig, axarr = plt.subplots(nrow, ncol, sharex='col', figsize=(6, 6))
# freq
axarr[0, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_real), w_real), ymax=np.maximum(np.zeros_like(w_real), w_real), color='#30ba50')
axarr[0, 0].set_title("frequency", fontsize=16)
axarr[0, 0].set_ylabel("real", fontsize=16)
axarr[1, 0].vlines(freqs_hz, ymin=np.minimum(np.zeros_like(w_fake), w_fake), ymax=np.maximum(np.zeros_like(w_fake), w_fake), color='#ba4730')
axarr[1, 0].set_ylabel("generated", fontsize=16)
# amplitude
axarr[0, 1].hist(A_real, normed=True, color='#30ba50', bins=30)
axarr[0, 1].set_title("amplitude", fontsize=16)
axarr[1, 1].hist(A_fake, normed=True, color='#ba4730', bins=30)
fig.savefig('./experiments/plots/' + identifier + '_eval' + str(idx).zfill(4) +'.png')
plt.clf()
plt.close()
return True
def plot_trace(identifier, xmax=250, final=False, dp=False):
"""
"""
trace_path = './experiments/traces/' + identifier + '.trace.txt'
da = read_table(trace_path, sep=' ')
nrow = 3
if dp:
trace_dp_path = './experiments/traces/' + identifier + '.dptrace.txt'
da_dp = read_table(trace_dp_path, sep=' ')
nrow += 1
ncol=1
fig, axarr = plt.subplots(nrow, ncol, sharex='col', figsize=(6, 6))
# D_loss
d_handle, = axarr[0].plot(da.epoch, da.D_loss, color='red', label='discriminator')
axarr[0].set_ylabel('D loss')
# axarr[0].set_ylim(0.9, 1.6)
if final:
#D_ticks = [1.0, 1.2, 1.5]
D_ticks = [0.5, 1.0, 1.5]
axarr[0].get_yaxis().set_ticks(D_ticks)
for tick in D_ticks:
axarr[0].plot((-10, xmax+10), (tick, tick), ls='dotted', lw=0.5, color='black', alpha=0.4, zorder=0)
# G loss
ax_G = axarr[0].twinx()
g_handle, = ax_G.plot(da.epoch, da.G_loss, color='green', ls='dashed', label='generator')
ax_G.set_ylabel('G loss')
if final:
G_ticks = [2.5, 5]
ax_G.get_yaxis().set_ticks(G_ticks)
# for tick in G_ticks:
# axarr[0].plot((-10, xmax+10), (tick, tick), ls='dotted', lw=0.5, color='green', alpha=1.0, zorder=0)
ax_G.spines["top"].set_visible(False)
ax_G.spines["bottom"].set_visible(False)
ax_G.spines["right"].set_visible(False)
ax_G.spines["left"].set_visible(False)
ax_G.tick_params(bottom='off', right='off')
axarr[0].legend(handles=[d_handle, g_handle], labels=['discriminator', 'generator'])
# mmd
da_mmd = da.loc[:, ['epoch', 'mmd2']].dropna()
axarr[1].plot(da_mmd.epoch, da_mmd.mmd2, color='purple')
axarr[1].set_ylabel('MMD$^2$')
#axarr[1].set_ylim(0.0, 0.04)
#ax_that = axarr[1].twinx()
#ax_that.plot(da.that)
#ax_that.set_ylabel('$\hat{t}$')
#ax_that.set_ylim(0, 50)
if final:
mmd_ticks = [0.01, 0.02, 0.03]
axarr[1].get_yaxis().set_ticks(mmd_ticks)
for tick in mmd_ticks:
axarr[1].plot((-10, xmax+10), (tick, tick), ls='dotted', lw=0.5, color='black', alpha=0.4, zorder=0)
# log likelihood
da_ll = da.loc[:, ['epoch', 'll', 'real_ll']].dropna()
axarr[2].plot(da_ll.epoch, da_ll.ll, color='orange')
axarr[2].plot(da_ll.epoch, da_ll.real_ll, color='orange', alpha=0.5)
axarr[2].set_ylabel('likelihood')
axarr[2].set_xlabel('epoch')
axarr[2].set_ylim(-750, 100)
#axarr[2].set_ylim(-10000000, 500)
if final:
# ll_ticks = [-1.0*1e7, -0.5*1e7, 0]
ll_ticks = [-500 ,-250, 0]
axarr[2].get_yaxis().set_ticks(ll_ticks)
for tick in ll_ticks:
axarr[2].plot((-10, xmax+10), (tick, tick), ls='dotted', lw=0.5, color='black', alpha=0.4, zorder=0)
if dp:
assert da_dp.columns[0] == 'epoch'
epochs = da_dp['epoch']
eps_values = da_dp.columns[1:]
for eps_string in eps_values:
if 'eps' in eps_string:
eps = eps_string[3:]
else:
eps = eps_string
deltas = da_dp[eps_string]
axarr[3].plot(epochs, deltas, label=eps)
axarr[3].set_ylabel('delta')
axarr[3].set_xlabel('epoch')
axarr[3].legend()
# beautify
for ax in axarr:
#ax.spines["top"].set_visible(True)
ax.spines["top"].set_color((0, 0, 0, 0.3))
#ax.spines["bottom"].set_visible(False)
ax.spines["bottom"].set_color((0, 0, 0, 0.3))
#ax.spines["right"].set_visible(False)
ax.spines["right"].set_color((0, 0, 0, 0.3))
#ax.spines["left"].set_visible(False)
ax.spines["left"].set_color((0, 0, 0, 0.3))
ax.tick_params(bottom='off', left='off')
# make background grey
# ax.set_facecolor((0.96, 0.96, 0.96))
ymin, ymax = ax.get_ylim()
for x in np.arange(0, xmax+10, 10):
ax.plot((x, x), (ymin, ymax), ls='dotted', lw=0.5, color='black', alpha=0.40, zorder=0)
ax.set_xlim(-5, xmax)
ax.get_yaxis().set_label_coords(-0.11,0.5)
# bottom one
fig.savefig('./experiments/traces/' + identifier + '_trace.png')
fig.savefig('./experiments/traces/' + identifier + '_trace.pdf')
plt.clf()
plt.close()
return True
### scripts for eICU
def vis_eICU_patients(patients, upto=None, identifier=None):
"""
Given a list of patientIDs, visualise the chosen variables.
(if only one patient given, only vis one patient)
"""
patients = list(set(patients))
print('Plotting traces of', len(patients), 'patients.')
eICU_dir = 'REDACTED'
variables = ['temperature', 'heartrate', 'respiration', 'systemicmean']
# set up the plot
fig, axarr = plt.subplots(len(variables), 1, sharex=True, figsize=(6.5, 9))
for patient in patients:
pat_df = read_hdf(eICU_dir + '/vitalPeriodic.h5', where='patientunitstayid = ' + str(patient), columns=['observationoffset'] + variables, mode='r')
pat_df.set_index('observationoffset', inplace=True)
pat_df.sort_index(inplace=True)
if not upto is None:
# restrict to first "upto" minutes
pat_df = pat_df.loc[0:upto*60]
for variable, ax in zip(variables, axarr):
ax.plot(pat_df.index/60, pat_df[variable], alpha=0.5)
# aesthetics
xmin, xmax = axarr[0].get_xlim()
for variable, ax in zip(variables, axarr):
ax.set_ylabel(variable)
ax.get_yaxis().set_label_coords(-0.15,0.5)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.tick_params(bottom='off')
#ax.set_facecolor((0.96, 0.96, 0.96))
ymin, ymax = ax.get_ylim()
# expand the ylim ever so slightly
yrange = np.abs(ymax - ymin)
ybuffer = yrange*0.08
ymin_new = ymin - ybuffer
ymax_new = ymax + ybuffer
for x in np.linspace(xmin, xmax - (xmax - xmin)*0.005, num=10):
ax.plot((x, x), (ymin_new, ymax_new), ls='dotted', lw=0.5, color='black', alpha=0.25, zorder=0)
ax.set_ylim(ymin_new, ymax_new)
    axarr[-1].set_xlabel("time since admission (hours)")
axarr[-1].get_xaxis().tick_bottom()
if not identifier is None:
plt.suptitle(identifier)
fig.savefig('./plots/' + identifier + '.png', bbox_inches='tight')
else:
fig.savefig('./plots/eICU_patients.png', bbox_inches='tight')
plt.clf()
plt.close()
return True
def save_mnist_plot_sample(samples, idx, identifier, n_samples, labels=None):
"""
Generates a grid showing mnist digits.
"""
assert n_samples <= samples.shape[0]
if not labels is None:
assert n_samples <= len(labels)
if len(labels.shape) > 1 and not labels.shape[1] == 1:
# one-hot
label_titles = np.argmax(labels, axis=1)
else:
label_titles = labels
else:
label_titles = ['NA']*n_samples
assert n_samples % 2 == 0
img_size = int(np.sqrt(samples.shape[1]))
nrow = int(n_samples/2)
ncol = 2
fig, axarr = plt.subplots(nrow, ncol, sharex=True, figsize=(8, 8))
for m in range(nrow):
# first column
sample = samples[m, :, 0]
axarr[m, 0].imshow(sample.reshape([img_size,img_size]), cmap='gray')
axarr[m, 0].set_title(str(label_titles[m]))
# second column
sample = samples[nrow + m, :, 0]
axarr[m, 1].imshow(sample.reshape([img_size,img_size]), cmap='gray')
axarr[m, 1].set_title(str(label_titles[m + nrow]))
    fig.suptitle(idx)
fig.subplots_adjust(hspace = 0.15)
fig.savefig("./experiments/plots/" + identifier + "_epoch" + str(idx).zfill(4) + ".png")
plt.clf()
plt.close()
return
def visualise_latent(Z, identifier):
"""
visualise a SINGLE point in the latent space
"""
seq_length = Z.shape[0]
latent_dim = Z.shape[1]
if latent_dim > 2:
print('WARNING: Only visualising first two dimensions of latent space.')
h = np.random.random()
colours = np.array([hsv_to_rgb((h, i/seq_length, 0.96)) for i in range(seq_length)])
# plt.plot(Z[:, 0], Z[:, 1], c='grey', alpha=0.5)
for i in range(seq_length):
plt.scatter(Z[i, 0], Z[i, 1], marker='o', c=colours[i])
plt.savefig('./experiments/plots/' + identifier + '_Z.png')
plt.clf()
plt.close()
return True
# --- to do with the model --- #
def plot_parameters(parameters, identifier):
"""
visualise the parameters of a GAN
"""
generator_out = parameters['generator/W_out_G:0']
generator_weights = parameters['generator/rnn/lstm_cell/weights:0'] # split this into four
generator_matrices = np.split(generator_weights, 4, 1)
fig, axarr = plt.subplots(5, 1, sharex=True,
gridspec_kw = {'height_ratios':[0.2, 1, 1, 1, 1]}, figsize=(3,13))
axarr[0].matshow(generator_out.T, extent=[0,100,0,100])
axarr[0].set_title('W_out_G')
axarr[1].matshow(generator_matrices[0])
axarr[1].set_title('LSTM weights (1)')
axarr[2].matshow(generator_matrices[1])
axarr[2].set_title('LSTM weights (2)')
axarr[3].matshow(generator_matrices[2])
axarr[3].set_title('LSTM weights (3)')
axarr[4].matshow(generator_matrices[3])
axarr[4].set_title('LSTM weights (4)')
for a in axarr:
a.set_xlim(0, 100)
a.set_ylim(0, 100)
a.spines["top"].set_visible(False)
a.spines["bottom"].set_visible(False)
a.spines["right"].set_visible(False)
a.spines["left"].set_visible(False)
a.get_xaxis().set_visible(False)
a.get_yaxis().set_visible(False)
# a.tick_params(bottom='off', left='off', top='off')
plt.tight_layout()
plt.savefig('./experiments/plots/' + identifier + '_weights.png')
return True
def vis_eICU_patients_downsampled(pat_arrs, time_step, time_steps_to_plot=None,
variable_names=['sao2', 'heartrate', 'respiration', 'systemicmean'],
identifier=None, idx=0):
"""
Given a list of patient dataframes, visualise the chosen variables.
(if only one patient given, only vis one patient)
"""
# set up the plot
fig, axarr = plt.subplots(len(variable_names), 1, sharex=True, figsize=(6.5, 9))
# fix the same colour for each patient for each axis
n_patients = pat_arrs.shape[0]
colours = [hsv_to_rgb((i/n_patients, 0.8, 0.8)) for i in range(n_patients)]
for (i, pat_arr) in enumerate(pat_arrs):
if not time_steps_to_plot is None:
pat_arr = pat_arr[0:time_steps_to_plot]
for col, ax in zip(range(pat_arr.shape[1]), axarr):
ax.plot(range(0, len(pat_arr)*time_step, time_step), pat_arr[:,col], alpha=0.5, color=colours[i])
# aesthetics
xmin, xmax = axarr[0].get_xlim()
for variable, ax in zip(variable_names, axarr):
ax.set_ylabel(variable)
ax.get_yaxis().set_label_coords(-0.15,0.5)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.tick_params(bottom='off')
#ax.set_facecolor((0.96, 0.96, 0.96))
ymin, ymax = ax.get_ylim()
# expand the ylim ever so slightly
yrange = np.abs(ymax - ymin)
ybuffer = yrange*0.08
ymin_new = ymin - ybuffer
ymax_new = ymax + ybuffer
for x in np.linspace(xmin, xmax - (xmax - xmin)*0.005, num=10):
ax.plot((x, x), (ymin_new, ymax_new), ls='dotted', lw=0.5, color='black', alpha=0.25, zorder=0)
#ax.set_ylim(ymin_new, ymax_new)
ax.set_ylim(-1.5, 1.5)
    axarr[-1].set_xlabel("time since admission (minutes)")
axarr[-1].get_xaxis().tick_bottom()
if not identifier is None:
plt.suptitle(idx)
fig.savefig("./experiments/plots/" + identifier + "_epoch" + str(idx).zfill(4) + ".png", bbox_inches='tight')
else:
fig.savefig('./experiments/plots/eICU_patients.png', bbox_inches='tight')
plt.clf()
plt.close()
return True
### TSTR ###
def view_mnist_eval(identifier, train_X, train_Y, synth_X, synth_Y, test_X, test_Y, synth_predY, real_predY):
"""
Basically just
http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html
"""
# resize everything
side_length = int(np.sqrt(train_X.shape[1]))
train_X = train_X.reshape(-1, side_length, side_length)
synth_X = synth_X.reshape(-1, side_length, side_length)
test_X = test_X.reshape(-1, side_length, side_length)
# remember, they're wrecked in the outer function thanks to python
synth_images_and_labels = list(zip(synth_X, synth_Y))
for index, (image, label) in enumerate(synth_images_and_labels[:4]):
plt.subplot(4, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
if index == 0:
plt.title('synth train: %i' % label)
else:
plt.title('%i' % label)
train_images_and_labels = list(zip(train_X, train_Y))
for index, (image, label) in enumerate(train_images_and_labels[:4]):
plt.subplot(4, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
if index == 0:
plt.title('real train: %i' % label)
else:
plt.title('%i' % label)
images_and_synthpreds = list(zip(test_X, synth_predY))
for index, (image, prediction) in enumerate(images_and_synthpreds[:4]):
plt.subplot(4, 4, index + 9)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
if index == 0:
plt.title('synth pred: %i' % prediction)
else:
plt.title('%i' % prediction)
images_and_realpreds = list(zip(test_X, real_predY))
for index, (image, prediction) in enumerate(images_and_realpreds[:4]):
plt.subplot(4, 4, index + 13)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
if index == 0:
plt.title('real pred: %i' % prediction)
else:
plt.title('%i' % prediction)
plt.tight_layout()
plt.title(identifier)
plt.savefig('./experiments/tstr/' + identifier + '_preds.png')
return True
def view_marginals_raw(data, label=''):
"""
Sort of a duplication with 'view_marginals_cristobal', this doesn't attempt to compare distributions or anything.
"""
variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']
num_gradations = 25
# for cutoff in the gradations, what fraction of samples (at a given time point) fall into that cutoff bracket?
    grid = np.zeros(shape=(16, num_gradations, 4))
assert data.shape[-1] == 4
ranges = []
for var in range(4):
# allow for a different range per variable (if zoom)
low = np.min(data[:, :, var])
high = np.max(data[:, :, var])
ranges.append([low, high])
gradations = np.linspace(low, high, num_gradations)
for (i, cutoff) in enumerate(gradations):
# take the mean over samples
frac = ((data[:, :, var] > low) & (data[:, :, var] <= cutoff)).mean(axis=0)
low = cutoff
grid[:, i, var] = frac
fig, axarr = plt.subplots(nrows=4, ncols=1, sharex=True)
axarr[0].imshow(grid[:, :, 0].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[1].imshow(grid[:, :, 1].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[2].imshow(grid[:, :, 2].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[3].imshow(grid[:, :, 3].T, origin='lower', aspect=0.5, cmap='magma_r')
for (var, ax) in enumerate(axarr):
labels = np.round(np.linspace(ranges[var][0], ranges[var][1], num_gradations)[1::4], 0)
ax.set_yticks(np.arange(num_gradations)[1::4])
ax.set_yticklabels(labels)
ax.set_ylabel(variables[var])
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
ax.set_adjustable('box-forced')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.grid(b=True, color='black', alpha=0.2, linestyle='--')
axarr[-1].set_xticks(np.arange(16)[::2])
plt.tight_layout(pad=0.0, w_pad=-5.0, h_pad=0.1)
plt.savefig("./experiments/eval/eICU_marginals_" + label + ".png")
return True
def view_marginals_cristobal(rep=0, epoch=300, zoom=False):
"""
View marginals of the synthetic data (compare to real data), from the data Cristobal generated.
"""
samples_path = paths.eICU_synthetic_dir + 'samples_eICU_cdgan_synthetic_dataset_r' + str(rep) + '_' + str(epoch) + '.pk'
samples = np.load(samples_path)
labels_path = paths.eICU_synthetic_dir + 'labels_eICU_cdgan_synthetic_dataset_r' + str(rep) + '_' + str(epoch) + '.pk'
labels = np.load(labels_path)
real_path = paths.eICU_task_data
raw_real_train = np.load(real_path).item()['X_train'].reshape(-1, 16, 4)
real_test = np.load(real_path).item()['X_test'].reshape(-1, 16, 4)
real_vali = np.load(real_path).item()['X_vali'].reshape(-1, 16, 4)
# discard vali, test
real, scaled_vali, scaled_test = scale_data(raw_real_train, real_vali, real_test)
real = raw_real_train
view_marginals_raw(raw_real_train, label='raw_real_train')
view_marginals_raw(real, label='real_train')
view_marginals_raw(samples, label='synthetic')
variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']
# get the scaling factors
scaling_factors = {'a': np.zeros(shape=(16, 4)), 'b': np.zeros(shape=(16, 4))}
ranges = []
for var in range(4):
var_min = 100
var_max = 0
for timestep in range(16):
min_val = np.min([np.min(raw_real_train[:, timestep, var]), np.min(real_vali[:, timestep, var])])
max_val = np.max([np.max(raw_real_train[:, timestep, var]), np.max(real_vali[:, timestep, var])])
if min_val < var_min:
var_min = min_val
if max_val > var_max:
var_max = max_val
a = (max_val - min_val)/2
b = (max_val + min_val)/2
scaling_factors['a'][timestep, var] = a
scaling_factors['b'][timestep, var] = b
ranges.append([var_min, var_max])
# now, scale the synthetic data manually
samples_scaled = np.zeros_like(samples)
for var in range(4):
for timestep in range(16):
samples_scaled[:, timestep, var] = samples[:, timestep, var]*scaling_factors['a'][timestep, var] + scaling_factors['b'][timestep, var]
if zoom:
# use modes, skip for now
modes = False
if modes:
# get rough region of interest, then zoom in on it afterwards!
num_gradations = 5
gradations = np.linspace(-1, 1, num_gradations)
# for cutoff in the gradations, what fraction of samples (at a given time point) fall into that cutoff bracket?
lower = 0
real_grid = np.zeros(shape=(16, num_gradations, 4))
for (i, cutoff) in enumerate(gradations):
# take the mean over samples
real_frac = ((real > lower) & (real <= cutoff)).mean(axis=0)
lower = cutoff
real_grid[:, i, :] = real_frac
time_averaged_grid = np.mean(real_grid, axis=0)
# get the most populated part of the grid for each variable
grid_modes = np.argmax(time_averaged_grid, axis=0)
lower = 0
ranges = []
for i in grid_modes:
lower = gradations[i-1]
upper = gradations[i]
ranges.append([lower, upper])
else:
# hand-crafted ranges
ranges = [[88, 100], [30, 130], [7, 60], [35, 135]]
num_gradations = 25
# for cutoff in the gradations, what fraction of samples (at a given time point) fall into that cutoff bracket?
grid = np.zeros(shape=(16, num_gradations, 4))
real_grid = np.zeros(shape=(16, num_gradations, 4))
assert samples.shape[-1] == 4
for var in range(4):
# allow for a different range per variable (if zoom)
low = ranges[var][0]
high = ranges[var][1]
gradations = np.linspace(low, high, num_gradations)
for (i, cutoff) in enumerate(gradations):
# take the mean over samples
frac = ((samples_scaled[:, :, var] > low) & (samples_scaled[:, :, var] <= cutoff)).mean(axis=0)
real_frac = ((real[:, :, var] > low) & (real[:, :, var] <= cutoff)).mean(axis=0)
low = cutoff
grid[:, i, var] = frac
real_grid[:, i, var] = real_frac
# now plot this as an image
fig, axarr = plt.subplots(nrows=4, ncols=2, sharey='row', sharex=True)
axarr[0, 0].imshow(grid[:, :, 0].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[1, 0].imshow(grid[:, :, 1].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[2, 0].imshow(grid[:, :, 2].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[3, 0].imshow(grid[:, :, 3].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[0, 1].imshow(real_grid[:, :, 0].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[1, 1].imshow(real_grid[:, :, 1].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[2, 1].imshow(real_grid[:, :, 2].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[3, 1].imshow(real_grid[:, :, 3].T, origin='lower', aspect=0.5, cmap='magma_r')
axarr[0, 0].set_title("synthetic")
axarr[0, 1].set_title("real")
for var in range(4):
low, high = ranges[var]
labels = np.linspace(low, high, num_gradations)[1::4]
labels = np.round(labels, 0)
axarr[var, 0].set_yticklabels(labels)
axarr[var, 0].set_yticks(np.arange(num_gradations)[1::4])
axarr[var, 0].set_ylabel(variables[var])
for ax in axarr[var, :]:
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
ax.set_adjustable('box-forced')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.grid(b=True, color='black', alpha=0.2, linestyle='--')
axarr[-1, 0].set_xticks(np.arange(16)[::2])
axarr[-1, 1].set_xticks(np.arange(16)[::2])
if zoom:
plt.suptitle('(zoomed)')
plt.tight_layout(pad=0.0, w_pad=-5.0, h_pad=0.1)
plt.savefig("./experiments/eval/eICU_cristobal_marginals_r" + str(rep) + "_epoch" + str(epoch) + ".png")
# now make the histograms
fig, axarr = plt.subplots(nrows=1, ncols=4)
axarr[0].set_ylabel("density")
axarr[0].hist(real[:, :, 0].flatten(), normed=True, color='black', alpha=0.8, range=ranges[0], bins=min(50, (ranges[0][1] - ranges[0][0])), label='real')
axarr[1].hist(real[:, :, 1].flatten(), normed=True, color='black', alpha=0.8, range=ranges[1], bins=50)
axarr[2].hist(real[:, :, 2].flatten(), normed=True, color='black', alpha=0.8, range=ranges[2], bins=50)
axarr[3].hist(real[:, :, 3].flatten(), normed=True, color='black', alpha=0.8, range=ranges[3], bins=50)
axarr[0].hist(samples_scaled[:, :, 0].flatten(), normed=True, alpha=0.6, range=ranges[0], bins=min(50, (ranges[0][1] - ranges[0][0])), label='synthetic')
axarr[0].legend()
axarr[1].hist(samples_scaled[:, :, 1].flatten(), normed=True, alpha=0.6, range=ranges[1], bins=50)
axarr[2].hist(samples_scaled[:, :, 2].flatten(), normed=True, alpha=0.6, range=ranges[2], bins=50)
axarr[3].hist(samples_scaled[:, :, 3].flatten(), normed=True, alpha=0.6, range=ranges[3], bins=50)
for (var, ax) in enumerate(axarr):
ax.set_xlabel(variables[var])
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.grid(b=True, color='black', alpha=0.2, linestyle='--')
plt.gcf().subplots_adjust(bottom=0.2)
fig.set_size_inches(10, 3)
plt.savefig("./experiments/eval/eICU_cristobal_hist_r" + str(rep) + "_epoch" + str(epoch) + ".png")
return True
# --- nips !!! --- #
def nips_plot_rbf(sample, index, which='train'):
if which == 'train':
# col = '#167ea0'
col = '#13af5f'
else:
col = 'black'
sample_length = len(sample)
sample = sample.reshape(sample_length)
x_points = np.arange(sample_length)
fig, axarr = plt.subplots(1, 1, figsize=(2, 2))
axarr.set_facecolor((0.95, 0.96, 0.96))
axarr.plot(x_points, sample, color=col)
axarr.set_ylim(-1.5, 1.5)
axarr.get_xaxis().set_visible(False)
axarr.get_yaxis().set_visible(False)
axarr.spines["top"].set_visible(False)
axarr.spines["bottom"].set_visible(False)
axarr.spines["right"].set_visible(False)
axarr.spines["left"].set_visible(False)
axarr.tick_params(bottom='off', left='off')
plt.savefig('./plots/NIPS_rbf_' + which + '_' + str(index) + '.png')
plt.savefig('./plots/NIPS_rbf_' + which + '_' + str(index) + '.pdf')
plt.clf()
plt.close()
return True
def nips_plot_sine(sample, index, which='train'):
if which == 'train':
#col = '#167ea0'
#col = '#13af5f'
col = '#1188ad'
else:
col = 'black'
sample_length = len(sample)
sample = sample.reshape(sample_length)
x_points = np.arange(sample_length)
fig, axarr = plt.subplots(1, 1, figsize=(2, 2))
axarr.set_facecolor((0.95, 0.96, 0.96))
axarr.plot(x_points, sample, color=col)
axarr.set_ylim(-1.1, 1.1)
axarr.get_xaxis().set_visible(False)
axarr.get_yaxis().set_visible(False)
axarr.spines["top"].set_visible(False)
axarr.spines["bottom"].set_visible(False)
axarr.spines["right"].set_visible(False)
axarr.spines["left"].set_visible(False)
axarr.tick_params(bottom='off', left='off')
plt.savefig('./plots/NIPS_sine_' + which + '_' + str(index) + '.png')
plt.savefig('./plots/NIPS_sine_' + which + '_' + str(index) + '.pdf')
plt.clf()
plt.close()
return True
def nips_plot_mnist(sample, index, which='train'):
plt.axis('off')
plt.imshow(sample, cmap=plt.cm.gray, interpolation='nearest')
plt.savefig('./plots/NIPS_mnist_' + which + '_' + str(index) + '.png')
plt.savefig('./plots/NIPS_mnist_' + which + '_' + str(index) + '.pdf')
plt.clf()
plt.close()
return True
|
bcdb57b43e6512e70bde54779b6ca5420a8d753a
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/docs/tutorials/action_recognition/feat_custom.py
|
48c0ece0b793d3d26b1b25fff8c8bf54ca8ccf5d
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,295
|
py
|
feat_custom.py
|
"""8. Extracting video features from pre-trained models
=======================================================
Feature extraction is a very useful tool when you don't have a large annotated dataset or the
computing resources to train a model from scratch for your use case. It's also useful for visualizing what the model has learned.
In this tutorial, we provide a simple unified solution.
The only thing you need to prepare is a text file containing the information of your videos (e.g., the paths to your videos);
we will take care of the rest.
You can extract strong video features from many popular pre-trained models (e.g., I3D, I3D-nonlocal, SlowFast) using a single command line.
.. note::
    Feel free to skip the tutorial because the feature extraction script is self-contained and ready to launch.
:download:`Download Full Python Script: feat_extract.py<../../../scripts/action-recognition/feat_extract.py>`
For more command options, please run ``python feat_extract.py -h``
Please checkout the `model_zoo <../model_zoo/index.html#action_recognition>`_ to select your preferred pretrained model.
"""
######################################################################
# Prepare Data
# ------------
#
# Your data can be stored in any hierarchy.
# The only thing you need to prepare is a text file, ``video.txt``, which should look like
#
# ::
#
# /home/ubuntu/your_data/video_001.mp4
# /home/ubuntu/your_data/video_001.mp4
# /home/ubuntu/your_data/video_002.mp4
# /home/ubuntu/your_data/video_003.mp4
# /home/ubuntu/your_data/video_004.mp4
# ......
# /home/ubuntu/your_data/video_100.mp4
#
# Each line is the path to each video you want to extract features from.
#
# Or you can also use the format we used for training models in other tutorials,
# ::
#
# /home/ubuntu/your_data/video_001.mp4 200 0
# /home/ubuntu/your_data/video_001.mp4 300 1
# /home/ubuntu/your_data/video_002.mp4 100 2
# /home/ubuntu/your_data/video_003.mp4 400 2
# /home/ubuntu/your_data/video_004.mp4 200 1
# ......
#     /home/ubuntu/your_data/video_100.mp4 100 3
#
# Each line has three items: the path to the video, the number of video frames and the video label.
# However, the second and third items are not actually used in the code; they are just placeholders.
# So you can put any positive number in these two places.
#
# Note that, at this moment, we only support extracting features from videos directly.
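######################################################################
# As a convenience, the snippet below is one possible way to generate such a
# ``video.txt`` from a folder of videos. It is only a sketch and not part of the
# original script: the folder path ``/home/ubuntu/your_data`` is a placeholder,
# and the frame-count and label columns are dummy values since they are ignored.

import glob
import os

video_dir = '/home/ubuntu/your_data'  # placeholder folder that holds your .mp4 files
video_files = sorted(glob.glob(os.path.join(video_dir, '*.mp4')))
with open('video.txt', 'w') as f:
    for path in video_files:
        # dummy frame count (200) and label (0); feat_extract.py does not use them
        f.write('{} 200 0\n'.format(path))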
######################################################################
# Once you prepare the ``video.txt``, you can start extracting feature by:
#
# ::
#
# python feat_extract.py --data-list video.txt --model i3d_resnet50_v1_kinetics400 --save-dir ./features
######################################################################
# The extracted features will be saved to the ``features`` directory. Each video will have one feature file.
# For example, ``video_001.mp4`` will have a feature named ``i3d_resnet50_v1_kinetics400_video_001.mp4_feat.npy``.
# The feature is extracted from the center of the video by using a 32-frame clip.
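######################################################################
# Once the extraction finishes, each ``*_feat.npy`` file can be read back with
# ``numpy``. This is only a sketch: the path below reuses the example file name
# from above and is therefore an assumption, so adjust it to a file that actually
# exists in your ``features`` directory.

import os
import numpy as np

feat_path = './features/i3d_resnet50_v1_kinetics400_video_001.mp4_feat.npy'
if os.path.isfile(feat_path):
    video_feature = np.load(feat_path)
    print('feature shape:', video_feature.shape)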
######################################################################
# If you want a stronger feature that covers more temporal information, for example by extracting features from
# 10 segments of the video and combining them, you can do
#
# ::
#
# python feat_extract.py --data-list video.txt --model i3d_resnet50_v1_kinetics400 --save-dir ./features --num-segments 10
######################################################################
# If you want to extract features from 10 segments of the video, select a 64-frame clip from each segment,
# and combine them, you can do
#
# ::
#
# python feat_extract.py --data-list video.txt --model i3d_resnet50_v1_kinetics400 --save-dir ./features --num-segments 10 --new-length 64
#
######################################################################
# If you want to extract features from 10 segments of the video, select a 64-frame clip from each segment,
# apply the three-crop technique, and combine them, you can do
#
# ::
#
# python feat_extract.py --data-list video.txt --model i3d_resnet50_v1_kinetics400 --save-dir ./features --num-segments 10 --new-length 64 --three-crop
######################################################################
# We also provide pre-trained SlowFast models for you to extract video features. SlowFast is a recent state-of-the-art video model that
# achieves the best accuracy-efficiency tradeoff. For example, if you want to extract features from model ``slowfast_4x16_resnet50_kinetics400``,
#
# ::
#
# python feat_extract.py --data-list video.txt --model slowfast_4x16_resnet50_kinetics400 --save-dir ./features --slowfast --slow-temporal-stride 16 --fast-temporal-stride 2
#
# The model requires the input to be a 64-frame video clip.
# We select 4 frames for the slow branch (temporal_stride = 16) and 32 frames for the fast branch (temporal_stride = 2).
#
######################################################################
# Similarly, you can specify num_segments, new_length, etc. to obtain stronger features.
# There are many other options and other models you can choose, please check ``feat_extract.py`` for more usage information.
|
8b45c61f515cb452e7b45f7faee7c8b9e344576a
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowSdwanPolicyDataPolicyFilter/cli/equal/golden_output_expected.py
|
96d270d05833e216606282be1f1bc9ad13239382
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
golden_output_expected.py
|
expected_output = {
"data_policy_filter": {
'DP_HUB_LOCAL_FROM_TUNNEL': {
'VPN1':{
'default_action_count': {
"packets": 0,
"bytes": 0
},
'FROM_TUNNEL_SAME_VPN_SC': {
"packets": 46960,
"bytes": 53256268
}
},
'VPN2':{
'default_action_count': {
"packets": 0,
"bytes": 0
},
'FROM_TUNNEL_DIFF_VPN_SC': {
"packets": 0,
"bytes": 0
}
}
},
'DP_HUB_LOCAL_FROM_SERVICE': {
'VPN1':{
'default_action_count': {
"packets": 430,
"bytes": 30020
},
'FROM_SVC_TO_BR2_SAME_VPN_SC': {
"packets": 1782,
"bytes": 124740
}
},
'VPN2':{
'default_action_count': {
"packets": 0,
"bytes": 0
},
'FROM_SVC_TO_BR2_DIFF_VPN_SC': {
"packets": 0,
"bytes": 0
}
}
}
}
}
|
5af27b11335438885f10a34330b80ad35a7b1871
|
e8f6fa2699fb0ea8436c6a49756b68f912a38e70
|
/Chapter14/test_dqn.py
|
28486b0c99796296f491711a9d7857e5c81b5dee
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Deep-Learning-Projects
|
a35d1b85826b951a562168fe9e4c55fcad1d2656
|
1dbe73597f9a1a519ca6e9dd387ee59897ceda6e
|
refs/heads/master
| 2023-04-01T07:07:06.854805
| 2023-01-30T09:39:23
| 2023-01-30T09:39:23
| 129,046,274
| 220
| 158
|
MIT
| 2023-03-24T22:29:37
| 2018-04-11T06:36:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
test_dqn.py
|
"""This module contains function to test the performance of the DQN model."""
import numpy as np
def test(env, model, states, episodes=100, render=False):
"""Test the performance of the DQN agent."""
scores_test = []
for episode in range(1, (episodes+1)):
state = env.reset()
state = state.reshape(1, states)
done = False
time_step = 0
while not done:
if render:
env.render()
action = np.argmax(model.predict(state)[0])
new_state, reward, done, info = env.step(action)
new_state = new_state.reshape(1, states)
state = new_state
time_step += 1
scores_test.append(time_step)
if episode % 10 == 0:
print('episode {}, score {} '.format(episode, time_step))
print('Average score over 100 test games: {}'.format(np.mean(scores_test)))
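# Hypothetical usage sketch (added for illustration, not part of the original module).
# It assumes a Gym-style environment and an already-trained Keras model saved to disk;
# 'CartPole-v1' and 'dqn_model.h5' are placeholder names, not files shipped with this code.
if __name__ == '__main__':
    import gym
    from keras.models import load_model

    env = gym.make('CartPole-v1')
    states = env.observation_space.shape[0]
    model = load_model('dqn_model.h5')  # placeholder path to a trained DQN
    test(env, model, states, episodes=100, render=False)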
|
54e3675530aa1fe45a5522372a921fe3894df851
|
cde096ba977b63becc1b9066677331ef4594a797
|
/csfieldguide/tests/utils/translatable_model_loader/__init__.py
|
e0a1cb069b41a38f4fa4910e27ff90a0d379c6ea
|
[
"CC-BY-NC-SA-4.0",
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"WTFPL",
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-2.5",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-field-guide
|
655524b161fab0ab422679dd80720f660f2cfa98
|
ea3281ec6f4d17538f6d3cf6f88d74fa54581b34
|
refs/heads/develop
| 2023-08-28T14:33:58.789843
| 2023-08-28T08:24:03
| 2023-08-28T08:24:03
| 34,356,619
| 364
| 97
|
MIT
| 2023-09-14T08:58:55
| 2015-04-21T23:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 61
|
py
|
__init__.py
|
"""Module for tests of the TranslatableModelLoader class."""
|
85c7e8ecc402711ac4aa6b7dd9e83000a8937b18
|
6f60dd616898f0eb9cd372c339d3caee45e8043f
|
/helpers/command.py
|
61b84e7a3ae20319a7bfec36c550580738fec202
|
[] |
no_license
|
kobotoolbox/kobo-install
|
e0c92c57fe86665eb8017ee1c9e203d47d20cc7f
|
7ec85be362b95387295ee5a2fad2563ce3aae838
|
refs/heads/master
| 2023-08-10T03:50:19.094832
| 2023-07-26T14:43:28
| 2023-07-26T14:43:28
| 151,285,266
| 162
| 156
| null | 2023-09-13T20:32:56
| 2018-10-02T16:17:27
|
Python
|
UTF-8
|
Python
| false
| false
| 22,486
|
py
|
command.py
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
from helpers.cli import CLI
from helpers.config import Config
from helpers.network import Network
from helpers.template import Template
from helpers.upgrading import Upgrading
from helpers.utils import run_docker_compose
class Command:
@staticmethod
def help():
output = [
'Usage: python3 run.py [options]',
'',
' Options:',
' -i, --info',
' Show KoboToolbox Url and super user credentials',
' -l, --logs',
' Display docker logs',
' -b, --build',
' Build kpi and kobocat (only on dev/staging mode)',
' -bkf, --build-kpi',
' Build kpi (only on dev/staging mode)',
' -bkc, --build-kobocat',
' Build kobocat (only on dev/staging mode)',
' -s, --setup',
' Prompt questions to (re)write configuration files',
' -S, --stop',
' Stop KoboToolbox',
' -u, --update, --upgrade [branch or tag]',
' Update KoboToolbox',
' -cf, --compose-frontend [docker-compose arguments]',
' Run a docker-compose command in the front-end '
'environment',
' -cb, --compose-backend [docker-compose arguments]',
' Run a docker-compose command in the back-end '
'environment',
' -m, --maintenance',
' Activate maintenance mode. All traffic is '
'redirected to maintenance page',
' -sm, --stop-maintenance',
' Stop maintenance mode',
' -v, --version',
' Display current version',
''
]
print('\n'.join(output))
@classmethod
def build(cls, image=None):
"""
        Builds kpi/kobocat images with the `--no-cache` option
Pulls latest `kobotoolbox/koboform_base` as well
:param image: str
"""
config = Config()
dict_ = config.get_dict()
if config.dev_mode or config.staging_mode:
def build_image(image_):
frontend_command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
'build', '--force-rm', '--no-cache',
image_
])
CLI.run_command(frontend_command, dict_['kobodocker_path'])
if image is None or image == 'kf':
prefix = config.get_prefix('frontend')
timestamp = int(time.time())
dict_['kpi_dev_build_id'] = f'{prefix}{timestamp}'
config.write_config()
Template.render(config)
build_image('kpi')
if image is None or image == 'kc':
pull_base_command = [
'docker',
'pull',
'kobotoolbox/koboform_base',
]
CLI.run_command(pull_base_command, dict_['kobodocker_path'])
prefix = config.get_prefix('frontend')
timestamp = int(time.time())
dict_['kc_dev_build_id'] = f'{prefix}{timestamp}'
config.write_config()
Template.render(config)
build_image('kobocat')
@classmethod
def compose_frontend(cls, args):
config = Config()
dict_ = config.get_dict()
command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend')
])
cls.__validate_custom_yml(config, command)
command.extend(args)
subprocess.call(command, cwd=dict_['kobodocker_path'])
@classmethod
def compose_backend(cls, args):
config = Config()
dict_ = config.get_dict()
backend_role = dict_['backend_server_role']
command = run_docker_compose(dict_, [
'-f', f'docker-compose.backend.{backend_role}.yml',
'-f', f'docker-compose.backend.{backend_role}.override.yml',
'-p', config.get_prefix('backend')
])
cls.__validate_custom_yml(config, command)
command.extend(args)
subprocess.call(command, cwd=dict_['kobodocker_path'])
@classmethod
def info(cls, timeout=600):
config = Config()
dict_ = config.get_dict()
nginx_port = dict_['exposed_nginx_docker_port']
main_url = '{}://{}.{}{}'.format(
'https' if dict_['https'] else 'http',
dict_['kpi_subdomain'],
dict_['public_domain_name'],
':{}'.format(nginx_port) if (
nginx_port and
str(nginx_port) != Config.DEFAULT_NGINX_PORT
) else ''
)
stop = False
start = int(time.time())
success = False
hostname = f"{dict_['kpi_subdomain']}.{dict_['public_domain_name']}"
https = dict_['https']
nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) \
if https else int(dict_['exposed_nginx_docker_port'])
already_retried = False
while not stop:
if Network.status_check(hostname,
'/service_health/',
nginx_port, https) == Network.STATUS_OK_200:
stop = True
success = True
elif int(time.time()) - start >= timeout:
if timeout > 0:
CLI.colored_print(
'\n`KoBoToolbox` has not started yet. '
                    'This can be normal on low CPU/RAM computers.\n',
CLI.COLOR_INFO)
question = f'Wait for another {timeout} seconds?'
response = CLI.yes_no_question(question)
if response:
start = int(time.time())
continue
else:
if not already_retried:
already_retried = True
CLI.colored_print(
'\nSometimes front-end containers cannot '
'communicate with back-end containers.\n'
'Restarting the front-end containers usually '
'fixes it.\n', CLI.COLOR_INFO)
question = 'Would you like to try?'
response = CLI.yes_no_question(question)
if response:
start = int(time.time())
cls.restart_frontend()
continue
stop = True
else:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(10)
# Create a new line
print('')
if success:
username = dict_['super_user_username']
password = dict_['super_user_password']
message = (
'Ready\n'
f'URL: {main_url}\n'
f'User: {username}\n'
f'Password: {password}'
)
CLI.framed_print(message,
color=CLI.COLOR_SUCCESS)
else:
message = (
'KoBoToolbox could not start!\n'
'Please try `python3 run.py --logs` to see the logs.'
)
CLI.framed_print(message, color=CLI.COLOR_ERROR)
return success
@classmethod
def logs(cls):
config = Config()
dict_ = config.get_dict()
if config.primary_backend or config.secondary_backend:
backend_role = dict_['backend_server_role']
backend_command = run_docker_compose(dict_, [
'-f', f'docker-compose.backend.{backend_role}.yml',
'-f', f'docker-compose.backend.{backend_role}.override.yml',
'-p', config.get_prefix('backend'),
'logs', '-f'
])
cls.__validate_custom_yml(config, backend_command)
CLI.run_command(backend_command, dict_['kobodocker_path'], True)
if config.frontend:
frontend_command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
'logs', '-f',
])
cls.__validate_custom_yml(config, frontend_command)
CLI.run_command(frontend_command, dict_['kobodocker_path'], True)
@classmethod
def configure_maintenance(cls):
config = Config()
dict_ = config.get_dict()
if not config.multi_servers or config.frontend:
config.maintenance()
Template.render_maintenance(config)
dict_['maintenance_enabled'] = True
config.write_config()
cls.stop_nginx()
cls.start_maintenance()
@classmethod
def stop_nginx(cls):
config = Config()
dict_ = config.get_dict()
nginx_stop_command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
'stop', 'nginx',
])
cls.__validate_custom_yml(config, nginx_stop_command)
CLI.run_command(nginx_stop_command, dict_['kobodocker_path'])
@classmethod
def start_maintenance(cls):
config = Config()
dict_ = config.get_dict()
frontend_command = run_docker_compose(dict_, [
'-f', 'docker-compose.maintenance.yml',
'-f', 'docker-compose.maintenance.override.yml',
'-p', config.get_prefix('maintenance'),
'up', '-d',
])
CLI.run_command(frontend_command, dict_['kobodocker_path'])
CLI.colored_print('Maintenance mode has been started',
CLI.COLOR_SUCCESS)
@classmethod
def restart_frontend(cls):
cls.start(frontend_only=True)
@classmethod
def start(cls, frontend_only=False, force_setup=False):
config = Config()
dict_ = config.get_dict()
cls.stop(output=False, frontend_only=frontend_only)
if frontend_only:
CLI.colored_print('Launching front-end containers', CLI.COLOR_INFO)
else:
CLI.colored_print('Launching environment', CLI.COLOR_INFO)
# Test if ports are available
ports = []
if config.proxy:
nginx_port = int(dict_['nginx_proxy_port'])
else:
nginx_port = int(dict_['exposed_nginx_docker_port'])
if frontend_only or config.frontend or \
not config.multi_servers:
ports.append(nginx_port)
if (not frontend_only or config.primary_backend or
config.secondary_backend) and \
config.expose_backend_ports:
ports.append(dict_['postgresql_port'])
ports.append(dict_['mongo_port'])
ports.append(dict_['redis_main_port'])
ports.append(dict_['redis_cache_port'])
for port in ports:
if Network.is_port_open(port):
CLI.colored_print(f'Port {port} is already open. '
'KoboToolbox cannot start',
CLI.COLOR_ERROR)
sys.exit(1)
# Start the back-end containers
if not frontend_only and config.backend:
backend_role = dict_['backend_server_role']
backend_command = run_docker_compose(dict_, [
'-f', f'docker-compose.backend.{backend_role}.yml',
'-f', f'docker-compose.backend.{backend_role}.override.yml',
'-p', config.get_prefix('backend'),
'up', '-d'
])
cls.__validate_custom_yml(config, backend_command)
CLI.run_command(backend_command, dict_['kobodocker_path'])
# Start the front-end containers
if config.frontend:
# If this was previously a shared-database setup, migrate to
# separate databases for KPI and KoBoCAT
Upgrading.migrate_single_to_two_databases(config)
frontend_command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
'up', '-d',
])
if dict_['maintenance_enabled']:
cls.start_maintenance()
# Start all front-end services except the non-maintenance NGINX
frontend_command.extend([
s for s in config.get_service_names() if s != 'nginx'
])
cls.__validate_custom_yml(config, frontend_command)
CLI.run_command(frontend_command, dict_['kobodocker_path'])
# Start reverse proxy if user uses it.
if config.use_letsencrypt:
if force_setup:
# Let's Encrypt NGINX container needs kobo-docker NGINX
# container to be started first
config.init_letsencrypt()
else:
proxy_command = run_docker_compose(dict_, ['up', '-d'])
CLI.run_command(proxy_command,
config.get_letsencrypt_repo_path())
if dict_['maintenance_enabled']:
CLI.colored_print(
'Maintenance mode is enabled. To resume '
'normal operation, use `--stop-maintenance`',
CLI.COLOR_INFO,
)
elif not frontend_only:
if not config.multi_servers or config.frontend:
CLI.colored_print('Waiting for environment to be ready. '
'It can take a few minutes.', CLI.COLOR_INFO)
cls.info()
else:
backend_server_role = dict_['backend_server_role']
CLI.colored_print(
(f'{backend_server_role} backend server is starting up '
'and should be up & running soon!\nPlease look at docker '
'logs for further information: '
'`python3 run.py -cb logs -f`'),
CLI.COLOR_WARNING)
@classmethod
def stop(cls, output=True, frontend_only=False):
"""
Stop containers.
Because containers share the same network, containers must be stopped
first, then "down-ed" to remove any attached internal networks.
        The order must be respected to avoid removing networks with active endpoints.
"""
config = Config()
if not config.multi_servers or config.frontend:
# Stop maintenance container in case it's up&running
cls.stop_containers('maintenance')
# Stop reverse proxy if user uses it.
if config.use_letsencrypt:
cls.stop_containers('certbot')
# Stop down front-end containers
cls.stop_containers('frontend')
# Clean maintenance services
cls.stop_containers('maintenance', down=True)
# Clean certbot services if user uses it.
if config.use_letsencrypt:
cls.stop_containers('certbot', down=True)
if not frontend_only and config.backend:
cls.stop_containers('backend', down=True)
# Clean front-end services
if not config.multi_servers or config.frontend:
cls.stop_containers('frontend', down=True)
if output:
CLI.colored_print('KoboToolbox has been stopped', CLI.COLOR_SUCCESS)
@classmethod
def stop_containers(cls, group: str, down: bool = False):
config = Config()
dict_ = config.get_dict()
backend_role = dict_['backend_server_role']
if group not in ['frontend', 'backend', 'certbot', 'maintenance']:
raise Exception('Unknown group')
group_docker_maps = {
'frontend': {
'options': [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
],
'custom_yml': True,
},
'backend': {
'options': [
'-f', f'docker-compose.backend.{backend_role}.yml',
'-f', f'docker-compose.backend.{backend_role}.override.yml',
'-p', config.get_prefix('backend'),
],
'custom_yml': True,
},
'certbot': {
'options': [],
'custom_yml': False,
'path': config.get_letsencrypt_repo_path(),
},
'maintenance': {
'options': [
'-f', 'docker-compose.maintenance.yml',
'-f', 'docker-compose.maintenance.override.yml',
'-p', config.get_prefix('maintenance'),
],
'custom_yml': False,
}
}
path = group_docker_maps[group].get('path', dict_['kobodocker_path'])
mode = 'stop' if not down else 'down'
options = group_docker_maps[group]['options']
command = run_docker_compose(dict_, options + [mode])
if group_docker_maps[group]['custom_yml']:
cls.__validate_custom_yml(config, command)
CLI.run_command(command, path)
@classmethod
def stop_maintenance(cls):
"""
Stop maintenance mode
"""
config = Config()
dict_ = config.get_dict()
if not config.multi_servers or config.frontend:
# Stop maintenance container in case it's up&running
cls.stop_containers('maintenance')
# Create and start NGINX container
frontend_command = run_docker_compose(dict_, [
'-f', 'docker-compose.frontend.yml',
'-f', 'docker-compose.frontend.override.yml',
'-p', config.get_prefix('frontend'),
'up', '-d',
'nginx',
])
cls.__validate_custom_yml(config, frontend_command)
CLI.run_command(frontend_command, dict_['kobodocker_path'])
CLI.colored_print('Maintenance mode has been stopped',
CLI.COLOR_SUCCESS)
dict_['maintenance_enabled'] = False
config.write_config()
@classmethod
def version(cls):
git_commit_version_command = ['git', 'rev-parse', 'HEAD']
stdout = CLI.run_command(git_commit_version_command)
build = stdout.strip()[0:7]
version = Config.KOBO_INSTALL_VERSION
CLI.colored_print(
f'kobo-install Version: {version} (build {build})',
CLI.COLOR_SUCCESS,
)
@staticmethod
def __validate_custom_yml(config, command):
"""
Validate whether docker-compose must start the containers with a
custom YML file in addition to the default. If the file does not yet exist,
kobo-install is paused until the user creates it and resumes the setup manually.
        If the user has chosen to use a custom YML file, it is injected into `command`
before being executed.
"""
dict_ = config.get_dict()
frontend_command = True
# Detect if it's a front-end command or back-end command
for part in command:
if 'backend' in part:
frontend_command = False
break
start_index = 5 if dict_.get('compose_version', 'v1') == 'v1' else 6
if frontend_command and dict_['use_frontend_custom_yml']:
custom_file = '{}/docker-compose.frontend.custom.yml'.format(
dict_['kobodocker_path']
)
does_custom_file_exist = os.path.exists(custom_file)
while not does_custom_file_exist:
message = (
'Please create your custom configuration in\n'
'`{custom_file}`.'
).format(custom_file=custom_file)
CLI.framed_print(message, color=CLI.COLOR_INFO, columns=90)
input('Press any key when it is done...')
does_custom_file_exist = os.path.exists(custom_file)
# Add custom file to docker-compose command
command.insert(start_index, '-f')
command.insert(start_index + 1, 'docker-compose.frontend.custom.yml')
if not frontend_command and dict_['use_backend_custom_yml']:
backend_server_role = dict_['backend_server_role']
custom_file = '{}/docker-compose.backend.{}.custom.yml'.format(
dict_['kobodocker_path'],
backend_server_role
)
does_custom_file_exist = os.path.exists(custom_file)
while not does_custom_file_exist:
message = (
'Please create your custom configuration in\n'
'`{custom_file}`.'
).format(custom_file=custom_file)
CLI.framed_print(message, color=CLI.COLOR_INFO, columns=90)
input('Press any key when it is done...')
does_custom_file_exist = os.path.exists(custom_file)
# Add custom file to docker-compose command
command.insert(start_index, '-f')
command.insert(
start_index + 1,
'docker-compose.backend.{}.custom.yml'.format(backend_server_role),
)
|
57b88656e7bc729e4e917fa531332de2ac40a41d
|
b7e6a4c0c7648468ffbfe7c2b447cd9f756f5405
|
/lithoxyl/p_squared.py
|
4086eab8d2945f748109a6e65c8a5cd6e09f4eb4
|
[] |
no_license
|
mahmoud/lithoxyl
|
f08c67d9d8756f6023704eb3de1d0637375f136e
|
b4bfa92c54df85b4bd5935fe270e2aa3fb25c412
|
refs/heads/master
| 2022-09-14T01:08:21.517624
| 2021-10-25T00:34:58
| 2021-10-25T00:34:58
| 10,783,537
| 162
| 15
| null | 2022-08-05T20:41:09
| 2013-06-19T06:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,545
|
py
|
p_squared.py
|
# -*- coding: utf-8 -*-
"""\
An implementation of P-Squared (Piecewise Parabolic) Quantile
Estimation, which provides efficient online estimation for
quantile-related statistics (e.g., median, quartiles).
For a description of the algorithm, see
http://www.cs.wustl.edu/~jain/papers/ftp/psqr.pdf
Implemented by Kurt Rose and Mahmoud Hashemi.
Copyright 2013, 3-clause BSD License
"""
from __future__ import absolute_import
import sys
PY3 = sys.version_info[0] == 3
_zip = zip
if PY3:
def _zip(*a, **kw):
return list(zip(*a, **kw))
xrange = range
class P2Estimator(object):
def __init__(self, q_points, data):
self._q_points = self._process_q_points(q_points)
self._q_points = (0.0,) + self._q_points + (1.0,)
len_data, len_qps = len(data), len(self._q_points)
if len_data < len_qps:
msg = ('expected %d or more initial points for '
'%d quantiles (got %d)' % (len_qps, len_qps - 2, len_data))
raise ValueError(msg)
initial = sorted(data[:len_qps])
vals = [[i + 1, x] for i, x in enumerate(initial)]
self._points = pts = _zip(self._q_points, vals)
self._min_point, self._max_point = pts[0][1], pts[-1][1]
self._lookup = dict(pts)
self._back_tuples = list(reversed(_zip(vals[1:], vals[2:])))
self._quads = _zip(self._q_points[1:], vals, vals[1:], vals[2:])
for i in xrange(len_qps, len_data):
self.add(data[i])
return
@staticmethod
def _process_q_points(q_points):
try:
qps = sorted([float(x) for x in set(q_points or [])])
if qps[0] == 0.0:
qps = qps[1:]
if qps[-1] == 1.0:
qps = qps[:-1]
if not qps or not all([0.0 < x < 1.0 for x in qps]):
raise ValueError()
except Exception:
raise ValueError('invalid quantile point(s): %r' % (q_points,))
else:
return tuple(qps)
def add(self, val):
prev_count = self._max_point[0]
self._max_point[0] = prev_count + 1
cur_min, cur_max = self._min_point[1], self._max_point[1]
if val < cur_min:
self._min_point[1] = cur_min = val
elif val > cur_max:
self._max_point[1] = cur_max = val
for point, nxt_point in self._back_tuples:
if val <= point[1]:
point[0] += 1
if point[0] == nxt_point[0]:
point[0] -= 1
# update estimated locations of percentiles
for qpdiv, (ln, lq), cur, (rn, rq) in self._quads:
cn, cq = cur
d = int(prev_count * qpdiv + 1 - cn)
if not d:
continue
d = 1.0 if d > 0 else -1.0 # clamped at +-1
if not (ln < cn + d < rn):
continue
nq = (cq + (d / (rn - ln)) * # hooray parabolic
((cn - ln + d) * (rq - cq) / (rn - cn) +
(rn - cn - d) * (cq - lq) / (cn - ln)))
if not (lq < nq < rq): # fall back on linear eqn
if d == 1:
nq = cq + (rq - cq) / (rn - cn)
else:
nq = cq - (lq - cq) / (ln - cn)
cur[0], cur[1] = cn + d, nq
def get_quantiles(self):
return [(x[0], x[1][1]) for x in self._points]
def _get_quantile(self, q):
try:
return self._lookup[float(q)][1]
except KeyError:
raise ValueError('quantile not tracked: %r' % q)
def test_random(vals=None, nsamples=100000):
import random
import time
from pprint import pprint
random.seed(12345)
qp = (0.01, 0.05, 0.25, 0.50, 0.75, 0.90, 0.95, 0.99)
if not vals:
vals = [random.random() for i in range(nsamples)]
try:
start = time.time()
m = P2Estimator(q_points=qp, data=vals)
p = m.get_quantiles()
duration = time.time() - start
tmpl = ("P2Estimator processed %d measurements "
"in %f seconds (%f ms each)")
pprint(p)
print(tmpl % (nsamples, duration, 1000 * duration / nsamples))
except Exception:
import traceback
import pdb
traceback.print_exc()
pdb.post_mortem()
raise
for k, v in p:
if not k:
continue
if not 0.95 < v / k < 1.05:
print("problem: %s is %s, should be ~%s" % (k, v, k))
return
if __name__ == "__main__":
test_random()
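    # A minimal direct-usage sketch (illustrative, not from the original module):
    # P2Estimator needs at least len(q_points) + 2 initial samples, after which
    # further values are streamed in via add() and read back with get_quantiles().
    est = P2Estimator(q_points=(0.25, 0.5, 0.75), data=[5, 1, 4, 2, 3])
    for x in (7, 0, 9, 6, 8):
        est.add(x)
    print(est.get_quantiles())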
|
609d2ce2b91d575557eb26f29bbfe2ccf456cc1e
|
5177d1b0205366bddc6f8a0533a03cc6e9533692
|
/testidpp/HCOO-idpp/idpp.py
|
beb4930f11b76105983d02a48531256b6ac47f6e
|
[] |
no_license
|
tamaswells/VASP_script
|
feea41a8aaf4beb55e7d2b254061d59a2375d225
|
1a13a755d67241ea3f1a7cc89695c0b91043fed9
|
refs/heads/master
| 2022-02-08T12:58:56.470695
| 2021-12-28T08:37:30
| 2021-12-28T08:37:30
| 130,773,640
| 147
| 103
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
idpp.py
|
#!/public/software/apps/Anaconda3/bin/python
import numpy as np
import os
import sys
import warnings
from pymatgen.core import Structure
from pymatgen_diffusion.neb.pathfinder import IDPPSolver
def warn(*args, **kwargs):
pass
warnings.warn = warn
sys.stdout = open(os.devnull, 'w')
if len(sys.argv) < 4:
    raise SystemError('Syntax Error! Run as: python idpp.py ini/POSCAR fin/POSCAR 4')
warnings.filterwarnings("ignore")
init_struct = Structure.from_file(sys.argv[1], False)
final_struct = Structure.from_file(sys.argv[2], False)
obj = IDPPSolver.from_endpoints(endpoints=[init_struct, final_struct], nimages=int(sys.argv[3]),
sort_tol=1.0)
new_path = obj.run(maxiter=5000, tol=1e-5, gtol=1e-3,step_size=0.05,\
max_disp=0.05, spring_const=5.0)
for i in range(len(new_path)):
image_file='{0:02d}'.format(i)
if not os.path.exists(image_file):
os.makedirs(image_file)
POSCAR_file=image_file+'/POSCAR'
new_path[i].to(fmt="poscar", filename=POSCAR_file)
sys.stdout = sys.__stdout__
#Image Dependent Pair Potential for improved interpolation of NEB initial guess
#Reference: S. Smidstrup, A. Pedersen, K. Stokbro and H. Jonsson, Improved initial guess for minimum energy path calculations, J. Chem. Phys. 140, 214106 (2014).
print("Improved interpolation of NEB initial guess has been generated. BYE.")
|
e2ad36e5873282d054881a3c70eba18fcbd3963b
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/BOJ/2110 공유기 설치/rockmiin.py
|
6e436a31f9e05ac5efaf3402cc6612150ccedcce
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
rockmiin.py
|
n, c = map(int, input().split())
home = [int(input()) for _ in range(n)]
home.sort()

start, end = 1, home[-1] - home[0]
result = 0
while start <= end:
    mid = (start + end) // 2
    val = home[0]
    cnt = 1
    for i in range(1, len(home)):
        if home[i] >= val + mid:
            cnt += 1
            val = home[i]
    if cnt >= c:
        result = mid
        start = mid + 1
    else:
        end = mid - 1
print(result)
|
8f57ddca13e3bca36440a09acc8573590535afc8
|
2f6011fe675824bbea9456c075707f6b00b2641e
|
/gdpr_assist/signals.py
|
3f1c7e55bd89dfd8d4bf2291ca044313cc5e7b14
|
[
"BSD-3-Clause"
] |
permissive
|
wildfish/django-gdpr-assist
|
aa692b411db5018d8c30ea85843a4489fb85849c
|
d7b0ffb96c863f929b469116b82cebe7568618a4
|
refs/heads/develop
| 2023-06-01T02:55:51.145820
| 2022-07-12T13:49:20
| 2022-07-12T13:49:20
| 145,978,441
| 172
| 20
|
NOASSERTION
| 2023-05-17T13:36:05
| 2018-08-24T10:32:27
|
Python
|
UTF-8
|
Python
| false
| false
| 145
|
py
|
signals.py
|
"""
Internal signals
"""
from django.dispatch import Signal
# providing_args = ["instance"]
pre_anonymise = Signal()
post_anonymise = Signal()
|
2fa7a7aa39b95dae86a926635c61875a5255c626
|
1904012be256128f2791627f39dcbf0af5bd7b87
|
/conanfile.py
|
5d6a02eb71c2ba66abf1a38ed5b0b6674a74658f
|
[
"BSL-1.0"
] |
permissive
|
boost-ext/di
|
42fae06fccbadb5c8a8bd17de7892d8c40750ba2
|
f87427b4efa723c28af677317df0afa59491f2f5
|
refs/heads/cpp14
| 2023-09-01T06:25:36.864504
| 2023-05-31T06:40:45
| 2023-05-31T07:40:56
| 3,250,728
| 404
| 62
| null | 2023-05-31T07:40:57
| 2012-01-23T21:42:55
|
C++
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
conanfile.py
|
from conans import ConanFile, CMake
class DI(ConanFile):
name = "DI"
version = "latest"
url = "https://github.com/boost-ext/di"
license = "Boost"
description = "[Boost::ext].DI - C++14 Dependency Injection Library"
settings = "os", "compiler", "arch", "build_type"
exports_sources = "include/*"
no_copy_source = True
def package(self):
self.copy("*.hpp")
|
6b0fce8db9cf4c10448d77e8a33b606b717f3778
|
6b0fd331c9e75ae4fda1904c3330d5831cd88ab1
|
/src/predict.py
|
23f229a71791884a8c138e9cff0b5d5831e91989
|
[
"MIT"
] |
permissive
|
PavlosMelissinos/enet-keras
|
8c0b484c83c608400ba2cb95d4673284045cb037
|
95ce39474b4b95521b64d176dfb7455d57716fde
|
refs/heads/master
| 2022-12-10T02:13:40.178562
| 2020-03-11T15:54:09
| 2020-03-11T15:54:09
| 86,742,995
| 128
| 56
|
MIT
| 2022-12-07T23:36:33
| 2017-03-30T19:55:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,397
|
py
|
predict.py
|
# coding=utf-8
from __future__ import absolute_import, print_function
from PIL import Image as PILImage
import numpy as np
import os
import six
import sys
from keras import backend as K
from keras.preprocessing.image import array_to_img
from src.data import datasets, utils
from src.experiments.core import Experiment
import models
def color_output_image(colormap, img, mode='bw'):
"""
move this into datasets.py
    :param colormap:
:param img:
:param mode:
:return:
"""
cv_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
if mode == 'bw':
cv_image[img > 0] = 255
elif mode == 'class_palette':
img = np.asarray(img, dtype=np.uint8).reshape(img.size[0], img.size[1])
for cid, color in six.iteritems(colormap):
cv_image[img == cid] = color
else:
raise ValueError('Unknown coloring mode: Expected one of {}; got {}'.format(['bw', 'class_palette'], mode))
return cv_image
def predict(segmenter, img, h=None, w=None):
np.random.seed(1337) # for reproducibility
try:
oh, ow = img.shape[0], img.shape[1]
h = oh if h is None else h
w = ow if w is None else w
img = utils.resize(img, target_h=h, target_w=w)
img = np.expand_dims(utils.normalize(img), axis=0)
pred = segmenter.predict(img)[0]
nc = pred.shape[-1]
scores = np.max(pred, axis=1)
pred = np.argmax(pred, axis=1)
scores_per_class = [np.sum(scores[pred == c]) / np.sum(scores) for c in range(nc)]
pred = np.reshape(pred, (h, w)) # dh x dw
pred = np.expand_dims(pred, axis=2) # dh x dw x 1
# pred = np.repeat(pred, repeats=3, axis=2) # dh x dw x 3
pred = utils.resize(pred, target_h=oh, target_w=ow) # oh x ow x 1 (original shape)
pred = utils.img_to_array(pred)
return pred, scores_per_class
except:
if img is None:
print('Skipping corrupted image')
return None
else:
raise
def load_mscoco_data():
data_type = 'val2014'
out_directory = os.path.join('data', 'out', model_name)
config = {'dataset_name': 'MSCOCO',
'data_dir': 'data/MSCOCO',
'data_type': data_type}
dataset = getattr(datasets, config['dataset_name'])(**config)
instance_mode = False
keep_context = 0.2
if len(sys.argv) > 1:
filetxt = sys.argv[1]
with open(filetxt) as fin:
basedir = os.path.dirname(filetxt)
files = [os.path.join(basedir, line.rstrip('\n')) for line in fin]
data_gen = (utils.load_image(imfile) for imfile in files)
else:
data_gen = (sample[0] for sample in
dataset.flow())
data = {
'generator': data_gen,
'root_dir': 'data',
'num_instances': dataset.num_instances,
'dir_target': out_directory,
'keep_context': keep_context,
'dataset_name': 'MSCOCO',
'data_type': 'val2017'
}
return data
def load_arbitrary_data(image_filenames=None):
out_directory = os.path.join('data', 'out', model_name)
def data_generator():
for image_filename in image_filenames:
img = PILImage.open(image_filename)
resized_img = utils.resize(img, target_w=dw, target_h=dh)
yield resized_img
data = {
'generator': data_generator(),
'root_dir': 'data',
'num_instances': len(image_filenames),
'dir_target': out_directory,
'keep_context': 0,
'dataset_name': 'MSCOCO',
'data_type': None
}
return data
def run(segmenter, data):
    data_gen = data['generator']
num_instances = data['num_instances']
out_directory = os.path.realpath(data['dir_target'])
keep_context = data['keep_context']
# dataset = getattr(datasets, data['dataset_name'])(**data)
dataset = getattr(datasets, data['dataset_name'])
for idx, image in enumerate(data_gen):
if idx > 20:
break
print('Processing {} out of {}'.format(idx+1, num_instances), end='\r')
pred_final, scores = predict(segmenter, image, h=dh, w=dw)
# draw prediction as rgb
pred_final = color_output_image(dataset.palette, pred_final[:, :, 0])
pred_final = array_to_img(pred_final)
out_file = os.path.join(
out_directory,
'{}_{}_{}_out.png'.format(
idx,
keep_context,
utils.basename_without_ext(pw)))
sys.stdout.flush()
if os.path.isfile(out_file):
continue
utils.ensure_dir(out_directory)
print('Saving output to {}'.format(out_file))
pilimg = PILImage.fromarray(image.astype(np.uint8), mode='RGB')
pilimg.save(out_file.replace('_out.png', '.png'))
pred_final.save(out_file)
def load_data(**kwargs):
load_mscoco = kwargs['load_mscoco']
interim_testing = kwargs['interim_testing']
if load_mscoco:
data = load_mscoco_data()
else:
txt_file = sys.argv[1]
image_dir = os.path.dirname(txt_file)
with open(txt_file) as fin:
image_filenames = [os.path.join(image_dir, line.rstrip('\n')) for line in fin]
data = load_arbitrary_data(image_filenames=image_filenames)
if interim_testing:
        for idx, item in enumerate(data['generator']):
filename, extension = os.path.splitext(image_filenames[idx])
out_filename = filename + '_interim_w{}_h{}'.format(w, h) + extension
PILImage.fromarray(item).save(out_filename)
return data
def main():
# parameters
kwargs = {
'dataset_name': 'MSCOCO',
'load_mscoco': False,
'w': 256,
'h': 256,
'interim_testing': False
}
Experiment(**kwargs)
segmenter = load_model(**kwargs)
    data = load_data(**kwargs)
run(segmenter=segmenter, data=data)
if __name__ == '__main__':
if K.backend() == 'tensorflow':
print('Tensorflow backend detected; Applying memory usage constraints')
sess_config = K.tf.ConfigProto(gpu_options=K.tf.GPUOptions(allow_growth=True))
ss = K.tf.Session(config=sess_config)
K.set_session(ss)
print('This script is obsolete, please use run.py in predict mode instead.')
# main()
|
e098b93c0d6369327a5d29a14657ac91090a4ddd
|
2a76ca8c01e7abe6ef64d030ecbb65e88641b278
|
/examples/snippet-cubes.py
|
4b3fe0661732ddb8e264609db9413e5a702fab9d
|
[] |
permissive
|
glumpy/glumpy
|
18bfc2d76b7a5fc126fbebddf2970d95238fc66b
|
75408635bd46e48ff10939e308a71eafdaff35e8
|
refs/heads/master
| 2023-09-03T11:48:52.087002
| 2023-04-20T15:23:59
| 2023-04-20T15:23:59
| 23,520,171
| 1,228
| 225
|
BSD-3-Clause
| 2023-07-07T07:25:18
| 2014-08-31T18:30:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,043
|
py
|
snippet-cubes.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy.geometry import primitives
from glumpy import app, gl, glm, gloo, data
from glumpy.graphics.collections import BaseCollection
vertex = """
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
attribute float index;
attribute vec3 position;
attribute vec2 texcoord;
varying float v_index;
varying vec2 v_texcoord;
void main()
{
v_texcoord = texcoord;
v_index = index;
vec4 pos = projection * view * model * vec4(position,1.0);
gl_Position = <grid>;
}
"""
fragment = """
uniform sampler2D texture;
varying vec2 v_texcoord;
varying float v_index;
void main()
{
<clip>;
float r = texture2D(texture, v_texcoord).r;
gl_FragColor = vec4(vec3(r),1.0);
}
"""
Grid = gloo.Snippet("""
uniform float rows, cols;
vec4 cell(vec4 position, float index)
{
float col = mod(index,cols) + 0.5;
float row = floor(index/cols) + 0.5;
float x = -1.0 + col * (2.0/cols);
float y = -1.0 + row * (2.0/rows);
float width = 0.95 / (1.0*cols);
float height = 0.95 / (1.0*rows);
vec4 P = position / position.w;
P = vec4(x + width*P.x, y + height*P.y, P.z, P.w);
return P*position.w;
}
""")
Clip = gloo.Snippet("""
uniform vec2 iResolution;
uniform float rows, cols;
void clip(float index)
{
vec2 P = gl_FragCoord.xy;
// mod doesn't play well with 0
float i = index+.00001;
float col = mod(i,cols);
float row = floor(i/cols);
float width = iResolution.x / cols;
float height = iResolution.y / rows;
float x = col * width;
float y = row * height;
float gap = 1.5;
if( P.x < (x+gap)) discard;
if( P.x > (x+width-gap)) discard;
if( P.y < (y+gap)) discard;
if( P.y > (y+height-gap)) discard;
}
""")
rows,cols = 3,3
window = app.Window(width=1024, height=1024, color=(0.30, 0.30, 0.35, 1.00))
# Build collection
dtype = [("position", np.float32, 3),
("normal", np.float32, 3),
("texcoord", np.float32, 2),
("color", np.float32, 4),
("index", np.float32, 1)]
cubes = BaseCollection(vtype=dtype, itype=np.uint32)
V,I = primitives.cube()
C = np.zeros(len(V),dtype=dtype)
for key in V.dtype.names: C[key] = V[key]
for i in range(rows*cols):
C["index"] = i
cubes.append(vertices=C, indices=I)
cubes._update()
V = cubes._vertices_buffer
I = cubes._indices_buffer
@window.event
def on_draw(dt):
global phi, theta
window.clear()
program.draw(gl.GL_TRIANGLES, I)
theta += 0.5
phi += 0.5
model = np.eye(4, dtype=np.float32)
glm.rotate(model, theta, 0, 0, 1)
glm.rotate(model, phi, 0, 1, 0)
program['model'] = model
@window.event
def on_resize(width, height):
program['projection'] = glm.perspective(fovy, width / float(height), 1.0, 100.0)
program['clip']['iResolution'] = width, height
@window.event
def on_mouse_scroll(x, y, dx, dy):
global fovy
fovy = np.minimum(np.maximum(fovy*(1+dy/100), 10.0), 179.0)
program['projection'] = glm.perspective(fovy,
window.width/float(window.height),
1.0, 100.0)
@window.event
def on_init():
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glDisable(gl.GL_BLEND)
program = gloo.Program(vertex, fragment)
program.bind(V)
view = np.eye(4, dtype=np.float32)
model = np.eye(4, dtype=np.float32)
projection = np.eye(4, dtype=np.float32)
glm.translate(view, 0, 0, -3)
program['texture'] = data.get("checkerboard")
program['model'] = model
program['view'] = view
program['grid'] = Grid("pos", "index")
program['grid']["rows"] = rows
program['grid']["cols"] = cols
program['clip'] = Clip("v_index")
program['clip']["rows"] = rows
program['clip']["cols"] = cols
fovy = 30
phi, theta = 30, 20
app.run()
|
dbb060c1caebd0e79928d2fd6103b9511c81965c
|
02ff7b84aaf436651a4b177affc91757266bdfd6
|
/dataset/ctw1500/Evaluation_Protocol/ctw1500_eval.py
|
43f1ebd583ac56fd8d2b3342fd0fc57ec8910820
|
[
"MIT"
] |
permissive
|
GXYM/DRRG
|
cb4345cd57554a4617f7a64371e7b42d8518cc8f
|
41b197679f9b7f785f2b5c5a84871b3f73aef885
|
refs/heads/master
| 2023-07-11T23:32:46.639969
| 2023-06-30T16:41:00
| 2023-06-30T16:41:00
| 226,042,729
| 272
| 61
|
MIT
| 2021-11-06T15:31:21
| 2019-12-05T07:32:19
|
C++
|
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
ctw1500_eval.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os, shutil, sys
from voc_eval_polygon import voc_eval_polygon
from collections import Counter
import numpy as np
import argparse
parser = argparse.ArgumentParser()
# basic opts
parser.add_argument('exp_name', type=str, help='Model output directory')
args = parser.parse_args()
input_dir = 'output/{}'.format(args.exp_name)
eval_result_dir = "output/Analysis/output_eval"
anno_path = 'data/ctw1500/test/test_label_curve.txt'
imagesetfile = 'data/ctw1500/test/test.txt'
outputstr = "dataset/ctw1500/Evaluation_sort/detections_text"
# score_thresh_list=[0.2, 0.3, 0.4, 0.5, 0.6, 0.62, 0.65, 0.7, 0.75, 0.8, 0.9]
score_thresh_list = [0.5]
files = os.listdir(input_dir)
files.sort()
for iscore in score_thresh_list:
fpath = outputstr + str(iscore) + '.txt'
with open(fpath, "w") as f1:
for ix, filename in enumerate(files):
imagename = filename[:-4]
with open(os.path.join(input_dir, filename), "r") as f:
lines = f.readlines()
for line in lines:
box = line.strip().split(",")
assert (len(box) % 2 == 0), 'mismatch xy'
out_str = "{} {}".format(str(int(imagename[:]) - 1001), 0.999)
for i in box:
out_str = out_str + ' ' + str(i)
f1.writelines(out_str + '\n')
rec, prec, AP, FP, TP, image_ids, num_gt = voc_eval_polygon(fpath, anno_path, imagesetfile, 'text', ovthresh=0.5)
fid_path = '{}/Eval_ctw1500_{}.txt'.format(eval_result_dir, iscore)
F = lambda x, y: 2 * x * y * 1.0 / (x + y)
img_dict = dict(Counter(image_ids))
with open(fid_path, 'w') as f:
count = 0
for k, v in zip(img_dict.keys(), img_dict.values()):
fp = np.sum(FP[count:count+v])
tp = np.sum(TP[count:count+v])
count += v
recall = tp / float(num_gt[k])
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
f.write('%s :: Precision=%.4f - Recall=%.4f\n' % (str(int(k)+1001)+".txt", recall, precision))
Recall = rec[-1]
Precision = prec[-1]
F_score = F(Recall, Precision)
f.write('ALL :: AP=%.4f - Precision=%.4f - Recall=%.4f - Fscore=%.4f' % (AP, Precision, Recall, F_score))
print('AP: {:.4f}, recall: {:.4f}, pred: {:.4f}, '
'FM: {:.4f}\n'.format(AP, Recall, Precision, F_score))
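# Example invocation (a sketch; `my_experiment` stands for the experiment name,
# i.e. the model output directory under output/, as defined by the argparse setup above):
#   python ctw1500_eval.py my_experiment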
|
eb6bc4c87bc0b135cc1c9df54bf03b4fcb5b8ecf
|
528f1dbb2b6d1d642dedf978ac34f5fa0517093a
|
/bin/bounce.py
|
81309ac413a4bd02950c03e11d6dc82e3d440394
|
[
"MIT"
] |
permissive
|
jquast/blessed
|
18714ec3f2f1abc0a84008fae00845235ec898ce
|
a34c6b1869b4dd467c6d1ab6895872bb72db7e0f
|
refs/heads/master
| 2023-08-29T14:26:10.248503
| 2023-08-20T01:25:06
| 2023-08-20T01:25:06
| 17,308,448
| 992
| 101
|
MIT
| 2023-08-20T01:25:07
| 2014-03-01T05:28:51
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
bounce.py
|
#!/usr/bin/env python
"""Classic game of tennis."""
# std imports
from math import floor
# local
from blessed import Terminal
def roundxy(x, y):
return int(floor(x)), int(floor(y))
term = Terminal()
x, y, xs, ys = 2, 2, 0.4, 0.3
with term.cbreak(), term.hidden_cursor():
# clear the screen
print(term.home + term.black_on_olivedrab4 + term.clear)
# loop every 20ms
while term.inkey(timeout=0.02) != 'q':
# erase,
txt_erase = term.move_xy(*roundxy(x, y)) + ' '
# bounce,
if x >= (term.width - 1) or x <= 0:
xs *= -1
if y >= term.height or y <= 0:
ys *= -1
# move,
x, y = x + xs, y + ys
# draw !
txt_ball = term.move_xy(*roundxy(x, y)) + '█'
print(txt_erase + txt_ball, end='', flush=True)
|
f9f6c0d755eb4f56551f763537cc35e59ffd38b9
|
4feb5744ab5a26aeeb04573e4944d2bf4d1a6a2a
|
/peeringdb_server/management/commands/pdb_delete_users.py
|
f9352debf887e5007974d40dd671ab4fe4820219
|
[
"BSD-2-Clause"
] |
permissive
|
peeringdb/peeringdb
|
cb79f809c4bb8cc5192180366df1f05d8fc0111f
|
3f62b2d97c78ccf151fb1a5761637e28463b9541
|
refs/heads/master
| 2023-09-04T09:26:43.741086
| 2023-08-22T19:20:34
| 2023-08-22T19:20:34
| 60,563,174
| 311
| 121
|
BSD-2-Clause
| 2023-09-13T02:13:42
| 2016-06-06T21:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
pdb_delete_users.py
|
"""
Flag and delete orphaned user accounts
"""
from datetime import timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils import timezone
from peeringdb_server.models import User
class PretendMode(IOError):
pass
class Command(BaseCommand):
help = "Flags and deletes elderyly orphaned user accounts"
def add_arguments(self, parser):
parser.add_argument(
"--commit", action="store_true", help="will commit the changes"
)
parser.add_argument(
"--max-notify",
type=int,
default=10,
help="maximum amount of user notifications to send",
)
parser.add_argument(
"--max-flag",
type=int,
default=0,
help="maximum amount of user flags to set, set to not limit",
)
def log(self, msg):
if self.commit:
self.stdout.write(msg)
else:
self.stdout.write(f"[pretend] {msg}")
def handle(self, *args, **options):
self.commit = options.get("commit")
self.max_notify = options.get("max_notify")
self.max_flag = options.get("max_flag")
self.notifications = []
sid = transaction.savepoint()
try:
with transaction.atomic():
self.flag_users()
self.unflag_users()
self.notify_users()
self.delete_users()
if not self.commit:
raise PretendMode()
except PretendMode:
if sid:
transaction.savepoint_rollback(sid)
else:
transaction.rollback()
self.send_emails()
def flag_users(self):
min_age = timezone.now() - timedelta(days=settings.MIN_AGE_ORPHANED_USER_DAYS)
qset = User.objects.filter(flagged_for_deletion__isnull=True)
qset = qset.filter(is_active=True)
qset = qset.prefetch_related("groups")
qset = qset.exclude(groups__name__startswith="org.")
qset = qset.exclude(never_flag_for_deletion=True)
qset = qset.exclude(date_joined__gte=min_age)
if self.max_flag > 0:
self.log(f"Flagging {self.max_flag} of {qset.count()} orphaned users ...")
qset = qset[: self.max_flag]
else:
self.log(f"Flagging {qset.count()} users ...")
for user in qset:
deletion_date = timezone.now() + timedelta(
days=settings.DELETE_ORPHANED_USER_DAYS
)
self.log(f"Flagging {user} for deletion on {deletion_date}")
user.flagged_for_deletion = deletion_date
user.notified_for_deletion = None
user.save()
def unflag_users(self):
qset = User.objects.filter(flagged_for_deletion__isnull=False)
qset = qset.prefetch_related("groups")
qset = qset.filter(groups__name__startswith="org.")
for user in qset:
self.log(f"{user} no longer orphaned - removing flag")
user.flagged_for_deletion = None
user.notified_for_deletion = None
user.save()
def notify_users(self):
now = timezone.now()
qset = User.objects.filter(flagged_for_deletion__isnull=False)
qset = qset.filter(notified_for_deletion__isnull=True).order_by(
"flagged_for_deletion"
)
for user in qset[: self.max_notify]:
notify_date = user.flagged_for_deletion - timedelta(
days=settings.NOTIFY_ORPHANED_USER_DAYS
)
if notify_date > now:
continue
self.log(f"Notifying {user} about pending deletion")
self.notifications.append(
(
user,
"Pending account removal",
f"As your account `{user.username}` is no longer associated with any "
f"organizations, it will be removed on {user.flagged_for_deletion}."
"\n\n"
"If you wish to keep your account, please affiliate it with an "
"organization.",
)
)
user.notified_for_deletion = timezone.now()
user.save()
def send_emails(self):
count = len(self.notifications)
if not self.commit:
self.log(f"Would send {count} emails ...")
return
self.log(f"Sending {count} emails ...")
for user, subject, text in self.notifications:
user.email_user(subject, text)
def delete_users(self):
now = timezone.now()
qset = User.objects.filter(
flagged_for_deletion__lte=now, never_flag_for_deletion=False
)
for user in qset:
self.log(f"Closing {user}'s account ..")
user.close_account()
|
24f1515120d2432d19425036b4d3f7bab42b55af
|
f43a1f64cb5c483fad6782c866508d8724622f24
|
/PyViCare/PyViCareCachedService.py
|
340d4f970439093afa007c5ebce4016c9f448a51
|
[
"Apache-2.0"
] |
permissive
|
somm15/PyViCare
|
88bfca043e739f7b662be0dc0a05e443e7a31825
|
8ba411483a865e074d1146fd1b8b7a8c4f4be122
|
refs/heads/master
| 2023-08-31T11:34:36.605842
| 2023-06-08T20:17:01
| 2023-06-08T20:17:01
| 164,931,903
| 110
| 89
|
Apache-2.0
| 2023-09-11T22:18:11
| 2019-01-09T20:20:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
PyViCareCachedService.py
|
import logging
import threading
from typing import Any, List
from PyViCare.PyViCareAbstractOAuthManager import AbstractViCareOAuthManager
from PyViCare.PyViCareService import (ViCareDeviceAccessor, ViCareService,
readFeature)
from PyViCare.PyViCareUtils import PyViCareInvalidDataError, ViCareTimer
logger = logging.getLogger('ViCare')
logger.addHandler(logging.NullHandler())
class ViCareCachedService(ViCareService):
def __init__(self, oauth_manager: AbstractViCareOAuthManager, accessor: ViCareDeviceAccessor, roles: List[str], cacheDuration: int) -> None:
ViCareService.__init__(self, oauth_manager, accessor, roles)
self.__cacheDuration = cacheDuration
self.__cache = None
self.__cacheTime = None
self.__lock = threading.Lock()
def getProperty(self, property_name: str) -> Any:
data = self.__get_or_update_cache()
entities = data["data"]
return readFeature(entities, property_name)
def setProperty(self, property_name, action, data):
response = super().setProperty(property_name, action, data)
self.clear_cache()
return response
def __get_or_update_cache(self):
with self.__lock:
if self.is_cache_invalid():
                # we always set the cache time before we fetch the data
# to avoid consuming all the api calls if the api is down
# see https://github.com/home-assistant/core/issues/67052
# we simply return the old cache in this case
self.__cacheTime = ViCareTimer().now()
data = self.fetch_all_features()
if "data" not in data:
logger.error("Missing 'data' property when fetching data.")
raise PyViCareInvalidDataError(data)
self.__cache = data
return self.__cache
def is_cache_invalid(self) -> bool:
return self.__cache is None or self.__cacheTime is None or (ViCareTimer().now() - self.__cacheTime).seconds > self.__cacheDuration
def clear_cache(self):
with self.__lock:
self.__cache = None
self.__cacheTime = None
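# Hypothetical usage sketch (not part of the original module). The OAuth manager,
# device accessor, roles and feature name below are placeholders for objects that
# are normally created by the higher-level PyViCare classes:
#
# service = ViCareCachedService(oauth_manager, accessor, roles, cacheDuration=60)
# value = service.getProperty("some.feature.name")
# service.setProperty("some.feature.name", "someAction", {})  # also clears the cache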
|
1567d0cdb90f80fa41d0211b102dd7aa4b0cc962
|
c504d12541142cd813527aa0d5a20c824bd15b91
|
/simple_rl/agents/DelayedQAgentClass.py
|
4ed20942e9308b6d0ac6cdcc301fd4afc252e1a9
|
[
"Apache-2.0"
] |
permissive
|
david-abel/simple_rl
|
322324c2f1ac8d46e4ffc7f3ed249ad1729fed21
|
d294572bd186b48beac47d8cdcffdf1a05e7d563
|
refs/heads/master
| 2023-09-03T19:54:17.481235
| 2022-08-22T09:44:35
| 2022-08-22T09:44:35
| 63,088,888
| 262
| 105
|
Apache-2.0
| 2022-08-22T09:44:36
| 2016-07-11T17:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 8,550
|
py
|
DelayedQAgentClass.py
|
'''
DelayedQAgentClass.py: Class for Delayed Q-Learning from [Strehl et al. 2006].
Author: Yuu Jinnai (ddyuudd@gmail.com)
'''
# Python imports.
import random
import numpy
import time
import copy
from collections import defaultdict
# Other imports.
from simple_rl.agents.AgentClass import Agent
class DelayedQAgent(Agent):
'''
Delayed-Q Learning Agent (Strehl, A.L., Li, L., Wiewiora, E., Langford, J. and Littman, M.L., 2006. PAC model-free reinforcement learning).
'''
def __init__(self, actions, init_q=None, name="Delayed-Q", gamma=0.99, m=5, epsilon1=0.1):
'''
Args:
actions (list): Contains strings denoting the actions.
init_q (2d list): Initial Q function. AU(s, a) in Strehl et al 2006.
name (str): Denotes the name of the agent.
gamma (float): discount factor
m (float): Number of samples for updating Q-value
epsilon1 (float): Learning rate
'''
# Set initial q func.
self.rmax = 1 # TODO: set/get function
init_q = defaultdict(lambda : defaultdict(lambda: self.rmax / (1 - gamma))) if init_q is None else init_q
Agent.__init__(self, name=name, actions=actions, gamma=gamma)
# Set/initialize parameters and other relevant classwide data
self.step_number = 0
# TODO: Here we assume that init_q has Qvalue for every (s, a) pair.
self.q_func = copy.deepcopy(init_q)
self.default_q_func = copy.deepcopy(init_q)
self.AU = defaultdict(lambda: defaultdict(lambda: 0.0)) # used for attempted updates
self.l = defaultdict(lambda: defaultdict(lambda: 0)) # counters
self.b = defaultdict(lambda: defaultdict(lambda: 0)) # beginning timestep of attempted update
        self.LEARN = defaultdict(lambda: defaultdict(lambda: True)) # LEARN(s, a) flags indicating whether an attempted update may proceed
for x in init_q:
for y in init_q[x]:
self.AU[x][y] = 0.0 # AU(s, a) <- 0
self.l[x][y] = 0 # l(s, a) <- 0
self.b[x][y] = 0 # b(s, a) <- 0
self.LEARN[x][y] = False
        # TODO: Add code to calculate m and epsilon1 from epsilon and delta.
# m and epsilon1 should be set according to epsilon and delta in order to be PAC-MDP.
self.m = m
self.epsilon1 = epsilon1
self.tstar = 0 # time of most recent action value change
def get_parameters(self):
'''
Returns:
(dict) key=param_name (str) --> val=param_val (object).
'''
param_dict = defaultdict(int)
param_dict["gamma"] = self.gamma
param_dict["m"] = self.m
param_dict["epsilon1"] = self.epsilon1
return param_dict
# --------------------------------
# ---- CENTRAL ACTION METHODS ----
# --------------------------------
def act(self, state, reward, learning=True):
'''
Args:
state (State)
reward (float)
Summary:
The central method called during each time step.
Retrieves the action according to the current policy
and performs updates given (s=self.prev_state,
a=self.prev_action, r=reward, s'=state)
'''
if learning:
self.update(self.prev_state, self.prev_action, reward, state)
        # For Delayed Q-learning it always takes the action with the highest Q value (no epsilon exploration required).
action = self.greedy_q_policy(state)
self.prev_state = state
self.prev_action = action
self.step_number += 1
return action
def greedy_q_policy(self, state):
'''
Args:
state (State)
Returns:
(str): action.
'''
action = self.get_max_q_action(state)
return action
# ---------------------------------
# ---- Q VALUES AND PARAMETERS ----
# ---------------------------------
def update(self, state, action, reward, next_state):
'''
Args:
state (State)
action (str)
reward (float)
next_state (State)
Summary:
Performs the Delayed Q-learning attempted update for (state, action) following Strehl et al. 2006.
'''
# If this is the first state, just return.
if state is None:
self.prev_state = next_state
return
if self.b[state][action] <= self.tstar:
self.LEARN[state][action] = True
if self.LEARN[state][action]:
if self.l[state][action] == 0:
self.b[state][action] = self.step_number
self.l[state][action] = self.l[state][action] + 1
nextq, _ = self._compute_max_qval_action_pair(next_state)
self.AU[state][action] = self.AU[state][action] + reward + self.gamma * nextq
if self.l[state][action] == self.m:
if self.q_func[state][action] - self.AU[state][action] / self.m >= 2 * self.epsilon1:
self.q_func[state][action] = self.AU[state][action] / self.m + self.epsilon1
self.tstar = self.step_number
elif self.b[state][action] > self.tstar:
self.LEARN[state][action] = False
self.AU[state][action] = 0
self.l[state][action] = 0
def _compute_max_qval_action_pair(self, state):
'''
Args:
state (State)
Returns:
(tuple) --> (float, str): where the float is the Qval, str is the action.
'''
# Grab random initial action in case all equal
best_action = random.choice(self.actions)
max_q_val = float("-inf")
shuffled_action_list = self.actions[:]
random.shuffle(shuffled_action_list)
# Find best action (action w/ current max predicted Q value)
for action in shuffled_action_list:
q_s_a = self.get_q_value(state, action)
if q_s_a > max_q_val:
max_q_val = q_s_a
best_action = action
return max_q_val, best_action
def get_max_q_action(self, state):
'''
Args:
state (State)
Returns:
(str): denoting the action with the max q value in the given @state.
'''
return self._compute_max_qval_action_pair(state)[1]
def get_max_q_value(self, state):
'''
Args:
state (State)
Returns:
(float): denoting the max q value in the given @state.
'''
return self._compute_max_qval_action_pair(state)[0]
def get_q_value(self, state, action):
'''
Args:
state (State)
action (str)
Returns:
(float): denoting the q value of the (@state, @action) pair.
'''
return self.q_func[state][action]
def get_action_distr(self, state, beta=0.2):
'''
Args:
state (State)
beta (float): Softmax temperature parameter.
Returns:
(list of floats): The i-th float corresponds to the probability
mass associated with the i-th action (indexing into self.actions)
'''
all_q_vals = []
for i in range(len(self.actions)):
action = self.actions[i]
all_q_vals.append(self.get_q_value(state, action))
# Softmax distribution.
total = sum([numpy.exp(beta * qv) for qv in all_q_vals])
softmax = [numpy.exp(beta * qv) / total for qv in all_q_vals]
return softmax
def reset(self):
self.step_number = 0
self.episode_number = 0
self.q_func = copy.deepcopy(self.default_q_func)
Agent.reset(self)
def end_of_episode(self):
'''
Summary:
Resets the agent's prior pointers.
'''
Agent.end_of_episode(self)
def set_q_function(self, q_func):
'''
Set initial Q-function.
For PAC-MDP, initial Q(s, a) should be an upper bound of Q*(s, a).
'''
self.default_q_func = copy.deepcopy(q_func)
self.q_func = copy.deepcopy(self.default_q_func)
def set_vmax(self):
'''
Initialize Q-values to be Vmax.
'''
vmax = self.rmax / (1 - self.gamma)
for x in self.q_func:
for y in self.q_func[x]:
self.q_func[x][y] = vmax
self.default_q_func[x][y] = vmax
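# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): a minimal toy loop
# showing how the agent is driven. The two "states" below are hypothetical
# hashable stand-ins; in simple_rl the environment loop is normally handled by
# run_agents_on_mdp with proper MDP/State objects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    toy_actions = ["left", "right"]
    agent = DelayedQAgent(actions=toy_actions, gamma=0.95, m=3, epsilon1=0.1)
    state, reward = "s0", 0.0
    for _ in range(50):
        action = agent.act(state, reward)
        # Hypothetical dynamics: taking "right" in s0 yields reward 1 and moves to s1.
        if state == "s0" and action == "right":
            state, reward = "s1", 1.0
        else:
            state, reward = "s0", 0.0
    agent.end_of_episode()
    print(agent.get_max_q_value("s0"))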
|
dd311b750373103b32d5b5d479b670117369ab3f
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/formulas/ball_numbers.py
|
ce2f815844bde35c945a9ca914e63e6c6b1d70ec
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 30
|
py
|
ball_numbers.py
|
# Height of a ball thrown straight up with v0 = 5 m/s after t = 0.6 s: y = v0*t - 0.5*g*t**2 (g = 9.81 m/s**2)
print 5*0.6 - 0.5*9.81*0.6**2
|
69b16eea9dc8f18744d11397a28853dd7cb04990
|
9a0eb3e292d57b59198c7c66a994372ced9cfa5b
|
/nodes/1.x/python/Assembly.Members.py
|
c7833eea069fa3ddecaea0162ac35944ebb7aaea
|
[
"MIT"
] |
permissive
|
andydandy74/ClockworkForDynamo
|
544ddf0893f5c0072fca7934f4e128001771f767
|
528400c667c4c3f2b51814af84e85c8fab8a8059
|
refs/heads/master
| 2023-08-19T03:07:33.489926
| 2023-08-13T04:31:17
| 2023-08-13T04:31:17
| 15,043,988
| 184
| 100
|
MIT
| 2023-09-04T18:47:40
| 2013-12-09T10:11:01
|
Python
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
Assembly.Members.py
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
assemblies = UnwrapElement(IN[0])
elementlist = list()
for assinst in assemblies:
memberslist = assinst.GetMemberIds()
members = list()
for item in memberslist:
members.append(doc.GetElement(item))
elementlist.append(members)
OUT = elementlist
|
ed3bd88ed6f5d9bac2bac4790e2f67b6972913a9
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Alignment/APEEstimation/python/SectorBuilder_Tid_cff.py
|
07c0eeb8adb2534db10f3eb0ba49064c253905b8
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,957
|
py
|
SectorBuilder_Tid_cff.py
|
import FWCore.ParameterSet.Config as cms
from Alignment.APEEstimation.SectorBuilder_cfi import *
##
## Whole Subdetector (means only one for both endcaps)
##
Tid = EmptySector.clone(
name = 'Tid',
subdetId = [4],
)
TID = cms.VPSet(
Tid,
)
##
## Separation of side(+,-)
##
TidMinus = Tid.clone(
name = 'TidMinus',
side = [1],
)
TidPlus = Tid.clone(
name = 'TidPlus',
side = [2],
)
TIDSideSeparation = cms.VPSet(
TidMinus,
TidPlus,
)
##
## Separation of side + rings
##
TidMinusRing1 = TidMinus.clone(
name = 'TidMinusRing1',
ring = [1],
)
TidMinusRing2 = TidMinus.clone(
name = 'TidMinusRing2',
ring = [2],
)
TidMinusRing3 = TidMinus.clone(
name = 'TidMinusRing3',
ring = [3],
)
TidPlusRing1 = TidPlus.clone(
name = 'TidPlusRing1',
ring = [1],
)
TidPlusRing2 = TidPlus.clone(
name = 'TidPlusRing2',
ring = [2],
)
TidPlusRing3 = TidPlus.clone(
name = 'TidPlusRing3',
ring = [3],
)
TIDSideAndPureRingSeparation = cms.VPSet(
TidMinusRing1,
TidMinusRing2,
TidMinusRing3,
TidPlusRing1,
TidPlusRing2,
TidPlusRing3,
)
##
## Separation of side + rings + rphi/stereo
##
TidMinusRing1Rphi = TidMinusRing1.clone(
name = 'TidMinusRing1Rphi',
isRPhi = [1],
)
TidMinusRing1Stereo = TidMinusRing1.clone(
name = 'TidMinusRing1Stereo',
isStereo = [1],
)
TidMinusRing2Rphi = TidMinusRing2.clone(
name = 'TidMinusRing2Rphi',
isRPhi = [1],
)
TidMinusRing2Stereo = TidMinusRing2.clone(
name = 'TidMinusRing2Stereo',
isStereo = [1],
)
TidPlusRing1Rphi = TidPlusRing1.clone(
name = 'TidPlusRing1Rphi',
isRPhi = [1],
)
TidPlusRing1Stereo = TidPlusRing1.clone(
name = 'TidPlusRing1Stereo',
isStereo = [1],
)
TidPlusRing2Rphi = TidPlusRing2.clone(
name = 'TidPlusRing2Rphi',
isRPhi = [1],
)
TidPlusRing2Stereo = TidPlusRing2.clone(
name = 'TidPlusRing2Stereo',
isStereo = [1],
)
TIDSideAndRingSeparation = cms.VPSet(
TidMinusRing1Rphi,
TidMinusRing1Stereo,
TidMinusRing2Rphi,
TidMinusRing2Stereo,
TidMinusRing3,
TidPlusRing1Rphi,
TidPlusRing1Stereo,
TidPlusRing2Rphi,
TidPlusRing2Stereo,
TidPlusRing3,
)
##
## Separation of side + rings + rphi/stereo + orientations
##
TidMinusRing1RphiOut = TidMinusRing1Rphi.clone(
name = 'TidMinusRing1RphiOut',
wDirection = [-1],
)
TidMinusRing1StereoOut = TidMinusRing1Stereo.clone(
name = 'TidMinusRing1StereoOut',
wDirection = [-1],
)
TidMinusRing1RphiIn = TidMinusRing1Rphi.clone(
name = 'TidMinusRing1RphiIn',
wDirection = [1],
)
TidMinusRing1StereoIn = TidMinusRing1Stereo.clone(
name = 'TidMinusRing1StereoIn',
wDirection = [1],
)
TidMinusRing2RphiOut = TidMinusRing2Rphi.clone(
name = 'TidMinusRing2RphiOut',
wDirection = [-1],
)
TidMinusRing2StereoOut = TidMinusRing2Stereo.clone(
name = 'TidMinusRing2StereoOut',
wDirection = [-1],
)
TidMinusRing2RphiIn = TidMinusRing2Rphi.clone(
name = 'TidMinusRing2RphiIn',
wDirection = [1],
)
TidMinusRing2StereoIn = TidMinusRing2Stereo.clone(
name = 'TidMinusRing2StereoIn',
wDirection = [1],
)
TidMinusRing3Out = TidMinusRing3.clone(
name = 'TidMinusRing3Out',
wDirection = [-1],
)
TidMinusRing3In = TidMinusRing3.clone(
name = 'TidMinusRing3In',
wDirection = [1],
)
TidPlusRing1RphiOut = TidPlusRing1Rphi.clone(
name = 'TidPlusRing1RphiOut',
wDirection = [1],
)
TidPlusRing1StereoOut = TidPlusRing1Stereo.clone(
name = 'TidPlusRing1StereoOut',
wDirection = [1],
)
TidPlusRing1RphiIn = TidPlusRing1Rphi.clone(
name = 'TidPlusRing1RphiIn',
wDirection = [-1],
)
TidPlusRing1StereoIn = TidPlusRing1Stereo.clone(
name = 'TidPlusRing1StereoIn',
wDirection = [-1],
)
TidPlusRing2RphiOut = TidPlusRing2Rphi.clone(
name = 'TidPlusRing2RphiOut',
wDirection = [1],
)
TidPlusRing2StereoOut = TidPlusRing2Stereo.clone(
name = 'TidPlusRing2StereoOut',
wDirection = [1],
)
TidPlusRing2RphiIn = TidPlusRing2Rphi.clone(
name = 'TidPlusRing2RphiIn',
wDirection = [-1],
)
TidPlusRing2StereoIn = TidPlusRing2Stereo.clone(
name = 'TidPlusRing2StereoIn',
wDirection = [-1],
)
TidPlusRing3Out = TidPlusRing3.clone(
name = 'TidPlusRing3Out',
wDirection = [1],
)
TidPlusRing3In = TidPlusRing3.clone(
name = 'TidPlusRing3In',
wDirection = [-1],
)
TIDSideAndRingAndOrientationSeparation = cms.VPSet(
TidMinusRing1RphiOut,
TidMinusRing1StereoOut,
TidMinusRing1RphiIn,
TidMinusRing1StereoIn,
TidMinusRing2RphiOut,
TidMinusRing2StereoOut,
TidMinusRing2RphiIn,
TidMinusRing2StereoIn,
TidMinusRing3Out,
TidMinusRing3In,
TidPlusRing1RphiOut,
TidPlusRing1StereoOut,
TidPlusRing1RphiIn,
TidPlusRing1StereoIn,
TidPlusRing2RphiOut,
TidPlusRing2StereoOut,
TidPlusRing2RphiIn,
TidPlusRing2StereoIn,
TidPlusRing3Out,
TidPlusRing3In,
)
|
1e58393acc85fa4e9fbc05a0b25834d5c30947e3
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/tests/sharepoint/test_feature.py
|
beca7730d6c1b12c1466a2297ba8140266447847
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 354
|
py
|
test_feature.py
|
from office365.sharepoint.features.feature import Feature
from tests.sharepoint.sharepoint_case import SPTestCase
class TestFeature(SPTestCase):
target_feature = None # type: Feature
def test_1_get_site_features(self):
site_features = self.client.site.features.get().execute_query()
self.assertGreater(len(site_features), 0)
|
7925e0898f04a7ceb23c6006f970c1355e2cebde
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/lib/galaxy/selenium/context.py
|
6ba867e885710d0b654b837d1ef40d21940fef84
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,661
|
py
|
context.py
|
import os
from abc import abstractmethod
from typing import Optional
from urllib.parse import urljoin
import yaml
from .driver_factory import ConfiguredDriver
from .navigates_galaxy import NavigatesGalaxy
class GalaxySeleniumContext(NavigatesGalaxy):
url: str
target_url_from_selenium: str
configured_driver: ConfiguredDriver
def build_url(self, url: str, for_selenium: bool = True) -> str:
if for_selenium:
base = self.target_url_from_selenium
else:
base = self.url
return urljoin(base, url)
@property
def driver(self):
return self.configured_driver.driver
def screenshot(self, label: str):
"""If GALAXY_TEST_SCREENSHOTS_DIRECTORY is set create a screenshot there named <label>.png.
Unlike the above "snapshot" feature, this will be written out regardless and not in a per-test
directory. The above method is used for debugging failures within a specific test. This method
if more for creating a set of images to augment automated testing with manual human inspection
after a test or test suite has executed.
"""
target = self._screenshot_path(label)
if target is None:
return
self.driver.save_screenshot(target)
return target
@abstractmethod
def _screenshot_path(self, label: str, extension=".png") -> str:
"""Path to store screenshots in."""
class GalaxySeleniumContextImpl(GalaxySeleniumContext):
"""Minimal, simplified GalaxySeleniumContext useful outside the context of test cases.
A variant of this concept that can also populate content via the API
to then interact with via the Selenium is :class:`galaxy_test.selenium.framework.GalaxySeleniumContextImpl`.
"""
def __init__(self, from_dict: Optional[dict] = None) -> None:
from_dict = from_dict or {}
self.configured_driver = ConfiguredDriver(**from_dict.get("driver", {}))
self.url = from_dict.get("local_galaxy_url", "http://localhost:8080")
self.target_url_from_selenium = from_dict.get("selenium_galaxy_url", self.url)
self.timeout_multiplier = from_dict.get("timeout_multiplier", 1)
def _screenshot_path(self, label, extension=".png"):
return label + extension
def init(config=None, clazz=GalaxySeleniumContextImpl) -> GalaxySeleniumContext:
if os.path.exists("galaxy_selenium_context.yml"):
with open("galaxy_selenium_context.yml") as f:
as_dict = yaml.safe_load(f)
context = clazz(as_dict)
else:
config = config or {}
context = clazz(config)
return context
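# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the Galaxy source): driving the context
# outside a test case. Assumes a Galaxy server is reachable at the URL from
# galaxy_selenium_context.yml (or the http://localhost:8080 default) and that
# a Selenium driver can be created by ConfiguredDriver with no arguments.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ctx = init()
    ctx.driver.get(ctx.build_url("/"))
    ctx.screenshot("landing_page")  # written to the working directory as landing_page.png
    ctx.driver.quit()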
|
c5fb19954f95c292267cad83f8f00eced012ba41
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/rotate-function.py
|
2a53ada048aa222f9033f3ceb2d25f0fcc0d4f50
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
rotate-function.py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
s = sum(A)
fi = 0
for i in xrange(len(A)):
fi += i * A[i]
result = fi
for i in xrange(1, len(A)+1):
fi += s - len(A) * A[-i]
result = max(result, fi)
return result
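# ---------------------------------------------------------------------------
# Editor's note (not part of the original solution): the O(n) loop relies on
# the identity F(k) = F(k-1) + S - n*A[n-k], with S = sum(A) and n = len(A).
# Rotating once increases every element's coefficient by 1 (contributing +S),
# except the element that wraps from index n-1 back to index 0: the +S step
# treats its coefficient as rising by 1 while it actually drops by n-1, so a
# combined correction of -n*A[n-k] is needed.
# Worked example for A = [4, 3, 2, 6]: S = 15, F(0) = 25,
#   F(1) = 25 + 15 - 4*6 = 16,
#   F(2) = 16 + 15 - 4*2 = 23,
#   F(3) = 23 + 15 - 4*3 = 26, so the answer is 26.
# ---------------------------------------------------------------------------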
|
f27d6e67d5150d61531c9879b6b0cd9dceb96e33
|
dbf613469aa8c69ba013e016de82c942508b2485
|
/ppb/features/default_sprites.py
|
48a60fd8f5b09af2c272e855cb765854fc5d120f
|
[
"Artistic-2.0"
] |
permissive
|
ppb/pursuedpybear
|
dcb96ae35d352a9165898872048c72c6224074b9
|
d067c2f7d440389fc542638d79e3244281c6e737
|
refs/heads/canon
| 2022-11-22T11:57:27.406370
| 2022-02-08T02:12:45
| 2022-02-08T02:12:45
| 54,496,479
| 247
| 119
|
Artistic-2.0
| 2022-11-12T22:36:20
| 2016-03-22T17:46:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
default_sprites.py
|
"""
Theme: sprites with common default behaviours (motion).
Types of motion include moving relative to the motion of other sprites and
moving towards a target object.
"""
import ppb
import math
class TargetSprite(ppb.Sprite):
"""Sprite that moves to a given target.
:param target: Vector that the sprite moves towards.
:param speed: Distance per second that the sprite travels with linear motion.
Negative values cause movement away from the target.
:param exponential_speed: Fraction of the distance to the target that the sprite travels
per second with exponential motion. Must be less than 1.
Negative values cause movement away from the target.
:param max_speed: Maximum distance per second that the sprite can travel towards the target.
Negative values cause movement away from the target.
:param min_speed: Minimum distance per second that the sprite travels when not in range of the target.
Non-negative values prevent movement away from the target.
"""
target = ppb.Vector(0, 0)
speed = 1.0
exponential_speed = 0.0
max_speed = math.inf
min_speed = -math.inf
def on_update(self, update_event, signal):
if self.max_speed < self.min_speed:
raise ValueError(f"{type(self).__name__} maximum speed cannot be less than minimum speed.")
if self.exponential_speed > 1.0:
raise ValueError(f"{type(self).__name__} exponential speed cannot be greater than 1.")
offset = self.target - self.position
distance_to_target = offset.length
if distance_to_target < 0.0001:
self.position = self.target
return
max_distance = self.max_speed * update_event.time_delta
min_distance = self.min_speed * update_event.time_delta
linear_distance = self.speed * update_event.time_delta
exponential_distance = distance_to_target * self._exponential_decay(update_event.time_delta)
total_distance = linear_distance + exponential_distance
total_distance = min(total_distance, max_distance)
total_distance = max(total_distance, min_distance)
if distance_to_target <= total_distance:
self.position = self.target
else:
direction = offset.normalize()
self.position += direction * total_distance
def _exponential_decay(self, time_delta):
decay_rate = 1. - self.exponential_speed
remaining = decay_rate ** time_delta
decay_amount = 1. - remaining
return decay_amount
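# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of ppb): a TargetSprite that homes in on a
# fixed point, mixing linear and exponential motion. The scene wiring below
# follows the usual ppb setup/run pattern but is only illustrative.
# ---------------------------------------------------------------------------
class HomingSprite(TargetSprite):
    target = ppb.Vector(3, 4)     # point to move towards
    speed = 2.0                   # 2 units per second of linear motion
    exponential_speed = 0.5       # plus half of the remaining distance per second
    max_speed = 4.0               # never faster than 4 units per second


def setup(scene):
    scene.add(HomingSprite(position=ppb.Vector(-5, -5)))


if __name__ == "__main__":
    ppb.run(setup=setup)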
|
0a35cd5a1cd8d72fb52f0afac6f655337c396efd
|
c259a48454bd6450d3afc5bc66fc5761b25e0a5b
|
/.erda/migrations/cmdb/20211102-cluster-scheduler-cover-tag.py
|
edc81444f37918d6daa6a6f233472fdc0eb877b2
|
[
"Apache-2.0"
] |
permissive
|
erda-project/erda
|
c84db0ffaa7e6099475c1199e6338be7d0a5d245
|
852a5e3e3f6829afc343958ac0da60f3623f9c64
|
refs/heads/master
| 2023-09-01T01:45:08.582910
| 2023-08-31T15:32:21
| 2023-08-31T15:32:21
| 344,676,663
| 2,789
| 391
|
Apache-2.0
| 2023-09-14T06:24:33
| 2021-03-05T02:58:27
|
Go
|
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
20211102-cluster-scheduler-cover-tag.py
|
"""
Generated by Erda Migrator.
Please implement the function entry, and add it to the list entries.
"""
import json
import django.db.models
from django.db.models import Q
class CoClusters(django.db.models.Model):
"""
generated by erda-cli
"""
id = django.db.models.BigIntegerField()
org_id = django.db.models.BigIntegerField()
name = django.db.models.CharField()
display_name = django.db.models.CharField()
type = django.db.models.CharField()
cloud_vendor = django.db.models.CharField()
logo = django.db.models.TextField()
description = django.db.models.TextField()
wildcard_domain = django.db.models.CharField()
config = django.db.models.TextField()
urls = django.db.models.TextField()
settings = django.db.models.TextField()
scheduler = django.db.models.TextField()
opsconfig = django.db.models.TextField()
resource = django.db.models.TextField()
sys = django.db.models.TextField()
created_at = django.db.models.DateTimeField(auto_now=True)
updated_at = django.db.models.DateTimeField(auto_now=True, auto_now_add=True)
manage_config = django.db.models.TextField()
class Meta:
db_table = "co_clusters"
def entry():
"""
please implement this and add it to the list entries
"""
condition_clusters = CoClusters.objects.filter(Q(type='k8s') | Q(type='edas'))
for cluster in condition_clusters:
try:
fmt_scheduler = json.loads(cluster.scheduler)
fmt_scheduler["enableTag"] = True
fmt_scheduler["enableWorkspace"] = True
cluster.scheduler = json.dumps(fmt_scheduler, indent=4, ensure_ascii=False)
cluster.save()
except Exception as e:
print("cluster %s enabled scheduler switch error, exception: %s, skip" % (cluster.name, e))
entries: [callable] = [
entry,
]
|
e6060b0fba86595758c7cadf2e690ea19db894ac
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-core-2.5.1/PyObjCTest/test_hidden_selector.py
|
643f176c0fd04cd3b9df96ff860c95cacda6d40a
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
test_hidden_selector.py
|
from PyObjCTools.TestSupport import *
import objc
class hidden_method (object):
def __pyobjc_class_setup__(self, name, class_dict, instance_methods, class_methods):
@objc.selector
def method(self):
return 42
method.isHidden = True
def clsmethod(self):
return 99
clsmethod=objc.selector(clsmethod, isClassMethod=True)
clsmethod.isHidden = True
instance_methods.add(method)
class_methods.add(clsmethod)
class OCTestHidden (objc.lookUpClass('NSObject')):
m = hidden_method()
@objc.selector
def body(self):
return "BODY"
body.isHidden = True
def bodyclass(self):
return "BODYCLASS"
bodyclass=objc.selector(bodyclass, isClassMethod=True)
bodyclass.isHidden=True
@objc.selector
def somebody(self):
return "instance"
somebody.isHidden = True
def boolMethod(self):
return 1
boolMethod = objc.selector(boolMethod, signature=objc._C_NSBOOL + b'@:')
boolMethod.isHidden = True
class OCTestSubHidden (OCTestHidden):
def body(self):
return "BODY2"
@classmethod
def bodyclass(self):
return "BODYCLASS2"
@classmethod
def somebody(self):
return "class"
def boolMethod(self):
return 0
class TestHiddenSelector (TestCase):
def testHiddenInClassDef(self):
o = OCTestHidden.alloc().init()
self.assertRaises(AttributeError, getattr, o, 'body')
v = o.performSelector_(b'body')
self.assertEqual(v, "BODY")
v = o.pyobjc_instanceMethods.body()
self.assertEqual(v, "BODY")
self.assertRaises(AttributeError, getattr, OCTestHidden, 'bodyclass')
v = OCTestHidden.performSelector_(b'bodyclass')
self.assertEqual(v, "BODYCLASS")
v = OCTestHidden.pyobjc_classMethods.bodyclass()
self.assertEqual(v, "BODYCLASS")
o = OCTestHidden.alloc().init()
self.assertRaises(AttributeError, getattr, o, 'boolMethod')
v = o.pyobjc_instanceMethods.boolMethod()
self.assertIs(v, True)
def testHiddenInSetupHook(self):
o = OCTestHidden.alloc().init()
# Instance method
self.assertRaises(AttributeError, getattr, o, 'method')
v = o.performSelector_(b'method')
self.assertEqual(v, 42)
v = o.pyobjc_instanceMethods.method()
self.assertEqual(v, 42)
# Class method
self.assertRaises(AttributeError, getattr, OCTestHidden, 'clsmethod')
v = OCTestHidden.performSelector_(b'clsmethod')
self.assertEqual(v, 99)
v = OCTestHidden.pyobjc_classMethods.clsmethod()
self.assertEqual(v, 99)
def testHiddenAddMethods(self):
@objc.selector
def addedmethod(self):
return "NEW"
addedmethod.isHidden = True
def addedclass(self):
return "NEWCLASS"
addedclass=objc.selector(addedclass, isClassMethod=True)
addedclass.isHidden=True
objc.classAddMethods(OCTestHidden, [addedmethod, addedclass])
o = OCTestHidden.alloc().init()
# Instance method
self.assertRaises(AttributeError, getattr, o, 'addedmethod')
v = o.performSelector_(b'addedmethod')
self.assertEqual(v, "NEW")
v = o.pyobjc_instanceMethods.addedmethod()
self.assertEqual(v, "NEW")
# Class method
self.assertRaises(AttributeError, getattr, OCTestHidden, 'addedclass')
v = OCTestHidden.performSelector_(b'addedclass')
self.assertEqual(v, "NEWCLASS")
v = OCTestHidden.pyobjc_classMethods.addedclass()
self.assertEqual(v, "NEWCLASS")
def testClassVsInstance(self):
o = OCTestHidden.alloc().init()
self.assertRaises(AttributeError, getattr, o, "sombody")
v = o.performSelector_(b'somebody')
self.assertEqual(v, "instance")
v = OCTestSubHidden.somebody()
self.assertEqual(v, "class")
def testHiddenInSubClass(self):
# Instance
o = OCTestSubHidden.alloc().init()
self.assertRaises(AttributeError, getattr, o, "body")
v = o.performSelector_(b'body')
self.assertEqual(v, "BODY2")
@objc.selector
def subclassbody(self):
return "base"
subclassbody.isHidden = True
@objc.selector
def subclassbody2(self):
return "base2"
subclassbody.isHidden = True
objc.classAddMethods(OCTestHidden, [subclassbody, subclassbody2])
@objc.selector
def subclassbody(self):
return "sub"
@objc.selector
def subclassbody2(self):
return "sub2"
objc.classAddMethods(OCTestSubHidden, [subclassbody])
self.assertRaises(AttributeError, getattr, o, "subclassbody")
v = o.performSelector_(b'subclassbody')
self.assertEqual(v, "sub")
OCTestSubHidden.subclassbody2 = subclassbody2
#self.assertRaises(AttributeError, getattr, o, "subclassbody2")
v = o.performSelector_(b'subclassbody2')
self.assertEqual(v, "sub2")
self.assertRaises(AttributeError, getattr, o, 'boolMethod')
v = o.pyobjc_instanceMethods.boolMethod()
self.assertIs(v, False)
# Class
self.assertRaises(AttributeError, getattr, OCTestSubHidden, 'bodyclass')
v = OCTestSubHidden.performSelector_(b'bodyclass')
self.assertEqual(v, "BODYCLASS2")
if __name__ == "__main__":
main()
|
8155739a449852c7d12c44fe8b15eaca06584338
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/appearance/kim2009.py
|
973060fe2acb2588e6ee665b56aed8bc7f1ad743
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 18,783
|
py
|
kim2009.py
|
"""
Kim, Weyrich and Kautz (2009) Colour Appearance Model
=====================================================
Defines the *Kim, Weyrich and Kautz (2009)* colour appearance model objects:
- :class:`colour.appearance.InductionFactors_Kim2009`
- :attr:`colour.VIEWING_CONDITIONS_KIM2009`
- :class:`colour.appearance.MediaParameters_Kim2009`
- :attr:`colour.MEDIA_PARAMETERS_KIM2009`
- :class:`colour.CAM_Specification_Kim2009`
- :func:`colour.XYZ_to_Kim2009`
- :func:`colour.Kim2009_to_XYZ`
References
----------
- :cite:`Kim2009` : Kim, M., Weyrich, T., & Kautz, J. (2009). Modeling Human
Color Perception under Extended Luminance Levels. ACM Transactions on
Graphics, 28(3), 27:1--27:9. doi:10.1145/1531326.1531333
"""
from __future__ import annotations
import numpy as np
from collections import namedtuple
from dataclasses import astuple, dataclass, field
from colour.adaptation import CAT_CAT02
from colour.appearance.ciecam02 import (
VIEWING_CONDITIONS_CIECAM02,
CAT_INVERSE_CAT02,
RGB_to_rgb,
degree_of_adaptation,
full_chromatic_adaptation_forward,
full_chromatic_adaptation_inverse,
hue_quadrature,
rgb_to_RGB,
)
from colour.algebra import vector_dot, spow
from colour.hints import ArrayLike, NDArrayFloat
from colour.utilities import (
CanonicalMapping,
MixinDataclassArithmetic,
as_float,
as_float_array,
from_range_100,
from_range_degrees,
has_only_nan,
ones,
to_domain_100,
to_domain_degrees,
tsplit,
tstack,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"InductionFactors_Kim2009",
"VIEWING_CONDITIONS_KIM2009",
"MediaParameters_Kim2009",
"MEDIA_PARAMETERS_KIM2009",
"CAM_Specification_Kim2009",
"XYZ_to_Kim2009",
"Kim2009_to_XYZ",
]
class InductionFactors_Kim2009(
namedtuple("InductionFactors_Kim2009", ("F", "c", "N_c"))
):
"""
*Kim, Weyrich and Kautz (2009)* colour appearance model induction factors.
Parameters
----------
F
Maximum degree of adaptation :math:`F`.
c
Exponential non-linearity :math:`c`.
N_c
Chromatic induction factor :math:`N_c`.
Notes
-----
- The *Kim, Weyrich and Kautz (2009)* colour appearance model induction
factors are the same as *CIECAM02* colour appearance model.
- The *Kim, Weyrich and Kautz (2009)* colour appearance model separates
the surround modelled by the
:class:`colour.appearance.InductionFactors_Kim2009` class instance from
the media, modeled with the
:class:`colour.appearance.MediaParameters_Kim2009` class instance.
References
----------
:cite:`Kim2009`
"""
VIEWING_CONDITIONS_KIM2009: CanonicalMapping = CanonicalMapping(
VIEWING_CONDITIONS_CIECAM02
)
VIEWING_CONDITIONS_KIM2009.__doc__ = """
Reference *Kim, Weyrich and Kautz (2009)* colour appearance model viewing
conditions.
References
----------
:cite:`Kim2009`
"""
class MediaParameters_Kim2009(namedtuple("MediaParameters_Kim2009", ("E",))):
"""
*Kim, Weyrich and Kautz (2009)* colour appearance model media parameters.
Parameters
----------
E
Lightness prediction modulating parameter :math:`E`.
References
----------
:cite:`Kim2009`
"""
def __new__(cls, E):
"""
Return a new instance of the
:class:`colour.appearance.MediaParameters_Kim2009` class.
"""
return super().__new__(cls, E)
MEDIA_PARAMETERS_KIM2009: CanonicalMapping = CanonicalMapping(
{
"High-luminance LCD Display": MediaParameters_Kim2009(1),
"Transparent Advertising Media": MediaParameters_Kim2009(1.2175),
"CRT Displays": MediaParameters_Kim2009(1.4572),
"Reflective Paper": MediaParameters_Kim2009(1.7526),
}
)
MEDIA_PARAMETERS_KIM2009.__doc__ = """
Reference *Kim, Weyrich and Kautz (2009)* colour appearance model media
parameters.
References
----------
:cite:`Kim2009`
Aliases:
- 'bright_lcd_display': 'High-luminance LCD Display'
- 'advertising_transparencies': 'Transparent Advertising Media'
- 'crt': 'CRT Displays'
- 'paper': 'Reflective Paper'
"""
MEDIA_PARAMETERS_KIM2009["bright_lcd_display"] = MEDIA_PARAMETERS_KIM2009[
"High-luminance LCD Display"
]
MEDIA_PARAMETERS_KIM2009[
"advertising_transparencies"
] = MEDIA_PARAMETERS_KIM2009["Transparent Advertising Media"]
MEDIA_PARAMETERS_KIM2009["crt"] = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
MEDIA_PARAMETERS_KIM2009["paper"] = MEDIA_PARAMETERS_KIM2009[
"Reflective Paper"
]
@dataclass
class CAM_Specification_Kim2009(MixinDataclassArithmetic):
"""
Define the *Kim, Weyrich and Kautz (2009)* colour appearance model
specification.
Parameters
----------
J
Correlate of *Lightness* :math:`J`.
C
Correlate of *chroma* :math:`C`.
h
*Hue* angle :math:`h` in degrees.
s
Correlate of *saturation* :math:`s`.
Q
Correlate of *brightness* :math:`Q`.
M
Correlate of *colourfulness* :math:`M`.
H
*Hue* :math:`h` quadrature :math:`H`.
HC
*Hue* :math:`h` composition :math:`H^C`.
References
----------
:cite:`Kim2009`
"""
J: float | NDArrayFloat | None = field(default_factory=lambda: None)
C: float | NDArrayFloat | None = field(default_factory=lambda: None)
h: float | NDArrayFloat | None = field(default_factory=lambda: None)
s: float | NDArrayFloat | None = field(default_factory=lambda: None)
Q: float | NDArrayFloat | None = field(default_factory=lambda: None)
M: float | NDArrayFloat | None = field(default_factory=lambda: None)
H: float | NDArrayFloat | None = field(default_factory=lambda: None)
HC: float | NDArrayFloat | None = field(default_factory=lambda: None)
def XYZ_to_Kim2009(
XYZ: ArrayLike,
XYZ_w: ArrayLike,
L_A: ArrayLike,
media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"],
n_c: float = 0.57,
discount_illuminant: bool = False,
compute_H: bool = True,
) -> CAM_Specification_Kim2009:
"""
Compute the *Kim, Weyrich and Kautz (2009)* colour appearance model
correlates from given *CIE XYZ* tristimulus values.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values of test sample / stimulus.
XYZ_w
*CIE XYZ* tristimulus values of reference white.
L_A
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
to be 20% of the luminance of a white object in the scene).
media
Media parameters.
surround
Surround viewing conditions induction factors.
discount_illuminant
Truth value indicating if the illuminant should be discounted.
compute_H
Whether to compute *Hue* :math:`h` quadrature :math:`H`. :math:`H` is
rarely used, and expensive to compute.
n_c
Cone response sigmoidal curve modulating factor :math:`n_c`.
Returns
-------
:class:`colour.CAM_Specification_Kim2009`
*Kim, Weyrich and Kautz (2009)* colour appearance model specification.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+---------------------------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+=================================+=======================+===============+
| ``CAM_Specification_Kim2009.J`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.C`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.h`` | [0, 360] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.s`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.Q`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.M`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.H`` | [0, 400] | [0, 1] |
+---------------------------------+-----------------------+---------------+
References
----------
:cite:`Kim2009`
Examples
--------
>>> XYZ = np.array([19.01, 20.00, 21.78])
>>> XYZ_w = np.array([95.05, 100.00, 108.88])
>>> L_A = 318.31
>>> media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
>>> surround = VIEWING_CONDITIONS_KIM2009["Average"]
>>> XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround)
... # doctest: +ELLIPSIS
CAM_Specification_Kim2009(J=28.8619089..., C=0.5592455..., \
h=219.0480667..., s=9.3837797..., Q=52.7138883..., M=0.4641738..., \
H=278.0602824..., HC=None)
"""
XYZ = to_domain_100(XYZ)
XYZ_w = to_domain_100(XYZ_w)
_X_w, Y_w, _Z_w = tsplit(XYZ_w)
L_A = as_float_array(L_A)
# Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
# sharpened *RGB* values.
RGB = vector_dot(CAT_CAT02, XYZ)
RGB_w = vector_dot(CAT_CAT02, XYZ_w)
# Computing degree of adaptation :math:`D`.
D = (
degree_of_adaptation(surround.F, L_A)
if not discount_illuminant
else ones(L_A.shape)
)
# Computing full chromatic adaptation.
XYZ_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)
XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)
# Converting to *Hunt-Pointer-Estevez* colourspace.
LMS = RGB_to_rgb(XYZ_c)
LMS_w = RGB_to_rgb(XYZ_wc)
# Cones absolute response.
LMS_n_c = spow(LMS, n_c)
LMS_w_n_c = spow(LMS_w, n_c)
L_A_n_c = spow(L_A, n_c)
LMS_p = LMS_n_c / (LMS_n_c + L_A_n_c)
LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)
# Achromatic signal :math:`A` and :math:`A_w`.
v_A = np.array([40, 20, 1])
A = np.sum(v_A * LMS_p, axis=-1) / 61
A_w = np.sum(v_A * LMS_wp, axis=-1) / 61
# Perceived *Lightness* :math:`J_p`.
a_j, b_j, o_j, n_j = 0.89, 0.24, 0.65, 3.65
A_A_w = A / A_w
J_p = spow(
(-(A_A_w - b_j) * spow(o_j, n_j)) / (A_A_w - b_j - a_j), 1 / n_j
)
# Computing the media dependent *Lightness* :math:`J`.
J = 100 * (media.E * (J_p - 1) + 1)
# Computing the correlate of *brightness* :math:`Q`.
n_q = 0.1308
Q = J * spow(Y_w, n_q)
# Opponent signals :math:`a` and :math:`b`.
a = (1 / 11) * np.sum(np.array([11, -12, 1]) * LMS_p, axis=-1)
b = (1 / 9) * np.sum(np.array([1, 1, -2]) * LMS_p, axis=-1)
# Computing the correlate of *chroma* :math:`C`.
a_k, n_k = 456.5, 0.62
C = a_k * spow(np.hypot(a, b), n_k)
# Computing the correlate of *colourfulness* :math:`M`.
a_m, b_m = 0.11, 0.61
M = C * (a_m * np.log10(Y_w) + b_m)
# Computing the correlate of *saturation* :math:`s`.
s = 100 * np.sqrt(M / Q)
# Computing the *hue* angle :math:`h`.
h = np.degrees(np.arctan2(b, a)) % 360
# Computing hue :math:`h` quadrature :math:`H`.
H = hue_quadrature(h) if compute_H else np.full(h.shape, np.nan)
return CAM_Specification_Kim2009(
as_float(from_range_100(J)),
as_float(from_range_100(C)),
as_float(from_range_degrees(h)),
as_float(from_range_100(s)),
as_float(from_range_100(Q)),
as_float(from_range_100(M)),
as_float(from_range_degrees(H, 400)),
None,
)
def Kim2009_to_XYZ(
specification: CAM_Specification_Kim2009,
XYZ_w: ArrayLike,
L_A: ArrayLike,
media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"],
surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"],
n_c: float = 0.57,
discount_illuminant: bool = False,
) -> NDArrayFloat:
"""
Convert from *Kim, Weyrich and Kautz (2009)* specification to *CIE XYZ*
tristimulus values.
Parameters
----------
specification
*Kim, Weyrich and Kautz (2009)* colour appearance model specification.
Correlate of *Lightness* :math:`J`, correlate of *chroma* :math:`C` or
correlate of *colourfulness* :math:`M` and *hue* angle :math:`h` in
degrees must be specified, e.g. :math:`JCh` or :math:`JMh`.
XYZ_w
*CIE XYZ* tristimulus values of reference white.
L_A
Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken
to be 20% of the luminance of a white object in the scene).
media
Media parameters.
surround
Surround viewing conditions induction factors.
discount_illuminant
Discount the illuminant.
n_c
Cone response sigmoidal curve modulating factor :math:`n_c`.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values.
Raises
------
ValueError
If neither :math:`C` or :math:`M` correlates have been defined in the
``specification`` argument.
Notes
-----
+---------------------------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+=================================+=======================+===============+
| ``CAM_Specification_Kim2009.J`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.C`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.h`` | [0, 360] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.s`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.Q`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.M`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``CAM_Specification_Kim2009.H`` | [0, 360] | [0, 1] |
+---------------------------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 100] | [0, 1] |
+---------------------------------+-----------------------+---------------+
+-----------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===========+=======================+===============+
| ``XYZ`` | [0, 100] | [0, 1] |
+-----------+-----------------------+---------------+
References
----------
:cite:`Kim2009`
Examples
--------
>>> specification = CAM_Specification_Kim2009(
... J=28.861908975839647, C=0.5592455924373706, h=219.04806677662953
... )
>>> XYZ_w = np.array([95.05, 100.00, 108.88])
>>> L_A = 318.31
>>> media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
>>> surround = VIEWING_CONDITIONS_KIM2009["Average"]
>>> Kim2009_to_XYZ(specification, XYZ_w, L_A, media, surround)
... # doctest: +ELLIPSIS
array([ 19.0099995..., 19.9999999..., 21.7800000...])
"""
J, C, h, _s, _Q, M, _H, _HC = astuple(specification)
J = to_domain_100(J)
C = to_domain_100(C)
h = to_domain_degrees(h)
M = to_domain_100(M)
L_A = as_float_array(L_A)
XYZ_w = to_domain_100(XYZ_w)
_X_w, Y_w, _Z_w = tsplit(XYZ_w)
# Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform
# sharpened *RGB* values.
RGB_w = vector_dot(CAT_CAT02, XYZ_w)
# Computing degree of adaptation :math:`D`.
D = (
degree_of_adaptation(surround.F, L_A)
if not discount_illuminant
else ones(L_A.shape)
)
# Computing full chromatic adaptation.
XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)
# Converting to *Hunt-Pointer-Estevez* colourspace.
LMS_w = RGB_to_rgb(XYZ_wc)
# n_q = 0.1308
# J = Q / spow(Y_w, n_q)
if has_only_nan(C) and not has_only_nan(M):
a_m, b_m = 0.11, 0.61
C = M / (a_m * np.log10(Y_w) + b_m)
elif has_only_nan(C):
raise ValueError(
'Either "C" or "M" correlate must be defined in '
'the "CAM_Specification_Kim2009" argument!'
)
# Cones absolute response.
LMS_w_n_c = spow(LMS_w, n_c)
L_A_n_c = spow(L_A, n_c)
LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c)
# Achromatic signal :math:`A_w`
v_A = np.array([40, 20, 1])
A_w = np.sum(v_A * LMS_wp, axis=-1) / 61
# Perceived *Lightness* :math:`J_p`.
J_p = (J / 100 - 1) / media.E + 1
# Achromatic signal :math:`A`.
a_j, b_j, n_j, o_j = 0.89, 0.24, 3.65, 0.65
J_p_n_j = spow(J_p, n_j)
A = A_w * ((a_j * J_p_n_j) / (J_p_n_j + spow(o_j, n_j)) + b_j)
# Opponent signals :math:`a` and :math:`b`.
a_k, n_k = 456.5, 0.62
C_a_k_n_k = spow(C / a_k, 1 / n_k)
hr = np.radians(h)
a, b = np.cos(hr) * C_a_k_n_k, np.sin(hr) * C_a_k_n_k
# Cones absolute response.
M = np.array(
[
[1.0000, 0.3215, 0.2053],
[1.0000, -0.6351, -0.1860],
[1.0000, -0.1568, -4.4904],
]
)
LMS_p = vector_dot(M, tstack([A, a, b]))
LMS = spow((-spow(L_A, n_c) * LMS_p) / (LMS_p - 1), 1 / n_c)
# Converting to *Hunt-Pointer-Estevez* colourspace.
RGB_c = rgb_to_RGB(LMS)
# Applying inverse full chromatic adaptation.
RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)
XYZ = vector_dot(CAT_INVERSE_CAT02, RGB)
return from_range_100(XYZ)
|
b0c9659fed2fa16bb8d995b2fc05f94976f9ec9c
|
e22fd36933c9114a9df1694e7a6274bf059de2a6
|
/selfdrive/car/gm/values.py
|
d12c21dc23d4b941c415a42fbe68201fd779d51b
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
commaai/openpilot
|
66dfb7f31290bc8f58c9ead95d56697a52b45afb
|
a0b49d54222c52ff0112c402bc0e0d9262e77a66
|
refs/heads/master
| 2023-09-05T21:34:14.076796
| 2023-09-05T21:15:18
| 2023-09-05T21:15:18
| 74,627,617
| 46,071
| 9,878
|
MIT
| 2023-09-14T21:51:23
| 2016-11-24T01:33:30
|
Python
|
UTF-8
|
Python
| false
| false
| 22,353
|
py
|
values.py
|
# ruff: noqa: E501
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Union
from cereal import car
from openpilot.selfdrive.car import dbc_dict
from openpilot.selfdrive.car.docs_definitions import CarFootnote, CarHarness, CarInfo, CarParts, Column
Ecu = car.CarParams.Ecu
class CarControllerParams:
STEER_MAX = 300 # GM limit is 3Nm. Used by carcontroller to generate LKA output
STEER_STEP = 3 # Active control frames per command (~33hz)
INACTIVE_STEER_STEP = 10 # Inactive control frames per command (10hz)
STEER_DELTA_UP = 10 # Delta rates require review due to observed EPS weakness
STEER_DELTA_DOWN = 15
STEER_DRIVER_ALLOWANCE = 65
STEER_DRIVER_MULTIPLIER = 4
STEER_DRIVER_FACTOR = 100
NEAR_STOP_BRAKE_PHASE = 0.5 # m/s
# Heartbeat for dash "Service Adaptive Cruise" and "Service Front Camera"
ADAS_KEEPALIVE_STEP = 100
CAMERA_KEEPALIVE_STEP = 100
# Allow a small margin below -3.5 m/s^2 from ISO 15622:2018 since we
# perform the closed-loop control and might need to apply some more
# braking if we're on a downhill slope.
# Our controller should still keep the 2 second average above
# -3.5 m/s^2 as per planner limits.
ACCEL_MAX = 2. # m/s^2
ACCEL_MIN = -4. # m/s^2
def __init__(self, CP):
# Gas/brake lookups
self.ZERO_GAS = 2048 # Coasting
self.MAX_BRAKE = 400 # ~ -4.0 m/s^2 with regen
if CP.carFingerprint in CAMERA_ACC_CAR:
self.MAX_GAS = 3400
self.MAX_ACC_REGEN = 1514
self.INACTIVE_REGEN = 1554
# Camera ACC vehicles have no regen while enabled.
# Camera transitions to MAX_ACC_REGEN from ZERO_GAS and uses friction brakes instantly
max_regen_acceleration = 0.
else:
self.MAX_GAS = 3072 # Safety limit, not ACC max. Stock ACC >4096 from standstill.
self.MAX_ACC_REGEN = 1404 # Max ACC regen is slightly less than max paddle regen
self.INACTIVE_REGEN = 1404
# ICE has much less engine braking force compared to regen in EVs,
# lower threshold removes some braking deadzone
max_regen_acceleration = -1. if CP.carFingerprint in EV_CAR else -0.1
self.GAS_LOOKUP_BP = [max_regen_acceleration, 0., self.ACCEL_MAX]
self.GAS_LOOKUP_V = [self.MAX_ACC_REGEN, self.ZERO_GAS, self.MAX_GAS]
self.BRAKE_LOOKUP_BP = [self.ACCEL_MIN, max_regen_acceleration]
self.BRAKE_LOOKUP_V = [self.MAX_BRAKE, 0.]
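# ---------------------------------------------------------------------------
# Editor's note (not part of openpilot): the *_LOOKUP_BP / *_LOOKUP_V pairs
# above are breakpoint/value tables for linear interpolation. The car
# controller looks up a desired acceleration in the BP table and emits the
# matching actuator command from the V table, roughly as in this sketch
# (openpilot uses its own interp helper; numpy.interp is shown only for
# illustration, and CP stands for a populated car.CarParams instance):
#
#     import numpy as np
#     params = CarControllerParams(CP)
#     gas_cmd = np.interp(0.5, params.GAS_LOOKUP_BP, params.GAS_LOOKUP_V)
#     brake_cmd = np.interp(-2.0, params.BRAKE_LOOKUP_BP, params.BRAKE_LOOKUP_V)
# ---------------------------------------------------------------------------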
class CAR:
HOLDEN_ASTRA = "HOLDEN ASTRA RS-V BK 2017"
VOLT = "CHEVROLET VOLT PREMIER 2017"
CADILLAC_ATS = "CADILLAC ATS Premium Performance 2018"
MALIBU = "CHEVROLET MALIBU PREMIER 2017"
ACADIA = "GMC ACADIA DENALI 2018"
BUICK_LACROSSE = "BUICK LACROSSE 2017"
BUICK_REGAL = "BUICK REGAL ESSENCE 2018"
ESCALADE = "CADILLAC ESCALADE 2017"
ESCALADE_ESV = "CADILLAC ESCALADE ESV 2016"
BOLT_EUV = "CHEVROLET BOLT EUV 2022"
SILVERADO = "CHEVROLET SILVERADO 1500 2020"
EQUINOX = "CHEVROLET EQUINOX 2019"
TRAILBLAZER = "CHEVROLET TRAILBLAZER 2021"
class Footnote(Enum):
OBD_II = CarFootnote(
'Requires a <a href="https://github.com/commaai/openpilot/wiki/GM#hardware" target="_blank">community built ASCM harness</a>. ' +
'<b><i>NOTE: disconnecting the ASCM disables Automatic Emergency Braking (AEB).</i></b>',
Column.MODEL)
@dataclass
class GMCarInfo(CarInfo):
package: str = "Adaptive Cruise Control (ACC)"
def init_make(self, CP: car.CarParams):
if CP.networkLocation == car.CarParams.NetworkLocation.fwdCamera:
self.car_parts = CarParts.common([CarHarness.gm])
else:
self.car_parts = CarParts.common([CarHarness.obd_ii])
self.footnotes.append(Footnote.OBD_II)
CAR_INFO: Dict[str, Union[GMCarInfo, List[GMCarInfo]]] = {
CAR.HOLDEN_ASTRA: GMCarInfo("Holden Astra 2017"),
CAR.VOLT: GMCarInfo("Chevrolet Volt 2017-18", min_enable_speed=0, video_link="https://youtu.be/QeMCN_4TFfQ"),
CAR.CADILLAC_ATS: GMCarInfo("Cadillac ATS Premium Performance 2018"),
CAR.MALIBU: GMCarInfo("Chevrolet Malibu Premier 2017"),
CAR.ACADIA: GMCarInfo("GMC Acadia 2018", video_link="https://www.youtube.com/watch?v=0ZN6DdsBUZo"),
CAR.BUICK_LACROSSE: GMCarInfo("Buick LaCrosse 2017-19", "Driver Confidence Package 2"),
CAR.BUICK_REGAL: GMCarInfo("Buick Regal Essence 2018"),
CAR.ESCALADE: GMCarInfo("Cadillac Escalade 2017", "Driver Assist Package"),
CAR.ESCALADE_ESV: GMCarInfo("Cadillac Escalade ESV 2016", "Adaptive Cruise Control (ACC) & LKAS"),
CAR.BOLT_EUV: [
GMCarInfo("Chevrolet Bolt EUV 2022-23", "Premier or Premier Redline Trim without Super Cruise Package", video_link="https://youtu.be/xvwzGMUA210"),
GMCarInfo("Chevrolet Bolt EV 2022-23", "2LT Trim with Adaptive Cruise Control Package"),
],
CAR.SILVERADO: [
GMCarInfo("Chevrolet Silverado 1500 2020-21", "Safety Package II"),
GMCarInfo("GMC Sierra 1500 2020-21", "Driver Alert Package II", video_link="https://youtu.be/5HbNoBLzRwE"),
],
CAR.EQUINOX: GMCarInfo("Chevrolet Equinox 2019-22"),
CAR.TRAILBLAZER: GMCarInfo("Chevrolet Trailblazer 2021-22"),
}
class CruiseButtons:
INIT = 0
UNPRESS = 1
RES_ACCEL = 2
DECEL_SET = 3
MAIN = 5
CANCEL = 6
class AccState:
OFF = 0
ACTIVE = 1
FAULTED = 3
STANDSTILL = 4
class CanBus:
POWERTRAIN = 0
OBSTACLE = 1
CAMERA = 2
CHASSIS = 2
SW_GMLAN = 3
LOOPBACK = 128
DROPPED = 192
FINGERPRINTS = {
CAR.HOLDEN_ASTRA: [
# Astra BK MY17, ASCM unplugged
{
190: 8, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 8, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 8, 398: 8, 401: 8, 413: 8, 417: 8, 419: 8, 422: 1, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 8, 455: 7, 456: 8, 458: 5, 479: 8, 481: 7, 485: 8, 489: 8, 497: 8, 499: 3, 500: 8, 501: 8, 508: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 647: 5, 707: 8, 715: 8, 723: 8, 753: 5, 761: 7, 806: 1, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1009: 8, 1011: 6, 1017: 8, 1019: 3, 1020: 8, 1105: 6, 1217: 8, 1221: 5, 1225: 8, 1233: 8, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 8, 1280: 4, 1300: 8, 1328: 4, 1417: 8, 1906: 7, 1907: 7, 1908: 7, 1912: 7, 1919: 7,
}],
CAR.VOLT: [
# Volt Premier w/ ACC 2017
{
170: 8, 171: 8, 189: 7, 190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 288: 5, 289: 8, 298: 8, 304: 1, 308: 4, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 389: 2, 390: 7, 417: 7, 419: 1, 426: 7, 451: 8, 452: 8, 453: 6, 454: 8, 456: 8, 479: 3, 481: 7, 485: 8, 489: 8, 493: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 528: 4, 532: 6, 546: 7, 550: 8, 554: 3, 558: 8, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 566: 5, 567: 3, 568: 1, 573: 1, 577: 8, 647: 3, 707: 8, 711: 6, 715: 8, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 961: 8, 969: 8, 977: 8, 979: 7, 988: 6, 989: 8, 995: 7, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1105: 6, 1187: 4, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1227: 4, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1273: 3, 1275: 3, 1280: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1905: 7, 1906: 7, 1907: 7, 1910: 7, 1912: 7, 1922: 7, 1927: 7, 1928: 7, 2016: 8, 2020: 8, 2024: 8, 2028: 8
},
# Volt Premier w/ ACC 2018
{
170: 8, 171: 8, 189: 7, 190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 288: 5, 298: 8, 304: 1, 308: 4, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 389: 2, 390: 7, 417: 7, 419: 1, 426: 7, 451: 8, 452: 8, 453: 6, 454: 8, 456: 8, 479: 3, 481: 7, 485: 8, 489: 8, 493: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 528: 4, 532: 6, 546: 7, 550: 8, 554: 3, 558: 8, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 566: 5, 567: 3, 568: 1, 573: 1, 577: 8, 578: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 3, 707: 8, 711: 6, 715: 8, 717: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 967: 4, 969: 8, 977: 8, 979: 7, 988: 6, 989: 8, 995: 7, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1187: 4, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1227: 4, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1273: 3, 1275: 3, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1516: 8, 1601: 8, 1618: 8, 1905: 7, 1906: 7, 1907: 7, 1910: 7, 1912: 7, 1922: 7, 1927: 7, 1930: 7, 2016: 8, 2018: 8, 2020: 8, 2024: 8, 2028: 8
},
# Volt Premier 2018 w/ flashed firmware, no radar
{
170: 8, 171: 8, 189: 7, 190: 6, 192: 5, 193: 8, 197: 8, 199: 4, 201: 6, 209: 7, 211: 2, 241: 6, 288: 5, 289: 1, 290: 1, 298: 2, 304: 1, 308: 4, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 368: 8, 381: 2, 384: 8, 386: 5, 388: 8, 389: 2, 390: 7, 417: 7, 419: 1, 426: 7, 451: 8, 452: 8, 453: 6, 454: 8, 456: 8, 458: 8, 479: 3, 481: 7, 485: 8, 489: 5, 493: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 3, 508: 8, 512: 3, 528: 4, 530: 8, 532: 6, 537: 5, 539: 8, 542: 7, 546: 7, 550: 8, 554: 3, 558: 8, 560: 6, 562: 4, 563: 5, 564: 5, 565: 5, 566: 5, 567: 3, 568: 1, 573: 1, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 3, 707: 8, 711: 6, 761: 7, 810: 8, 821: 4, 823: 7, 832: 8, 840: 5, 842: 5, 844: 8, 853: 8, 866: 4, 961: 8, 967: 4, 969: 8, 977: 8, 979: 7, 988: 6, 989: 8, 995: 7, 1001: 5, 1003: 5, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1187: 4, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1227: 4, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1273: 3, 1275: 3, 1280: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1905: 7, 1906: 7, 1907: 7, 1910: 7, 1912: 7, 1922: 7, 1927: 7
}],
CAR.BUICK_LACROSSE: [
# LaCrosse Premium AWD 2017
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 353: 3, 381: 6, 386: 8, 388: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 463: 3, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 503: 1, 508: 8, 510: 8, 528: 5, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 573: 1, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 5, 707: 8, 753: 5, 761: 7, 801: 8, 804: 3, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 872: 1, 882: 8, 890: 1, 892: 2, 893: 1, 894: 1, 961: 8, 967: 4, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1011: 6, 1013: 3, 1017: 8, 1019: 2, 1020: 8, 1022: 1, 1105: 6, 1217: 8, 1221: 5, 1223: 2, 1225: 7, 1233: 8, 1243: 3, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1280: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1609: 8, 1613: 8, 1649: 8, 1792: 8, 1798: 8, 1824: 8, 1825: 8, 1840: 8, 1842: 8, 1858: 8, 1860: 8, 1863: 8, 1872: 8, 1875: 8, 1882: 8, 1888: 8, 1889: 8, 1892: 8, 1904: 7, 1906: 7, 1907: 7, 1912: 7, 1913: 7, 1914: 7, 1916: 7, 1918: 7, 1919: 7, 1937: 8, 1953: 8, 1968: 8, 2001: 8, 2017: 8, 2018: 8, 2020: 8, 2026: 8
}],
CAR.BUICK_REGAL : [
# Regal TourX Essence w/ ACC 2018
{
190: 8, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 8, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 8, 419: 8, 422: 4, 426: 8, 431: 8, 442: 8, 451: 8, 452: 8, 453: 8, 455: 7, 456: 8, 463: 3, 479: 8, 481: 7, 485: 8, 487: 8, 489: 8, 495: 8, 497: 8, 499: 3, 500: 8, 501: 8, 508: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 569: 3, 573: 1, 577: 8, 578: 8, 579: 8, 587: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 3, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 882: 8, 884: 8, 890: 1, 892: 2, 893: 2, 894: 1, 961: 8, 967: 8, 969: 8, 977: 8, 979: 8, 985: 8, 1001: 8, 1005: 6, 1009: 8, 1011: 8, 1013: 3, 1017: 8, 1020: 8, 1024: 8, 1025: 8, 1026: 8, 1027: 8, 1028: 8, 1029: 8, 1030: 8, 1031: 8, 1032: 2, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 8, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1259: 8, 1261: 8, 1263: 8, 1265: 8, 1267: 8, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1601: 8, 1602: 8, 1603: 7, 1611: 8, 1618: 8, 1906: 8, 1907: 7, 1912: 7, 1914: 7, 1916: 7, 1919: 7, 1930: 7, 2016: 8, 2018: 8, 2019: 8, 2024: 8, 2026: 8
}],
CAR.CADILLAC_ATS: [
# Cadillac ATS Coupe Premium Performance 3.6L RWD w/ ACC 2018
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 368: 3, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 401: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 462: 4, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 491: 2, 493: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 528: 5, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 719: 5, 723: 2, 753: 5, 761: 7, 801: 8, 804: 3, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 882: 8, 890: 1, 892: 2, 893: 2, 894: 1, 961: 8, 967: 4, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1011: 6, 1013: 3, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1233: 8, 1241: 3, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1904: 7, 1906: 7, 1907: 7, 1912: 7, 1916: 7, 1917: 7, 1918: 7, 1919: 7, 1920: 7, 1930: 7, 2016: 8, 2024: 8
}],
CAR.MALIBU: [
# Malibu Premier w/ ACC 2017
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1013: 3, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 2, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1919: 7, 1930: 7, 2016: 8, 2024: 8,
}],
CAR.ACADIA: [
# Acadia Denali w/ACC 2018
{
190: 6, 192: 5, 193: 8, 197: 8, 199: 4, 201: 6, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 289: 1, 290: 1, 298: 8, 304: 8, 309: 8, 313: 8, 320: 8, 322: 7, 328: 1, 352: 7, 368: 8, 381: 8, 384: 8, 386: 8, 388: 8, 393: 8, 398: 8, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 454: 8, 455: 7, 458: 8, 460: 4, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 489: 5, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 512: 3, 530: 8, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 567: 5, 568: 2, 573: 1, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 789: 5, 800: 6, 801: 8, 803: 8, 804: 3, 805: 8, 832: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1003: 5, 1005: 6, 1009: 8, 1017: 8, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1225: 8, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1918: 7, 1919: 7, 1920: 7, 1930: 7
},
# Acadia Denali w/ ACC 2018
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 289: 8, 298: 8, 304: 1, 309: 8, 313: 8, 320: 3, 322: 7, 328: 1, 338: 6, 340: 6, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 393: 8, 398: 8, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 454: 8, 455: 7, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1225: 8, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1919: 7, 1920: 7, 1930: 7, 2016: 8, 2024: 8
}],
CAR.ESCALADE: [
{
170: 8, 190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 407: 4, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 454: 8, 455: 7, 460: 5, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 573: 1, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 719: 5, 761: 7, 801: 8, 804: 3, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 967: 4, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 2, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1609: 8, 1613: 8, 1649: 8, 1792: 8, 1798: 8, 1824: 8, 1825: 8, 1840: 8, 1842: 8, 1858: 8, 1860: 8, 1863: 8, 1872: 8, 1875: 8, 1882: 8, 1888: 8, 1889: 8, 1892: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1917: 7, 1918: 7, 1919: 7, 1920: 7, 1930: 7, 1937: 8, 1953: 8, 1968: 8, 2001: 8, 2017: 8, 2018: 8, 2020: 8, 2026: 8
}],
CAR.ESCALADE_ESV: [
{
309: 1, 848: 8, 849: 8, 850: 8, 851: 8, 852: 8, 853: 8, 854: 3, 1056: 6, 1057: 8, 1058: 8, 1059: 8, 1060: 8, 1061: 8, 1062: 8, 1063: 8, 1064: 8, 1065: 8, 1066: 8, 1067: 8, 1068: 8, 1120: 8, 1121: 8, 1122: 8, 1123: 8, 1124: 8, 1125: 8, 1126: 8, 1127: 8, 1128: 8, 1129: 8, 1130: 8, 1131: 8, 1132: 8, 1133: 8, 1134: 8, 1135: 8, 1136: 8, 1137: 8, 1138: 8, 1139: 8, 1140: 8, 1141: 8, 1142: 8, 1143: 8, 1146: 8, 1147: 8, 1148: 8, 1149: 8, 1150: 8, 1151: 8, 1216: 8, 1217: 8, 1218: 8, 1219: 8, 1220: 8, 1221: 8, 1222: 8, 1223: 8, 1224: 8, 1225: 8, 1226: 8, 1232: 8, 1233: 8, 1234: 8, 1235: 8, 1236: 8, 1237: 8, 1238: 8, 1239: 8, 1240: 8, 1241: 8, 1242: 8, 1787: 8, 1788: 8
}],
CAR.BOLT_EUV: [
{
189: 7, 190: 7, 193: 8, 197: 8, 201: 8, 209: 7, 211: 3, 241: 6, 257: 8, 288: 5, 289: 8, 298: 8, 304: 3, 309: 8, 311: 8, 313: 8, 320: 4, 322: 7, 328: 1, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 451: 8, 452: 8, 453: 6, 458: 5, 463: 3, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 500: 6, 501: 8, 528: 5, 532: 6, 560: 8, 562: 8, 563: 5, 565: 5, 566: 8, 587: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 789: 5, 800: 6, 810: 8, 840: 5, 842: 5, 844: 8, 848: 4, 869: 4, 880: 6, 977: 8, 1001: 8, 1017: 8, 1020: 8, 1217: 8, 1221: 5, 1233: 8, 1249: 8, 1265: 8, 1280: 4, 1296: 4, 1300: 8, 1611: 8, 1930: 7
}],
CAR.SILVERADO: [
{
190: 6, 193: 8, 197: 8, 201: 8, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 257: 8, 288: 5, 289: 8, 298: 8, 304: 3, 309: 8, 311: 8, 313: 8, 320: 4, 322: 7, 328: 1, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 413: 8, 451: 8, 452: 8, 453: 6, 455: 7, 460: 5, 463: 3, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 500: 6, 501: 8, 528: 5, 532: 6, 534: 2, 560: 8, 562: 8, 563: 5, 565: 5, 587: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 707: 8, 715: 8, 717: 5, 761: 7, 789: 5, 800: 6, 801: 8, 810: 8, 840: 5, 842: 5, 844: 8, 848: 4, 869: 4, 880: 6, 977: 8, 1001: 8, 1011: 6, 1017: 8, 1020: 8, 1033: 7, 1034: 7, 1217: 8, 1221: 5, 1233: 8, 1249: 8, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1611: 8, 1930: 7
}],
CAR.EQUINOX: [
{
190: 6, 193: 8, 197: 8, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 257: 8, 288: 5, 289: 8, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 413: 8, 451: 8, 452: 8, 453: 6, 455: 7, 463: 3, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 500: 6, 501: 8, 510: 8, 528: 5, 532: 6, 560: 8, 562: 8, 563: 5, 565: 5, 587: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 789: 5, 800: 6, 810: 8, 840: 5, 842: 5, 844: 8, 869: 4, 880: 6, 977: 8, 1001: 8, 1011: 6, 1017: 8, 1020: 8, 1033: 7, 1034: 7, 1217: 8, 1221: 5, 1233: 8, 1249: 8, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1611: 8, 1930: 7
}],
# Trailblazer also matches as a Silverado, so comment out to avoid conflicts.
# TODO: split with FW versions
# CAR.TRAILBLAZER: [
# {
# 190: 6, 193: 8, 197: 8, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 289: 8, 298: 8, 304: 3, 309: 8, 311: 8, 313: 8, 320: 4, 328: 1, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 413: 8, 451: 8, 452: 8, 453: 6, 455: 7, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 500: 6, 501: 8, 532: 6, 560: 8, 562: 8, 563: 5, 565: 5, 587: 8, 707: 8, 715: 8, 717: 5, 761: 7, 789: 5, 800: 6, 810: 8, 840: 5, 842: 5, 844: 8, 869: 4, 880: 6, 977: 8, 1001: 8, 1011: 6, 1017: 8, 1020: 8, 1217: 8, 1221: 5, 1233: 8, 1249: 8, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1609: 8, 1611: 8, 1613: 8, 1649: 8, 1792: 8, 1798: 8, 1824: 8, 1825: 8, 1840: 8, 1842: 8, 1858: 8, 1860: 8, 1863: 8, 1872: 8, 1875: 8, 1882: 8, 1888: 8, 1889: 8, 1892: 8, 1930: 7, 1937: 8, 1953: 8, 1968: 8, 2001: 8, 2017: 8, 2018: 8, 2020: 8
# }],
}
GM_RX_OFFSET = 0x400
DBC: Dict[str, Dict[str, str]] = defaultdict(lambda: dbc_dict('gm_global_a_powertrain_generated', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'))
EV_CAR = {CAR.VOLT, CAR.BOLT_EUV}
# We're integrated at the camera with VOACC on these cars (instead of ASCM w/ OBD-II harness)
CAMERA_ACC_CAR = {CAR.BOLT_EUV, CAR.SILVERADO, CAR.EQUINOX, CAR.TRAILBLAZER}
STEER_THRESHOLD = 1.0
|
a05fd6a97e3a82736dc5feca0ff38ce91a5b5a71
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/tests/opentrons/protocol_engine/commands/test_hash_command_params.py
|
098ce53c3217e32e84a9f49a89a4fba36281e9e4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
test_hash_command_params.py
|
"""Tests for hash_command_params."""
from opentrons.protocol_engine import CommandIntent
from opentrons.protocol_engine import commands
from opentrons.protocol_engine.commands.hash_command_params import hash_command_params
def test_equivalent_commands() -> None:
"""Equivalent commands should have the same hash."""
a = commands.BlowOutInPlaceCreate(
params=commands.BlowOutInPlaceParams(
pipetteId="abc123",
flowRate=123,
)
)
b = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
c = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
assert hash_command_params(b, None) == hash_command_params(c, None)
a_hash = hash_command_params(a, None)
assert hash_command_params(b, a_hash) == hash_command_params(c, a_hash)
def test_nonequivalent_commands() -> None:
"""Nonequivalent commands should have different hashes."""
a = commands.BlowOutInPlaceCreate(
params=commands.BlowOutInPlaceParams(
pipetteId="abc123",
flowRate=123,
)
)
b = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
assert hash_command_params(a, None) != hash_command_params(b, None)
def test_repeated_commands() -> None:
"""Repeated commands should hash differently, even though they're equivalent in isolation."""
a = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
b = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123)
)
a_hash = hash_command_params(a, None)
b_hash = hash_command_params(b, a_hash)
assert a_hash != b_hash
def test_setup_command() -> None:
"""Setup commands should always hash to None."""
setup_command = commands.WaitForDurationCreate(
params=commands.WaitForDurationParams(seconds=123),
intent=CommandIntent.SETUP,
)
assert hash_command_params(setup_command, None) is None
|
59d41794ced7e9b28f27f5be879f6e606d895c0d
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/alembic/versions/2020-04-27_40c86deefd01_drop_ignoreuntiltime_constraing_on_raw_.py
|
a440767406bfc75f66b2480fe6a9c65894f78f3f
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
2020-04-27_40c86deefd01_drop_ignoreuntiltime_constraing_on_raw_.py
|
"""Drop ignoreuntiltime constraing on raw archive table
Revision ID: 40c86deefd01
Revises: d5dba845418b
Create Date: 2020-04-27 12:03:32.802407
"""
# revision identifiers, used by Alembic.
revision = '40c86deefd01'
down_revision = 'd5dba845418b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('raw_web_pages', 'ignoreuntiltime',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('raw_web_pages', 'ignoreuntiltime',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
# ### end Alembic commands ###
|
6a4d1ab99bcb3cd203dba522fca93dc4c33f879f
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/python/paddle/jit/layer.py
|
e3204ab65df022ce1527366e8d22b40244746153
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
layer.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core
from paddle.fluid.core import Load
class Layer:
def __init__(self):
self.cpp_layer = None
# {name: Function}
self.functions = {}
def load(self, load_path, place):
self.cpp_layer = Load(load_path, place)
for name in self.cpp_layer.function_names():
function = self.cpp_layer.function(name)
info = self.cpp_layer.function_info(name)
self.functions[name] = Function(function, info)
setattr(self, name, self.functions[name])
class Function:
def __init__(self, function, info):
self.function = function
self.info = FunctionInfo(info)
def __call__(self, *args):
return core.eager.jit_function_call(self.function, args)
class FunctionInfo:
def __init__(self, info):
self.info = info
def name(self):
return self.info.name()
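A brief usage sketch for the Layer wrapper defined above (illustrative only, not part of the original module; the save path, the place, and the exported function name 'forward' are assumptions about how the model was exported with paddle.jit.save):
import paddle
from paddle.jit.layer import Layer

place = paddle.CPUPlace()  # or paddle.CUDAPlace(0) when running on GPU
layer = Layer()
layer.load("./inference_model/net", place)  # assumed path to a jit-saved model
# Every function exported by the saved model becomes an attribute on `layer`;
# calling it dispatches to core.eager.jit_function_call as shown above.
output = layer.forward(paddle.rand([1, 3, 224, 224]))  # 'forward' is an assumed name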
|
f01fc153fac1315524427bf8420c878056998b1f
|
da99c3d2dfbce3902ec189700daf87278f90d5cd
|
/utils/factory.py
|
cb98bfd9ea6c1c0f7b32b81db3a61acca75950ec
|
[
"MIT"
] |
permissive
|
cfzd/Ultra-Fast-Lane-Detection
|
52c297c1975d0e2e5cf7844bc2ce4f47b8d993e3
|
353df107756b8c03c22c27201e33fc63d84ecfe6
|
refs/heads/master
| 2023-08-11T23:20:22.619180
| 2022-12-14T06:50:09
| 2022-12-14T06:50:09
| 266,319,758
| 1,630
| 488
|
MIT
| 2021-12-03T04:09:33
| 2020-05-23T11:11:34
|
Python
|
UTF-8
|
Python
| false
| false
| 5,179
|
py
|
factory.py
|
from utils.loss import SoftmaxFocalLoss, ParsingRelationLoss, ParsingRelationDis
from utils.metrics import MultiLabelAcc, AccTopk, Metric_mIoU
from utils.dist_utils import DistSummaryWriter
import torch
def get_optimizer(net,cfg):
training_params = filter(lambda p: p.requires_grad, net.parameters())
if cfg.optimizer == 'Adam':
optimizer = torch.optim.Adam(training_params, lr=cfg.learning_rate, weight_decay=cfg.weight_decay)
elif cfg.optimizer == 'SGD':
optimizer = torch.optim.SGD(training_params, lr=cfg.learning_rate, momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
else:
raise NotImplementedError
return optimizer
def get_scheduler(optimizer, cfg, iters_per_epoch):
if cfg.scheduler == 'multi':
scheduler = MultiStepLR(optimizer, cfg.steps, cfg.gamma, iters_per_epoch, cfg.warmup, iters_per_epoch if cfg.warmup_iters is None else cfg.warmup_iters)
elif cfg.scheduler == 'cos':
scheduler = CosineAnnealingLR(optimizer, cfg.epoch * iters_per_epoch, eta_min = 0, warmup = cfg.warmup, warmup_iters = cfg.warmup_iters)
else:
raise NotImplementedError
return scheduler
def get_loss_dict(cfg):
if cfg.use_aux:
loss_dict = {
'name': ['cls_loss', 'relation_loss', 'aux_loss', 'relation_dis'],
'op': [SoftmaxFocalLoss(2), ParsingRelationLoss(), torch.nn.CrossEntropyLoss(), ParsingRelationDis()],
'weight': [1.0, cfg.sim_loss_w, 1.0, cfg.shp_loss_w],
'data_src': [('cls_out', 'cls_label'), ('cls_out',), ('seg_out', 'seg_label'), ('cls_out',)]
}
else:
loss_dict = {
'name': ['cls_loss', 'relation_loss', 'relation_dis'],
'op': [SoftmaxFocalLoss(2), ParsingRelationLoss(), ParsingRelationDis()],
'weight': [1.0, cfg.sim_loss_w, cfg.shp_loss_w],
'data_src': [('cls_out', 'cls_label'), ('cls_out',), ('cls_out',)]
}
return loss_dict
def get_metric_dict(cfg):
if cfg.use_aux:
metric_dict = {
'name': ['top1', 'top2', 'top3', 'iou'],
'op': [MultiLabelAcc(), AccTopk(cfg.griding_num, 2), AccTopk(cfg.griding_num, 3), Metric_mIoU(cfg.num_lanes+1)],
'data_src': [('cls_out', 'cls_label'), ('cls_out', 'cls_label'), ('cls_out', 'cls_label'), ('seg_out', 'seg_label')]
}
else:
metric_dict = {
'name': ['top1', 'top2', 'top3'],
'op': [MultiLabelAcc(), AccTopk(cfg.griding_num, 2), AccTopk(cfg.griding_num, 3)],
'data_src': [('cls_out', 'cls_label'), ('cls_out', 'cls_label'), ('cls_out', 'cls_label')]
}
return metric_dict
class MultiStepLR:
def __init__(self, optimizer, steps, gamma = 0.1, iters_per_epoch = None, warmup = None, warmup_iters = None):
self.warmup = warmup
self.warmup_iters = warmup_iters
self.optimizer = optimizer
self.steps = steps
self.steps.sort()
self.gamma = gamma
self.iters_per_epoch = iters_per_epoch
self.iters = 0
self.base_lr = [group['lr'] for group in optimizer.param_groups]
def step(self, external_iter = None):
self.iters += 1
if external_iter is not None:
self.iters = external_iter
if self.warmup == 'linear' and self.iters < self.warmup_iters:
rate = self.iters / self.warmup_iters
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * rate
return
# multi policy
if self.iters % self.iters_per_epoch == 0:
epoch = int(self.iters / self.iters_per_epoch)
power = -1
for i, st in enumerate(self.steps):
if epoch < st:
power = i
break
if power == -1:
power = len(self.steps)
# print(self.iters, self.iters_per_epoch, self.steps, power)
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * (self.gamma ** power)
import math
class CosineAnnealingLR:
def __init__(self, optimizer, T_max , eta_min = 0, warmup = None, warmup_iters = None):
self.warmup = warmup
self.warmup_iters = warmup_iters
self.optimizer = optimizer
self.T_max = T_max
self.eta_min = eta_min
self.iters = 0
self.base_lr = [group['lr'] for group in optimizer.param_groups]
def step(self, external_iter = None):
self.iters += 1
if external_iter is not None:
self.iters = external_iter
if self.warmup == 'linear' and self.iters < self.warmup_iters:
rate = self.iters / self.warmup_iters
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * rate
return
# cos policy
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = self.eta_min + (lr - self.eta_min) * (1 + math.cos(math.pi * self.iters / self.T_max)) / 2
|
37a699106e0ac4dcb4c0eea06173c0f1de15582d
|
56d8f5e69dbd693bce8d5971894b1b5d064a5126
|
/modules/splunk_sdk.py
|
68ffea480b5cdc927d959b5b4eaf49a0dd68f59d
|
[
"Apache-2.0"
] |
permissive
|
splunk/attack_range
|
35ae93aaab81fc6cc60657844aa47899dcbd61bc
|
51c668149c09b3ea38cff6ec94b67eca4545f615
|
refs/heads/develop
| 2023-09-01T05:16:10.712727
| 2023-08-14T16:00:08
| 2023-08-14T16:00:08
| 184,844,805
| 1,767
| 331
|
Apache-2.0
| 2023-08-14T13:55:21
| 2019-05-04T02:46:46
|
Jinja
|
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
splunk_sdk.py
|
import sys
from time import sleep
import splunklib.results as results
import splunklib.client as client
import requests
def export_search(host, s, password, export_mode="raw", out=sys.stdout, username="admin", splunk_rest_port=8089):
"""
Exports events from a search using Splunk REST API to a local file.
This is faster than performing a search/export from Splunk Python SDK.
@param host: splunk server address
@param s: search that matches events
@param password: Splunk server password
@param export_mode: default `raw`. `csv`, `xml`, or `json`
@param out: local file pointer to write the results
@param username: Splunk server username
@param splunk_rest_port: Splunk REST API port
"""
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
r = requests.post("https://%s:%d/servicesNS/admin/search/search/jobs/export" % (host, splunk_rest_port),
auth=(username, password),
data={'output_mode': export_mode,
'search': s,
'max_count': 1000000},
verify=False)
out.write(r.text.encode('utf-8'))
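A minimal calling sketch for export_search (illustrative only; the host, credentials, and search string below are placeholders, not values from this module, and must be adapted to a real deployment):
if __name__ == "__main__":
    # Hypothetical example: export the last hour of events from an assumed index
    # to a local JSON file using the function defined above.
    with open("results.json", "wb") as out_file:
        export_search(
            host="192.168.1.10",                    # assumed Splunk server address
            s="search index=main earliest=-1h",     # assumed search string
            password="changeme",                    # assumed admin password
            export_mode="json",
            out=out_file,
            username="admin",
            splunk_rest_port=8089,
        )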
|
fe2d1865493f12d3ca489bee8f590f7a6946bb4e
|
5e255ad1360c90478393744586663741a9569c21
|
/linebot/v3/shop/__init__.py
|
82a7c767b32fd57d95eff6ed87e57ac36e67b1ba
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
__init__.py
|
# coding: utf-8
# flake8: noqa
"""
Mission Stickers API
This document describes LINE Mission Stickers API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
__version__ = "3.3.0"
# import apis into sdk package
from linebot.v3.shop.api.shop import Shop
from linebot.v3.shop.api.async_shop import AsyncShop
# import ApiClient
from linebot.v3.shop.api_response import ApiResponse
from linebot.v3.shop.api_client import ApiClient
from linebot.v3.shop.async_api_client import AsyncApiClient
from linebot.v3.shop.configuration import Configuration
from linebot.v3.shop.exceptions import OpenApiException
from linebot.v3.shop.exceptions import ApiTypeError
from linebot.v3.shop.exceptions import ApiValueError
from linebot.v3.shop.exceptions import ApiKeyError
from linebot.v3.shop.exceptions import ApiAttributeError
from linebot.v3.shop.exceptions import ApiException
# import models into sdk package
from linebot.v3.shop.models.error_response import ErrorResponse
from linebot.v3.shop.models.mission_sticker_request import MissionStickerRequest
|
4620992176412ae2b695cef32679b0fc3c45679e
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/depot_tools/clang_format_merge_driver.py
|
764d398d6bc83e3648b89fc7db6629f91cabac26
|
[
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
clang_format_merge_driver.py
|
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""clang-format 3-way merge driver.
This is a custom merge driver for git that helps automatically resolves
conflicts caused by clang-format changes. The conflict resolution
strategy is extremely simple: it simply clang-formats the current,
ancestor branch's, and other branch's version of the file and delegates
the remaining work to git merge-file.
See https://git-scm.com/docs/gitattributes ("Defining a custom merge
driver") for more details.
"""
from __future__ import print_function
import subprocess
import sys
import clang_format
def main():
if len(sys.argv) < 5:
print('usage: %s <base> <current> <others> <path in the tree>' %
sys.argv[0])
sys.exit(1)
# pylint: disable=unbalanced-tuple-unpacking
base, current, others, file_name_in_tree = sys.argv[1:5]
# pylint: enable=unbalanced-tuple-unpacking
if file_name_in_tree == '%P':
print(file=sys.stderr)
print('ERROR: clang-format merge driver needs git 2.5+', file=sys.stderr)
if sys.platform == 'darwin':
print('Upgrade to Xcode 7.2+', file=sys.stderr)
print(file=sys.stderr)
return 1
print('Running clang-format 3-way merge driver on ' + file_name_in_tree)
try:
tool = clang_format.FindClangFormatToolInChromiumTree()
for fpath in base, current, others:
# Typically, clang-format is used with the -i option to rewrite files
# in-place. However, merge files live in the repo root, so --style=file
# will always pick up the root .clang-format.
#
# Instead, this tool uses --assume-filename so clang-format will pick up
# the appropriate .clang-format. Unfortunately, --assume-filename only
# works when the input is from stdin, so the file I/O portions are lifted
# up into the script as well.
with open(fpath, 'rb') as input_file:
output = subprocess.check_output(
[tool, '--assume-filename=%s' % file_name_in_tree, '--style=file'],
stdin=input_file)
with open(fpath, 'wb') as output_file:
output_file.write(output)
except clang_format.NotFoundError as e:
print(e)
print('Failed to find clang-format. Falling-back on standard 3-way merge')
return subprocess.call(['git', 'merge-file', '-Lcurrent', '-Lbase', '-Lother',
current, base, others])
if __name__ == '__main__':
sys.exit(main())
|
6e84e27ffa349c56954551b446006836c0a0d68b
|
55c1238c19835a09677328d90b3dc75ba98031cf
|
/tests/test_func_sync.py
|
d60533ed069ae9df0f242a5d3d11669928741efc
|
[
"BSD-2-Clause-Views"
] |
permissive
|
youknowone/ring
|
f13b116cfc2679810eee2b080117c32375835798
|
b5c34df262eb6c33115dc2b1e1bc83faa6bb42d0
|
refs/heads/main
| 2023-06-26T17:21:07.065475
| 2023-03-30T04:58:23
| 2023-03-30T04:58:23
| 65,191,260
| 498
| 56
|
NOASSERTION
| 2023-03-30T04:56:09
| 2016-08-08T09:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 11,132
|
py
|
test_func_sync.py
|
import sys
import time
import shelve
import ring
import pymemcache.client
import memcache
import redis
import diskcache
from ring.func.lru_cache import LruCache
import pytest
from pytest_lazyfixture import lazy_fixture
try:
import contextvars
except ImportError:
contextvars = None
pymemcache_client = pymemcache.client.Client(("127.0.0.1", 11211))
pythonmemcache_client = memcache.Client(["127.0.0.1:11211"])
redis_py_client = redis.StrictRedis()
pymemcache_client_contextvar = (
contextvars.ContextVar(
"pymemcache_client_contextvar",
default=pymemcache_client,
)
if contextvars
else None
)
pythonmemcache_client_contextvar = (
contextvars.ContextVar(
"pythonmemcache_client_contextvar",
default=pythonmemcache_client,
)
if contextvars
else None
)
redis_py_client_contextvar = (
contextvars.ContextVar(
"redis_py_client",
default=redis_py_client,
)
if contextvars
else None
)
try:
import pylibmc
except ImportError:
pylibmc_client = None
else:
pylibmc_client = pylibmc.Client(["127.0.0.1"])
finally:
pylibmc_client_contextvar = (
contextvars.ContextVar(
"pylibmc_client",
default=pylibmc_client,
)
if contextvars
else None
)
class StorageDict(dict):
pass
@pytest.fixture
def storage_dict():
storage = StorageDict()
storage.ring = ring.dict
storage.is_binary = False
storage.has_has = True
storage.has_touch = True
storage.has_expire = True
return storage
@pytest.fixture
def storage_shelve(request):
storage = shelve.open("/tmp/ring-test/shelve{}".format(sys.version_info[0]))
storage.ring = ring.shelve
storage.is_binary = False
storage.has_has = True
storage.has_touch = False
storage.has_expire = False
request.addfinalizer(storage.close)
return storage
@pytest.fixture
def storage_lru():
storage = LruCache(128)
storage.ring = ring.lru
storage.is_binary = False
storage.has_has = True
storage.has_touch = True
storage.has_expire = False
return storage
@pytest.fixture(scope="session", params=[diskcache.Cache("/tmp/ring-test/diskcache")])
def storage_diskcache(request):
client = request.param
client.ring = ring.disk
client.is_binary = False
client.has_has = False
client.has_touch = False
client.has_expire = True
return client
@pytest.fixture(
scope="session",
ids=[
"python-memcached",
"pymemcache",
"pylibmc",
"pythonmemcache_client_contextvar",
"pymemcache_client_contextvar",
"pylibmc_client_contextvar",
],
params=[
# client, binary, has_touch
(pythonmemcache_client, False, sys.version_info[0] == 2),
(pymemcache_client, True, True),
(pylibmc_client, True, None), # actually has_touch but not in travis
(pythonmemcache_client_contextvar, False, sys.version_info[0] == 2),
(pymemcache_client_contextvar, True, True),
(pylibmc_client_contextvar, True, None),
],
)
def memcache_client(request):
client, is_binary, has_touch = request.param
if contextvars:
if isinstance(client, contextvars.ContextVar):
client = client.get()
if not client:
pytest.skip()
client.is_binary = is_binary
client.has_has = False
client.has_touch = has_touch
client.has_expire = True
client.ring = ring.memcache
return client
@pytest.fixture(scope="session", params=[redis_py_client, redis_py_client_contextvar])
def redis_client(request):
client = request.param
if contextvars:
if isinstance(client, contextvars.ContextVar):
client = client.get()
if not client:
pytest.skip()
client.ring = ring.redis
client.is_binary = True
client.has_has = True
client.has_touch = True
client.has_expire = True
return client
@pytest.fixture(
params=[
lazy_fixture("storage_dict"),
lazy_fixture("storage_shelve"),
lazy_fixture("storage_lru"),
lazy_fixture("memcache_client"),
lazy_fixture("redis_client"),
lazy_fixture("storage_diskcache"),
]
)
def storage(request):
return request.param
@pytest.fixture(
params=["function", "method1", "method2", "class1", "class2", "static1", "static2"]
)
def function(request, storage):
def resultify(r):
if storage.is_binary:
r = str(r).encode("utf-8")
return r
options = {"wire_slots": ("base",)}
if storage.has_expire:
options["expire"] = 10
if request.param == "function":
base = [0]
@storage.ring(storage, **options)
def f(a, b):
return resultify(base[0] + a * 100 + b)
f.base = base
return f
else:
class A(object):
base = [0]
def __ring_key__(self):
return "a"
@storage.ring(storage, **options)
def method(self, a, b):
return resultify(self.base[0] + a * 100 + b)
@storage.ring(storage, **options)
@classmethod
def cmethod(cls, a, b):
return resultify(cls.base[0] + a * 200 + b)
@storage.ring(storage, **options)
@staticmethod
def smethod(a, b):
return resultify(A.base[0] + a * 200 + b)
obj1 = A()
obj2 = A()
f = {
"method1": obj1.method,
"method2": obj2.method,
"class1": obj1.cmethod,
"class2": A.cmethod,
"static1": obj1.smethod,
"static2": A.smethod,
}[request.param]
f.base = A.base
return f
def test_common(function, storage):
# `function` is a callable with parameter `a` and `b`
# test function is correct
assert function.storage.backend is storage
assert function.key(a=0, b=0) # f takes a, b
assert function.base[0] is not None # f has attr base for test
assert function.execute(a=1, b=2) != function.execute(a=1, b=3) # f is not singular
assert function.execute(a=2, b=2) != function.execute(a=1, b=2) # f is not singular
r = function.execute(0, 0)
function.base[0] += 1
assert r != function.execute(0, 0) # base has side effect
function.delete(1, 2) # delete sample cache
# test: parametrized key build
assert function.key(1, 2) == function.key(1, b=2) == function.key(a=1, b=2)
assert function.key(1, 2) != function.key(1, 3)
# set base
function.base[0] = 10000
# test: 'get' 'execute' 'delete' 'get_or_update'
assert None is function.get(1, 2) # not cached yet
r1 = function.execute(1, 2) # run without cache
assert r1 == function(1, 2) # create and return cache
assert function.get(1, 2) == function(a=1, b=2) # cached now
function.delete(b=2, a=1) # delete cache
assert function.get(1, 2) is None # of course get fails
assert r1 == function.get_or_update(1, 2) # this is equivalent to call the func
# reset base
function.base[0] = 20000
# test: actually cached or not
r2 = function.execute(1, 2)
assert r1 != r2 # base has side effect
assert r1 == function(1, 2) # still cached
assert r2 != function(1, 2)
# test: 'update'
assert r2 == function.update(1, 2) # immediate update
if storage.has_has:
assert function.has(1, 2) is True
assert function.has(5, 9) is False
if storage.has_touch:
function.touch(1, 2) # just a running test
function.touch(0, 0) # illegal touch
elif storage.has_touch is not None: # None means unknown
with pytest.raises((AttributeError, NotImplementedError)):
function.touch(1, 2)
function.set(b"RANDOMVALUE", 1, 2)
assert function.get(1, 2) == b"RANDOMVALUE"
function.delete(1, 2) # finallize
def test_func_dict():
cache = {}
base = [0]
@ring.dict(cache, key_prefix="", expire=10)
def f(a, b):
return base[0] + a * 100 + b
assert f.key(1, 2) == ":1:2" # dict doesn't have prefix by default
base[0] = 10000
assert False is f.has(1, 2)
assert 10102 == f(1, b=2)
assert True is f.has(1, 2)
assert cache[f.key(1, 2)][1] == 10102
assert 10103 == f(1, b=3)
assert cache[f.key(1, 3)][1] == 10103
base[0] = 20000
assert 10102 == f(1, b=2)
assert 10103 == f(1, b=3)
assert 20204 == f(2, b=4)
cache.clear()
assert 20102 == f(1, b=2)
assert 20103 == f(1, b=3)
assert 20204 == f(2, b=4)
base[0] = 30000
assert 30102 == f.update(1, b=2)
f.touch(1, b=2)
f._rope.storage.now = lambda: time.time() + 100 # expirable duration
assert f.get(1, b=2) is None
def test_func_dict_without_expiration():
@ring.dict({})
def f():
return 0
assert f.get() is None
assert f() == 0
with pytest.raises(AttributeError):
f.touch()
def test_func_dict_expire():
cache = {}
@ring.dict(cache, expire=1)
def f(a, b):
return a * 100 + b
assert f.get(1, 2) is None
assert f(1, 2) == 102
assert f.update(1, 2) == 102
f.delete(1, 2)
assert f.get(1, 2) is None
def test_lru(storage_lru):
@ring.lru(maxsize=2)
def f(a, b):
return a * 100 + b
assert 102 == f(1, 2)
assert 205 == f(2, 5)
assert 102 == f.get(1, 2)
assert 205 == f.get(2, 5)
assert 503 == f(5, 3)
assert None is f.get(1, 2)
def test_diskcache(storage_diskcache):
base = [0]
@ring.disk(storage_diskcache, "ring-test")
def f(a, b):
r = base[0] + a * 100 + b
sr = str(r)
if storage_diskcache.is_binary:
sr = sr.encode("utf-8")
return sr
f.delete(8, 6)
assert f.key(8, 6) == "ring-test:8:6"
base[0] = 10000
assert None is f.get(8, b=6)
assert 10806 == int(f(8, b=6))
assert 10806 == int(storage_diskcache.get(f.key(8, 6)))
with pytest.raises(AttributeError):
f.touch(0, 0)
def test_common_value(storage):
options = {"expire": 10}
if not storage.has_expire:
options = {}
base = [b"a"]
@storage.ring(storage, key_prefix=str(storage), **options)
def ff():
base[0] += b"b"
return base[0]
ff.delete()
b0 = base[0]
# set
v1 = ff()
b1 = base[0]
# get
v2 = ff()
b2 = base[0]
assert b0 != b1
assert v1 == b1
assert v2 == b1
assert b1 == b2
# py3 test in asyncio
@storage.ring(storage, key_prefix=str(storage), **options)
def complicated(a, *args, **kw):
return b"42"
# set
v1 = complicated(0, 1, 2, 3, b=4, c=5, d=6)
v2 = complicated.get(0, 1, 2, 3, b=4, c=5, d=6)
assert v1 == v2
def test_execute_many(redis_client):
client = redis_client
@ring.memcache(client, coder="json")
def f(a):
return a
r = f.execute_many(
(1,),
(2,),
)
assert r == [1, 2]
with pytest.raises(TypeError):
f.execute_many([1])
|
bbb04ac9c71462a3820444aa1dc18c89325d9a96
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/dictLiteralFormToConstructor2.py
|
489db637998fb4507dd9e9d0a4d8d94b16ecefe6
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
dictLiteralFormToConstructor2.py
|
a = {'a b':<caret> 3, 'b': 5}
|
b815fd79cb028280ac89f4915380d7a24082df79
|
8d585fa3b2419d9b993be2f2652e448cfeedc8b2
|
/config.py
|
33605e2c87dca57e76d5914af90a9567759a740e
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
DataDog/dd-agent
|
bd4ef0edb234293b51d30894a529ce94b37060f8
|
16fa4ec9ae11ca0adfffbd260c5b4899dc73509f
|
refs/heads/master
| 2023-08-16T09:52:21.816487
| 2023-07-11T15:37:34
| 2023-07-11T15:37:34
| 1,210,071
| 1,227
| 991
|
NOASSERTION
| 2023-06-28T12:20:19
| 2010-12-31T03:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 59,951
|
py
|
config.py
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import ConfigParser
from cStringIO import StringIO
import glob
import imp
import inspect
import itertools
import logging
import logging.config
import logging.handlers
from optparse import OptionParser, Values
import os
import platform
import re
from socket import gaierror, gethostbyname
import string
import sys
import traceback
from urlparse import urlparse
from importlib import import_module
# 3p
import simplejson as json
import distro
# project
from util import check_yaml, config_to_yaml
from utils.platform import Platform, get_os
from utils.proxy import get_proxy
from utils.sdk import load_manifest
from utils.service_discovery.config import extract_agent_config
from utils.service_discovery.config_stores import CONFIG_FROM_FILE, TRACE_CONFIG
from utils.service_discovery.sd_backend import get_sd_backend, AUTO_CONFIG_DIR, SD_BACKENDS
from utils.subprocess_output import (
get_subprocess_output,
SubprocessOutputEmptyError,
)
from utils.windows_configuration import get_registry_conf, get_windows_sdk_check
# CONSTANTS
AGENT_VERSION = "5.32.9"
JMX_VERSION = "0.26.8"
DATADOG_CONF = "datadog.conf"
UNIX_CONFIG_PATH = '/etc/dd-agent'
MAC_CONFIG_PATH = '/opt/datadog-agent/etc'
DEFAULT_CHECK_FREQUENCY = 15 # seconds
LOGGING_MAX_BYTES = 10 * 1024 * 1024
SDK_INTEGRATIONS_DIR = 'integrations'
SD_PIPE_NAME = "dd-service_discovery"
SD_PIPE_UNIX_PATH = '/opt/datadog-agent/run'
SD_PIPE_WIN_PATH = "\\\\.\\pipe\\{pipename}"
UNKNOWN_WHEEL_VERSION_MSG = 'Unknown Wheel'
CUSTOM_CHECK_VERSION_MSG = 'custom'
PY3_COMPATIBILITY_ATTR = 'py3_compatible'
PY3_COMPATIBILITY_READY = 'ready'
PY3_COMPATIBILITY_NOT_READY = 'not_ready'
PY3_COMPATIBILITY_UNKNOWN = 'unknown'
log = logging.getLogger(__name__)
OLD_STYLE_PARAMETERS = [
('apache_status_url', "apache"),
('cacti_mysql_server', "cacti"),
('couchdb_server', "couchdb"),
('elasticsearch', "elasticsearch"),
('haproxy_url', "haproxy"),
('hudson_home', "Jenkins"),
('memcache_', "memcached"),
('mongodb_server', "mongodb"),
('mysql_server', "mysql"),
('nginx_status_url', "nginx"),
('postgresql_server', "postgres"),
('redis_urls', "redis"),
('varnishstat', "varnish"),
('WMI', "WMI"),
]
NAGIOS_OLD_CONF_KEYS = [
'nagios_log',
'nagios_perf_cfg'
]
LEGACY_DATADOG_URLS = [
"app.datadoghq.com",
"app.datad0g.com",
]
JMX_SD_CONF_TEMPLATE = '.jmx.{}.yaml'
# These are unlikely to change, but manifests are versioned,
# so keeping these as a list just in case we change or add stuff.
MANIFEST_VALIDATION = {
'max': ['max_agent_version'],
'min': ['min_agent_version']
}
class PathNotFound(Exception):
pass
class ApiKeyNotFound(Exception):
pass
class ApiKeyInvalid(Exception):
pass
def get_parsed_args():
parser = OptionParser()
parser.add_option('-A', '--autorestart', action='store_true', default=False,
dest='autorestart')
parser.add_option('-d', '--dd_url', action='store', default=None,
dest='dd_url')
parser.add_option('-u', '--use-local-forwarder', action='store_true',
default=False, dest='use_forwarder')
parser.add_option('-v', '--verbose', action='store_true', default=False,
dest='verbose',
help='Print out stacktraces for errors in checks')
parser.add_option('-p', '--profile', action='store_true', default=False,
dest='profile', help='Enable Developer Mode')
try:
options, args = parser.parse_args()
except SystemExit:
# Ignore parse errors
options, args = Values({'autorestart': False,
'dd_url': None,
'use_forwarder': False,
'verbose': False,
'profile': False}), []
return options, args
def get_version():
return AGENT_VERSION
def _version_string_to_tuple(version_string):
'''Return a (X, Y, Z) version tuple from an 'X.Y.Z' version string'''
version_list = []
for elem in version_string.split('.'):
try:
elem_int = int(elem)
except ValueError:
log.warning("Unable to parse element '%s' of version string '%s'", elem, version_string)
raise
version_list.append(elem_int)
return tuple(version_list)
# Return url endpoint, here because needs access to version number
def get_url_endpoint(default_url, endpoint_type='app'):
parsed_url = urlparse(default_url)
if parsed_url.netloc not in LEGACY_DATADOG_URLS:
return default_url
subdomain = parsed_url.netloc.split(".")[0]
# Replace https://app.datadoghq.com with https://5-2-0-app.agent.datadoghq.com
return default_url.replace(subdomain,
"{0}-{1}.agent".format(
get_version().replace(".", "-"),
endpoint_type))
def skip_leading_wsp(f):
"Works on a file, returns a file-like object"
return StringIO("\n".join(map(string.strip, f.readlines())))
def _windows_commondata_path():
"""Return the common appdata path, using ctypes
From http://stackoverflow.com/questions/626796/\
how-do-i-find-the-windows-common-application-data-folder-using-python
"""
import ctypes
from ctypes import wintypes, windll
CSIDL_COMMON_APPDATA = 35
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND,
ctypes.c_int,
wintypes.HANDLE,
wintypes.DWORD, wintypes.LPCWSTR]
path_buf = wintypes.create_unicode_buffer(wintypes.MAX_PATH)
_SHGetFolderPath(0, CSIDL_COMMON_APPDATA, 0, 0, path_buf)
return path_buf.value
def _windows_extra_checksd_path():
common_data = _windows_commondata_path()
return os.path.join(common_data, 'Datadog', 'checks.d')
def _windows_checksd_path():
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
return _checksd_path(os.path.normpath(os.path.join(prog_path, '..', 'agent')))
else:
cur_path = os.path.dirname(__file__)
return _checksd_path(cur_path)
def _config_path(directory):
path = os.path.join(directory, DATADOG_CONF)
if os.path.exists(path):
return path
raise PathNotFound(path)
def _confd_path(directory):
path = os.path.join(directory, 'conf.d')
if os.path.exists(path):
return path
raise PathNotFound(path)
def _checksd_path(directory):
path_override = os.environ.get('CHECKSD_OVERRIDE')
if path_override and os.path.exists(path_override):
return path_override
# this is deprecated in testing on versions after SDK (5.12.0)
path = os.path.join(directory, 'checks.d')
if os.path.exists(path):
return path
raise PathNotFound(path)
def _is_affirmative(s):
if s is None:
return False
# int or real bool
if isinstance(s, int):
return bool(s)
# try string cast
return s.lower() in ('yes', 'true', '1')
def get_config_path(cfg_path=None, os_name=None):
# Check if there's an override and if it exists
if cfg_path is not None and os.path.exists(cfg_path):
return cfg_path
# Check if there's a config stored in the current agent directory
try:
path = os.path.realpath(__file__)
path = os.path.dirname(path)
return _config_path(path)
except PathNotFound as e:
pass
# Check for an OS-specific path, continue on not-found exceptions
bad_path = ''
try:
if Platform.is_windows():
common_data = _windows_commondata_path()
return _config_path(os.path.join(common_data, 'Datadog'))
elif Platform.is_mac():
return _config_path(MAC_CONFIG_PATH)
else:
return _config_path(UNIX_CONFIG_PATH)
except PathNotFound as e:
if len(e.args) > 0:
bad_path = e.args[0]
# If all searches fail, exit the agent with an error
sys.stderr.write("Please supply a configuration file at %s or in the directory where "
"the Agent is currently deployed.\n" % bad_path)
sys.exit(3)
def get_default_bind_host():
try:
gethostbyname('localhost')
except gaierror:
log.warning("localhost seems undefined in your hosts file, using 127.0.0.1 instead")
return '127.0.0.1'
return 'localhost'
def get_histogram_aggregates(configstr=None):
if configstr is None:
return None
try:
vals = configstr.split(',')
valid_values = ['min', 'max', 'median', 'avg', 'sum', 'count']
result = []
for val in vals:
val = val.strip()
if val not in valid_values:
log.warning("Ignored histogram aggregate {0}, invalid".format(val))
continue
else:
result.append(val)
except Exception:
log.exception("Error when parsing histogram aggregates, skipping")
return None
return result
def get_histogram_percentiles(configstr=None):
if configstr is None:
return None
result = []
try:
vals = configstr.split(',')
for val in vals:
try:
val = val.strip()
floatval = float(val)
if floatval <= 0 or floatval >= 1:
raise ValueError
if len(val) > 4:
log.warning("Histogram percentiles are rounded to 2 digits: {0} rounded"
.format(floatval))
result.append(float(val[0:4]))
except ValueError:
log.warning("Bad histogram percentile value {0}, must be float in ]0;1[, skipping"
.format(val))
except Exception:
log.exception("Error when parsing histogram percentiles, skipping")
return None
return result
def clean_dd_url(url):
url = url.strip()
if not url.startswith('http'):
url = 'https://' + url
return url[:-1] if url.endswith('/') else url
def remove_empty(string_array):
return filter(lambda x: x, string_array)
def get_config(parse_args=True, cfg_path=None, options=None, can_query_registry=True, allow_invalid_api_key=False):
if parse_args:
options, _ = get_parsed_args()
# General config
agentConfig = {
'check_freq': DEFAULT_CHECK_FREQUENCY,
'collect_orchestrator_tags': True,
'dogstatsd_port': 8125,
'dogstatsd_target': 'http://localhost:17123',
'graphite_listen_port': None,
'hostname': None,
'listen_port': None,
'tags': None,
'use_ec2_instance_id': False, # DEPRECATED
'version': get_version(),
'watchdog': True,
'additional_checksd': '/etc/dd-agent/checks.d/',
'bind_host': get_default_bind_host(),
'statsd_metric_namespace': None,
'utf8_decoding': False,
'apm_enabled': False
}
if Platform.is_mac():
agentConfig['additional_checksd'] = '/opt/datadog-agent/etc/checks.d'
elif Platform.is_windows():
agentConfig['additional_checksd'] = _windows_extra_checksd_path()
# Config handling
try:
# Find the right config file
path = os.path.realpath(__file__)
path = os.path.dirname(path)
config_path = get_config_path(cfg_path, os_name=get_os())
config = ConfigParser.ConfigParser()
config.readfp(skip_leading_wsp(open(config_path)))
# bulk import
for option in config.options('Main'):
agentConfig[option] = config.get('Main', option)
# Store developer mode setting in the agentConfig
if config.has_option('Main', 'developer_mode'):
agentConfig['developer_mode'] = _is_affirmative(config.get('Main', 'developer_mode'))
# Allow an override with the --profile option
if options is not None and options.profile:
agentConfig['developer_mode'] = True
# Core config
# API keys
if not config.has_option('Main', 'api_key') and not allow_invalid_api_key:
log.warning(u"No API key was found. Aborting.")
raise ApiKeyNotFound("No API key was found.")
api_keys = map(lambda el: el.strip(), config.get('Main', 'api_key').split(','))
for k in api_keys:
# Basic sanity check
if len(k) != 32 and not allow_invalid_api_key:
log.warning(u"API key is invalid. Aborting.")
raise ApiKeyInvalid("API Key invalid")
# Endpoints
if not config.has_option('Main', 'dd_url'):
log.warning(u"No dd_url was found. Aborting.")
sys.exit(2)
dd_urls = map(clean_dd_url, config.get('Main', 'dd_url').split(','))
# For collector and dogstatsd
agentConfig['dd_url'] = dd_urls[0]
agentConfig['api_key'] = api_keys[0]
# Forwarder endpoints logic
# endpoints is:
# {
# 'https://app.datadoghq.com': ['api_key_abc', 'api_key_def'],
# 'https://app.example.com': ['api_key_xyz']
# }
endpoints = {}
dd_urls = remove_empty(dd_urls)
api_keys = remove_empty(api_keys)
if len(dd_urls) == 1:
if len(api_keys) > 0:
endpoints[dd_urls[0]] = api_keys
else:
assert len(dd_urls) == len(api_keys), 'Please provide one api_key for each url'
for i, dd_url in enumerate(dd_urls):
endpoints[dd_url] = endpoints.get(dd_url, []) + [api_keys[i]]
agentConfig['endpoints'] = endpoints
# Forwarder or not forwarder
agentConfig['use_forwarder'] = options is not None and options.use_forwarder
if agentConfig['use_forwarder']:
listen_port = 17123
if config.has_option('Main', 'listen_port'):
listen_port = int(config.get('Main', 'listen_port'))
agentConfig['dd_url'] = "http://{}:{}".format(agentConfig['bind_host'], listen_port)
# FIXME: Legacy dd_url command line switch
elif options is not None and options.dd_url is not None:
agentConfig['dd_url'] = options.dd_url
# Forwarder timeout
agentConfig['forwarder_timeout'] = 20
if config.has_option('Main', 'forwarder_timeout'):
agentConfig['forwarder_timeout'] = int(config.get('Main', 'forwarder_timeout'))
# Extra checks.d path
# the linux directory is set by default
if config.has_option('Main', 'additional_checksd'):
agentConfig['additional_checksd'] = config.get('Main', 'additional_checksd')
if config.has_option('Main', 'use_dogstatsd'):
agentConfig['use_dogstatsd'] = config.get('Main', 'use_dogstatsd').lower() in ("yes", "true")
else:
agentConfig['use_dogstatsd'] = True
# Service discovery
if config.has_option('Main', 'service_discovery_backend'):
try:
additional_config = extract_agent_config(config)
agentConfig.update(additional_config)
except:
log.error('Failed to load the agent configuration related to '
'service discovery. It will not be used.')
# Concerns only Windows
if config.has_option('Main', 'use_web_info_page'):
agentConfig['use_web_info_page'] = config.get('Main', 'use_web_info_page').lower() in ("yes", "true")
else:
agentConfig['use_web_info_page'] = True
# local traffic only? Default to no
agentConfig['non_local_traffic'] = False
if config.has_option('Main', 'non_local_traffic'):
agentConfig['non_local_traffic'] = config.get('Main', 'non_local_traffic').lower() in ("yes", "true")
# DEPRECATED
if config.has_option('Main', 'use_ec2_instance_id'):
use_ec2_instance_id = config.get('Main', 'use_ec2_instance_id')
# translate yes into True, the rest into False
agentConfig['use_ec2_instance_id'] = (use_ec2_instance_id.lower() == 'yes')
if config.has_option('Main', 'check_freq'):
try:
agentConfig['check_freq'] = int(config.get('Main', 'check_freq'))
except Exception:
pass
# Custom histogram aggregate/percentile metrics
if config.has_option('Main', 'histogram_aggregates'):
agentConfig['histogram_aggregates'] = get_histogram_aggregates(config.get('Main', 'histogram_aggregates'))
if config.has_option('Main', 'histogram_percentiles'):
agentConfig['histogram_percentiles'] = get_histogram_percentiles(config.get('Main', 'histogram_percentiles'))
# Disable Watchdog (optionally)
if config.has_option('Main', 'watchdog'):
if config.get('Main', 'watchdog').lower() in ('no', 'false'):
agentConfig['watchdog'] = False
# Optional graphite listener
if config.has_option('Main', 'graphite_listen_port'):
agentConfig['graphite_listen_port'] = \
int(config.get('Main', 'graphite_listen_port'))
else:
agentConfig['graphite_listen_port'] = None
# Dogstatsd config
dogstatsd_defaults = {
'dogstatsd_port': 8125,
'dogstatsd_target': 'http://' + agentConfig['bind_host'] + ':17123',
}
for key, value in dogstatsd_defaults.iteritems():
if config.has_option('Main', key):
agentConfig[key] = config.get('Main', key)
else:
agentConfig[key] = value
# Create app:xxx tags based on monitored apps
agentConfig['create_dd_check_tags'] = config.has_option('Main', 'create_dd_check_tags') and \
_is_affirmative(config.get('Main', 'create_dd_check_tags'))
# Forwarding to external statsd server
if config.has_option('Main', 'statsd_forward_host'):
agentConfig['statsd_forward_host'] = config.get('Main', 'statsd_forward_host')
if config.has_option('Main', 'statsd_forward_port'):
agentConfig['statsd_forward_port'] = int(config.get('Main', 'statsd_forward_port'))
# Optional config
# FIXME not the prettiest code ever...
if config.has_option('Main', 'use_mount'):
agentConfig['use_mount'] = _is_affirmative(config.get('Main', 'use_mount'))
if options is not None and options.autorestart:
agentConfig['autorestart'] = True
elif config.has_option('Main', 'autorestart'):
agentConfig['autorestart'] = _is_affirmative(config.get('Main', 'autorestart'))
if config.has_option('Main', 'check_timings'):
agentConfig['check_timings'] = _is_affirmative(config.get('Main', 'check_timings'))
if config.has_option('Main', 'exclude_process_args'):
agentConfig['exclude_process_args'] = _is_affirmative(config.get('Main', 'exclude_process_args'))
try:
filter_device_re = config.get('Main', 'device_blacklist_re')
agentConfig['device_blacklist_re'] = re.compile(filter_device_re)
except ConfigParser.NoOptionError:
pass
# Dogstream config
if config.has_option("Main", "dogstream_log"):
# Older version, single log support
log_path = config.get("Main", "dogstream_log")
if config.has_option("Main", "dogstream_line_parser"):
agentConfig["dogstreams"] = ':'.join([log_path, config.get("Main", "dogstream_line_parser")])
else:
agentConfig["dogstreams"] = log_path
elif config.has_option("Main", "dogstreams"):
agentConfig["dogstreams"] = config.get("Main", "dogstreams")
if config.has_option("Main", "nagios_perf_cfg"):
agentConfig["nagios_perf_cfg"] = config.get("Main", "nagios_perf_cfg")
if config.has_option("Main", "use_curl_http_client"):
agentConfig["use_curl_http_client"] = _is_affirmative(config.get("Main", "use_curl_http_client"))
else:
# Default to False as there are some issues with the curl client and ELB
agentConfig["use_curl_http_client"] = False
if config.has_option("Main", "allow_ipv6"):
agentConfig["allow_ipv6"] = _is_affirmative(config.get("Main", "allow_ipv6"))
else:
agentConfig["allow_ipv6"] = True
if config.has_section('WMI'):
agentConfig['WMI'] = {}
for key, value in config.items('WMI'):
agentConfig['WMI'][key] = value
if config.has_option("Main", "skip_ssl_validation"):
agentConfig["skip_ssl_validation"] = _is_affirmative(config.get("Main", "skip_ssl_validation"))
agentConfig["collect_instance_metadata"] = True
if config.has_option("Main", "collect_instance_metadata"):
agentConfig["collect_instance_metadata"] = _is_affirmative(config.get("Main", "collect_instance_metadata"))
agentConfig["proxy_forbid_method_switch"] = False
if config.has_option("Main", "proxy_forbid_method_switch"):
agentConfig["proxy_forbid_method_switch"] = _is_affirmative(config.get("Main", "proxy_forbid_method_switch"))
agentConfig["collect_ec2_tags"] = False
if config.has_option("Main", "collect_ec2_tags"):
agentConfig["collect_ec2_tags"] = _is_affirmative(config.get("Main", "collect_ec2_tags"))
agentConfig["collect_orchestrator_tags"] = True
if config.has_option("Main", "collect_orchestrator_tags"):
agentConfig["collect_orchestrator_tags"] = _is_affirmative(config.get("Main", "collect_orchestrator_tags"))
agentConfig["utf8_decoding"] = False
if config.has_option("Main", "utf8_decoding"):
agentConfig["utf8_decoding"] = _is_affirmative(config.get("Main", "utf8_decoding"))
agentConfig["gce_updated_hostname"] = False
if config.has_option("Main", "gce_updated_hostname"):
agentConfig["gce_updated_hostname"] = _is_affirmative(config.get("Main", "gce_updated_hostname"))
# APM config
agentConfig["apm_enabled"] = True
if config.has_option("Main", "apm_enabled"):
agentConfig["apm_enabled"] = _is_affirmative(config.get("Main", "apm_enabled"))
agentConfig["process_agent_enabled"] = False
if config.has_option("Main", "process_agent_enabled"):
agentConfig["process_agent_enabled"] = _is_affirmative(config.get("Main", "process_agent_enabled"))
agentConfig["enable_gohai"] = True
if config.has_option("Main", "enable_gohai"):
agentConfig["enable_gohai"] = _is_affirmative(config.get("Main", "enable_gohai"))
agentConfig["openstack_use_uuid"] = False
if config.has_option("Main", "openstack_use_uuid"):
agentConfig["openstack_use_uuid"] = _is_affirmative(config.get("Main", "openstack_use_uuid"))
agentConfig["openstack_use_metadata_tags"] = True
if config.has_option("Main", "openstack_use_metadata_tags"):
agentConfig["openstack_use_metadata_tags"] = _is_affirmative(config.get("Main", "openstack_use_metadata_tags"))
agentConfig["disable_py3_validation"] = False
if config.has_option("Main", "disable_py3_validation"):
agentConfig["disable_py3_validation"] = _is_affirmative(config.get("Main", "disable_py3_validation"))
except ConfigParser.NoSectionError as e:
sys.stderr.write('Config file not found or incorrectly formatted.\n')
sys.exit(2)
except ConfigParser.ParsingError as e:
sys.stderr.write('Config file not found or incorrectly formatted.\n')
sys.exit(2)
except ConfigParser.NoOptionError as e:
sys.stderr.write('There are some items missing from your config file, but nothing fatal [%s]' % e)
# Storing proxy settings in the agentConfig
agentConfig['proxy_settings'] = get_proxy(agentConfig)
if agentConfig.get('ca_certs', None) is None:
agentConfig['ssl_certificate'] = get_ssl_certificate(get_os(), 'datadog-cert.pem')
else:
agentConfig['ssl_certificate'] = agentConfig['ca_certs']
# On Windows, check for api key in registry if default api key
# this code should never be used and is only a failsafe
if Platform.is_windows() and agentConfig['api_key'] == 'APIKEYHERE' and can_query_registry:
registry_conf = get_registry_conf(config)
agentConfig.update(registry_conf)
return agentConfig
def get_system_stats(proc_path=None):
systemStats = {
'machine': platform.machine(),
'platform': sys.platform,
'processor': platform.processor(),
'pythonV': platform.python_version(),
}
platf = sys.platform
try:
if Platform.is_linux(platf):
if not proc_path:
proc_path = "/proc"
proc_cpuinfo = os.path.join(proc_path,'cpuinfo')
output, _, _ = get_subprocess_output(['grep', 'model name', proc_cpuinfo], log)
systemStats['cpuCores'] = len(output.splitlines())
if Platform.is_darwin(platf) or Platform.is_freebsd(platf):
output, _, _ = get_subprocess_output(['sysctl', 'hw.ncpu'], log)
systemStats['cpuCores'] = int(output.split(': ')[1])
except SubprocessOutputEmptyError as e:
log.warning("unable to retrieve number of cpuCores. Failed with error %s", e)
if Platform.is_linux(platf):
name, version, codename = distro.linux_distribution(full_distribution_name=False)
if name == 'amzn':
name = 'amazon'
systemStats['nixV'] = (name, version, codename)
elif Platform.is_darwin(platf):
systemStats['macV'] = platform.mac_ver()
elif Platform.is_freebsd(platf):
version = platform.uname()[2]
systemStats['fbsdV'] = ('freebsd', version, '') # no codename for FreeBSD
elif Platform.is_win32(platf):
systemStats['winV'] = platform.win32_ver()
return systemStats
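# Illustrative get_system_stats() result on a Linux host (actual values are
# machine-dependent; 'nixV' comes from distro.linux_distribution()):
#   {'machine': 'x86_64', 'platform': 'linux2', 'processor': 'x86_64',
#    'pythonV': '2.7.15', 'cpuCores': 4, 'nixV': ('ubuntu', '16.04', 'xenial')}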
def set_win32_cert_path():
"""In order to use tornado.httpclient with the packaged .exe on Windows we
    need to override the default certificate location which is based on the path
to tornado and will give something like "C:\path\to\program.exe\tornado/cert-file".
If pull request #379 is accepted (https://github.com/facebook/tornado/pull/379) we
will be able to override this in a clean way. For now, we have to monkey patch
tornado.httpclient._DEFAULT_CA_CERTS
"""
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
crt_path = os.path.join(prog_path, 'ca-certificates.crt')
else:
cur_path = os.path.dirname(__file__)
crt_path = os.path.join(cur_path, 'packaging', 'datadog-agent', 'win32',
'install_files', 'ca-certificates.crt')
import tornado.simple_httpclient
log.info("Windows certificate path: %s" % crt_path)
tornado.simple_httpclient._DEFAULT_CA_CERTS = crt_path
def set_win32_requests_ca_bundle_path():
"""In order to allow `requests` to validate SSL requests with the packaged .exe on Windows,
we need to override the default certificate location which is based on the location of the
requests or certifi libraries.
We override the path directly in requests.adapters so that the override works even when the
`requests` lib has already been imported
"""
import requests.adapters
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
ca_bundle_path = os.path.join(prog_path, 'cacert.pem')
requests.adapters.DEFAULT_CA_BUNDLE_PATH = ca_bundle_path
log.info("Default CA bundle path of the requests library: {0}"
.format(requests.adapters.DEFAULT_CA_BUNDLE_PATH))
def get_confd_path(osname=None):
try:
cur_path = os.path.dirname(os.path.realpath(__file__))
return _confd_path(cur_path)
except PathNotFound as e:
pass
bad_path = ''
try:
if Platform.is_windows():
common_data = _windows_commondata_path()
return _confd_path(os.path.join(common_data, 'Datadog'))
elif Platform.is_mac():
return _confd_path(MAC_CONFIG_PATH)
else:
return _confd_path(UNIX_CONFIG_PATH)
except PathNotFound as e:
if len(e.args) > 0:
bad_path = e.args[0]
raise PathNotFound(bad_path)
def get_checksd_path(osname=None):
if Platform.is_windows():
return _windows_checksd_path()
# Mac & Linux
else:
# Unix only will look up based on the current directory
# because checks.d will hang with the other python modules
cur_path = os.path.dirname(os.path.realpath(__file__))
return _checksd_path(cur_path)
def get_sdk_integrations_path(osname=None):
if not osname:
osname = get_os()
if os.environ.get('INTEGRATIONS_DIR'):
if os.environ.get('TRAVIS'):
path = os.environ['TRAVIS_BUILD_DIR']
elif os.environ.get('CIRCLECI'):
path = os.path.join(
os.environ['HOME'],
os.environ['CIRCLE_PROJECT_REPONAME']
)
elif os.environ.get('APPVEYOR'):
path = os.environ['APPVEYOR_BUILD_FOLDER']
else:
cur_path = os.environ['INTEGRATIONS_DIR']
path = os.path.join(cur_path, '..') # might need tweaking in the future.
else:
cur_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(cur_path, '..', SDK_INTEGRATIONS_DIR)
if os.path.exists(path):
return path
raise PathNotFound(path)
def get_jmx_pipe_path():
if Platform.is_windows():
pipe_path = SD_PIPE_WIN_PATH
else:
pipe_path = SD_PIPE_UNIX_PATH
if not os.path.isdir(pipe_path):
pipe_path = '/tmp'
return pipe_path
def get_auto_confd_path(osname=None):
"""Used for service discovery which only works for Unix"""
return os.path.join(get_confd_path(osname), AUTO_CONFIG_DIR)
def get_win32service_file(osname, filename):
# This file is needed to log in the event viewer for windows
if osname == 'windows':
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
path = os.path.join(prog_path, filename)
else:
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, filename)
if os.path.exists(path):
log.debug("Certificate file found at %s" % str(path))
return path
else:
cur_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(cur_path, filename)
if os.path.exists(path):
return path
return None
def get_ssl_certificate(osname, filename):
# The SSL certificate is needed by tornado in case of connection through a proxy
# Also used by flare's requests on Windows
if osname == 'windows':
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
path = os.path.join(prog_path, filename)
else:
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, filename)
if os.path.exists(path):
log.debug("Certificate file found at %s" % str(path))
return path
else:
cur_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(cur_path, filename)
if os.path.exists(path):
return path
log.info("Certificate file NOT found at %s" % str(path))
return None
def _get_check_module(check_name, check_path, from_site=False):
error = None
traceback_message = None
if from_site:
try:
check_module = import_module("datadog_checks.{}".format(check_name))
except Exception as e:
error = e
# Log at debug level since this code path is expected if the check is not installed as a wheel
log.debug('Unable to import check module %s from site-packages: %s', check_name, e)
else:
try:
check_module = imp.load_source('checksd_%s' % check_name, check_path)
except Exception as e:
error = e
traceback_message = traceback.format_exc()
# There is a configuration file for that check but the module can't be imported
log.exception('Unable to import check module %s.py from checks.d' % check_name)
if error:
return None, {'error': error, 'traceback': traceback_message}
return check_module, None
def _get_wheel_version(check_name):
check_module, err = _get_check_module(check_name, None, True)
if err:
return err
if hasattr(check_module, "__version__"):
return check_module.__version__
return None
def _get_check_class(check_name, check_path, from_site=False):
'''Return the corresponding check class for a check name if available.'''
from checks import AgentCheck
check_class = None
check_module, err = _get_check_module(check_name, check_path, from_site)
if err:
return err
# We make sure that there is an AgentCheck class defined
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
return check_class
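# Note on the loop above: a direct AgentCheck subclass is remembered but the scan
# continues, so a class deriving from another check class (i.e. the most derived
# one found in the module) wins when both are present.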
def _deprecated_configs(agentConfig):
""" Warn about deprecated configs
"""
deprecated_checks = {}
deprecated_configs_enabled = [v for k, v in OLD_STYLE_PARAMETERS if len([l for l in agentConfig if l.startswith(k)]) > 0]
for deprecated_config in deprecated_configs_enabled:
msg = "Configuring %s in datadog.conf is not supported anymore. Please use conf.d" % deprecated_config
deprecated_checks[deprecated_config] = {'error': msg, 'traceback': None}
log.error(msg)
return deprecated_checks
def _file_configs_paths(osname, agentConfig):
""" Retrieve all the file configs and return their paths
"""
try:
confd_path = get_confd_path(osname)
all_file_configs = glob.glob(os.path.join(confd_path, '*.yaml'))
all_default_configs = glob.glob(os.path.join(confd_path, '*.yaml.default'))
except PathNotFound as e:
log.error("No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" % e.args[0])
sys.exit(3)
if all_default_configs:
current_configs = set([_conf_path_to_check_name(conf) for conf in all_file_configs])
for default_config in all_default_configs:
if not _conf_path_to_check_name(default_config) in current_configs:
all_file_configs.append(default_config)
# Compatibility code for the Nagios checks if it's still configured
# in datadog.conf
# FIXME: 6.x, should be removed
if not any('nagios' in config for config in itertools.chain(*all_file_configs)):
# check if it's configured in datadog.conf the old way
if any([nagios_key in agentConfig for nagios_key in NAGIOS_OLD_CONF_KEYS]):
all_file_configs.append('deprecated/nagios')
return all_file_configs
def _service_disco_configs(agentConfig):
""" Retrieve all the service disco configs and return their conf dicts
"""
if agentConfig.get('service_discovery') and agentConfig.get('service_discovery_backend') in SD_BACKENDS:
try:
log.info("Fetching service discovery check configurations.")
sd_backend = get_sd_backend(agentConfig=agentConfig)
service_disco_configs = sd_backend.get_configs()
except Exception:
log.exception("Loading service discovery configurations failed.")
return {}
else:
service_disco_configs = {}
return service_disco_configs
def _conf_path_to_check_name(conf_path):
f = os.path.splitext(os.path.split(conf_path)[1])
if f[1] and f[1] == ".default":
f = os.path.splitext(f[0])
return f[0]
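# Illustrative mappings performed by _conf_path_to_check_name() (paths are
# examples only):
#   '/etc/dd-agent/conf.d/http_check.yaml'         -> 'http_check'
#   '/etc/dd-agent/conf.d/http_check.yaml.default' -> 'http_check'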
def get_checks_places(osname, agentConfig):
""" Return a list of methods which, when called with a check name, will each return a check path to inspect
"""
try:
checksd_path = get_checksd_path(osname)
except PathNotFound as e:
log.info("no bundled checks.d path (checks provided as wheels): %s", e.args[0])
checksd_path = None
# custom checks
places = [lambda name: (os.path.join(agentConfig['additional_checksd'], '%s.py' % name), None)]
try:
if Platform.is_windows():
places.append(get_windows_sdk_check)
else:
sdk_integrations = get_sdk_integrations_path(osname)
places.append(lambda name: (os.path.join(sdk_integrations, name, 'check.py'),
os.path.join(sdk_integrations, name, 'manifest.json')))
except PathNotFound:
log.debug('No sdk integrations path found')
# wheel integrations
places.append(lambda name: (None, None))
# agent-bundled integrations
if checksd_path:
places.append(lambda name: (os.path.join(checksd_path, '%s.py' % name), None))
return places
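# Resulting lookup order (first successful load wins in load_check_from_places):
#   1. custom checks under agentConfig['additional_checksd']
#   2. SDK integrations (check.py + manifest.json), when an SDK path is found
#   3. wheel-installed integrations (no path: imported from the datadog_checks namespace)
#   4. agent-bundled checks.d, when a bundled checksd path exists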
def _load_file_config(config_path, check_name, agentConfig):
if config_path == 'deprecated/nagios':
log.warning("Configuring Nagios in datadog.conf is deprecated "
"and will be removed in a future version. "
"Please use conf.d")
check_config = {'instances': [dict((key, value) for (key, value) in agentConfig.iteritems() if key in NAGIOS_OLD_CONF_KEYS)]}
return True, check_config, {}
try:
check_config = check_yaml(config_path)
except Exception as e:
log.exception("Unable to parse yaml config in %s" % config_path)
traceback_message = traceback.format_exc()
return False, None, {check_name: {'error': str(e), 'traceback': traceback_message, 'version': 'unknown'}}
return True, check_config, {}
def get_valid_check_class(check_name, check_path, from_site=False):
check_class = _get_check_class(check_name, check_path, from_site)
if not check_class:
log.error('No check class (inheriting from AgentCheck) found in %s.py' % check_name)
return False, None, {}
# this means we are actually looking at a load failure
elif isinstance(check_class, dict):
return False, None, {check_name: check_class}
return True, check_class, {}
def _initialize_check(check_config, check_name, check_class, agentConfig,
manifest_path, version_override=None):
init_config = check_config.get('init_config') or {}
instances = check_config['instances']
try:
try:
check = check_class(check_name, init_config=init_config,
agentConfig=agentConfig, instances=instances)
except TypeError as e:
# Backwards compatibility for checks which don't support the
# instances argument in the constructor.
check = check_class(check_name, init_config=init_config,
agentConfig=agentConfig)
check.instances = instances
if manifest_path:
check.set_manifest_path(manifest_path)
if not version_override:
check.set_check_version(manifest=load_manifest(manifest_path))
else:
check.set_check_version(version=version_override)
except Exception as e:
log.exception('Unable to initialize check %s' % check_name)
traceback_message = traceback.format_exc()
manifest = load_manifest(manifest_path)
if manifest is not None:
check_version = '{core}:{vers}'.format(core=AGENT_VERSION,
vers=manifest.get('version', 'unknown'))
elif version_override:
check_version = version_override
else:
check_version = AGENT_VERSION
return {}, {check_name: {'error': e, 'traceback': traceback_message, 'version': check_version}}
else:
return {check_name: check}, {}
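# _initialize_check() returns ({check_name: check_instance}, {}) on success, or
# ({}, {check_name: {'error': ..., 'traceback': ..., 'version': ...}}) when
# instantiation fails; callers merge these into load_success / load_failure.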
def _update_python_path(check_config):
# Add custom pythonpath(s) if available
if 'pythonpath' in check_config:
pythonpath = check_config['pythonpath']
if not isinstance(pythonpath, list):
pythonpath = [pythonpath]
sys.path.extend(pythonpath)
def validate_sdk_check(manifest_path):
max_validated = min_validated = False
try:
with open(manifest_path, 'r') as fp:
manifest = json.load(fp)
current_version = _version_string_to_tuple(get_version())
for maxfield in MANIFEST_VALIDATION['max']:
max_version = manifest.get(maxfield)
if not max_version:
continue
max_validated = _version_string_to_tuple(max_version) >= current_version
break
for minfield in MANIFEST_VALIDATION['min']:
min_version = manifest.get(minfield)
if not min_version:
continue
min_validated = _version_string_to_tuple(min_version) <= current_version
break
except IOError:
log.debug("Manifest file (%s) not present." % manifest_path)
except json.JSONDecodeError:
log.debug("Manifest file (%s) has badly formatted json." % manifest_path)
except ValueError:
log.debug("Versions in manifest file (%s) can't be validated.", manifest_path)
return (min_validated and max_validated)
def load_check_from_places(check_config, check_name, checks_places, agentConfig):
'''Find a check named check_name in the given checks_places and try to initialize it with the given check_config.
A failure (`load_failure`) can happen when the check class can't be validated or when the check can't be initialized. '''
load_success, load_failure = {}, {}
for check_path_builder in checks_places:
check_path, manifest_path = check_path_builder(check_name)
is_wheel = not check_path and not manifest_path
# The windows SDK function will return None,
# so the loop should also continue if there is no path.
if not (check_path and os.path.exists(check_path)) and not is_wheel:
continue
prev_failures = bool(load_failure)
check_is_valid, check_class, load_failure = get_valid_check_class(check_name, check_path, from_site=is_wheel)
if not check_is_valid:
load_error = load_failure.get(check_name, {}).get('error')
if is_wheel and not prev_failures and isinstance(load_error, ImportError):
load_failure = {}
continue
if manifest_path:
validated = validate_sdk_check(manifest_path)
if not validated:
log.warn("The SDK check (%s) was designed for a different agent core "
"or couldnt be validated - behavior is undefined" % check_name)
version_override = None
if is_wheel:
wheel_version = _get_wheel_version(check_name)
if wheel_version is None or isinstance(wheel_version, dict):
version_override = UNKNOWN_WHEEL_VERSION_MSG
else:
version_override = wheel_version
elif not manifest_path and agentConfig['additional_checksd'] in check_path:
version_override = CUSTOM_CHECK_VERSION_MSG # custom check
load_success, load_failure = _initialize_check(
check_config, check_name, check_class, agentConfig, manifest_path, version_override
)
_update_python_path(check_config)
# Validate custom checks and wheels without a `datadog_checks` namespace
if not agentConfig.get("disable_py3_validation", False):
if version_override in (UNKNOWN_WHEEL_VERSION_MSG, CUSTOM_CHECK_VERSION_MSG):
py3_compatible = PY3_COMPATIBILITY_READY
warnings = []
try:
file_path = os.path.realpath(check_path.decode(sys.getfilesystemencoding()))
output, _, _ = get_subprocess_output(
[sys.executable, "-m", "pylint", "-f", "json", "--py3k", "-d", "W1618", "--persistent", "no", "--exit-zero", file_path], log)
warnings = json.loads(output)
except SubprocessOutputEmptyError as e:
# old versions of pylint return an empty output to indicate there are no warnings
pass
except Exception as e:
py3_compatible = PY3_COMPATIBILITY_UNKNOWN
log.error("error running 'validate' on custom check: %s", e)
if warnings:
py3_compatible = PY3_COMPATIBILITY_NOT_READY
# for now we don't display anything in the status page
# if not py3_compatible:
# load_success[check_name].persistent_warning("check is not compatible with Python3 (see logs for more information)")
setattr(load_success[check_name], PY3_COMPATIBILITY_ATTR, py3_compatible)
if is_wheel:
log.debug('Loaded %s' % check_name)
else:
log.debug('Loaded %s' % check_path)
break # we successfully initialized this check
return load_success, load_failure
def load_check_directory(agentConfig, hostname):
''' Return the initialized checks from checks.d, and a mapping of checks that failed to
initialize. Only checks that have a configuration
file in conf.d will be returned. '''
from checks import AGENT_METRICS_CHECK_NAME
from jmxfetch import JMX_CHECKS
initialized_checks = {}
init_failed_checks = {}
deprecated_checks = {}
agentConfig['checksd_hostname'] = hostname
osname = get_os()
# the TRACE_CONFIG flag is used by the configcheck to trace config object loading and
# where they come from (service discovery, auto config or config file)
if agentConfig.get(TRACE_CONFIG):
configs_and_sources = {
# check_name: (config_source, config)
}
deprecated_checks.update(_deprecated_configs(agentConfig))
checks_places = get_checks_places(osname, agentConfig)
for config_path in _file_configs_paths(osname, agentConfig):
# '/etc/dd-agent/checks.d/my_check.py' -> 'my_check'
check_name = _conf_path_to_check_name(config_path)
conf_is_valid, check_config, invalid_check = _load_file_config(config_path, check_name, agentConfig)
init_failed_checks.update(invalid_check)
if not conf_is_valid:
continue
if agentConfig.get(TRACE_CONFIG):
configs_and_sources[check_name] = (CONFIG_FROM_FILE, check_config)
# load the check
load_success, load_failure = load_check_from_places(check_config, check_name, checks_places, agentConfig)
initialized_checks.update(load_success)
init_failed_checks.update(load_failure)
for check_name, service_disco_check_config in _service_disco_configs(agentConfig).iteritems():
# ignore this config from service disco if the check has been loaded through a file config
if check_name in initialized_checks or \
check_name in init_failed_checks or \
check_name in JMX_CHECKS:
continue
sd_init_config, sd_instances = service_disco_check_config[1]
if agentConfig.get(TRACE_CONFIG):
configs_and_sources[check_name] = (
service_disco_check_config[0],
{'init_config': sd_init_config, 'instances': sd_instances})
check_config = {'init_config': sd_init_config, 'instances': sd_instances}
# load the check
load_success, load_failure = load_check_from_places(check_config, check_name, checks_places, agentConfig)
initialized_checks.update(load_success)
init_failed_checks.update(load_failure)
init_failed_checks.update(deprecated_checks)
log.info('initialized checks.d checks: %s' % [k for k in initialized_checks.keys() if k != AGENT_METRICS_CHECK_NAME])
log.info('initialization failed checks.d checks: %s' % init_failed_checks.keys())
if agentConfig.get(TRACE_CONFIG):
return configs_and_sources
return {'initialized_checks': initialized_checks.values(),
'init_failed_checks': init_failed_checks}
def load_check(agentConfig, hostname, checkname):
"""Same logic as load_check_directory except it loads one specific check"""
from jmxfetch import JMX_CHECKS
agentConfig['checksd_hostname'] = hostname
osname = get_os()
checks_places = get_checks_places(osname, agentConfig)
for config_path in _file_configs_paths(osname, agentConfig):
check_name = _conf_path_to_check_name(config_path)
if check_name == checkname and check_name not in JMX_CHECKS:
conf_is_valid, check_config, invalid_check = _load_file_config(config_path, check_name, agentConfig)
if invalid_check and not conf_is_valid:
return invalid_check
# try to load the check and return the result
load_success, load_failure = load_check_from_places(check_config, check_name, checks_places, agentConfig)
            return load_success.values()[0] if load_success else load_failure
# the check was not found, try with service discovery
for check_name, service_disco_check_config in _service_disco_configs(agentConfig).iteritems():
if check_name == checkname:
sd_init_config, sd_instances = service_disco_check_config[1]
check_config = {'init_config': sd_init_config, 'instances': sd_instances}
# try to load the check and return the result
load_success, load_failure = load_check_from_places(check_config, check_name, checks_places, agentConfig)
return load_success.values()[0] if load_success else load_failure
return None
def generate_jmx_configs(agentConfig, hostname, checknames=None):
"""Similar logic to load_check_directory for JMX checks"""
from jmxfetch import get_jmx_checks
jmx_checks = get_jmx_checks(auto_conf=True)
if not checknames:
checknames = jmx_checks
agentConfig['checksd_hostname'] = hostname
# the check was not found, try with service discovery
generated = {}
for check_name, service_disco_check_config in _service_disco_configs(agentConfig).iteritems():
if check_name in checknames and check_name in jmx_checks:
log.debug('Generating JMX config for: %s' % check_name)
_, (sd_init_config, sd_instances) = service_disco_check_config
check_config = {'init_config': sd_init_config,
'instances': sd_instances}
try:
yaml = config_to_yaml(check_config)
generated["{}_{}".format(check_name, 0)] = yaml
except Exception:
log.exception("Unable to generate YAML config for %s", check_name)
return generated
# logging
def get_log_date_format():
return "%Y-%m-%d %H:%M:%S %Z"
def get_log_format(logger_name):
if get_os() != 'windows':
return '%%(asctime)s | %%(levelname)s | dd.%s | %%(name)s(%%(filename)s:%%(lineno)s) | %%(message)s' % logger_name
return '%(asctime)s | %(levelname)s | %(name)s(%(filename)s:%(lineno)s) | %(message)s'
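# Illustrative record produced with the non-Windows format above for
# logger_name='collector' (timestamp, module and message are examples only):
#   2018-04-13 02:42:05 UTC | INFO | dd.collector | checks.collector(collector.py:123) | Finished run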
def get_syslog_format(logger_name):
return 'dd.%s[%%(process)d]: %%(levelname)s (%%(filename)s:%%(lineno)s): %%(message)s' % logger_name
def get_logging_config(cfg_path=None):
system_os = get_os()
logging_config = {
'log_level': None,
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
}
if system_os == 'windows':
logging_config['collector_log_file'] = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'collector.log')
logging_config['forwarder_log_file'] = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'forwarder.log')
logging_config['dogstatsd_log_file'] = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'dogstatsd.log')
logging_config['jmxfetch_log_file'] = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'jmxfetch.log')
logging_config['service_log_file'] = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'service.log')
logging_config['log_to_syslog'] = False
else:
logging_config['collector_log_file'] = '/var/log/datadog/collector.log'
logging_config['forwarder_log_file'] = '/var/log/datadog/forwarder.log'
logging_config['dogstatsd_log_file'] = '/var/log/datadog/dogstatsd.log'
logging_config['jmxfetch_log_file'] = '/var/log/datadog/jmxfetch.log'
logging_config['go-metro_log_file'] = '/var/log/datadog/go-metro.log'
logging_config['trace-agent_log_file'] = '/var/log/datadog/trace-agent.log'
logging_config['process-agent_log_file'] = '/var/log/datadog/process-agent.log'
logging_config['log_to_syslog'] = True
config_path = get_config_path(cfg_path, os_name=system_os)
config = ConfigParser.ConfigParser()
config.readfp(skip_leading_wsp(open(config_path)))
if config.has_section('handlers') or config.has_section('loggers') or config.has_section('formatters'):
if system_os == 'windows':
config_example_file = "https://github.com/DataDog/dd-agent/blob/master/packaging/datadog-agent/win32/install_files/datadog_win32.conf"
else:
config_example_file = "https://github.com/DataDog/dd-agent/blob/master/datadog.conf.example"
sys.stderr.write("""Python logging config is no longer supported and will be ignored.
To configure logging, update the logging portion of 'datadog.conf' to match:
'%s'.
""" % config_example_file)
for option in logging_config:
if config.has_option('Main', option):
logging_config[option] = config.get('Main', option)
levels = {
'CRITICAL': logging.CRITICAL,
'DEBUG': logging.DEBUG,
'ERROR': logging.ERROR,
'FATAL': logging.FATAL,
'INFO': logging.INFO,
'WARN': logging.WARN,
'WARNING': logging.WARNING,
}
if config.has_option('Main', 'log_level'):
logging_config['log_level'] = levels.get(config.get('Main', 'log_level'))
if config.has_option('Main', 'log_to_syslog'):
logging_config['log_to_syslog'] = config.get('Main', 'log_to_syslog').strip().lower() in ['yes', 'true', 1]
if config.has_option('Main', 'log_to_event_viewer'):
logging_config['log_to_event_viewer'] = config.get('Main', 'log_to_event_viewer').strip().lower() in ['yes', 'true', 1]
if config.has_option('Main', 'syslog_host'):
host = config.get('Main', 'syslog_host').strip()
if host:
logging_config['syslog_host'] = host
else:
logging_config['syslog_host'] = None
if config.has_option('Main', 'syslog_port'):
port = config.get('Main', 'syslog_port').strip()
try:
logging_config['syslog_port'] = int(port)
except Exception:
logging_config['syslog_port'] = None
if config.has_option('Main', 'disable_file_logging'):
logging_config['disable_file_logging'] = config.get('Main', 'disable_file_logging').strip().lower() in ['yes', 'true', 1]
else:
logging_config['disable_file_logging'] = False
return logging_config
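# Illustrative non-Windows result of get_logging_config() with a stock datadog.conf
# (paths and level depend on the actual configuration):
#   {'log_level': None, 'log_to_event_viewer': False, 'log_to_syslog': True,
#    'syslog_host': None, 'syslog_port': None, 'disable_file_logging': False,
#    'collector_log_file': '/var/log/datadog/collector.log', ...}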
def initialize_logging(logger_name):
try:
logging_config = get_logging_config()
logging.basicConfig(
format=get_log_format(logger_name),
level=logging_config['log_level'] or logging.INFO,
)
log_file = logging_config.get('%s_log_file' % logger_name)
if log_file is not None and not logging_config['disable_file_logging']:
# make sure the log directory is writeable
# NOTE: the entire directory needs to be writable so that rotation works
if os.access(os.path.dirname(log_file), os.R_OK | os.W_OK):
file_handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=LOGGING_MAX_BYTES, backupCount=1)
formatter = logging.Formatter(get_log_format(logger_name), get_log_date_format())
file_handler.setFormatter(formatter)
root_log = logging.getLogger()
root_log.addHandler(file_handler)
else:
sys.stderr.write("Log file is unwritable: '%s'\n" % log_file)
# set up syslog
if logging_config['log_to_syslog']:
try:
from logging.handlers import SysLogHandler
if logging_config['syslog_host'] is not None and logging_config['syslog_port'] is not None:
sys_log_addr = (logging_config['syslog_host'], logging_config['syslog_port'])
else:
sys_log_addr = "/dev/log"
# Special-case BSDs
if Platform.is_darwin():
sys_log_addr = "/var/run/syslog"
elif Platform.is_freebsd():
sys_log_addr = "/var/run/log"
handler = SysLogHandler(address=sys_log_addr, facility=SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter(get_syslog_format(logger_name), get_log_date_format()))
root_log = logging.getLogger()
root_log.addHandler(handler)
except Exception as e:
sys.stderr.write("Error setting up syslog: '%s'\n" % str(e))
traceback.print_exc()
# Setting up logging in the event viewer for windows
if get_os() == 'windows' and logging_config['log_to_event_viewer']:
try:
from logging.handlers import NTEventLogHandler
nt_event_handler = NTEventLogHandler(logger_name, get_win32service_file('windows', 'win32service.pyd'), 'Application')
nt_event_handler.setFormatter(logging.Formatter(get_syslog_format(logger_name), get_log_date_format()))
nt_event_handler.setLevel(logging.ERROR)
app_log = logging.getLogger(logger_name)
app_log.addHandler(nt_event_handler)
except Exception as e:
sys.stderr.write("Error setting up Event viewer logging: '%s'\n" % str(e))
traceback.print_exc()
except Exception as e:
sys.stderr.write("Couldn't initialize logging: %s\n" % str(e))
traceback.print_exc()
# if config fails entirely, enable basic stdout logging as a fallback
logging.basicConfig(
format=get_log_format(logger_name),
level=logging.INFO,
)
# re-get the log after logging is initialized
global log
log = logging.getLogger(__name__)
| 2b3b0f3e5e8116c462f852cfa6f444b0d1262459 | 00e3667f9858ae1b556df726770e28e463ebccc4 | /tests/test_api_key_tools.py | 43223f9ce46df5a98520c536c03884144c61e241 | ["MIT"] | permissive | hydrosquall/tiingo-python | 0b8e6fe13357e67831a8b425de1930949076989c | 4585c81ce586fd8eb424d691701169f4ecb67ef4 | refs/heads/master | 2023-08-31T22:35:47.608479 | 2023-04-26T01:17:03 | 2023-04-26T01:17:03 | 101,357,472 | 234 | 61 | MIT | 2023-09-14T06:49:56 | 2017-08-25T02:30:50 | Python | UTF-8 | Python | false | false | 2,226 | py | test_api_key_tools.py |
#
# Test setup based on https://gist.github.com/odyniec/d4ea0959d4e0ba17a980
#
import shutil, tempfile
from os import path
from unittest import TestCase
from tools.api_key_tool import remove_api_key, has_api_key
class TestAPIKeyTools(TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
f = open(path.join(self.test_dir, 'test.yaml'), 'w')
txt = '''interactions:
- request:
body: null
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Authorization: [Token a00000000000000000000a00000000000000000a]
Connection: [keep-alive]
Content-Type: [application/json]
User-Agent: [tiingo-python-client 0.5.0]
method: GET
uri: https://api.tiingo.com/tiingo/daily/GOOGL/prices?format=json&resampleFreq=daily
response:
body: {string: '[{"adjClose":1037.29,"adjHigh":1044.65,"adjLow":1026.05,"adjOpen":1031.47,"adjVolume":1644794,"close":1037.29,"date":"2018-04-12T00:00:00+00:00","divCash":0.0,"high":1044.65,"low":1026.05,"open":1031.47,"splitFactor":1.0,"volume":1644794}]'}
headers:
Allow: ['GET, HEAD, OPTIONS']
Content-Length: ['239']
Content-Type: [application/json]
Date: ['Fri, 13 Apr 2018 02:42:05 GMT']
Server: [nginx/1.10.1]
Vary: ['Accept, Cookie']
X-Frame-Options: [SAMEORIGIN]
status: {code: 200, message: OK}
version: 1
'''
        f.write(txt)
        f.close()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_key_detector(self):
assert has_api_key(path.join(self.test_dir, 'test.yaml')) is True
def test_key_remover(self):
remove_api_key(path.join(self.test_dir, 'test.yaml'))
assert has_api_key(path.join(self.test_dir, 'test.yaml')) is False
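# The suite can be run from the repository root with, e.g. (command assumed,
# adjust to the project's own tooling):
#   python -m pytest tests/test_api_key_tools.py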
|