Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 4–721 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 ⌀ |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 ⌀ |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
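Each record below pairs this metadata with the file's full `content`. A minimal sketch of streaming rows of a dataset with this schema via the Hugging Face `datasets` library; the dataset path `"org/dataset"` is a placeholder, since the dump does not name the dataset.

```python
from datasets import load_dataset

# Hypothetical dataset path -- the dump above does not name the dataset.
ds = load_dataset("org/dataset", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["license_type"])
print(row["content"][:200])  # first 200 characters of the stored source file
```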

repo_name: sktime/sktime | path: /sktime/transformations/series/date.py | filename: date.py | extension: py | language: Python | gha_language: Python
blob_id: 41ba98b008236e1204ee0d56eee05869874d077b | directory_id: b347bc4b850dee4a8a9a171b563a3f31230ce1c7 | content_id: 30c21601eae4a47fd03fdc56d087fc93837df49f
snapshot_id: 5963962df338c5931a2f9f1794d1203c50ddc27e | revision_id: 70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-22T18:20:08.022950 | revision_date: 2023-08-22T15:24:39 | committer_date: 2023-08-22T15:24:39
github_id: 156,401,841 | star_events_count: 1,117 | fork_events_count: 268 | gha_event_created_at: 2023-09-14T20:44:21 | gha_created_at: 2018-11-06T15:08:24
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 14,941

```python
#!/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Extract calendar features from datetimeindex."""
__author__ = ["danbartl", "KishManani", "VyomkeshVyas"]
__all__ = ["DateTimeFeatures"]
import warnings
import numpy as np
import pandas as pd
from sktime.transformations.base import BaseTransformer
_RAW_DUMMIES = [
["child", "parent", "dummy_func", "feature_scope"],
["year", "year", "year", "minimal"],
["quarter", "year", "quarter", "efficient"],
["month", "year", "month", "minimal"],
["week", "year", "week_of_year", "efficient"],
["day", "year", "day_of_year", "efficient"],
["month", "quarter", "month_of_quarter", "comprehensive"],
["week", "quarter", "week_of_quarter", "comprehensive"],
["day", "quarter", "day_of_quarter", "comprehensive"],
["week", "month", "week_of_month", "comprehensive"],
["day", "month", "day", "efficient"],
["day", "week", "weekday", "minimal"],
["hour", "day", "hour", "minimal"],
["hour", "week", "hour_of_week", "comprehensive"],
["minute", "hour", "minute", "minimal"],
["second", "minute", "second", "minimal"],
["millisecond", "second", "millisecond", "minimal"],
["day", "week", "is_weekend", "comprehensive"],
]
class DateTimeFeatures(BaseTransformer):
"""DateTime feature extraction for use in e.g. tree based models.
DateTimeFeatures uses a date index column and generates date features
identifying e.g. year, week of the year, day of the week.
Parameters
----------
ts_freq : str, optional (default="day")
Restricts selection of items to those with a frequency lower than
the frequency of the time series given by ts_freq.
E.g. if monthly data is provided and ts_freq = ("M"), it does not make
sense to derive dummies with higher frequency like weekly dummies.
Has to be provided by the user due to the abundance of different
        frequencies supported by pandas (e.g., pandas allows a frequency of every 4 days).
Interaction with other arguments:
Used to narrow down feature selection for feature_scope, since only
features with a frequency lower than ts_freq are considered. Will be ignored
for the calculation of manually specified features, but when provided will
raise a warning if manual features have a frequency higher than ts_freq.
Only supports the following frequencies:
* Y - year
* Q - quarter
* M - month
* W - week
* D - day
* H - hour
* T - minute
* S - second
* L - millisecond
feature_scope: str, optional (default="minimal")
Specify how many calendar features you want to be returned.
E.g., rarely used features like week of quarter will only be returned
with feature_scope = "comprehensive".
* "minimal"
* "efficient"
* "comprehensive"
    manual_selection : list of str, optional (default=None)
        Manual selection of dummies. Notation is "child_of_parent" for precise
        specification, e.g., "day_of_year". Ignores the specified feature_scope,
        but will still check with a warning against a specified ts_freq.
Examples for possible values:
* None
* day_of_year
* day_of_month
* day_of_quarter
* is_weekend
* year (special case with no lower frequency).
keep_original_columns : boolean, optional, default=False
Keep original columns in X passed to `.transform()`.
Examples
--------
>>> from sktime.transformations.series.date import DateTimeFeatures
>>> from sktime.datasets import load_airline
>>> y = load_airline()
Returns columns `y`, `year`, `month_of_year`
>>> transformer = DateTimeFeatures(ts_freq="M")
>>> y_hat = transformer.fit_transform(y)
Returns columns `y`, `month_of_year`
>>> transformer = DateTimeFeatures(ts_freq="M", manual_selection=["month_of_year"])
>>> y_hat = transformer.fit_transform(y)
    Returns columns `y`, `year`, `quarter_of_year`, `month_of_year`, `month_of_quarter`
    >>> transformer = DateTimeFeatures(ts_freq="M", feature_scope="comprehensive")
    >>> y_hat = transformer.fit_transform(y)
    Returns columns `y`, `year`, `quarter_of_year`, `month_of_year`
    >>> transformer = DateTimeFeatures(ts_freq="M", feature_scope="efficient")
    >>> y_hat = transformer.fit_transform(y)
    Returns columns `y`, `year`, `month_of_year`
>>> transformer = DateTimeFeatures(ts_freq="M", feature_scope="minimal")
>>> y_hat = transformer.fit_transform(y)
"""
_tags = {
"scitype:transform-input": "Series",
# what is the scitype of X: Series, or Panel
"scitype:transform-output": "Series",
# what scitype is returned: Primitives, Series, Panel
"scitype:instancewise": True, # is this an instance-wise transform?
"X_inner_mtype": [
"pd.Series",
"pd.DataFrame",
"pd-multiindex",
"pd_multiindex_hier",
],
# which mtypes do _fit/_predict support for X?
"y_inner_mtype": "None", # which mtypes do _fit/_predict support for y?
"univariate-only": False,
"fit_is_empty": True,
"transform-returns-same-time-index": True,
"enforce_index_type": [pd.DatetimeIndex, pd.PeriodIndex],
"skip-inverse-transform": True,
"python_dependencies": "pandas>=1.2.0", # from DateTimeProperties
}
def __init__(
self,
ts_freq=None,
feature_scope="minimal",
manual_selection=None,
keep_original_columns=False,
):
self.ts_freq = ts_freq
self.feature_scope = feature_scope
self.manual_selection = manual_selection
self.dummies = _prep_dummies(_RAW_DUMMIES)
self.keep_original_columns = keep_original_columns
super().__init__()
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
private _transform containing the core logic, called from transform
Parameters
----------
X : pd.Series or pd.DataFrame
Data to be transformed
y : ignored argument for interface compatibility
Additional data, e.g., labels for transformation
Returns
-------
Xt : pd.Series or pd.DataFrame, same type as X
transformed version of X
"""
_check_ts_freq(self.ts_freq, self.dummies)
_check_feature_scope(self.feature_scope)
_check_manual_selection(self.manual_selection, self.dummies)
if isinstance(X.index, pd.MultiIndex):
time_index = X.index.get_level_values(-1)
else:
time_index = X.index
x_df = pd.DataFrame(index=X.index)
if isinstance(time_index, pd.PeriodIndex):
x_df["date_sequence"] = time_index.to_timestamp()
elif isinstance(time_index, pd.DatetimeIndex):
x_df["date_sequence"] = time_index
else:
raise ValueError("Index type not supported")
if self.manual_selection is None:
if self.ts_freq is not None:
supported = _get_supported_calendar(self.ts_freq, DUMMIES=self.dummies)
supported = supported[supported["feature_scope"] <= self.feature_scope]
calendar_dummies = supported[["dummy_func", "dummy"]]
else:
supported = self.dummies[
self.dummies["feature_scope"] <= self.feature_scope
]
calendar_dummies = supported[["dummy_func", "dummy"]]
else:
if self.ts_freq is not None:
supported = _get_supported_calendar(self.ts_freq, DUMMIES=self.dummies)
                if not all(
                    elem in supported["dummy"].values
                    for elem in self.manual_selection
                ):
                    warnings.warn(
                        "Selected dummy variable has a higher frequency "
                        "than the base ts_freq.",
                        stacklevel=2,
                    )
calendar_dummies = self.dummies.loc[
self.dummies["dummy"].isin(self.manual_selection),
["dummy_func", "dummy"],
]
else:
calendar_dummies = self.dummies.loc[
self.dummies["dummy"].isin(self.manual_selection),
["dummy_func", "dummy"],
]
df = [
_calendar_dummies(x_df, dummy) for dummy in calendar_dummies["dummy_func"]
]
df = pd.concat(df, axis=1)
df.columns = calendar_dummies["dummy"]
if self.keep_original_columns:
Xt = pd.concat([X, df], axis=1, copy=True)
else:
# Remove the name `"dummy"` from column index.
Xt = df.rename_axis(None, axis="columns")
return Xt
@classmethod
def get_test_params(cls):
"""Return testing parameter settings for the estimator.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
Each dict are parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
params1 = {"feature_scope": "minimal"}
params2 = {"feature_scope": "efficient", "keep_original_columns": True}
params3 = {"manual_selection": ["day_of_year", "day_of_month"]}
return [params1, params2, params3]
def _check_manual_selection(manual_selection, DUMMIES):
if (manual_selection is not None) and (
not all(elem in DUMMIES["dummy"].unique() for elem in manual_selection)
):
raise ValueError(
"Invalid manual_selection specified, must be in: "
+ ", ".join(DUMMIES["dummy"].unique())
)
def _check_feature_scope(feature_scope):
if feature_scope not in ["minimal", "efficient", "comprehensive"]:
        raise ValueError(
            "Invalid feature_scope specified, must be one of: "
            "minimal, efficient, comprehensive "
            "(minimal gives the lowest number of variables)"
        )
def _check_ts_freq(ts_freq, DUMMIES):
if (ts_freq is not None) & (ts_freq not in DUMMIES["ts_frequency"].unique()):
raise ValueError(
"Invalid ts_freq specified, must be in: "
+ ", ".join(DUMMIES["ts_frequency"].unique())
)
def _calendar_dummies(x, funcs):
date_sequence = x["date_sequence"].dt
if funcs == "week_of_year":
# The first week of an ISO year is the first (Gregorian)
# calendar week of a year containing a Thursday.
# So it is possible that a week in the new year is still
# indexed starting in last year (week 52 or 53)
cd = date_sequence.isocalendar()["week"]
elif funcs == "week_of_month":
cd = (date_sequence.day - 1) // 7 + 1
elif funcs == "month_of_quarter":
cd = (date_sequence.month.astype(np.int64) + 2) % 3 + 1
elif funcs == "week_of_quarter":
col_names = x.columns
x_columns = col_names.intersection(["year", "quarter", "week"]).to_list()
x_columns.append("date_sequence")
df = x.copy(deep=True)
df = df[x_columns]
if "year" not in x_columns:
df["year"] = df["date_sequence"].dt.year
if "quarter" not in x_columns:
df["quarter"] = df["date_sequence"].dt.quarter
if "week" not in x_columns:
df["week"] = df["date_sequence"].dt.isocalendar()["week"]
df["qdate"] = (
df["date_sequence"] + pd.tseries.offsets.DateOffset(days=1)
) - pd.tseries.offsets.QuarterBegin(startingMonth=1)
df["qweek"] = df["qdate"].dt.isocalendar()["week"]
df.loc[(df["quarter"] == 1) & (df["week"] < 52), "qweek"] = 0
cd = df["week"] - df["qweek"] + 1
elif funcs == "millisecond":
cd = date_sequence.microsecond * 1000
elif funcs == "day_of_quarter":
quarter = date_sequence.quarter
quarter_start = pd.DatetimeIndex(
date_sequence.year.map(str)
+ "-"
+ (3 * quarter - 2).map(int).map(str)
+ "-01"
)
values = (
(x["date_sequence"] - quarter_start) / pd.to_timedelta("1D") + 1
).astype(int)
cd = values
elif funcs == "hour_of_week":
cd = date_sequence.day_of_week * 24 + date_sequence.hour
elif funcs == "is_weekend":
cd = date_sequence.day_of_week > 4
else:
cd = getattr(date_sequence, funcs)
cd = pd.DataFrame(cd)
cd = cd.rename(columns={cd.columns[0]: funcs})
cd[funcs] = np.int64(cd[funcs])
return cd
def _get_supported_calendar(ts_freq, DUMMIES):
rank = DUMMIES.loc[DUMMIES["ts_frequency"] == ts_freq, "rank"].max()
matches = DUMMIES.loc[DUMMIES["rank"] <= rank]
if matches.shape[0] == 0:
raise ValueError("Seasonality or Frequency not supported")
return matches
def _prep_dummies(DUMMIES):
"""Use to prepare dummy data.
Includes defining function call names and ranking of date information based on
frequency (e.g. year has a lower frequency than week).
"""
DUMMIES = pd.DataFrame(DUMMIES[1:], columns=DUMMIES[0])
date_order = [
"year",
"quarter",
"month",
"week",
"day",
"hour",
"minute",
"second",
"millisecond",
]
DUMMIES["fourier"] = DUMMIES["child"] + "_in_" + DUMMIES["parent"]
DUMMIES["dummy"] = DUMMIES["child"] + "_of_" + DUMMIES["parent"]
DUMMIES.loc[DUMMIES["dummy"] == "year_of_year", "dummy"] = "year"
DUMMIES.loc[
DUMMIES["dummy_func"] == "is_weekend", ["dummy", "fourier"]
] = "is_weekend"
DUMMIES["child"] = (
DUMMIES["child"].astype("category").cat.reorder_categories(date_order)
)
flist = ["minimal", "efficient", "comprehensive"]
DUMMIES["feature_scope"] = (
DUMMIES["feature_scope"].astype("category").cat.reorder_categories(flist)
)
DUMMIES["feature_scope"] = pd.Categorical(DUMMIES["feature_scope"], ordered=True)
DUMMIES["rank"] = DUMMIES["child"].cat.codes
col = DUMMIES["child"]
DUMMIES.insert(0, "ts_frequency", col)
DUMMIES = DUMMIES.replace(
{
"ts_frequency": {
"year": "Y",
"quarter": "Q",
"month": "M",
"week": "W",
"day": "D",
"hour": "H",
"minute": "T",
"second": "S",
"millisecond": "L",
}
}
)
return DUMMIES
```
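To complement the docstring examples, a short usage sketch against an hourly `DatetimeIndex` (assuming sktime is installed); the `manual_selection` names follow the `child_of_parent` convention built in `_prep_dummies`.

```python
import pandas as pd
from sktime.transformations.series.date import DateTimeFeatures

# Hourly series; only the DatetimeIndex matters to the transformer.
y = pd.Series(range(72), index=pd.date_range("2021-01-01", periods=72, freq="H"))

# manual_selection bypasses feature_scope; ts_freq="H" is still checked
# against the frequency of each requested dummy.
t = DateTimeFeatures(ts_freq="H", manual_selection=["day_of_week", "hour_of_day"])
Xt = t.fit_transform(y)
print(Xt.columns.tolist())  # ['day_of_week', 'hour_of_day'] (internal table order)
```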
6cd31e8deb99487311d9da418037770f9cc3683a
|
c3e0a6919caf85c35239ef23084df9bbf8dd61c3
|
/pypeit/deprecated/bin/pypeit_chk_alignments
|
5047bf817096f3dacb818cb6249d4430301d9016
|
[
"BSD-3-Clause"
] |
permissive
|
pypeit/PypeIt
|
6eb9e5afd62acc9d363e497cd9e367d620f86ea4
|
0d2e2196afc6904050b1af4d572f5c643bb07e38
|
refs/heads/release
| 2023-08-25T21:15:59.113114
| 2023-06-04T15:23:39
| 2023-06-04T15:23:39
| 36,958,428
| 136
| 98
|
BSD-3-Clause
| 2023-09-12T17:42:15
| 2015-06-05T22:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 192
|
pypeit_chk_alignments
|
#!/usr/bin/env python
"""
Show the alignments in a Ginga window
"""
from pypeit.scripts import chk_alignments
if __name__ == '__main__':
chk_alignments.main(chk_alignments.parse_args())
```

repo_name: swar/nba_api | path: /scripts/analyze_endpoints_and_create_files.py | filename: analyze_endpoints_and_create_files.py | extension: py | language: Python | gha_language: Python
blob_id: 79cd232ad45e0e0d7fe56cc03ee8728ae3e7f53c | directory_id: d70e3750c600da2cd14f5a07f6cc4bc4d2261cd3 | content_id: 1095e8353e2280e62a4595788eab883afdc31fdd
snapshot_id: 93d794123a856dcf2005e6c2856c2a369d29839e | revision_id: 8480b574c286a22c9064c014505ae61f7dd84fc1 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-05T07:03:01.612102 | revision_date: 2023-09-01T00:59:16 | committer_date: 2023-09-01T00:59:16
github_id: 149,062,453 | star_events_count: 1,909 | fork_events_count: 554 | gha_event_created_at: 2023-09-14T18:56:46 | gha_created_at: 2018-09-17T03:13:07
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 792

```python
from tools.stats.endpoint_analysis import analysis as endpoint_analysis
from tools.stats.endpoint_py_file_generator import generator as py_file_generator
from tools.stats.endpoint_documentation_generator import generator as endpoint_documentation_generator
from tools.stats.parameter_documentation_generator import generator as parameter_documentation_generator
# Analyze Endpoints
endpoint_analysis.analyze_all_endpoints_with_threading()
endpoint_analysis.analyze_and_save_all_endpoints(pause=0)
# Generate Endpoint Py Files
py_file_generator.generate_endpoint_files()
# Generate Endpoint Documentation
endpoint_documentation_generator.generate_all_endpoint_documentation()
# Generate Parameter Documentation
parameter_documentation_generator.generate_parameter_documentation_file()
```
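A smaller sketch of running only the documentation steps from the same pipeline, assuming the endpoint analysis has already been run and saved by the analysis step above (an assumption; the script itself always runs the full sequence).

```python
# Regenerate only the docs; assumes saved endpoint analysis already exists.
from tools.stats.endpoint_documentation_generator import generator as endpoint_docs
from tools.stats.parameter_documentation_generator import generator as parameter_docs

endpoint_docs.generate_all_endpoint_documentation()
parameter_docs.generate_parameter_documentation_file()
```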

repo_name: plotly/dash | path: /tests/integration/renderer/test_due_diligence.py | filename: test_due_diligence.py | extension: py | language: Python | gha_language: Python
blob_id: 7f64aaaff9d6abb8e64f954c11556f0b774b408f | directory_id: c001930958cb94f8b91b1f734108671f1db9e9f1 | content_id: 8c28edd853a84168375644d53f7d436b7b722f8d
snapshot_id: 73c752135937e27975071fbd144e3fb21618e7b4 | revision_id: 6eaf2e17c25f7ca1847c41aafeb18e87c586cb9f | branch_name: refs/heads/dev
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-30T21:21:06.056499 | revision_date: 2023-08-29T16:49:04 | committer_date: 2023-08-29T16:49:04
github_id: 33,702,544 | star_events_count: 20,553 | fork_events_count: 2,355 | gha_event_created_at: 2023-08-31T20:51:14 | gha_created_at: 2015-04-10T01:53:08
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 3,528

```python
import json
import os
import string
import pytest
from bs4 import BeautifulSoup
import requests
import plotly
from dash import Dash, html
def test_rddd001_initial_state(dash_duo):
app = Dash(__name__)
my_class_attrs = {
"id": "p.c.4",
"className": "my-class",
"title": "tooltip",
"style": {"color": "red", "fontSize": 30},
}
# fmt:off
app.layout = html.Div([
'Basic string',
3.14,
True,
None,
html.Div('Child div with basic string', **my_class_attrs),
html.Div(id='p.c.5'),
html.Div([
html.Div('Grandchild div', id='p.c.6.p.c.0'),
html.Div([
html.Div('Great grandchild', id='p.c.6.p.c.1.p.c.0'),
3.14159,
'another basic string'
], id='p.c.6.p.c.1'),
html.Div([
html.Div(
html.Div([
html.Div([
html.Div(
id='p.c.6.p.c.2.p.c.0.p.c.p.c.0.p.c.0'
),
'',
html.Div(
id='p.c.6.p.c.2.p.c.0.p.c.p.c.0.p.c.2'
)
], id='p.c.6.p.c.2.p.c.0.p.c.p.c.0')
], id='p.c.6.p.c.2.p.c.0.p.c'),
id='p.c.6.p.c.2.p.c.0'
)
], id='p.c.6.p.c.2')
], id='p.c.6')
])
# fmt:on
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal(r"#p\.c\.5", "")
# Note: this .html file shows there's no undo/redo button by default
with open(
os.path.join(os.path.dirname(__file__), "initial_state_dash_app_content.html")
) as fp:
expected_dom = BeautifulSoup(fp.read().strip(), "lxml")
fetched_dom = dash_duo.dash_outerhtml_dom
    assert (
        fetched_dom.decode() == expected_dom.decode()
    ), "the fetched rendered dom should match the expected dom"
assert dash_duo.get_logs() == [], "Check that no errors or warnings were displayed"
assert dash_duo.driver.execute_script(
"return JSON.parse(JSON.stringify(window.store.getState().layout))"
) == json.loads(
json.dumps(app.layout, cls=plotly.utils.PlotlyJSONEncoder)
), "the state layout is identical to app.layout"
r = requests.get("{}/_dash-dependencies".format(dash_duo.server_url))
assert r.status_code == 200
assert r.json() == [], "no dependencies present in app as no callbacks are defined"
paths = dash_duo.redux_state_paths
assert paths["objs"] == {}
assert paths["strs"] == {
abbr: [
int(token)
if token in string.digits
else token.replace("p", "props").replace("c", "children")
for token in abbr.split(".")
]
for abbr in (
child.get("id")
for child in fetched_dom.find(id="react-entry-point").findChildren(id=True)
)
}, "paths should reflect to the component hierarchy"
assert not dash_duo.redux_state_is_loading, "no callback => no pendingCallbacks"
assert dash_duo.get_logs() == [], "console has no errors"
@pytest.mark.parametrize("child", [0, [0]])
def test_rddd002_falsy_child(dash_duo, child):
app = Dash(__name__)
app.layout = html.Div(id="falsy-wrapper", children=child)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#falsy-wrapper", "0")
assert not dash_duo.get_logs()
```
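The store-versus-layout assertion above relies on `plotly.utils.PlotlyJSONEncoder` knowing how to serialize Dash components. A standalone sketch of that round trip (no browser or `dash_duo` needed), assuming dash and plotly are installed:

```python
import json

import plotly
from dash import html

layout = html.Div([html.Div("child", id="p.c.0"), 3.14], id="parent")
# Components serialize to {"type", "namespace", "props"} dictionaries.
as_json = json.dumps(layout, cls=plotly.utils.PlotlyJSONEncoder)
print(json.loads(as_json)["props"]["id"])  # 'parent'
```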

repo_name: vgrem/Office365-REST-Python-Client | path: /office365/todo/attachments/session.py | filename: session.py | extension: py | language: Python | gha_language: Python
blob_id: 0a1a75fa12b844b1235c15932714d60923ba442d | directory_id: 8ca19f1a31070738b376c0370c4bebf6b7efcb43 | content_id: d9741b95a0142d40b8a4787d300b2aa676522c5c
snapshot_id: 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 | revision_id: cbd245d1af8d69e013c469cfc2a9851f51c91417 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-02T14:20:40.109462 | revision_date: 2023-08-31T19:14:05 | committer_date: 2023-08-31T19:14:05
github_id: 51,305,798 | star_events_count: 1,006 | fork_events_count: 326 | gha_event_created_at: 2023-08-28T05:38:02 | gha_created_at: 2016-02-08T15:24:51
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,333

```python
from datetime import datetime
from office365.entity import Entity
from office365.runtime.types.collections import StringCollection
class AttachmentSession(Entity):
"""Represents a resource that uploads large attachments to a todoTask."""
@property
def content(self):
"""The content streams that are uploaded."""
return self.properties.get("content", None)
@property
def expiration_datetime(self):
"""The date and time in UTC when the upload session will expire.
The complete file must be uploaded before this expiration time is reached."""
return self.properties.get("expirationDateTime", datetime.min)
@property
def next_expected_ranges(self):
"""Indicates a single value {start} that represents the location in the file where the next
upload should begin."""
return self.properties.get("nextExpectedRanges", StringCollection())
def get_property(self, name, default_value=None):
if default_value is None:
property_mapping = {
"expirationDateTime": self.expiration_datetime,
"nextExpectedRanges": self.next_expected_ranges
}
default_value = property_mapping.get(name, None)
return super(AttachmentSession, self).get_property(name, default_value)
```
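The `get_property` override maps the server's camelCase payload keys onto the typed defaults exposed by the Python properties. A minimal standalone sketch of that pattern with a stand-in base class (not the actual `office365` Entity):

```python
class Entity:  # stand-in for office365.entity.Entity
    def __init__(self):
        self.properties = {}

    def get_property(self, name, default_value=None):
        return self.properties.get(name, default_value)


class Session(Entity):
    @property
    def next_expected_ranges(self):
        return self.properties.get("nextExpectedRanges", [])

    def get_property(self, name, default_value=None):
        if default_value is None:
            # camelCase payload key -> typed default from the Python property
            default_value = {"nextExpectedRanges": self.next_expected_ranges}.get(name)
        return super().get_property(name, default_value)


s = Session()
print(s.get_property("nextExpectedRanges"))  # [] rather than None
```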

repo_name: openstack/ceilometer | path: /ceilometer/tests/unit/publisher/test_messaging_publisher.py | filename: test_messaging_publisher.py | extension: py | language: Python | gha_language: Python
blob_id: 2968ef8bce80420194e9a779e6510a3a6b087d68 | directory_id: e4b11f60c768fb1719e4158e9e701d424184c5ce | content_id: 48ed89307ba94d958fa2ca6c0b50bc07001a3455
snapshot_id: af938664ccba710547dbb4c74e5deb2175482d56 | revision_id: d31d4ed3574a5d19fe4b09ab2c227dba64da170a | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-28T15:09:01.659514 | revision_date: 2023-08-21T03:29:16 | committer_date: 2023-08-21T03:29:16
github_id: 6,642,735 | star_events_count: 246 | fork_events_count: 289 | gha_event_created_at: 2019-11-01T04:21:47 | gha_created_at: 2012-11-11T18:33:12
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 14,860

```python
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/messaging.py"""
import datetime
from unittest import mock
import uuid
import oslo_messaging
from oslo_messaging._drivers import impl_kafka as kafka_driver
from oslo_utils import netutils
import testscenarios.testcase
from ceilometer.event import models as event
from ceilometer.publisher import messaging as msg_publisher
from ceilometer import sample
from ceilometer import service
from ceilometer.tests import base as tests_base
class BasePublisherTestCase(tests_base.BaseTestCase):
test_event_data = [
event.Event(message_id=uuid.uuid4(),
event_type='event_%d' % i,
generated=datetime.datetime.utcnow(),
traits=[], raw={})
for i in range(0, 5)
]
test_sample_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def setUp(self):
super(BasePublisherTestCase, self).setUp()
self.CONF = service.prepare_service([], [])
self.setup_messaging(self.CONF)
class NotifierOnlyPublisherTest(BasePublisherTestCase):
@mock.patch('oslo_messaging.Notifier')
def test_publish_topic_override(self, notifier):
msg_publisher.SampleNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://?topic=custom_topic'))
notifier.assert_called_with(mock.ANY, topics=['custom_topic'],
driver=mock.ANY, retry=mock.ANY,
publisher_id=mock.ANY)
msg_publisher.EventNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://?topic=custom_event_topic'))
notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'],
driver=mock.ANY, retry=mock.ANY,
publisher_id=mock.ANY)
@mock.patch('ceilometer.messaging.get_transport')
def test_publish_other_host(self, cgt):
msg_publisher.SampleNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234'))
cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234')
msg_publisher.EventNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234'))
cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234')
@mock.patch('ceilometer.messaging.get_transport')
def test_publish_other_host_vhost_and_query(self, cgt):
msg_publisher.SampleNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo'
'?driver=amqp&amqp_auto_delete=true'))
cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo'
'?amqp_auto_delete=true')
msg_publisher.EventNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo'
'?driver=amqp&amqp_auto_delete=true'))
cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo'
'?amqp_auto_delete=true')
@mock.patch('ceilometer.messaging.get_transport')
def test_publish_with_none_rabbit_driver(self, cgt):
sample_publisher = msg_publisher.SampleNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka'))
cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092')
transport = oslo_messaging.get_transport(self.CONF,
'kafka://127.0.0.1:9092')
self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver)
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(sample_publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
sample_publisher.publish_samples,
self.test_sample_data)
self.assertEqual(0, len(sample_publisher.local_queue))
self.assertEqual(100, len(fake_send.mock_calls))
fake_send.assert_called_with('metering', mock.ANY)
event_publisher = msg_publisher.EventNotifierPublisher(
self.CONF,
netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka'))
cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092')
with mock.patch.object(event_publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
event_publisher.publish_events,
self.test_event_data)
self.assertEqual(0, len(event_publisher.local_queue))
self.assertEqual(100, len(fake_send.mock_calls))
fake_send.assert_called_with('event', mock.ANY)
class TestPublisher(testscenarios.testcase.WithScenarios,
BasePublisherTestCase):
scenarios = [
('notifier',
dict(protocol="notifier",
publisher_cls=msg_publisher.SampleNotifierPublisher,
test_data=BasePublisherTestCase.test_sample_data,
pub_func='publish_samples', attr='source')),
('event_notifier',
dict(protocol="notifier",
publisher_cls=msg_publisher.EventNotifierPublisher,
test_data=BasePublisherTestCase.test_event_data,
pub_func='publish_events', attr='event_type')),
]
def setUp(self):
super(TestPublisher, self).setUp()
self.topic = (self.CONF.publisher_notifier.event_topic
if self.pub_func == 'publish_events' else
self.CONF.publisher_notifier.metering_topic)
class TestPublisherPolicy(TestPublisher):
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_no_policy(self, mylog):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
self.assertEqual(100, len(fake_send.mock_calls))
fake_send.assert_called_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_block(self, mylog):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=default' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.info.called)
self.assertEqual(0, len(publisher.local_queue))
self.assertEqual(100, len(fake_send.mock_calls))
fake_send.assert_called_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG')
def test_published_with_policy_incorrect(self, mylog):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=notexist' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
self.assertRaises(
msg_publisher.DeliveryFailure,
getattr(publisher, self.pub_func),
self.test_data)
self.assertTrue(mylog.warning.called)
self.assertEqual('default', publisher.policy)
self.assertEqual(0, len(publisher.local_queue))
self.assertEqual(100, len(fake_send.mock_calls))
fake_send.assert_called_with(
self.topic, mock.ANY)
@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock())
class TestPublisherPolicyReactions(TestPublisher):
def test_published_with_policy_drop_and_rpc_down(self):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=drop' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(0, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
def test_published_with_policy_queue_and_rpc_down(self):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.assert_called_once_with(
self.topic, mock.ANY)
def test_published_with_policy_queue_and_rpc_down_up(self):
self.rpc_unreachable = True
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1, len(publisher.local_queue))
fake_send.side_effect = mock.MagicMock()
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(0, len(publisher.local_queue))
topic = self.topic
expected = [mock.call(topic, mock.ANY),
mock.call(topic, mock.ANY),
mock.call(topic, mock.ANY)]
self.assertEqual(expected, fake_send.mock_calls)
def test_published_with_policy_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(self.CONF, netutils.urlsplit(
'%s://?policy=queue&max_queue_length=3' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 5):
for s in self.test_data:
setattr(s, self.attr, 'test-%d' % i)
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(3, len(publisher.local_queue))
self.assertEqual(
'test-2',
publisher.local_queue[0][1][0][self.attr]
)
self.assertEqual(
'test-3',
publisher.local_queue[1][1][0][self.attr]
)
self.assertEqual(
'test-4',
publisher.local_queue[2][1][0][self.attr]
)
def test_published_with_policy_default_sized_queue_and_rpc_down(self):
publisher = self.publisher_cls(
self.CONF,
netutils.urlsplit('%s://?policy=queue' % self.protocol))
side_effect = msg_publisher.DeliveryFailure()
with mock.patch.object(publisher, '_send') as fake_send:
fake_send.side_effect = side_effect
for i in range(0, 2000):
for s in self.test_data:
setattr(s, self.attr, 'test-%d' % i)
getattr(publisher, self.pub_func)(self.test_data)
self.assertEqual(1024, len(publisher.local_queue))
self.assertEqual(
'test-976',
publisher.local_queue[0][1][0][self.attr]
)
self.assertEqual(
'test-1999',
publisher.local_queue[1023][1][0][self.attr]
)
```
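The sized-queue tests pin down the "queue" policy semantics: on delivery failure the batch is kept locally, and the queue drops its oldest entries once max_queue_length is exceeded. A self-contained sketch of those semantics (a toy re-implementation, not ceilometer's actual publisher class):

```python
from collections import deque


class QueuingPublisher:
    """Toy model of the 'queue' failure policy."""

    def __init__(self, max_queue_length=1024):
        # deque(maxlen=...) silently drops the oldest entry when full,
        # matching the 'test-2'/'test-976' head assertions above.
        self.local_queue = deque(maxlen=max_queue_length)

    def publish(self, batch, send):
        self.local_queue.append(batch)
        try:
            while self.local_queue:
                send(self.local_queue[0])
                self.local_queue.popleft()
        except Exception:
            pass  # keep unsent batches for the next publish call


def failing_send(batch):
    raise RuntimeError("broker down")


pub = QueuingPublisher(max_queue_length=3)
for i in range(5):
    pub.publish("test-%d" % i, failing_send)
print(list(pub.local_queue))  # ['test-2', 'test-3', 'test-4']
```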

repo_name: ActivitySim/activitysim | path: /activitysim/abm/models/trip_matrices.py | filename: trip_matrices.py | extension: py | language: Python | gha_language: Jupyter Notebook
blob_id: 6c046ad821cf394cdf2a498dd45322be5040fdb6 | directory_id: aae3c6fccb2296e4da5bb10310f5dd6baba8b7de | content_id: 0c9e1f447fabd5ff4d1dee475f2e157d24d3f0d5
snapshot_id: 3d938e616452be76db1bb0c8a1212e12b9216823 | revision_id: a8e755f96d0e32633a6d3657c4878e3b6a37e59a | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-08T16:02:06.275693 | revision_date: 2023-05-09T13:08:23 | committer_date: 2023-05-09T13:08:23
github_id: 20,981,950 | star_events_count: 118 | fork_events_count: 89 | gha_event_created_at: 2023-07-25T14:07:16 | gha_created_at: 2014-06-18T23:57:40
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 13,051

```python
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import openmatrix as omx
import pandas as pd
from activitysim.core import config, expressions, inject, los, pipeline
logger = logging.getLogger(__name__)
@inject.step()
def write_trip_matrices(network_los):
"""
Write trip matrices step.
    Adds boolean columns to the local trips table via annotation expressions,
    then aggregates trip counts and writes OD matrices to OMX. Saves the
    annotated trips table to the pipeline if desired.
    Writes taz trip tables for one- and two-zone systems, and both taz and tap
    trip tables for three-zone systems. Add ``is_tap: True`` to the settings file
    to identify an output matrix as tap-level trips as opposed to taz-level trips.
For one zone system, uses the land use table for the set of possible tazs.
For two zone system, uses the taz skim zone names for the set of possible tazs.
For three zone system, uses the taz skim zone names for the set of possible tazs
and uses the tap skim zone names for the set of possible taps.
"""
trips = inject.get_table("trips", None)
if trips is None:
# this step is a NOP if there is no trips table
# this might legitimately happen if they comment out some steps to debug but still want write_tables
# this saves them the hassle of remembering to comment out this step
logger.warning(
"write_trip_matrices returning empty-handed because there is no trips table"
)
return
model_settings = config.read_model_settings("write_trip_matrices.yaml")
trips_df = annotate_trips(trips, network_los, model_settings)
if bool(model_settings.get("SAVE_TRIPS_TABLE")):
pipeline.replace_table("trips", trips_df)
if "parking_location" in config.setting("models"):
parking_settings = config.read_model_settings("parking_location_choice.yaml")
parking_taz_col_name = parking_settings["ALT_DEST_COL_NAME"]
if parking_taz_col_name in trips_df:
# TODO make parking zone negative, not zero, if not used
trips_df.loc[trips_df[parking_taz_col_name] > 0, "destination"] = trips_df[
parking_taz_col_name
]
            # Also need to address the return trip
# write matrices by zone system type
if network_los.zone_system == los.ONE_ZONE: # taz trips written to taz matrices
logger.info("aggregating trips one zone...")
aggregate_trips = trips_df.groupby(["origin", "destination"], sort=False).sum(
numeric_only=True
)
# use the average household weight for all trips in the origin destination pair
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
aggregate_weight = (
trips_df[["origin", "destination", hh_weight_col]]
.groupby(["origin", "destination"], sort=False)
.mean()
)
aggregate_trips[hh_weight_col] = aggregate_weight[hh_weight_col]
orig_vals = aggregate_trips.index.get_level_values("origin")
dest_vals = aggregate_trips.index.get_level_values("destination")
# use the land use table for the set of possible tazs
land_use = pipeline.get_table("land_use")
zone_index = land_use.index
assert all(zone in zone_index for zone in orig_vals)
assert all(zone in zone_index for zone in dest_vals)
_, orig_index = zone_index.reindex(orig_vals)
_, dest_index = zone_index.reindex(dest_vals)
try:
zone_labels = land_use[f"_original_{land_use.index.name}"]
except KeyError:
zone_labels = land_use.index
write_matrices(
aggregate_trips, zone_labels, orig_index, dest_index, model_settings
)
elif network_los.zone_system == los.TWO_ZONE: # maz trips written to taz matrices
logger.info("aggregating trips two zone...")
trips_df["otaz"] = (
pipeline.get_table("land_use").reindex(trips_df["origin"]).TAZ.tolist()
)
trips_df["dtaz"] = (
pipeline.get_table("land_use").reindex(trips_df["destination"]).TAZ.tolist()
)
aggregate_trips = trips_df.groupby(["otaz", "dtaz"], sort=False).sum(
numeric_only=True
)
# use the average household weight for all trips in the origin destination pair
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
aggregate_weight = (
trips_df[["otaz", "dtaz", hh_weight_col]]
.groupby(["otaz", "dtaz"], sort=False)
.mean()
)
aggregate_trips[hh_weight_col] = aggregate_weight[hh_weight_col]
orig_vals = aggregate_trips.index.get_level_values("otaz")
dest_vals = aggregate_trips.index.get_level_values("dtaz")
try:
land_use_taz = pipeline.get_table("land_use_taz")
except (KeyError, RuntimeError):
pass # table missing, ignore
else:
if "_original_TAZ" in land_use_taz.columns:
orig_vals = orig_vals.map(land_use_taz["_original_TAZ"])
dest_vals = dest_vals.map(land_use_taz["_original_TAZ"])
zone_index = pd.Index(network_los.get_tazs(), name="TAZ")
assert all(zone in zone_index for zone in orig_vals)
assert all(zone in zone_index for zone in dest_vals)
_, orig_index = zone_index.reindex(orig_vals)
_, dest_index = zone_index.reindex(dest_vals)
write_matrices(
aggregate_trips, zone_index, orig_index, dest_index, model_settings
)
elif (
network_los.zone_system == los.THREE_ZONE
): # maz trips written to taz and tap matrices
logger.info("aggregating trips three zone taz...")
trips_df["otaz"] = (
pipeline.get_table("land_use").reindex(trips_df["origin"]).TAZ.tolist()
)
trips_df["dtaz"] = (
pipeline.get_table("land_use").reindex(trips_df["destination"]).TAZ.tolist()
)
aggregate_trips = trips_df.groupby(["otaz", "dtaz"], sort=False).sum(
numeric_only=True
)
# use the average household weight for all trips in the origin destination pair
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
aggregate_weight = (
trips_df[["otaz", "dtaz", hh_weight_col]]
.groupby(["otaz", "dtaz"], sort=False)
.mean()
)
aggregate_trips[hh_weight_col] = aggregate_weight[hh_weight_col]
orig_vals = aggregate_trips.index.get_level_values("otaz")
dest_vals = aggregate_trips.index.get_level_values("dtaz")
try:
land_use_taz = pipeline.get_table("land_use_taz")
except (KeyError, RuntimeError):
pass # table missing, ignore
else:
if "_original_TAZ" in land_use_taz.columns:
orig_vals = orig_vals.map(land_use_taz["_original_TAZ"])
dest_vals = dest_vals.map(land_use_taz["_original_TAZ"])
zone_index = pd.Index(network_los.get_tazs(), name="TAZ")
assert all(zone in zone_index for zone in orig_vals)
assert all(zone in zone_index for zone in dest_vals)
_, orig_index = zone_index.reindex(orig_vals)
_, dest_index = zone_index.reindex(dest_vals)
write_matrices(
aggregate_trips, zone_index, orig_index, dest_index, model_settings
)
logger.info("aggregating trips three zone tap...")
aggregate_trips = trips_df.groupby(["btap", "atap"], sort=False).sum(
numeric_only=True
)
# use the average household weight for all trips in the origin destination pair
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
aggregate_weight = (
trips_df[["btap", "atap", hh_weight_col]]
.groupby(["btap", "atap"], sort=False)
.mean()
)
aggregate_trips[hh_weight_col] = aggregate_weight[hh_weight_col]
orig_vals = aggregate_trips.index.get_level_values("btap")
dest_vals = aggregate_trips.index.get_level_values("atap")
zone_index = pd.Index(network_los.get_taps(), name="TAP")
assert all(zone in zone_index for zone in orig_vals)
assert all(zone in zone_index for zone in dest_vals)
_, orig_index = zone_index.reindex(orig_vals)
_, dest_index = zone_index.reindex(dest_vals)
write_matrices(
aggregate_trips, zone_index, orig_index, dest_index, model_settings, True
)
def annotate_trips(trips, network_los, model_settings):
"""
Add columns to local trips table. The annotator has
access to the origin/destination skims and everything
defined in the model settings CONSTANTS.
Pipeline tables can also be accessed by listing them under
TABLES in the preprocessor settings.
"""
trips_df = trips.to_frame()
trace_label = "trip_matrices"
skim_dict = network_los.get_default_skim_dict()
# setup skim keys
if "trip_period" not in trips_df:
trips_df["trip_period"] = network_los.skim_time_period_label(trips_df.depart)
od_skim_wrapper = skim_dict.wrap("origin", "destination")
odt_skim_stack_wrapper = skim_dict.wrap_3d(
orig_key="origin", dest_key="destination", dim3_key="trip_period"
)
skims = {"od_skims": od_skim_wrapper, "odt_skims": odt_skim_stack_wrapper}
locals_dict = {}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_dict.update(constants)
expressions.annotate_preprocessors(
trips_df, locals_dict, skims, model_settings, trace_label
)
if not np.issubdtype(trips_df["trip_period"].dtype, np.integer):
if hasattr(skim_dict, "map_time_periods_from_series"):
trip_period_idx = skim_dict.map_time_periods_from_series(
trips_df["trip_period"]
)
if trip_period_idx is not None:
trips_df["trip_period"] = trip_period_idx
# Data will be expanded by an expansion weight column from
# the households pipeline table, if specified in the model settings.
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
if hh_weight_col and hh_weight_col not in trips_df:
logger.info("adding '%s' from households to trips table" % hh_weight_col)
household_weights = pipeline.get_table("households")[hh_weight_col]
trips_df[hh_weight_col] = trips_df.household_id.map(household_weights)
return trips_df
def write_matrices(
aggregate_trips, zone_index, orig_index, dest_index, model_settings, is_tap=False
):
"""
Write aggregated trips to OMX format.
The MATRICES setting lists the new OMX files to write.
Each file can contain any number of 'tables', each specified by a
table key ('name') and a trips table column ('data_field') to use
for aggregated counts.
Any data type may be used for columns added in the annotation phase,
but the table 'data_field's must be summable types: ints, floats, bools.
"""
matrix_settings = model_settings.get("MATRICES")
if not matrix_settings:
logger.error("Missing MATRICES setting in write_trip_matrices.yaml")
for matrix in matrix_settings:
matrix_is_tap = matrix.get("is_tap", False)
if matrix_is_tap == is_tap: # only write tap matrices to tap matrix files
filename = matrix.get("file_name")
filepath = config.output_file_path(filename)
logger.info("opening %s" % filepath)
file = omx.open_file(filepath, "w") # possibly overwrite existing file
table_settings = matrix.get("tables")
for table in table_settings:
table_name = table.get("name")
col = table.get("data_field")
if col not in aggregate_trips:
logger.error(f"missing {col} column in aggregate_trips DataFrame")
return
hh_weight_col = model_settings.get("HH_EXPANSION_WEIGHT_COL")
if hh_weight_col:
aggregate_trips[col] = (
aggregate_trips[col] / aggregate_trips[hh_weight_col]
)
data = np.zeros((len(zone_index), len(zone_index)))
data[orig_index, dest_index] = aggregate_trips[col]
logger.debug(
"writing %s sum %0.2f" % (table_name, aggregate_trips[col].sum())
)
file[table_name] = data # write to file
# include the index-to-zone map in the file
logger.info(
"adding %s mapping for %s zones to %s"
% (zone_index.name, zone_index.size, filename)
)
file.create_mapping(zone_index.name, zone_index.to_numpy())
logger.info("closing %s" % filepath)
file.close()
```
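The core of `write_matrices` is scattering grouped counts into a dense zone-by-zone array using the positional indexers returned by `Index.reindex`. A toy sketch of just that step, with made-up data in place of the pipeline tables:

```python
import numpy as np
import pandas as pd

trips = pd.DataFrame(
    {"origin": [1, 1, 2], "destination": [2, 2, 1], "trips": [1.0, 1.0, 1.0]}
)
agg = trips.groupby(["origin", "destination"], sort=False).sum()

zone_index = pd.Index([1, 2], name="TAZ")
# Index.reindex returns (new_index, indexer); the indexer gives row/col positions.
_, orig_index = zone_index.reindex(agg.index.get_level_values("origin"))
_, dest_index = zone_index.reindex(agg.index.get_level_values("destination"))

data = np.zeros((len(zone_index), len(zone_index)))
data[orig_index, dest_index] = agg["trips"]
print(data)  # [[0. 2.] [1. 0.]]
```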

repo_name: pynamodb/PynamoDB | path: /pynamodb/expressions/update.py | filename: update.py | extension: py | language: Python | gha_language: Python
blob_id: 533713605e00f7286066d7b812b26a1ad3cc6bc1 | directory_id: fbd2702e8c45d5e6cec39877295ef45f2f61d426 | content_id: 2118e86259e8e2f92aa5ae848752499e459fc71e
snapshot_id: 365961a5cd7b2d7e924f8edf367dde641d00914a | revision_id: 335c7cde6732c5121347207e60479d96e47338f6 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-22T06:19:59.762796 | revision_date: 2023-07-18T05:37:13 | committer_date: 2023-07-18T05:37:13
github_id: 16,058,979 | star_events_count: 1,987 | fork_events_count: 412 | gha_event_created_at: 2023-08-29T14:48:51 | gha_created_at: 2014-01-20T02:18:35
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 4,661

```python
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import TYPE_CHECKING
from pynamodb.constants import BINARY_SET
from pynamodb.constants import NUMBER
from pynamodb.constants import NUMBER_SET
from pynamodb.constants import STRING_SET
if TYPE_CHECKING:
from pynamodb.expressions.operand import _Operand
from pynamodb.expressions.operand import Path
from pynamodb.expressions.operand import Value
class Action:
format_string: str = ''
def __init__(self, *values: '_Operand') -> None:
self.values = values
def __eq__(self, other: Any) -> bool:
return (
type(self) is type(other)
and len(self.values) == len(other.values)
and all(
(
type(v1) is type(v2)
and v1._equals_to(v2)
)
for v1, v2 in zip(self.values, other.values))
)
def serialize(self, placeholder_names: Dict[str, str], expression_attribute_values: Dict[str, str]) -> str:
values = [value.serialize(placeholder_names, expression_attribute_values) for value in self.values]
return self.format_string.format(*values)
def __repr__(self) -> str:
values = [str(value) for value in self.values]
return self.format_string.format(*values)
class SetAction(Action):
"""
The SET action adds an attribute to an item.
"""
format_string = '{0} = {1}'
def __init__(self, path: 'Path', value: '_Operand') -> None:
super(SetAction, self).__init__(path, value)
class RemoveAction(Action):
"""
The REMOVE action deletes an attribute from an item.
"""
format_string = '{0}'
def __init__(self, path: 'Path') -> None:
super(RemoveAction, self).__init__(path)
class AddAction(Action):
"""
The ADD action appends elements to a set or mathematically adds to a number attribute.
"""
format_string = '{0} {1}'
def __init__(self, path: 'Path', subset: 'Value') -> None:
path._type_check(BINARY_SET, NUMBER, NUMBER_SET, STRING_SET)
subset._type_check(BINARY_SET, NUMBER, NUMBER_SET, STRING_SET)
super(AddAction, self).__init__(path, subset)
class DeleteAction(Action):
"""
The DELETE action removes elements from a set.
"""
format_string = '{0} {1}'
def __init__(self, path: 'Path', subset: 'Value') -> None:
path._type_check(BINARY_SET, NUMBER_SET, STRING_SET)
subset._type_check(BINARY_SET, NUMBER_SET, STRING_SET)
super(DeleteAction, self).__init__(path, subset)
class Update:
def __init__(self, *actions: Action) -> None:
self.set_actions: List[SetAction] = []
self.remove_actions: List[RemoveAction] = []
self.add_actions: List[AddAction] = []
self.delete_actions: List[DeleteAction] = []
for action in actions:
self.add_action(action)
def add_action(self, action: Action) -> None:
if isinstance(action, SetAction):
self.set_actions.append(action)
elif isinstance(action, RemoveAction):
self.remove_actions.append(action)
elif isinstance(action, AddAction):
self.add_actions.append(action)
elif isinstance(action, DeleteAction):
self.delete_actions.append(action)
else:
raise ValueError("unsupported action type: '{}'".format(action.__class__.__name__))
def serialize(self, placeholder_names: Dict[str, str], expression_attribute_values: Dict[str, str]) -> Optional[str]:
clauses = [
self._get_clause('SET', self.set_actions, placeholder_names, expression_attribute_values),
self._get_clause('REMOVE', self.remove_actions, placeholder_names, expression_attribute_values),
self._get_clause('ADD', self.add_actions, placeholder_names, expression_attribute_values),
self._get_clause('DELETE', self.delete_actions, placeholder_names, expression_attribute_values),
]
expression = ' '.join(clause for clause in clauses if clause is not None)
return expression or None
@staticmethod
def _get_clause(
keyword: str,
actions: Sequence[Action],
placeholder_names: Dict[str, str],
expression_attribute_values: Dict[str, str]
) -> Optional[str]:
actions_string = ', '.join(
action.serialize(placeholder_names, expression_attribute_values) for action in actions
)
return keyword + ' ' + actions_string if actions_string else None
```
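A short sketch of composing these actions into an expression, using the `Path` and `Value` operands the module imports under TYPE_CHECKING (exact placeholder numbering may vary between versions):

```python
from pynamodb.expressions.operand import Path, Value
from pynamodb.expressions.update import RemoveAction, SetAction, Update

update = Update(
    SetAction(Path("title"), Value("new title")),
    RemoveAction(Path("obsolete")),
)
placeholder_names, attribute_values = {}, {}
print(update.serialize(placeholder_names, attribute_values))
# e.g. 'SET #0 = :0 REMOVE #1'
print(attribute_values)  # e.g. {':0': {'S': 'new title'}}
```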

repo_name: weaveworks/grafanalib | path: /grafanalib/tests/test_opentsdb.py | filename: test_opentsdb.py | extension: py | language: Python | gha_language: Python
blob_id: d6381bde14752473f42a86e8d431fa38e261e4d2 | directory_id: a8ecd983bc6100705c3916d5378b557ef8cf0dab | content_id: 7a1db2a6432c16ea018041d12e1965b039346019
snapshot_id: 711905c78dd9db4257f4d438e55a7112efd1db4a | revision_id: b0d81c97b2e8980c4bb5d2b155a31a7b0a47cedb | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-28T19:49:32.135931 | revision_date: 2023-08-18T14:36:17 | committer_date: 2023-08-18T14:36:17
github_id: 75,401,165 | star_events_count: 1,782 | fork_events_count: 313 | gha_event_created_at: 2023-09-11T13:29:48 | gha_created_at: 2016-12-02T14:11:13
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,036

```python
"""Tests for OpenTSDB datasource"""
import grafanalib.core as G
from grafanalib.opentsdb import (
OpenTSDBFilter,
OpenTSDBTarget,
)
from grafanalib import _gen
import sys
if sys.version_info[0] < 3:
from io import BytesIO as StringIO
else:
from io import StringIO
def test_serialization_opentsdb_target():
"""Serializing a graph doesn't explode."""
graph = G.Graph(
title="CPU Usage",
dataSource="OpenTSDB data source",
targets=[
OpenTSDBTarget(
metric='cpu',
alias='$tag_instance',
filters=[
OpenTSDBFilter(value='*', tag='instance',
type='wildcard', groupBy=True),
]),
],
id=1,
yAxes=G.YAxes(
G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
G.YAxis(format=G.SHORT_FORMAT),
),
)
stream = StringIO()
_gen.write_dashboard(graph, stream)
assert stream.getvalue() != ''
```
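The same target can also be serialized on its own, which is handy when debugging panel JSON. A sketch assuming grafanalib's usual `to_json_data()` interface and that the output keys mirror the constructor arguments:

```python
from grafanalib.opentsdb import OpenTSDBFilter, OpenTSDBTarget

target = OpenTSDBTarget(
    metric="cpu",
    alias="$tag_instance",
    filters=[OpenTSDBFilter(value="*", tag="instance", type="wildcard", groupBy=True)],
)
data = target.to_json_data()
print(data["metric"], data["alias"])  # cpu $tag_instance (assumed key names)
```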

repo_name: DataDog/dd-trace-py | path: /ddtrace/contrib/sanic/__init__.py | filename: __init__.py | extension: py | language: Python | gha_language: Python
blob_id: 9bc5dc2f5ee39a32ad08dbf726a229a5fe8c0417 | directory_id: ac235a23f22be0d6f1818bb53902177f9969813a | content_id: 5646bf90cd7bb5b457a363ae06216c6cdacb9ba6
snapshot_id: f09d6d48c4c69aea68f999fc8a458ade5c6150cf | revision_id: 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | branch_name: refs/heads/1.x
detected_licenses: ["Apache-2.0", "BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-09-01T20:25:26.746324 | revision_date: 2023-09-01T18:54:37 | committer_date: 2023-09-01T18:54:37
github_id: 61,572,326 | star_events_count: 461 | fork_events_count: 426 | gha_event_created_at: 2023-09-14T20:38:57 | gha_created_at: 2016-06-20T18:52:23
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,828

```python
"""
The Sanic__ integration will trace requests to and from Sanic.
Enable Sanic tracing automatically via ``ddtrace-run``::
ddtrace-run python app.py
Sanic tracing can also be enabled manually::
from ddtrace import patch_all
patch_all(sanic=True)
from sanic import Sanic
from sanic.response import text
app = Sanic(__name__)
@app.route('/')
def index(request):
return text('hello world')
if __name__ == '__main__':
app.run()
On Python 3.6 and below, you must enable the legacy ``AsyncioContextProvider`` before using the middleware::
from ddtrace.contrib.asyncio.provider import AsyncioContextProvider
from ddtrace import tracer # Or whichever tracer instance you plan to use
tracer.configure(context_provider=AsyncioContextProvider())
Configuration
~~~~~~~~~~~~~
.. py:data:: ddtrace.config.sanic['distributed_tracing_enabled']
Whether to parse distributed tracing headers from requests received by your Sanic app.
Default: ``True``
.. py:data:: ddtrace.config.sanic['service_name']
The service name reported for your Sanic app.
Can also be configured via the ``DD_SERVICE`` environment variable.
Default: ``'sanic'``
Example::
from ddtrace import config
# Enable distributed tracing
config.sanic['distributed_tracing_enabled'] = True
# Override service name
config.sanic['service_name'] = 'custom-service-name'
.. __: https://sanic.readthedocs.io/en/latest/
"""
from ...internal.utils.importlib import require_modules
required_modules = ["sanic"]
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import get_version
from .patch import patch
from .patch import unpatch
__all__ = ["patch", "unpatch", "get_version"]
```
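A runnable sketch of the manual-enable path the docstring describes, patching just the sanic integration instead of `patch_all` (assuming ddtrace and sanic are installed; the service name is illustrative):

```python
from ddtrace import config, patch

patch(sanic=True)  # enable just the sanic integration
config.sanic["service_name"] = "my-sanic-service"  # hypothetical name
config.sanic["distributed_tracing_enabled"] = True
```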

repo_name: demisto/content | path: /Packs/ThreatMiner/Integrations/ThreatMiner/ThreatMiner_test.py | filename: ThreatMiner_test.py | extension: py | language: Python | gha_language: Python
blob_id: e6147db8c2924c2de9b01a7ff653301bdb4d2608 | directory_id: 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | content_id: 551cb16235208f696a7771208af1515b3f724faf
snapshot_id: 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | revision_id: 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-04T00:02:25.618032 | revision_date: 2023-09-03T21:56:22 | committer_date: 2023-09-03T21:56:22
github_id: 60,525,392 | star_events_count: 1,023 | fork_events_count: 1,921 | gha_event_created_at: 2023-09-14T20:55:24 | gha_created_at: 2016-06-06T12:17:02
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 865

```python
import demistomock as demisto
from ThreatMiner import get_dbot_score_report
from CommonServerPython import DBotScoreReliability
DBOT_SCORES = {
'Reliability': 'C - Fairly reliable',
'Vendor': 'ThreatMiner',
'Indicator': 'CA978112CA1BBDCAFAC231B39A23DC4DA786EFF8147C4E72B9807785AFEE48BB',
'Score': 0,
'Type': 'File'
}
def test_reliability_in_dbot(mocker):
"""
Given:
- The user reliability param
When:
- Running get_dbot_score_report
Then:
        - Verify dbot_score outputs as expected
"""
mocker.patch.object(demisto, 'args', return_value={'threshold': '10'})
dbot_score = get_dbot_score_report(0, 'CA978112CA1BBDCAFAC231B39A23DC4DA786EFF8147C4E72B9807785AFEE48BB', {},
DBotScoreReliability.C)
assert dbot_score == DBOT_SCORES
```
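The `'Reliability'` value in `DBOT_SCORES` is just the display string behind the `DBotScoreReliability.C` constant, which is how the fixture and the function output line up. A tiny sketch, assuming `CommonServerPython` is importable:

```python
from CommonServerPython import DBotScoreReliability

# The constant carries the display string the integration reports.
assert DBotScoreReliability.C == "C - Fairly reliable"
```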

repo_name: LLNL/conduit | path: /scripts/config_vscode.py | filename: config_vscode.py | extension: py | language: Python | gha_language: C++
blob_id: b3ae0a5cc5144d4cec9cb996500212a782eb6769 | directory_id: 431bc209251c67ed68b69ca12a34f79da4be946c | content_id: 5ee130128be50b3d2054ecebd84b5d3f4603ca2f
snapshot_id: 4ae3157e8e9a83643f9479506f3b5f1def7b99fa | revision_id: a6b0b179716eb804a0749cc20083c24c21ed682b | branch_name: refs/heads/develop
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-09-04T00:04:32.475102 | revision_date: 2023-09-01T16:00:30 | committer_date: 2023-09-01T16:00:30
github_id: 40,552,086 | star_events_count: 168 | fork_events_count: 58 | gha_event_created_at: 2023-09-05T21:33:19 | gha_created_at: 2015-08-11T16:14:10
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,944

```python
# Copyright (c) Lawrence Livermore National Security, LLC and other Conduit
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Conduit.
import sys
import json
import os
from optparse import OptionParser
def parse_args():
"Parses args from command line"
parser = OptionParser()
parser.add_option("--args",
dest="cmake_args",
default=None,
help="cmake arguments")
parser.add_option("--host-config",
dest="host_config",
default=None,
help="path to host config file")
opts, extras = parser.parse_args()
    # we want a dict b/c the values are easier to access
opts = vars(opts)
return opts, extras
##################################
# Ref for vscode cmake settings:
##################################
# https://vector-of-bool.github.io/docs/vscode-cmake-tools/settings.html
def write_vscode_settings(settings):
settings_file = os.path.abspath(".vscode/settings.json")
print("[creating: {0}]".format(settings_file))
print("[contents]")
print(json.dumps(settings, indent=2))
open(settings_file, "w").write(json.dumps(settings, indent=2))
def gen_vscode_settings(opts):
settings_comm_file = os.path.abspath(".vscode/settings_common.json")
res = {}
if os.path.isfile(settings_comm_file):
res = json.load(open(settings_comm_file))
else:
print("[warning: {0} not found]".format(settings_comm_file))
if not opts["cmake_args"] is None:
res["cmake.configureArgs"] = [opts["cmake_args"]]
if not opts["host_config"] is None:
res["cmake.cacheInit"] = [opts["host_config"]]
return res
def main():
opts, extras = parse_args()
settings = gen_vscode_settings(opts)
write_vscode_settings(settings)
if __name__ == "__main__":
main()
```
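For reference, a sketch of the settings `gen_vscode_settings` produces for a typical invocation, assuming no `.vscode/settings_common.json` is present (the cmake arguments are illustrative):

```python
# python scripts/config_vscode.py --args "-DENABLE_TESTS=ON" --host-config myhost.cmake
opts = {"cmake_args": "-DENABLE_TESTS=ON", "host_config": "myhost.cmake"}
# gen_vscode_settings(opts) then returns:
expected = {
    "cmake.configureArgs": ["-DENABLE_TESTS=ON"],
    "cmake.cacheInit": ["myhost.cmake"],
}
```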

repo_name: ElementsProject/elements | path: /test/functional/feature_txwitness.py | filename: feature_txwitness.py | extension: py | language: Python | gha_language: C++
blob_id: 952ef8979b4ec8442536863df1a050ab30d175c0 | directory_id: 54f59bfc9d657e35f52762fd7ac5a5034c301dfb | content_id: 260ebfe89f41cadf4ca0f814e86a81bbaa8c3317
snapshot_id: a8db662b18b27f7f0162bc76e98d23b2ca80727d | revision_id: 93cc036edfc855ea7e8a357897958cb55bbce369 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T13:24:12.360307 | revision_date: 2023-08-24T20:51:08 | committer_date: 2023-08-24T20:51:08
github_id: 37,064,937 | star_events_count: 1,064 | fork_events_count: 436 | gha_event_created_at: 2023-08-29T17:49:35 | gha_created_at: 2015-06-08T12:17:33
src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 11,960

```python
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the txwitness for elements
1) Make sure python serialization stuff matches whatever the node/wallet creates, signed and unsigned
2) Segwit transactions have witness data still, and is validated correctly
3) Fast merkle root test? (where oh where are we going to find sha2 internals ripped open in python)
4) transaction round-trips through rpc, p2p, inside and outside of blocks
5) make sure non-segwit transactions have no witness data too
6) If we’re not touching all other tests, we’ll need to actually copy our CTransaction python stuff directly into this test, or another adjacent file, otherwise other tests will still break
7) Try to give it some bitcoin serialization transactions, make sure it fails to decode
"""
from test_framework.messages import CTransaction, CBlock, ser_uint256, from_hex, uint256_from_str, CTxOut, CTxIn, COutPoint, OUTPOINT_ISSUANCE_FLAG, ser_string
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, hex_str_to_bytes, assert_raises_rpc_error, assert_greater_than
from test_framework import util
from test_framework.blocktools import get_witness_script
from io import BytesIO
import copy
import struct
class TxWitnessTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def assert_tx_format_also_signed(self, utxo, segwit):
raw = self.nodes[0].createrawtransaction(
[{"txid": utxo["txid"], "vout": utxo["vout"]}],
[{self.unknown_addr: "49.99"}, {"fee": "0.01"}]
)
unsigned_decoded = self.nodes[0].decoderawtransaction(raw)
assert_equal(len(unsigned_decoded["vin"]), 1)
assert 'txinwitness' not in unsigned_decoded["vin"][0]
# Cross-check python serialization
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw)))
assert_equal(tx.vin[0].prevout.hash, int("0x"+utxo["txid"], 0))
assert_equal(len(tx.vin), len(unsigned_decoded["vin"]))
assert_equal(len(tx.vout), len(unsigned_decoded["vout"]))
# assert re-encoding
serialized = tx.serialize().hex()
assert_equal(serialized, raw)
# Now sign and repeat tests
signed_raw = self.nodes[0].signrawtransactionwithwallet(raw)["hex"]
signed_decoded = self.nodes[0].decoderawtransaction(signed_raw)
assert_equal(len(signed_decoded["vin"]), 1)
assert ("txinwitness" in signed_decoded["vin"][0]) == segwit
# Cross-check python serialization
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(signed_raw)))
assert_equal(tx.vin[0].prevout.hash, int("0x"+utxo["txid"], 0))
assert_equal(tx.vin[0].scriptSig.hex(), signed_decoded["vin"][0]["scriptSig"]["hex"])
# test witness
if segwit:
wit_decoded = signed_decoded["vin"][0]["txinwitness"]
for i in range(len(wit_decoded)):
assert_equal(tx.wit.vtxinwit[0].scriptWitness.stack[i].hex(), wit_decoded[i])
# assert re-encoding
serialized = tx.serialize().hex()
assert_equal(serialized, signed_raw)
txid = self.nodes[0].sendrawtransaction(serialized)
nodetx = self.nodes[0].getrawtransaction(txid, 1)
assert_equal(nodetx["txid"], tx.rehash())
# cross-check wtxid report from node
wtxid = ser_uint256(tx.calc_sha256(True))[::-1].hex()
assert_equal(nodetx["wtxid"], wtxid)
assert_equal(nodetx["hash"], wtxid)
# witness hash stuff
assert_equal(nodetx["withash"], tx.calc_witness_hash())
return (txid, wtxid)
def test_transaction_serialization(self):
legacy_addr = self.nodes[0].getnewaddress("", "legacy")
p2sh_addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
bech32_addr = self.nodes[0].getnewaddress("", "bech32")
self.unknown_addr = self.nodes[1].getnewaddress()
# directly seed types of utxos required
self.nodes[0].generatetoaddress(1, legacy_addr)
self.nodes[0].generatetoaddress(1, p2sh_addr)
self.nodes[0].generatetoaddress(1, bech32_addr)
self.nodes[0].generatetoaddress(101, self.unknown_addr)
# grab utxos filtering by age
legacy_utxo = self.nodes[0].listunspent(104, 104)[0]
p2sh_utxo = self.nodes[0].listunspent(103, 103)[0]
bech32_utxo = self.nodes[0].listunspent(102, 102)[0]
submitted_txids = []
self.log.info("Testing legacy UTXO")
submitted_txids.append(self.assert_tx_format_also_signed(legacy_utxo, segwit=False))
self.log.info("Testing p2sh UTXO")
submitted_txids.append(self.assert_tx_format_also_signed(p2sh_utxo, segwit=True))
self.log.info("Testing bech32 UTXO")
submitted_txids.append(self.assert_tx_format_also_signed(bech32_utxo, segwit=True))
blockhash = self.nodes[0].generate(1)[0]
hexblock = self.nodes[0].getblock(blockhash, 0)
block_details = self.nodes[0].getblock(blockhash, 2)
block = CBlock()
block.deserialize(BytesIO(hex_str_to_bytes(hexblock)))
assert len(block.vtx) == len(submitted_txids) + 1
assert_equal(len(block_details["tx"]), len(block.vtx))
for tx1, tx2 in zip(block.vtx[1:], block_details["tx"][1:]):
# tuples have no wildcard matching, so tx2's node-reported wtxid is reused for the first check
assert (tx1.rehash(), tx2["wtxid"]) in submitted_txids
assert (tx2["txid"], tx2["hash"]) in submitted_txids
assert (tx2["txid"], tx2["wtxid"]) in submitted_txids
block.rehash()
assert_equal(block.hash, self.nodes[0].getbestblockhash())
def test_coinbase_witness(self):
block = self.nodes[0].getnewblockhex()
block_struct = from_hex(CBlock(), block)
# Test vanilla block round-trip
self.nodes[0].testproposedblock(block_struct.serialize(with_witness=True).hex())
# Assert there's scriptWitness in the coinbase input that is the witness nonce and nothing else
assert_equal(block_struct.vtx[0].wit.vtxinwit[0].scriptWitness.stack, [b'\x00'*32])
assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof, b'')
assert_equal(block_struct.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof, b'')
assert_equal(block_struct.vtx[0].wit.vtxinwit[0].peginWitness.stack, [])
# Add extra witness that isn't covered by witness merkle root, make sure blocks are still valid
block_witness_stuffed = copy.deepcopy(block_struct)
block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchIssuanceAmountRangeproof = b'\x00'
assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, block_witness_stuffed.serialize(with_witness=True).hex())
block_witness_stuffed = copy.deepcopy(block_struct)
block_witness_stuffed.vtx[0].wit.vtxinwit[0].vchInflationKeysRangeproof = b'\x00'
assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, block_witness_stuffed.serialize(with_witness=True).hex())
block_witness_stuffed = copy.deepcopy(block_struct)
# Let's blow out block weight limit by adding 4MW here
block_witness_stuffed.vtx[0].wit.vtxinwit[0].peginWitness.stack = [b'\x00'*4000000]
assert_raises_rpc_error(-25, "bad-cb-witness", self.nodes[0].testproposedblock, block_witness_stuffed.serialize(with_witness=True).hex())
# Test that node isn't blinded to the block
# Previously an over-stuffed block >4MW would have been marked permanently bad
# as it already passes witness merkle and regular merkle root checks
block_height = self.nodes[0].getblockcount()
assert_equal(self.nodes[0].submitblock(block_witness_stuffed.serialize(with_witness=True).hex()), "bad-cb-witness")
assert_equal(block_height, self.nodes[0].getblockcount())
assert_equal(self.nodes[0].submitblock(block_struct.serialize(with_witness=True).hex()), None)
assert_equal(block_height+1, self.nodes[0].getblockcount())
# New block since we used the first one
block_struct = from_hex(CBlock(), self.nodes[0].getnewblockhex())
block_witness_stuffed = copy.deepcopy(block_struct)
# Add extra witness data that is covered by witness merkle root, make sure invalid
assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof, b'')
assert_equal(block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof, b'')
block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchRangeproof = b'\x00'*100000
block_witness_stuffed.vtx[0].wit.vtxoutwit[0].vchSurjectionproof = b'\x00'*100000
assert_raises_rpc_error(-25, "bad-witness-merkle-match", self.nodes[0].testproposedblock, block_witness_stuffed.serialize(with_witness=True).hex())
witness_root_hex = block_witness_stuffed.calc_witness_merkle_root()
witness_root = uint256_from_str(hex_str_to_bytes(witness_root_hex)[::-1])
block_witness_stuffed.vtx[0].vout[-1] = CTxOut(0, get_witness_script(witness_root, 0))
block_witness_stuffed.vtx[0].rehash()
block_witness_stuffed.hashMerkleRoot = block_witness_stuffed.calc_merkle_root()
block_witness_stuffed.rehash()
assert_raises_rpc_error(-25, "bad-cb-amount", self.nodes[0].testproposedblock, block_witness_stuffed.serialize(with_witness=True).hex())
assert_greater_than(len(block_witness_stuffed.serialize(with_witness=True).hex()), 100000*4) # Make sure the witness data is actually serialized
# A CTxIn that always serializes the asset issuance, even for coinbases.
class AlwaysIssuanceCTxIn(CTxIn):
def serialize(self):
r = b''
outpoint = COutPoint()
outpoint.hash = self.prevout.hash
outpoint.n = self.prevout.n
outpoint.n |= OUTPOINT_ISSUANCE_FLAG
r += outpoint.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
r += self.assetIssuance.serialize()
return r
# Test that issuance inputs in coinbase don't survive a serialization round-trip
# (even though this can't cause issuance to occur either way due to VerifyCoinbaseAmount semantics)
block_witness_stuffed = copy.deepcopy(block_struct)
coinbase_orig = copy.deepcopy(block_witness_stuffed.vtx[0].vin[0])
coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize())
block_witness_stuffed.vtx[0].vin[0] = AlwaysIssuanceCTxIn()
block_witness_stuffed.vtx[0].vin[0].prevout = coinbase_orig.prevout
block_witness_stuffed.vtx[0].vin[0].scriptSig = coinbase_orig.scriptSig
block_witness_stuffed.vtx[0].vin[0].nSequence = coinbase_orig.nSequence
block_witness_stuffed.vtx[0].vin[0].assetIssuance.nAmount.setToAmount(1)
bad_coinbase_ser_size = len(block_witness_stuffed.vtx[0].vin[0].serialize())
# 32 (asset blinding nonce) + 32 (asset entropy) + 9 (explicit amount) + 1 (null inflation keys) bytes should be serialized for the assetIssuance field
assert_equal(bad_coinbase_ser_size, coinbase_ser_size+32+32+9+1)
assert not block_witness_stuffed.vtx[0].vin[0].assetIssuance.isNull()
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, block_witness_stuffed.vtx[0].serialize().hex())
def run_test(self):
util.node_fastmerkle = self.nodes[0]
self.test_coinbase_witness()
self.test_transaction_serialization()
if __name__ == '__main__':
TxWitnessTest().main()
|
22f24de15fdf7544484ba020a32aa5c0eca7b128
|
da1500e0d3040497614d5327d2461a22e934b4d8
|
/starboard/tools/package.py
|
246eb1d61a22d8c45f96510142d39160fcf25b5f
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
youtube/cobalt
|
34085fc93972ebe05b988b15410e99845efd1968
|
acefdaaadd3ef46f10f63d1acae2259e4024d383
|
refs/heads/main
| 2023-09-01T13:09:47.225174
| 2023-09-01T08:54:54
| 2023-09-01T08:54:54
| 50,049,789
| 169
| 80
|
BSD-3-Clause
| 2023-09-14T21:50:50
| 2016-01-20T18:11:34
| null |
UTF-8
|
Python
| false
| false
| 6,574
|
py
|
package.py
|
#
# Copyright 2016 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Classes related to building and installing platform-specific packages."""
import abc
import importlib
import logging
import os
from starboard.build.platforms import PLATFORMS
def _ImportModule(path, module_name=None):
"""Convert a filepath to a python-style import path and import it.
Returns:
A module loaded with importlib.import_module
Throws:
ImportError if the module fails to be loaded.
"""
components = os.path.normpath(path).split(os.sep)
if module_name:
components.append(module_name)
full_package_name = '.'.join(components)
return importlib.import_module(full_package_name)
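# For illustration (the path below is hypothetical):
#   _ImportModule("starboard/linux/x64x11") imports "starboard.linux.x64x11",
#   and _ImportModule("starboard/linux/x64x11", "package") imports
#   "starboard.linux.x64x11.package".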
def _GetPackageClass(platform_name, platform_path):
"""
Loads the package class associated with the given platform.
"""
try:
module = _ImportModule(platform_path)
except ImportError as e:
logging.debug('Failed to import module for platform %s with error: %s',
platform_name, e)
return None
if not hasattr(module, 'Package'):
return None
return module.Package
def _GetPlatformInfosDict():
"""Find Starboard ports that support building packages.
Returns:
A dict of [platform_name, Class] where Class inherits from PackageBase
"""
packager_modules = {}
for platform_name, platform_path in PLATFORMS.items():
# From the relative path to the starboard directory, construct a full
# python package name and attempt to load it.
package_class = _GetPackageClass(platform_name, platform_path)
if not package_class:
continue
# Populate a mapping from platform name to the module containing the
# Package class.
try:
for supported_name in package_class.SupportedPlatforms():
if supported_name in packager_modules:
logging.warning('Packager for %s is defined in multiple modules.',
supported_name)
else:
packager_modules[supported_name] = platform_path
except Exception as e: # pylint: disable=broad-except
# Catch all exceptions to avoid an error in one platform's Packager
# halting the script for other platforms' packagers.
logging.warning(
'Exception iterating supported platform for platform '
'%s: %s.', platform_name, e)
return packager_modules
class PackageBase(object):
"""Represents a build package that exists on the local filesystem."""
__metaclass__ = abc.ABCMeta
def __init__(self, source_dir, output_dir):
"""Initialize common paths for building Packages.
Args:
source_dir: The directory containing the application to be packaged.
output_dir: The directory into which the package should be placed.
"""
self.source_dir = source_dir
self.output_dir = output_dir
@abc.abstractmethod
def Install(self, targets=None):
"""Install the package to the specified list of targets.
Args:
targets: A list of targets to install the package to, or None on platforms
that support installing to a default target. This method can be
overridden to implement platform-specific steps to install the package
for that platform.
"""
del targets
@classmethod
def AddArguments(cls, arg_parser):
"""Add platform-specific command-line arguments to the ArgumentParser.
Platforms that require additional arguments to configure building a package
can override this method and add them to |arg_parser|.
Args:
arg_parser: An ArgumentParser object.
"""
del cls, arg_parser
@classmethod
def ExtractArguments(cls, options):
"""Extract arguments from an ArgumentParser's namespace object.
Platforms that add additional arguments can override this method to extract
the options and add them to a dict that will be passed to the Package
constructor.
Args:
options: A namespace object returned from ArgumentParser.parse_args
Returns:
A dict of kwargs to be passed to the Package constructor.
"""
del cls, options
return {}
class Packager(object):
"""Top level class for building a package."""
def __init__(self):
self.platform_infos = _GetPlatformInfosDict()
def SupportedPlatforms(self):
"""Get a list of platforms for which a package can be built."""
return self.platform_infos.keys()
def GetPlatformInfo(self, platform_name):
return self.platform_infos.get(platform_name, None)
def GetApplicationPackageInfo(self, platform_name, application_name):
"""Get application-specific packaging information."""
platform_path = self.GetPlatformInfo(platform_name)
try:
return _ImportModule(platform_path, f'{application_name}.package')
except ImportError as e:
# No package parameters specified for this platform.
logging.debug('Failed to import cobalt.package: %s', e)
return None
def BuildPackage(self, platform_name, source_dir, output_dir, **kwargs):
"""Build a package for the specified platform.
Args:
platform_name: The platform for which a package should be built.
source_dir: The directory containing the application to be packaged.
output_dir: The directory into which the package files should be placed.
**kwargs: Platform-specific arguments.
Returns:
A PackageBase instance.
"""
package_class = _GetPackageClass(platform_name,
self.platform_infos[platform_name])
return package_class(source_dir=source_dir, output_dir=output_dir, **kwargs)
def AddPlatformArguments(self, platform_name, argparser):
package_class = _GetPackageClass(platform_name,
self.platform_infos[platform_name])
package_class.AddArguments(argparser)
def ExtractPlatformArguments(self, platform_name, options):
package_class = _GetPackageClass(platform_name,
self.platform_infos[platform_name])
return package_class.ExtractArguments(options)
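# Minimal usage sketch (the platform name below is a hypothetical example):
#
#   packager = Packager()
#   if 'linux-x64x11' in packager.SupportedPlatforms():
#       package = packager.BuildPackage('linux-x64x11', source_dir, output_dir)
#       package.Install()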
|
798f9d29d215b33de4a507cb56aff89b03c302e1
|
ec075d46cffdf5046164dcb31a1e0478848c6088
|
/test/regression/WaveInBar_MultiBlock/WaveInBar_MultiBlock_PostProcess.py
|
f06e191beeb13da001b5e3fab2b7d6fba43eb58a
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
peridigm/peridigm
|
dc42571181faadd337fd14003ca1101cb60988ef
|
82c6ad8f8152aaa3c8a2b4e7b31f82b8e1c6f28c
|
refs/heads/master
| 2023-08-18T09:47:15.926358
| 2023-08-03T21:30:18
| 2023-08-03T21:30:18
| 49,228,024
| 172
| 146
|
NOASSERTION
| 2023-08-03T21:29:55
| 2016-01-07T20:11:19
|
C++
|
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
WaveInBar_MultiBlock_PostProcess.py
|
#! /usr/bin/env python
def verify_results(result=True):
csv_file_name = "WaveInBar_MultiBlock.csv"
csv_file = open(csv_file_name)
lines = csv_file.readlines()
csv_file.close()
vals = lines[-2].split()
time = float(vals[0].strip(","))
Volume_Block_1 = float(vals[1].strip(","))
Volume_Block_2 = float(vals[2].strip(","))
Volume_Block_3 = float(vals[3].strip(","))
Volume_Block_4 = float(vals[4].strip(","))
Volume_Block_5 = float(vals[5].strip(","))
Volume_Block_6 = float(vals[6].strip(","))
Volume_Block_7 = float(vals[7].strip(","))
Volume_Block_8 = float(vals[8].strip(","))
Volume_Node_Set_11 = float(vals[9].strip(","))
Volume_Node_Set_12 = float(vals[10].strip(","))
Volume_Node_Set_13 = float(vals[11].strip(","))
Volume_Node_Set_14 = float(vals[12].strip(","))
Volume_Node_Set_15 = float(vals[13].strip(","))
Volume_Node_Set_16 = float(vals[14].strip(","))
Volume_Node_Set_17 = float(vals[15].strip(","))
Volume_Node_Set_18 = float(vals[16].strip(","))
Volume_Node_Set_20 = float(vals[17].strip(","))
Volume_Node_Set_30 = float(vals[18].strip(","))
Max_Volume_Node_Set_30 = float(vals[19].strip(","))
Min_Volume_Node_Set_30 = float(vals[20].strip(","))
tol = 1.0e-12
element_vol = 8.0e-9
num_elem_block_1 = 5
num_elem_block_2 = 5
num_elem_block_3 = 125
num_elem_block_4 = 265
num_elem_block_5 = 50
num_elem_block_6 = 27
num_elem_block_7 = 1
num_elem_block_8 = 47
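# Sanity check on the expected totals: 5+5+125+265+50+27+1+47 = 525 elements,
# so the whole-model volume checked below should be 525 * 8.0e-9 = 4.2e-6.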
# Each block is also a node set
# Make sure the Block_Data and Node_Set_Data results match for each Block/Node Set pair
print("\nChecking consistency of Block_Data and Node_Set_Data values...\n")
truth_val = num_elem_block_1 * element_vol
print("Block 1 results: {} should equal {} and should equal {}".format(Volume_Block_1, Volume_Node_Set_11, truth_val))
if abs(Volume_Block_1 - Volume_Node_Set_11) > tol:
result = False
if abs(Volume_Block_1 - truth_val) > tol:
result = False
truth_val = num_elem_block_2 * element_vol
print("Block 2 results: {} should equal {} and should equal {}".format(Volume_Block_2, Volume_Node_Set_12, truth_val))
if abs(Volume_Block_2 - Volume_Node_Set_12) > tol:
result = False
if abs(Volume_Block_2 - truth_val) > tol:
result = False
truth_val = num_elem_block_3 * element_vol
print("Block 3 results: {} should equal {} and should equal {}".format(Volume_Block_3, Volume_Node_Set_13, truth_val))
if abs(Volume_Block_3 - Volume_Node_Set_13) > tol:
result = False
if abs(Volume_Block_3 - truth_val) > tol:
result = False
truth_val = num_elem_block_4 * element_vol
print("Block 4 results: {} should equal {} and should equal {}".format(Volume_Block_4, Volume_Node_Set_14, truth_val))
if abs(Volume_Block_4 - Volume_Node_Set_14) > tol:
result = False
if abs(Volume_Block_4 - truth_val) > tol:
result = False
truth_val = num_elem_block_5 * element_vol
print("Block 5 results: {} should equal {} and should equal {}".format(Volume_Block_5, Volume_Node_Set_15, truth_val))
if abs(Volume_Block_5 - Volume_Node_Set_15) > tol:
result = False
if abs(Volume_Block_5 - truth_val) > tol:
result = False
truth_val = num_elem_block_6 * element_vol
print("Block 6 results: {} should equal {} and should equal {}".format(Volume_Block_6, Volume_Node_Set_16, truth_val))
if abs(Volume_Block_6 - Volume_Node_Set_16) > tol:
result = False
if abs(Volume_Block_6 - truth_val) > tol:
result = False
truth_val = num_elem_block_7 * element_vol
print("Block 7 results: {} should equal {} and should equal {}".format(Volume_Block_7, Volume_Node_Set_17, truth_val))
if abs(Volume_Block_7 - Volume_Node_Set_17) > tol:
result = False
if abs(Volume_Block_7 - truth_val) > tol:
result = False
truth_val = num_elem_block_8 * element_vol
print("Block 8 results: {} should equal {} and should equal {}".format(Volume_Block_8, Volume_Node_Set_18, truth_val))
if abs(Volume_Block_8 - Volume_Node_Set_18) > tol:
result = False
if abs(Volume_Block_8 - truth_val) > tol:
result = False
# Node Set 20 should equal Block_1 + Block_2 + Block_3
block_val = Volume_Block_1 + Volume_Block_2 + Volume_Block_3
print("\nNode Set 20 results: {} should equal {}".format(block_val, Volume_Node_Set_20))
if abs(block_val - Volume_Node_Set_20) > tol:
result = False
# Node Set 30 is the entire model
# The volume should be 4.2e-6
block_val = Volume_Block_1 + Volume_Block_2 + Volume_Block_3 + Volume_Block_4 + Volume_Block_5 + Volume_Block_6 + Volume_Block_7 + Volume_Block_8
truth_val = 4.2e-6
print("Node Set 30 results: {} should equal {} and should equal {}".format(block_val, Volume_Node_Set_30, truth_val))
if abs(block_val - Volume_Node_Set_30) > tol:
result = False
if abs(truth_val - Volume_Node_Set_30) > tol:
result = False
# The minimum volume over the entire model should be 8.0e-9
truth_val = 8.0e-9
print("\nMin element volume: {} should equal {}".format(Min_Volume_Node_Set_30, truth_val))
if abs(truth_val - Min_Volume_Node_Set_30) > tol:
result = False
# The maximum volume over the entire model should be 8.0e-9
truth_val = 8.0e-9
print("Max element volume: {} should equal {}".format(Max_Volume_Node_Set_30, truth_val))
if abs(truth_val - Max_Volume_Node_Set_30) > tol:
result = False
return result
def main():
result = True
result = verify_results(result)
if result:
print("\nTest Passed.\n")
else:
print("\nTest FAILED.\n")
if __name__ == "__main__":
main()
|
651fe25149d71d075fe0a80276c9fddbb84b28dc
|
e8b04bef9aa1ac8e2c109dd315f133c8f4d28ae6
|
/projects/samples/environments/factory/controllers/screw_controller/screw_controller.py
|
7aaa1a73d649248a980da2f37a71ae7b8b709e97
|
[
"Apache-2.0"
] |
permissive
|
cyberbotics/webots
|
f075dacf4067e8dcebbfd89e8690df8525f6d745
|
8aba6eaae76989facf3442305c8089d3cc366bcf
|
refs/heads/master
| 2023-08-31T09:41:13.205940
| 2023-08-18T10:48:30
| 2023-08-18T10:48:30
| 156,228,018
| 2,495
| 1,525
|
Apache-2.0
| 2023-08-28T16:30:33
| 2018-11-05T14:09:10
|
C++
|
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
screw_controller.py
|
# Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""screw_controller controller."""
from controller import Robot
robot = Robot()
timestep = int(robot.getBasicTimeStep())
deviceNames = []
for i in range(robot.getNumberOfDevices()):
deviceNames.append(robot.getDeviceByIndex(i).getName())
numberOfScrews = 0
motors = []
sensors = []
previousPosition = []
for i in range(robot.getNumberOfDevices()):
linearMotorName = 'linear motor %d' % i
positionSensorName = 'position sensor %d' % i
if linearMotorName in deviceNames and positionSensorName in deviceNames:
numberOfScrews += 1
motors.append(robot.getDevice(linearMotorName))
sensors.append(robot.getDevice(positionSensorName))
previousPosition.append(0)
else:
break
for sensor in sensors:
sensor.enable(timestep)
while robot.step(timestep) != -1:
for i in range(numberOfScrews):
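# Couple the screw's translation to its measured rotation: the linear
# target is the sensor value scaled by 0.001 (i.e. 1 mm per radian for a
# rotational sensor), then clamped to the motor's position limits below.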
targetPosition = sensors[i].getValue() * 0.001
maxPosition = motors[i].getMaxPosition()
minPosition = motors[i].getMinPosition()
targetPosition = max(min(targetPosition, maxPosition), minPosition)
if previousPosition[i] != targetPosition:
previousPosition[i] = targetPosition
motors[i].setPosition(targetPosition)
|
a4e156fcd37d4b82686889954719a218528a492a
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/devil/devil/utils/lazy/weak_constant.py
|
4110193752ac6cd94bacea5ac015f2e41c94db16
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
weak_constant.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
from devil.utils import reraiser_thread
from devil.utils import timeout_retry
class WeakConstant(object):
"""A thread-safe, lazily initialized object.
This does not support modification after initialization. The intended
constant nature of the object is not enforced, though, hence the "weak".
"""
def __init__(self, initializer):
self._initialized = threading.Event()
self._initializer = initializer
self._lock = threading.Lock()
self._val = None
def read(self):
"""Get the object, creating it if necessary."""
if self._initialized.is_set():
return self._val
with self._lock:
if not self._initialized.is_set():
# We initialize the value on a separate thread to protect
# from holding self._lock indefinitely in the event that
# self._initializer hangs.
initializer_thread = reraiser_thread.ReraiserThread(self._initializer)
initializer_thread.start()
timeout_retry.WaitFor(
    lambda: initializer_thread.join(1) or not initializer_thread.isAlive(),
    wait_period=0)
self._val = initializer_thread.GetReturnValue()
self._initialized.set()
return self._val
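# Minimal usage sketch (the names below are illustrative, not from this module):
#
#   def _query_device():
#       ...  # expensive or potentially hanging initialization
#
#   DEVICE_INFO = WeakConstant(_query_device)
#   DEVICE_INFO.read()  # first call runs _query_device on a helper thread
#   DEVICE_INFO.read()  # later calls return the cached value immediately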
|
1b5fbeed5b0dd225553b76db0b9236806d1bf95f
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-sms/huaweicloudsdksms/v3/model/show_overview_response.py
|
06c659f2b358a551d6299f5797321c96f63c85b4
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,392
|
py
|
show_overview_response.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowOverviewResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'waiting': 'int',
'replicating': 'int',
'syncing': 'int',
'other': 'int'
}
attribute_map = {
'waiting': 'waiting',
'replicating': 'replicating',
'syncing': 'syncing',
'other': 'other'
}
def __init__(self, waiting=None, replicating=None, syncing=None, other=None):
"""ShowOverviewResponse
The model defined in huaweicloud sdk
:param waiting: Waiting
:type waiting: int
:param replicating: Replicating
:type replicating: int
:param syncing: Syncing
:type syncing: int
:param other: Other
:type other: int
"""
super(ShowOverviewResponse, self).__init__()
self._waiting = None
self._replicating = None
self._syncing = None
self._other = None
self.discriminator = None
if waiting is not None:
self.waiting = waiting
if replicating is not None:
self.replicating = replicating
if syncing is not None:
self.syncing = syncing
if other is not None:
self.other = other
@property
def waiting(self):
"""Gets the waiting of this ShowOverviewResponse.
Waiting
:return: The waiting of this ShowOverviewResponse.
:rtype: int
"""
return self._waiting
@waiting.setter
def waiting(self, waiting):
"""Sets the waiting of this ShowOverviewResponse.
Waiting
:param waiting: The waiting of this ShowOverviewResponse.
:type waiting: int
"""
self._waiting = waiting
@property
def replicating(self):
"""Gets the replicating of this ShowOverviewResponse.
Replicating
:return: The replicating of this ShowOverviewResponse.
:rtype: int
"""
return self._replicating
@replicating.setter
def replicating(self, replicating):
"""Sets the replicating of this ShowOverviewResponse.
Replicating
:param replicating: The replicating of this ShowOverviewResponse.
:type replicating: int
"""
self._replicating = replicating
@property
def syncing(self):
"""Gets the syncing of this ShowOverviewResponse.
Syncing
:return: The syncing of this ShowOverviewResponse.
:rtype: int
"""
return self._syncing
@syncing.setter
def syncing(self, syncing):
"""Sets the syncing of this ShowOverviewResponse.
Syncing
:param syncing: The syncing of this ShowOverviewResponse.
:type syncing: int
"""
self._syncing = syncing
@property
def other(self):
"""Gets the other of this ShowOverviewResponse.
Other
:return: The other of this ShowOverviewResponse.
:rtype: int
"""
return self._other
@other.setter
def other(self, other):
"""Sets the other of this ShowOverviewResponse.
Other
:param other: The other of this ShowOverviewResponse.
:type other: int
"""
self._other = other
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowOverviewResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
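# Example with made-up values: ShowOverviewResponse(waiting=3, replicating=5,
# syncing=2, other=0).to_dict() returns
# {'waiting': 3, 'replicating': 5, 'syncing': 2, 'other': 0}.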
|
cbc9a08a26da9c40eb11ce5947361c3ccd221644
|
8f7320c10f2c5fc8475753dc5256d1a66067e15c
|
/pykeops/pykeops/examples/numpy/plot_test_invkernel_numpy.py
|
6187ffacab3f732ec06cc72c1ae2e1e587aec5b8
|
[
"MIT"
] |
permissive
|
getkeops/keops
|
947a5409710379893c6c7a46d0a256133a6d8aff
|
52ed22a7fbbcf4bd02dbdf5dc2b00bf79cceddf5
|
refs/heads/main
| 2023-08-25T12:44:22.092925
| 2023-08-09T13:33:58
| 2023-08-09T13:33:58
| 182,054,091
| 910
| 69
|
MIT
| 2023-09-03T20:35:44
| 2019-04-18T09:04:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,035
|
py
|
plot_test_invkernel_numpy.py
|
"""
KernelSolve reduction
===========================
Let's see how to solve discrete deconvolution problems
using the **conjugate gradient solver** provided by
:class:`numpy.KernelSolve <pykeops.numpy.KernelSolve>`.
"""
###############################################################################
# Setup
# ----------------
#
# Standard imports:
#
import numpy as np
import time
import matplotlib.pyplot as plt
from pykeops.numpy import KernelSolve
import pykeops.config
###############################################################################
# Define our dataset:
#
N = 5000 if pykeops.config.gpu_available else 500 # Number of points
D = 2 # Dimension of the ambient space
Dv = 2 # Dimension of the vectors (= number of linear problems to solve)
sigma = 0.1 # Radius of our RBF kernel
dtype = "float32"
x = np.random.rand(N, D).astype(dtype)
b = np.random.rand(N, Dv).astype(dtype)
g = np.array([0.5 / sigma**2]).astype(dtype) # Parameter of the Gaussian RBF kernel
###############################################################################
# KeOps kernel
# ---------------
#
# Define a Gaussian RBF kernel:
#
formula = "Exp(- g * SqDist(x,y)) * b"
aliases = [
"x = Vi(" + str(D) + ")", # First arg: i-variable of size D
"y = Vj(" + str(D) + ")", # Second arg: j-variable of size D
"b = Vj(" + str(Dv) + ")", # Third arg: j-variable of size Dv
"g = Pm(1)",
] # Fourth arg: scalar parameter
###############################################################################
# Define the inverse kernel operation, with a ridge regularization **alpha**:
#
alpha = 0.01
Kinv = KernelSolve(formula, aliases, "b", axis=1, dtype=dtype)
###############################################################################
# .. note::
# This operator uses a conjugate gradient solver and assumes
# that **formula** defines a **symmetric**, positive-definite
# **linear** reduction with respect to the alias ``"b"``
# specified through the third argument.
#
# Apply our solver on arbitrary point clouds:
#
# Warm up the GPU so the timing below excludes compilation overhead
Kinv(x, x, b, g, alpha=alpha)
print("Solving a Gaussian linear system, with {} points in dimension {}.".format(N, D))
start = time.time()
c = Kinv(x, x, b, g, alpha=alpha)
end = time.time()
print("Timing (KeOps implementation):", round(end - start, 5), "s")
###############################################################################
# Compare with a straightforward Numpy implementation:
#
start = time.time()
K_xx = alpha * np.eye(N) + np.exp(
-g * np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=2)
)
c_np = np.linalg.solve(K_xx, b)
end = time.time()
print("Timing (Numpy implementation):", round(end - start, 5), "s")
print("Relative error = ", np.linalg.norm(c - c_np) / np.linalg.norm(c_np))
# Plot the results next to each other:
for i in range(Dv):
plt.subplot(Dv, 1, i + 1)
plt.plot(c[:40, i], "-", label="KeOps")
plt.plot(c_np[:40, i], "--", label="NumPy")
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
|
28b653c1eb2f6048655218dbbbe082763d71db28
|
88fe84c79e5740b4aaa068df6a70e35841a68d25
|
/src/awkward/_nplikes/typetracer.py
|
63f753204009a77228f4f026f223a2f2c5394575
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-hep/awkward
|
176f56182a936270e163eab92ea18368c2bdc1be
|
519bba6ed2eec4e227994d2fd1a62b2a51f15e20
|
refs/heads/main
| 2023-09-02T20:19:10.175088
| 2023-09-01T20:13:25
| 2023-09-01T20:13:25
| 202,413,762
| 208
| 22
|
BSD-3-Clause
| 2023-09-14T17:19:29
| 2019-08-14T19:32:12
|
Python
|
UTF-8
|
Python
| false
| false
| 48,734
|
py
|
typetracer.py
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import annotations
from numbers import Number
import numpy
import awkward as ak
from awkward._nplikes.dispatch import register_nplike
from awkward._nplikes.numpylike import (
ArrayLike,
IndexType,
NumpyLike,
NumpyMetadata,
UniqueAllResult,
)
from awkward._nplikes.placeholder import PlaceholderArray
from awkward._nplikes.shape import ShapeItem, unknown_length
from awkward._operators import NDArrayOperatorsMixin
from awkward._regularize import is_integer, is_non_string_like_sequence
from awkward._typing import (
Any,
Final,
Literal,
Self,
SupportsIndex,
TypeVar,
)
np = NumpyMetadata.instance()
def is_unknown_length(array: Any) -> bool:
return array is unknown_length
def is_unknown_scalar(array: Any) -> bool:
return isinstance(array, TypeTracerArray) and array.ndim == 0
def is_unknown_integer(array: Any) -> bool:
return is_unknown_scalar(array) and np.issubdtype(array.dtype, np.integer)
def is_unknown_array(array: Any) -> bool:
return isinstance(array, TypeTracerArray) and array.ndim > 0
T = TypeVar("T")
S = TypeVar("S")
def ensure_known_scalar(value: T, default: S) -> T | S:
assert not is_unknown_scalar(default)
return default if is_unknown_scalar(value) else value
def _emptyarray(x):
if is_unknown_scalar(x):
return numpy.empty(0, x._dtype)
elif hasattr(x, "dtype"):
return numpy.empty(0, x.dtype)
else:
return numpy.empty(0, numpy.array(x).dtype)
class MaybeNone:
def __init__(self, content):
self._content = content
@property
def content(self):
return self._content
def __eq__(self, other):
if isinstance(other, MaybeNone):
return self._content == other._content
else:
return False
def __repr__(self):
return f"MaybeNone({self._content!r})"
def __str__(self):
return f"?{self._content}"
class OneOf:
def __init__(self, contents):
self._contents = contents
@property
def contents(self):
return self._contents
def __eq__(self, other):
if isinstance(other, OneOf):
return set(self._contents) == set(other._contents)
else:
return False
def __repr__(self):
return f"OneOf({self._contents!r})"
def __str__(self):
return (
f"oneof-{'-'.join(str(x).replace('unknown-', '') for x in self._contents)}"
)
class TypeTracerReport:
def __init__(self):
# maybe the order will be useful information
self._shape_touched_set = set()
self._shape_touched = []
self._data_touched_set = set()
self._data_touched = []
def __repr__(self):
return f"<TypeTracerReport with {len(self._shape_touched)} shape_touched, {len(self._data_touched)} data_touched>"
@property
def shape_touched(self):
return self._shape_touched
@property
def data_touched(self):
return self._data_touched
def touch_shape(self, label):
if label not in self._shape_touched_set:
self._shape_touched_set.add(label)
self._shape_touched.append(label)
def touch_data(self, label):
if label not in self._data_touched_set:
# touching data implies that the shape will be touched as well
# implemented here so that the codebase doesn't need to be filled
# with calls to both methods everywhere
self._shape_touched_set.add(label)
self._shape_touched.append(label)
self._data_touched_set.add(label)
self._data_touched.append(label)
class TypeTracerArray(NDArrayOperatorsMixin, ArrayLike):
_dtype: numpy.dtype
_shape: tuple[ShapeItem, ...]
def __new__(cls, *args, **kwargs):
raise TypeError(
"internal_error: the `TypeTracer` nplike's `TypeTracerArray` object should never be directly instantiated"
)
def __reduce__(self):
# Fix pickling, as we ban `__new__`
return object.__new__, (type(self),), vars(self)
@classmethod
def _new(
cls,
dtype: np.dtype,
shape: tuple[ShapeItem, ...],
form_key: str | None = None,
report: TypeTracerReport | None = None,
):
self = super().__new__(cls)
self.form_key = form_key
self.report = report
if not isinstance(shape, tuple):
raise TypeError("typetracer shape must be a tuple")
if any(is_unknown_scalar(x) for x in shape):
raise TypeError("typetracer shape must be integers or unknown-length")
self._shape = shape
self._dtype = np.dtype(dtype)
return self
def __repr__(self):
dtype = repr(self._dtype)
if self.shape is None:
shape = ""
else:
shape = ", shape=" + repr(self._shape)
return f"TypeTracerArray({dtype}{shape})"
def __str__(self):
if self.ndim == 0:
return "##"
else:
return repr(self)
@property
def T(self) -> Self:
return TypeTracerArray._new(
self.dtype, self._shape[::-1], self.form_key, self.report
)
@property
def dtype(self) -> np.dtype:
return self._dtype
@property
def size(self) -> ShapeItem:
size = 1
for item in self._shape:
size *= item
return size
@property
def shape(self) -> tuple[ShapeItem, ...]:
self.touch_shape()
return self._shape
@property
def form_key(self) -> str | None:
return self._form_key
@form_key.setter
def form_key(self, value: str | None):
if value is not None and not isinstance(value, str):
raise TypeError("form_key must be None or a string")
self._form_key = value
@property
def report(self) -> TypeTracerReport | None:
return self._report
@report.setter
def report(self, value: TypeTracerReport | None):
if value is not None and not isinstance(value, TypeTracerReport):
raise TypeError("report must be None or a TypeTracerReport")
self._report = value
def touch_shape(self):
if self._report is not None:
self._report.touch_shape(self._form_key)
def touch_data(self):
if self._report is not None:
self._report.touch_data(self._form_key)
@property
def nplike(self) -> TypeTracer:
return TypeTracer.instance()
@property
def ndim(self) -> int:
self.touch_shape()
return len(self._shape)
@property
def nbytes(self) -> ShapeItem:
return self.size * self._dtype.itemsize
def view(self, dtype: np.dtype) -> Self:
dtype = np.dtype(dtype)
if len(self._shape) >= 1:
last, remainder = divmod(
self._shape[-1] * self._dtype.itemsize, dtype.itemsize
)
if remainder is not unknown_length and remainder != 0:
raise ValueError(
"new size of array with larger dtype must be a "
"divisor of the total size in bytes (of the last axis of the array)"
)
shape = self._shape[:-1] + (last,)
else:
shape = self._shape
return self._new(
dtype, shape=shape, form_key=self._form_key, report=self._report
)
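# Illustration (shapes assumed): viewing a float64 tracer of shape (3, 4)
# as int32 rescales the last axis by the itemsize ratio 8/4, giving (3, 8).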
def forget_length(self) -> Self:
return self._new(
self._dtype,
(unknown_length,) + self._shape[1:],
self._form_key,
self._report,
)
def __iter__(self):
raise AssertionError(
"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array"
)
def __array__(self, dtype=None):
raise AssertionError(
"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array"
)
class _CTypes:
data = 0
@property
def ctypes(self):
return self._CTypes
def __len__(self):
raise AssertionError(
"bug in Awkward Array: attempt to get length of a TypeTracerArray"
)
def __getitem__(
self,
key: SupportsIndex
| slice
| Ellipsis
| tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]
| ArrayLike,
) -> Self | int | float | bool | complex:
if not isinstance(key, tuple):
key = (key,)
# 1. Validate slice items
has_seen_ellipsis = 0
n_basic_non_ellipsis = 0
n_advanced = 0
for item in key:
# Basic indexing
if isinstance(item, (slice, int)) or is_unknown_integer(item):
n_basic_non_ellipsis += 1
# Advanced indexing
elif isinstance(item, TypeTracerArray) and (
np.issubdtype(item.dtype, np.integer)
or np.issubdtype(item.dtype, np.bool_)
):
n_advanced += 1
# Basic ellipsis
elif item is Ellipsis:
if not has_seen_ellipsis:
has_seen_ellipsis = True
else:
raise NotImplementedError(
"only one ellipsis value permitted for advanced index"
)
# Basic newaxis
elif item is np.newaxis:
pass
else:
raise NotImplementedError(
"only integer, unknown scalar, slice, ellipsis, or array indices are permitted"
)
n_dim_index = n_basic_non_ellipsis + n_advanced
if n_dim_index > self.ndim:
raise IndexError(
f"too many indices for array: array is {self.ndim}-dimensional, but {n_dim_index} were indexed"
)
# 2. Normalise Ellipsis and boolean arrays
key_parts = []
for item in key:
if item is Ellipsis:
# How many more dimensions do we have than the index provides
n_missing_dims = self.ndim - n_dim_index
key_parts.extend((slice(None),) * n_missing_dims)
elif is_unknown_array(item) and np.issubdtype(item, np.bool_):
key_parts.append(self.nplike.nonzero(item)[0])
else:
key_parts.append(item)
key = tuple(key_parts)
# 3. Apply Indexing
advanced_is_at_front = False
previous_item_is_basic = True
advanced_shapes = []
adjacent_advanced_shape = []
result_shape_parts = []
iter_shape = iter(self.shape)
for item in key:
# New axes don't reference existing dimensions
if item is np.newaxis:
result_shape_parts.append((1,))
previous_item_is_basic = True
# Otherwise, consume the dimension
else:
dimension_length = next(iter_shape)
# Advanced index
if n_advanced and (
isinstance(item, int)
or is_unknown_integer(item)
or is_unknown_array(item)
):
try_touch_data(item)
try_touch_data(self)
if is_unknown_scalar(item):
item = self.nplike.promote_scalar(item)
# If this is the first advanced index, insert the location
if not advanced_shapes:
result_shape_parts.append(adjacent_advanced_shape)
# If a previous item was basic and we have an advanced shape
# we have a split index
elif previous_item_is_basic:
advanced_is_at_front = True
advanced_shapes.append(item.shape)
previous_item_is_basic = False
# Slice
elif isinstance(item, slice):
(
start,
stop,
step,
slice_length,
) = self.nplike.derive_slice_for_length(item, dimension_length)
result_shape_parts.append((slice_length,))
previous_item_is_basic = True
# Integer
elif isinstance(item, int) or is_unknown_integer(item):
try_touch_data(item)
try_touch_data(self)
item = self.nplike.promote_scalar(item)
if is_unknown_length(dimension_length) or is_unknown_integer(item):
continue
if not 0 <= item < dimension_length:
raise NotImplementedError("integer index out of bounds")
advanced_shape = self.nplike.broadcast_shapes(*advanced_shapes)
if advanced_is_at_front:
result_shape_parts.insert(0, advanced_shape)
else:
adjacent_advanced_shape[:] = advanced_shape
broadcast_shape = tuple(i for p in result_shape_parts for i in p)
result_shape = broadcast_shape + tuple(iter_shape)
return self._new(
self._dtype,
result_shape,
self._form_key,
self._report,
)
def __setitem__(
self,
key: SupportsIndex
| slice
| Ellipsis
| tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]
| ArrayLike,
value: int | float | bool | complex | ArrayLike,
):
existing_value = self.__getitem__(key)
if isinstance(value, TypeTracerArray) and value.ndim > existing_value.ndim:
raise ValueError("cannot assign shape larger than destination")
def copy(self):
self.touch_data()
return self
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# raise ak._errors.wrap_error(
# RuntimeError(
# "TypeTracerArray objects should not be used directly with ufuncs"
# )
# )
kwargs.pop("out", None)
if method != "__call__" or len(inputs) == 0:
raise NotImplementedError
if len(kwargs) > 0:
raise ValueError("TypeTracerArray does not support kwargs for ufuncs")
return self.nplike._apply_ufunc(ufunc, *inputs)
def __bool__(self) -> bool:
raise RuntimeError("cannot realise an unknown value")
def __int__(self) -> int:
raise RuntimeError("cannot realise an unknown value")
def __index__(self) -> int:
raise RuntimeError("cannot realise an unknown value")
def _scalar_type_of(obj) -> numpy.dtype:
if is_unknown_scalar(obj):
return obj.dtype
else:
return numpy.obj2sctype(obj)
def try_touch_data(array):
if isinstance(array, TypeTracerArray):
array.touch_data()
def try_touch_shape(array):
if isinstance(array, TypeTracerArray):
array.touch_shape()
@register_nplike
class TypeTracer(NumpyLike):
known_data: Final = False
is_eager: Final = True
supports_structured_dtypes: Final = True
def _apply_ufunc(self, ufunc, *inputs):
for x in inputs:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
inputs = [x.content if isinstance(x, MaybeNone) else x for x in inputs]
broadcasted = self.broadcast_arrays(*inputs)
placeholders = [numpy.empty(0, x.dtype) for x in broadcasted]
result = ufunc(*placeholders)
if isinstance(result, numpy.ndarray):
return TypeTracerArray._new(result.dtype, shape=broadcasted[0].shape)
elif isinstance(result, tuple):
return (
TypeTracerArray._new(x.dtype, shape=b.shape)
for x, b in zip(result, broadcasted)
)
else:
raise TypeError
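# e.g. (illustrative) adding an int32 tracer of shape (3, 1) to a float64
# tracer of shape (4,) probes numpy with length-0 placeholder arrays and
# returns a float64 TypeTracerArray of the broadcast shape (3, 4).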
def _axis_is_valid(self, axis: int, ndim: int) -> bool:
if axis < 0:
axis = axis + ndim
return 0 <= axis < ndim
@property
def ma(self):
raise NotImplementedError
@property
def char(self):
raise NotImplementedError
@property
def ndarray(self):
return TypeTracerArray
############################ array creation
def asarray(
self,
obj,
*,
dtype: numpy.dtype | None = None,
copy: bool | None = None,
) -> TypeTracerArray:
assert not isinstance(obj, PlaceholderArray)
if isinstance(obj, ak.index.Index):
obj = obj.data
if isinstance(obj, TypeTracerArray):
form_key = obj._form_key
report = obj._report
if dtype is None:
return obj
elif dtype == obj.dtype:
return TypeTracerArray._new(
dtype, obj.shape, form_key=form_key, report=report
)
elif copy is False:
raise ValueError(
"asarray was called with copy=False for an array of a different dtype"
)
else:
try_touch_data(obj)
return TypeTracerArray._new(
dtype, obj.shape, form_key=form_key, report=report
)
else:
# Convert NumPy generics to scalars
if isinstance(obj, np.generic):
obj = numpy.asarray(obj)
# Support array-like objects
if hasattr(obj, "shape") and hasattr(obj, "dtype"):
if obj.dtype.kind == "S":
raise TypeError("TypeTracerArray cannot be created from strings")
elif copy is False and dtype != obj.dtype:
raise ValueError(
"asarray was called with copy=False for an array of a different dtype"
)
else:
return TypeTracerArray._new(obj.dtype, obj.shape)
# Python objects
elif isinstance(obj, (Number, bool)):
as_array = numpy.asarray(obj)
return TypeTracerArray._new(as_array.dtype, ())
elif is_non_string_like_sequence(obj):
shape = []
flat_items = []
has_seen_leaf = False
# DFS walk into sequence, construct shape, then validate
# remainder of the sequence against this shape.
def populate_shape_and_items(node, dim):
nonlocal has_seen_leaf
# If we've already computed the shape,
# ensure this item matches!
if has_seen_leaf:
if len(node) != shape[dim - 1]:
raise ValueError(
f"sequence at dimension {dim} does not match shape {shape[dim-1]}"
)
else:
shape.append(len(node))
if isinstance(node, TypeTracerArray):
raise AssertionError(
"typetracer arrays inside sequences not currently supported"
)
# Found leaf!
elif len(node) == 0 or not is_non_string_like_sequence(node[0]):
has_seen_leaf = True
flat_items.extend(
[
item.dtype if is_unknown_scalar(item) else item
for item in node
]
)
# Keep recursing!
else:
for child in node:
populate_shape_and_items(child, dim + 1)
populate_shape_and_items(obj, 1)
if dtype is None:
dtype = numpy.result_type(*flat_items)
return TypeTracerArray._new(dtype, shape=tuple(shape))
else:
raise TypeError
def ascontiguousarray(self, x: ArrayLike) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
return TypeTracerArray._new(
x.dtype, shape=x.shape, form_key=x.form_key, report=x.report
)
def frombuffer(
self, buffer, *, dtype: np.dtype | None = None, count: int = -1
) -> TypeTracerArray:
for x in (buffer, count):
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def from_dlpack(self, x: Any) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def zeros(
self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None
) -> TypeTracerArray:
if not isinstance(shape, tuple):
shape = (shape,)
return TypeTracerArray._new(dtype, shape)
def ones(
self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None
) -> TypeTracerArray:
if not isinstance(shape, tuple):
shape = (shape,)
return TypeTracerArray._new(dtype, shape)
def empty(
self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None
) -> TypeTracerArray:
if not isinstance(shape, tuple):
shape = (shape,)
return TypeTracerArray._new(dtype, shape)
def full(
self,
shape: ShapeItem | tuple[ShapeItem, ...],
fill_value,
*,
dtype: np.dtype | None = None,
) -> TypeTracerArray:
assert not isinstance(fill_value, PlaceholderArray)
if not isinstance(shape, tuple):
shape = (shape,)
dtype = _scalar_type_of(fill_value) if dtype is None else dtype
return TypeTracerArray._new(dtype, shape)
def zeros_like(
self, x: ArrayLike, *, dtype: np.dtype | None = None
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_shape(x)
if is_unknown_scalar(x):
return TypeTracerArray._new(dtype or x.dtype, shape=())
else:
return TypeTracerArray._new(dtype or x.dtype, shape=x.shape)
def ones_like(
self, x: ArrayLike, *, dtype: np.dtype | None = None
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_shape(x)
return self.zeros_like(x, dtype=dtype)
def full_like(
self, x: ArrayLike, fill_value, *, dtype: np.dtype | None = None
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_shape(x)
return self.zeros_like(x, dtype=dtype)
def arange(
self,
start: float | int,
stop: float | int | None = None,
step: float | int = 1,
*,
dtype: np.dtype | None = None,
) -> TypeTracerArray:
assert not isinstance(start, PlaceholderArray)
assert not isinstance(stop, PlaceholderArray)
assert not isinstance(step, PlaceholderArray)
try_touch_data(start)
try_touch_data(stop)
try_touch_data(step)
if stop is None:
start, stop = 0, start
if is_integer(start) and is_integer(stop) and is_integer(step):
length = max(0, (stop - start + (step - (1 if step > 0 else -1))) // step)
else:
length = unknown_length
default_int_type = np.int64 if (ak._util.win or ak._util.bits32) else np.int32
return TypeTracerArray._new(dtype or default_int_type, (length,))
def meshgrid(
self, *arrays: ArrayLike, indexing: Literal["xy", "ij"] = "xy"
) -> list[TypeTracerArray]:
for x in arrays:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
assert x.ndim == 1
shape = tuple(x.size for x in arrays)
if indexing == "xy":
shape[:2] = shape[1], shape[0]
dtype = numpy.result_type(*arrays)
return [TypeTracerArray._new(dtype, shape=shape) for _ in arrays]
############################ testing
def array_equal(
self, x1: ArrayLike, x2: ArrayLike, *, equal_nan: bool = False
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
assert not isinstance(x2, PlaceholderArray)
try_touch_data(x1)
try_touch_data(x2)
return TypeTracerArray._new(np.bool_, shape=())
def searchsorted(
self,
x: ArrayLike,
values: ArrayLike,
*,
side: Literal["left", "right"] = "left",
sorter: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
assert not isinstance(values, PlaceholderArray)
assert not isinstance(sorter, PlaceholderArray)
try_touch_data(x)
try_touch_data(values)
try_touch_data(sorter)
if (
not (
is_unknown_length(x.size)
or sorter is None
or is_unknown_length(sorter.size)
)
and x.size != sorter.size
):
raise ValueError("x.size should equal sorter.size")
return TypeTracerArray._new(x.dtype, (values.size,))
############################ manipulation
def promote_scalar(self, obj) -> TypeTracerArray:
assert not isinstance(obj, PlaceholderArray)
if is_unknown_scalar(obj):
return obj
elif isinstance(obj, (Number, bool)):
# TODO: statically define these types for all nplikes
as_array = numpy.asarray(obj)
return TypeTracerArray._new(as_array.dtype, ())
else:
raise TypeError(f"expected scalar type, received {obj}")
def shape_item_as_index(self, x1: ShapeItem) -> IndexType:
if x1 is unknown_length:
return TypeTracerArray._new(np.int64, shape=())
elif isinstance(x1, int):
return x1
else:
raise TypeError(f"expected None or int type, received {x1}")
def index_as_shape_item(self, x1: IndexType) -> ShapeItem:
if is_unknown_scalar(x1) and np.issubdtype(x1.dtype, np.integer):
return unknown_length
else:
return int(x1)
def regularize_index_for_length(
self, index: IndexType, length: ShapeItem
) -> IndexType:
"""
Args:
index: index value
length: length of array
Returns regularized index that is guaranteed to be in-bounds.
"""
# Unknown indices are already regularized
if is_unknown_scalar(index):
return index
# Without a known length the result must be unknown, as we cannot regularize the index
length_scalar = self.shape_item_as_index(length)
if length is unknown_length:
return length_scalar
# We have known length and index
if index < 0:
index = index + length
if 0 <= index < length:
return index
else:
raise IndexError(f"index value out of bounds (0, {length}): {index}")
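# Illustrative behaviour (examples assumed, not from the original source):
#   regularize_index_for_length(-1, 5) returns 4,
#   regularize_index_for_length(7, 5) raises IndexError, and an unknown
#   length yields an unknown int64 scalar.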
def derive_slice_for_length(
self, slice_: slice, length: ShapeItem
) -> tuple[IndexType, IndexType, IndexType, ShapeItem]:
"""
Args:
slice_: normalized slice object
length: length of layout
Return a tuple of (start, stop, step, length) indices into a layout, suitable for
`_getitem_range` (if step == 1). Normalize lengths to fit length of array,
and for arrays with unknown lengths, these offsets become none.
"""
start = slice_.start
stop = slice_.stop
step = slice_.step
# Unknown lengths mean that the slice index is unknown
length_scalar = self.shape_item_as_index(length)
if length is unknown_length:
return length_scalar, length_scalar, step, length
else:
# Normalise `None` values
if step is None:
step = 1
if start is None:
# `step` is unknown → `start` is unknown
if is_unknown_scalar(step):
start = step
elif step < 0:
start = length_scalar - 1
else:
start = 0
# Normalise negative integers
elif not is_unknown_scalar(start):
if start < 0:
start = start + length_scalar
# Clamp values into length bounds
if is_unknown_scalar(length_scalar):
start = length_scalar
else:
start = min(max(start, 0), length_scalar)
if stop is None:
# `step` is unknown → `stop` is unknown
if is_unknown_scalar(step):
stop = step
elif step < 0:
stop = -1
else:
stop = length_scalar
# Normalise negative integers
elif not is_unknown_scalar(stop):
if stop < 0:
stop = stop + length_scalar
# Clamp values into length bounds
if is_unknown_scalar(length_scalar):
stop = length_scalar
else:
stop = min(max(stop, 0), length_scalar)
# Compute the length of the slice for downstream use
slice_length, remainder = divmod((stop - start), step)
if not is_unknown_scalar(slice_length):
# Take ceiling of division
if remainder != 0:
slice_length += 1
slice_length = max(0, slice_length)
return start, stop, step, self.index_as_shape_item(slice_length)
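    # Illustrative behaviour sketch (not part of the original source):
    #   derive_slice_for_length(slice(None), 5)     ->  (0, 5, 1, 5)
    #   derive_slice_for_length(slice(-2, None), 5) ->  (3, 5, 1, 2)
    # With an unknown length, start/stop become unknown scalars and the
    # returned length is unknown_length.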
def broadcast_shapes(self, *shapes: tuple[ShapeItem, ...]) -> tuple[ShapeItem, ...]:
ndim = max([len(s) for s in shapes], default=0)
result: list[ShapeItem] = [1] * ndim
for shape in shapes:
# Right broadcasting
missing_dim = ndim - len(shape)
if missing_dim > 0:
head: tuple[int, ...] = (1,) * missing_dim
shape = head + shape
# Fail if we absolutely know the shapes aren't compatible
for i, item in enumerate(shape):
# Item is unknown, take it
if is_unknown_length(item):
result[i] = item
# Existing item is unknown, keep it
elif is_unknown_length(result[i]):
continue
# Items match, continue
elif result[i] == item:
continue
# Item is broadcastable, take existing
elif item == 1:
continue
# Existing is broadcastable, take it
elif result[i] == 1:
result[i] = item
else:
raise ValueError(
"known component of shape does not match broadcast result"
)
return tuple(result)
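    # Illustrative behaviour sketch (not part of the original source):
    #   broadcast_shapes((unknown_length, 1), (1, 3))  ->  (unknown_length, 3)
    #   broadcast_shapes((4,), (2, 1))                 ->  (2, 4)
    #   broadcast_shapes((2,), (3,))                   ->  ValueError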
def broadcast_arrays(self, *arrays: ArrayLike) -> list[TypeTracerArray]:
for x in arrays:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if len(arrays) == 0:
return []
all_arrays = []
for x in arrays:
if not hasattr(x, "shape"):
x = self.promote_scalar(x)
all_arrays.append(x)
shapes = [x.shape for x in all_arrays]
shape = self.broadcast_shapes(*shapes)
return [TypeTracerArray._new(x.dtype, shape=shape) for x in all_arrays]
def broadcast_to(
self, x: ArrayLike, shape: tuple[ShapeItem, ...]
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
new_shape = self.broadcast_shapes(x.shape, shape)
        # broadcast_to is asymmetric, whilst broadcast_shapes is not.
        # Rather than implement broadcasting logic here, just sanitise the
        # result: the broadcast above can be equal to `shape`, have a greater
        # number of dimensions, and/or have differing dimensions. We only
        # want the case where the shapes are equal.
if len(new_shape) != len(shape):
raise ValueError
for result, intended in zip(new_shape, shape):
if intended is unknown_length:
continue
if result is unknown_length:
continue
if intended != result:
raise ValueError
return TypeTracerArray._new(x.dtype, shape=new_shape)
def reshape(
self, x: ArrayLike, shape: tuple[ShapeItem, ...], *, copy: bool | None = None
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
x.touch_shape()
size = x.size
        # Validate the new shape to ensure that it contains at most one placeholder
n_placeholders = 0
new_size = 1
for item in shape:
if item is unknown_length:
# Size is no longer defined
new_size = unknown_length
elif not is_integer(item):
raise ValueError(
"shape must be comprised of positive integers, -1 (for placeholders), or unknown lengths"
)
elif item == -1:
if n_placeholders == 1:
raise ValueError(
"only one placeholder dimension permitted per shape"
)
n_placeholders += 1
elif item == 0:
raise ValueError("shape items cannot be zero")
else:
new_size *= item
# Populate placeholders
new_shape = [*shape]
for i, item in enumerate(shape):
if item == -1:
new_shape[i] = size // new_size
break
return TypeTracerArray._new(x.dtype, tuple(new_shape), x.form_key, x.report)
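    # Illustrative behaviour sketch (not part of the original source): with a
    # known size of 12, reshape(x, (-1, 3)) resolves the placeholder to
    # (4, 3); if any dimension is unknown, the placeholder resolves to an
    # unknown length instead.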
def cumsum(
self,
x: ArrayLike,
*,
axis: int | None = None,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if axis is None:
return TypeTracerArray._new(x.dtype, (x.size,))
else:
assert self._axis_is_valid(axis, x.ndim)
return TypeTracerArray._new(x.dtype, x.shape)
def nonzero(self, x: ArrayLike) -> tuple[TypeTracerArray, ...]:
assert not isinstance(x, PlaceholderArray)
        # one int64 index array per dimension, each of unknown length
try_touch_data(x)
return (TypeTracerArray._new(np.int64, (unknown_length,)),) * len(x.shape)
def where(
self, condition: ArrayLike, x1: ArrayLike, x2: ArrayLike
) -> TypeTracerArray:
assert not isinstance(condition, PlaceholderArray)
assert not isinstance(x1, PlaceholderArray)
assert not isinstance(x2, PlaceholderArray)
condition, x1, x2 = self.broadcast_arrays(condition, x1, x2)
result_dtype = numpy.result_type(x1, x2)
return TypeTracerArray._new(result_dtype, shape=condition.shape)
def unique_values(self, x: ArrayLike) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return TypeTracerArray._new(x.dtype, shape=(unknown_length,))
def unique_all(self, x: ArrayLike) -> UniqueAllResult:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return UniqueAllResult(
TypeTracerArray._new(x.dtype, shape=(unknown_length,)),
TypeTracerArray._new(np.int64, shape=(unknown_length,)),
TypeTracerArray._new(np.int64, shape=x.shape),
TypeTracerArray._new(np.int64, shape=(unknown_length,)),
)
def sort(
self,
x: ArrayLike,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
) -> ArrayLike:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return TypeTracerArray._new(x.dtype, shape=x.shape)
def concat(self, arrays, *, axis: int | None = 0) -> TypeTracerArray:
if axis is None:
assert all(x.ndim == 1 for x in arrays)
elif axis != 0:
raise NotImplementedError("concat with axis != 0")
for x in arrays:
try_touch_data(x)
inner_shape = None
emptyarrays = []
for x in arrays:
assert not isinstance(x, PlaceholderArray)
if inner_shape is None:
inner_shape = x.shape[1:]
elif inner_shape != x.shape[1:]:
raise ValueError(
"inner dimensions don't match in concatenate: {} vs {}".format(
inner_shape, x.shape[1:]
)
)
emptyarrays.append(_emptyarray(x))
if inner_shape is None:
raise ValueError("need at least one array to concatenate")
return TypeTracerArray._new(
numpy.concatenate(emptyarrays).dtype, (unknown_length, *inner_shape)
)
def repeat(
self,
x: ArrayLike,
repeats: ArrayLike | int,
*,
axis: int | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
assert not isinstance(repeats, PlaceholderArray)
try_touch_data(x)
try_touch_data(repeats)
if axis is None:
size = x.size
if is_unknown_array(repeats):
size = unknown_length
else:
size = size * self.index_as_shape_item(repeats)
return TypeTracerArray._new(x.dtype, (size,))
else:
shape = list(x.shape)
if isinstance(repeats, TypeTracerArray) and repeats.ndim > 0:
raise NotImplementedError
else:
shape[axis] = shape[axis] * self.index_as_shape_item(repeats)
return TypeTracerArray._new(x.dtype, shape=tuple(shape))
def stack(
self,
arrays: list[ArrayLike] | tuple[ArrayLike, ...],
*,
axis: int = 0,
) -> TypeTracerArray:
for x in arrays:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def packbits(
self,
x: ArrayLike,
*,
axis: int | None = None,
bitorder: Literal["big", "little"] = "big",
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def unpackbits(
self,
x: ArrayLike,
*,
axis: int | None = None,
count: int | None = None,
bitorder: Literal["big", "little"] = "big",
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def strides(self, x: ArrayLike) -> tuple[ShapeItem, ...]:
assert not isinstance(x, PlaceholderArray)
x.touch_shape()
out = (x._dtype.itemsize,)
for item in reversed(x._shape):
out = (item * out[0], *out)
return out
############################ ufuncs
def add(
self,
x1: ArrayLike,
x2: ArrayLike,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
return self._apply_ufunc(numpy.add, x1, x2)
def logical_and(
self,
x1: ArrayLike,
x2: ArrayLike,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
return self._apply_ufunc(numpy.logical_and, x1, x2)
def logical_or(
self,
x1: ArrayLike,
x2: ArrayLike,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
assert not isinstance(x2, PlaceholderArray)
return self._apply_ufunc(numpy.logical_or, x1, x2)
def logical_not(
self, x: ArrayLike, maybe_out: ArrayLike | None = None
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
return self._apply_ufunc(numpy.logical_not, x)
def sqrt(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
return self._apply_ufunc(numpy.sqrt, x)
def exp(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
return self._apply_ufunc(numpy.exp, x)
def divide(
self,
x1: ArrayLike,
x2: ArrayLike,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
assert not isinstance(x2, PlaceholderArray)
return self._apply_ufunc(numpy.divide, x1, x2)
############################ almost-ufuncs
def nan_to_num(
self,
x: ArrayLike,
*,
copy: bool = True,
nan: int | float | None = 0.0,
posinf: int | float | None = None,
neginf: int | float | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return TypeTracerArray._new(x.dtype, shape=x.shape)
def isclose(
self,
x1: ArrayLike,
x2: ArrayLike,
*,
rtol: float = 1e-5,
atol: float = 1e-8,
equal_nan: bool = False,
) -> TypeTracerArray:
assert not isinstance(x1, PlaceholderArray)
assert not isinstance(x2, PlaceholderArray)
try_touch_data(x1)
try_touch_data(x2)
out, _ = self.broadcast_arrays(x1, x2)
return TypeTracerArray._new(np.bool_, shape=out.shape)
def isnan(self, x: ArrayLike) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return TypeTracerArray._new(np.bool_, shape=x.shape)
############################ reducers
def all(
self,
x: ArrayLike,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if axis is None:
return TypeTracerArray._new(np.bool_, shape=())
else:
raise NotImplementedError
def any(
self,
x: ArrayLike,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if axis is None:
return TypeTracerArray._new(np.bool_, shape=())
else:
raise NotImplementedError
def count_nonzero(
self, x: ArrayLike, *, axis: int | None = None, keepdims: bool = False
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if axis is None:
return TypeTracerArray._new(np.intp, shape=())
else:
raise NotImplementedError
def min(
self,
x: ArrayLike,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
raise NotImplementedError
def max(
self,
x: ArrayLike,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
maybe_out: ArrayLike | None = None,
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
if axis is None:
return TypeTracerArray._new(x.dtype, shape=())
else:
raise NotImplementedError
def array_str(
self,
x: ArrayLike,
*,
max_line_width: int | None = None,
precision: int | None = None,
suppress_small: bool | None = None,
):
assert not isinstance(x, PlaceholderArray)
try_touch_data(x)
return "[## ... ##]"
def astype(
self, x: ArrayLike, dtype: numpy.dtype, *, copy: bool | None = True
) -> TypeTracerArray:
assert not isinstance(x, PlaceholderArray)
x.touch_data()
return TypeTracerArray._new(np.dtype(dtype), x.shape)
def can_cast(self, from_: np.dtype | ArrayLike, to: np.dtype | ArrayLike) -> bool:
return numpy.can_cast(from_, to, casting="same_kind")
@classmethod
def is_own_array_type(cls, type_: type) -> bool:
return issubclass(type_, TypeTracerArray)
@classmethod
def is_own_array(cls, obj) -> bool:
return cls.is_own_array_type(type(obj))
def is_c_contiguous(self, x: ArrayLike) -> bool:
assert not isinstance(x, PlaceholderArray)
return True
def __dlpack_device__(self) -> tuple[int, int]:
raise NotImplementedError
def __dlpack__(self, stream=None):
raise NotImplementedError
def _attach_report(
layout: ak.contents.Content, form: ak.forms.Form, report: TypeTracerReport
):
if isinstance(layout, (ak.contents.BitMaskedArray, ak.contents.ByteMaskedArray)):
assert isinstance(form, (ak.forms.BitMaskedForm, ak.forms.ByteMaskedForm))
layout.mask.data.form_key = form.form_key
layout.mask.data.report = report
_attach_report(layout.content, form.content, report)
elif isinstance(layout, ak.contents.EmptyArray):
assert isinstance(form, ak.forms.EmptyForm)
elif isinstance(layout, (ak.contents.IndexedArray, ak.contents.IndexedOptionArray)):
assert isinstance(form, (ak.forms.IndexedForm, ak.forms.IndexedOptionForm))
layout.index.data.form_key = form.form_key
layout.index.data.report = report
_attach_report(layout.content, form.content, report)
elif isinstance(layout, ak.contents.ListArray):
assert isinstance(form, ak.forms.ListForm)
layout.starts.data.form_key = form.form_key
layout.starts.data.report = report
layout.stops.data.form_key = form.form_key
layout.stops.data.report = report
_attach_report(layout.content, form.content, report)
elif isinstance(layout, ak.contents.ListOffsetArray):
assert isinstance(form, ak.forms.ListOffsetForm)
layout.offsets.data.form_key = form.form_key
layout.offsets.data.report = report
_attach_report(layout.content, form.content, report)
elif isinstance(layout, ak.contents.NumpyArray):
assert isinstance(form, ak.forms.NumpyForm)
layout.data.form_key = form.form_key
layout.data.report = report
elif isinstance(layout, ak.contents.RecordArray):
assert isinstance(form, ak.forms.RecordForm)
for x, y in zip(layout.contents, form.contents):
_attach_report(x, y, report)
elif isinstance(layout, (ak.contents.RegularArray, ak.contents.UnmaskedArray)):
assert isinstance(form, (ak.forms.RegularForm, ak.forms.UnmaskedForm))
_attach_report(layout.content, form.content, report)
elif isinstance(layout, ak.contents.UnionArray):
assert isinstance(form, ak.forms.UnionForm)
layout.tags.data.form_key = form.form_key
layout.tags.data.report = report
layout.index.data.form_key = form.form_key
layout.index.data.report = report
for x, y in zip(layout.contents, form.contents):
_attach_report(x, y, report)
else:
raise AssertionError(f"unrecognized layout type {type(layout)}")
def typetracer_with_report(
form: ak.forms.Form, forget_length: bool = True
) -> tuple[ak.contents.Content, TypeTracerReport]:
layout = form.length_zero_array(highlevel=False).to_typetracer(
forget_length=forget_length
)
report = TypeTracerReport()
_attach_report(layout, form, report)
return layout, report
# ===== dstl/Stone-Soup :: stonesoup/tests/conftest.py (Python, permissive: MIT / Apache-2.0 and others) =====
import pytest
from ..base import Base, Property
class _TestBase(Base):
property_a: int = Property()
property_b: str = Property()
property_c: int = Property(default=123)
@pytest.fixture(scope='session')
def base():
return _TestBase
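# Illustrative usage sketch (not part of the original file; the test name and
# values are assumptions). Declared properties become keyword arguments of the
# generated __init__, with property_c falling back to its default:
#
#   def test_defaults(base):
#       instance = base(property_a=1, property_b="two")
#       assert instance.property_c == 123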
# ===== osbuild/osbuild :: stages/org.osbuild.tmpfilesd (Python, permissive: Apache-2.0) =====
#!/usr/bin/python3
"""
Create tmpfiles.d configuration.
This stage creates a tmpfiles.d configuration file with the given name in
/usr/lib/tmpfiles.d. The provided list of configuration directives is written
as separate lines into the configuration file. At least one configuration
directive must be specified.
"""
import sys
import osbuild.api
SCHEMA = r"""
"definitions": {
"configuration": {
"type": "object",
"additionalProperties": false,
"required": ["type", "path"],
"description": "tmpfiles.d configuration directive representing one line in the configuration.",
"properties": {
"type": {
"type": "string",
"description": "The file system path type.",
"pattern": "^([fwpLcbaA]\\+?|[dDevqQCxXrRzZtThH]){1}((!?-?)|(-?!?)){0,1}$"
},
"path": {
"type": "string",
"description": "Absolute file system path."
},
"mode": {
"type": "string",
"description": "The file access mode when creating the file or directory.",
"pattern": "^~?[0-7]{4}$"
},
"user": {
"type": "string",
"description": "The user to use for the file or directory."
},
"group": {
"type": "string",
"description": "The group to use for the file or directory."
},
"age": {
"type": "string",
"description": "Date field used to decide what files to delete when cleaning."
},
"argument": {
"type": "string",
"description": "Argument with its meaning being specific to the path type."
}
}
}
},
"additionalProperties": false,
"required": ["filename", "config"],
"properties": {
"filename": {
"type": "string",
"description": "Name of the tmpfiles.d configuration file to create.",
"pattern": "^[\\w.-]{1,250}\\.conf$"
},
"config": {
"additionalProperties": false,
"type": "array",
"description": "List of configuration directives written into the configuration file.",
"minItems": 1,
"items": {
"$ref": "#/definitions/configuration"
}
}
}
"""
def main(tree, options):
filename = options["filename"]
cfg = options["config"]
tmpfilesd_config_dir = f"{tree}/usr/lib/tmpfiles.d"
cfg_lines = []
for cfg_item in cfg:
cfg_type = cfg_item["type"]
cfg_path = cfg_item["path"]
cfg_line = f"{cfg_type} {cfg_path}"
optional_properties = ["mode", "user", "group", "age", "argument"]
cfg_line_optional_part = ""
for optional_property in reversed(optional_properties):
cfg_property_value = cfg_item.get(optional_property)
if cfg_property_value:
if cfg_line_optional_part:
cfg_line_optional_part = " ".join([cfg_property_value, cfg_line_optional_part])
else:
cfg_line_optional_part = cfg_property_value
elif cfg_line_optional_part:
                # If optional properties later on the line were already
                # provided, any omitted optional values preceding them must
                # be written as "-" on the configuration line.
cfg_line_optional_part = " ".join(["-", cfg_line_optional_part])
if cfg_line_optional_part:
cfg_line += " " + cfg_line_optional_part
cfg_line += "\n"
cfg_lines.append(cfg_line)
with open(f"{tmpfilesd_config_dir}/{filename}", "w", encoding="utf8") as f:
f.writelines(cfg_lines)
return 0
if __name__ == '__main__':
args = osbuild.api.arguments()
r = main(args["tree"], args["options"])
sys.exit(r)
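# Illustrative example (not part of the stage; values are assumptions).
# Given options such as
#
#   {"filename": "example.conf",
#    "config": [{"type": "d", "path": "/run/example",
#                "mode": "0755", "user": "root"}]}
#
# the stage writes the line "d /run/example 0755 root" to
# <tree>/usr/lib/tmpfiles.d/example.conf.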
# ===== bloomberg/phabricator-tools :: py/phl/phlsys_git__it.py (Python, permissive: Apache-2.0) =====
"""Test suite for phlsys_git."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# TODO
# -----------------------------------------------------------------------------
# Tests:
# TODO
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import phlsys_git
import phlsys_subprocess
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_can_commit(self):
# TODO: make this more portable with shutil etc.
run = phlsys_subprocess.run
runCommands = phlsys_subprocess.run_commands
path = "phlsys_git_TestGitContext"
runCommands("mkdir " + path)
run("git", "init", workingDir=path)
repo = phlsys_git.Repo(path)
runCommands("touch " + path + "/README")
repo("add", "README")
repo("commit", "-m", "initial commit")
runCommands("rm -rf " + path)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
# ===== captainhammy/Houdini-Toolbox :: python/houdini_toolbox/ui/paste/sources.py (Python, permissive: MIT) =====
"""Classes for finding, copying and pasting of files."""
# ==============================================================================
# IMPORTS
# ==============================================================================
# Future
from __future__ import annotations
# Standard Library
import abc
import datetime
import getpass
import json
import os
import platform
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
# Houdini Toolbox
import houdini_toolbox.ui.paste.helpers
from houdini_toolbox.ui.paste import utils
# Houdini
import hou
if TYPE_CHECKING:
from PySide2 import QtGui
# Handle differences between platforms.
if platform.system() == "Windows":
_CONTEXT_SEP = "@"
getpwuid = None
else:
_CONTEXT_SEP = ":"
from pwd import getpwuid # type: ignore
# ==============================================================================
# CLASSES
# ==============================================================================
class SourceManager:
"""Manager class for all source objects."""
def __init__(self):
self._sources = []
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def sources(self) -> List[CopyPasteSource]:
"""List of all available sources."""
return self._sources
# Sources
class CopyPasteSource(abc.ABC):
"""Base class for managing copy/paste items."""
def __init__(self):
self._sources = {}
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
@abc.abstractmethod
def display_name(self) -> str:
"""The source display name."""
@property
@abc.abstractmethod
def icon(self) -> QtGui.QIcon:
"""The icon for the source."""
@property
def sources(self) -> Dict[str, List[CopyPasteItemSource]]:
"""The item sources."""
return self._sources
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abc.abstractmethod
def copy_helper_widget(
self,
) -> (
houdini_toolbox.ui.paste.helpers._BaseCopyHelperWidget
): # pylint: disable=protected-access
"""Get the copy helper widget for this source.
:return: The helper widget to copy items to this source.
"""
@abc.abstractmethod
def create_source(self, *args, **kwargs):
"""Create a new source."""
@abc.abstractmethod
def get_sources(self, context: str) -> List[CopyPasteItemSource]:
"""Return a list of any sources for the context.
:param context: A Houdini context name.
:return: Available sources for the context.
"""
@abc.abstractmethod
def paste_helper_widget(
self,
) -> (
houdini_toolbox.ui.paste.helpers._BasePasteHelperWidget
): # pylint: disable=protected-access
"""Get the paste helper widget for this source.
:return: The helper widget to paste items from this source.
"""
@abc.abstractmethod
def refresh(self):
"""Refresh the list of sources.
:return:
"""
class HomeDirSource(CopyPasteSource):
"""Copy/Paste .cpio items from ~/copypaste."""
_extension = ".cpio"
_base_path = os.path.join(os.path.expanduser("~"), "copypaste")
def __init__(self):
super().__init__()
self._init_sources()
# -------------------------------------------------------------------------
# STATIC METHODS
# -------------------------------------------------------------------------
@staticmethod
def pack_name(name: str) -> str:
"""Take a source name and convert it to a name suitable for a file.
This method will replace all spaces with '--'.
:param name: The name to pack.
:return: The packed name.
"""
return name.replace(" ", "--")
@staticmethod
def unpack_name(name: str) -> str:
"""Unpack a file name into a source name.
This method will replace all '--' with spaces.
:param name: The name to unpack.
:return: The unpacked name.
"""
return name.replace("--", " ")
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
def _create_sidecar_file(self, base_path: str, data: dict) -> str:
"""Write a .json sidecar file with the item info.
:param base_path: The path of the main file.
:param data: The data to write.
:return: The sidecar file path.
"""
sidecar_path = base_path.replace(self._extension, ".json")
with open(sidecar_path, "w", encoding="utf-8") as handle:
json.dump(data, handle, indent=4)
return sidecar_path
def _init_sources(self):
"""Initialize all the source items.
:return:
"""
# The target folder might not exist if nothing has been copied for that
# context.
if not os.path.exists(self._base_path):
return
files = os.listdir(self._base_path)
for file_name in files:
if os.path.splitext(file_name)[1] == self._extension:
path = os.path.join(self._base_path, file_name)
item = CPIOContextCopyPasteItemFile.from_path(path)
if item is not None:
context_sources = self.sources.setdefault(item.context, [])
context_sources.append(item)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def display_name(self) -> str:
"""A display name for this source."""
return "$HOME"
@property
def icon(self) -> QtGui.QIcon:
"""The icon for the source."""
return hou.qt.createIcon("MISC_satchel")
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def copy_helper_widget(
self, *args, **kwargs
) -> (
houdini_toolbox.ui.paste.helpers.HomeToolDirItemsCopyHelperWidget
): # pylint: disable=arguments-differ
"""Get the copy helper widget for this source.
:return: The helper widget to copy items to this source.
"""
return houdini_toolbox.ui.paste.helpers.HomeToolDirItemsCopyHelperWidget(
self, *args, **kwargs
)
def create_source( # pylint: disable=arguments-differ
self, context: str, name: str, description: Optional[str] = None
) -> CPIOContextCopyPasteItemFile:
"""Create a new item source.
:param context: The operator context of the source.
:param name: The name of the source.
:param description: Optional description of the source.
:return: The created source item.
"""
clean_name = self.pack_name(name)
        # The file name consists of the context and the packed name,
        # separated by the context separator.
file_name = f"{context}{_CONTEXT_SEP}{clean_name}{self._extension}"
file_path = os.path.join(self._base_path, file_name)
sidecar_data = {
"author": getpass.getuser(),
"name": name,
"context": context,
"date": houdini_toolbox.ui.paste.utils.date_to_string(
datetime.datetime.now()
),
}
if description is not None:
sidecar_data["description"] = description
sidecar_path = self._create_sidecar_file(file_path, sidecar_data)
source = CPIOContextCopyPasteItemFile(file_path, context, name, sidecar_path)
context_sources = self.sources.setdefault(context, [])
context_sources.append(source)
return source
def destroy_item(self, item: CopyPasteItemSource):
"""Destroy and item and remove it from the source map.
:param item: The item to remove and destroy.
:return:
"""
context_sources = self.sources.get(item.context, [])
if item in context_sources:
item.destroy()
context_sources.remove(item)
def get_sources(self, context: str) -> List[CopyPasteItemSource]:
"""Get a list of available sources for a context.
:param context: An operator context name.
:return: Available sources for the context.
"""
return self.sources.get(context, [])
def paste_helper_widget(
self, *args, **kwargs
) -> (
houdini_toolbox.ui.paste.helpers.HomeToolDirItemsPasteHelperWidget
): # pylint: disable=arguments-differ
"""Get the paste helper widget for this source.
:return: The helper widget to paste items from this source.
"""
return houdini_toolbox.ui.paste.helpers.HomeToolDirItemsPasteHelperWidget(
self, *args, **kwargs
)
def refresh(self):
"""Refresh the internal sources.
This will clear the internal data and reload all sources.
:return:
"""
self.sources.clear()
self._init_sources()
# Item Sources
class CopyPasteItemSource(abc.ABC):
"""Class responsible for loading and saving items from a source.
:param context: The operator context.
"""
def __init__(self, context: str):
self._context = context
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
@abc.abstractmethod
def author(self) -> str:
"""The name of the item author."""
@property
def context(self) -> str:
"""The operator context name."""
return self._context
@property
@abc.abstractmethod
def date(self) -> datetime.datetime:
"""datetime.datetime: The date of creation."""
@property
@abc.abstractmethod
def description(self) -> str:
"""The item description."""
@property
@abc.abstractmethod
def name(self) -> str:
"""The item name."""
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abc.abstractmethod
def destroy(self):
"""Remove this item and all associated files.
:return:
"""
@abc.abstractmethod
def load_items(self, parent: hou.Node):
"""Load this source's items under the parent node.
:param parent: The node to load the items under.
"""
@abc.abstractmethod
def save_items(self, parent: hou.Node, items: Tuple[hou.NetworkItem]):
"""Save the supplied items under the parent node..
:param parent: The parent node of the items to save.
:param items: The items to save.
:return:
"""
class CPIOContextCopyPasteItemFile(CopyPasteItemSource):
"""Class to load and save items from .cpio files.
:param file_path: The path to the file.
:param context: The operator context.
:param name: The item name.
:param sidecar_path: Optional path to a sidecar file.
"""
_extension = ".cpio"
def __init__(
self,
file_path: str,
context: str,
name: str,
sidecar_path: Optional[str] = None,
):
super().__init__(context)
self._author = None
self._date = None
self._description = None
self._file_path = file_path
self._name = name
self._sidecar_path = sidecar_path
self._init_metadata()
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __repr__(self):
return f"<{self.__class__.__name__} {self.file_path}>"
# -------------------------------------------------------------------------
# CLASS METHODS
# -------------------------------------------------------------------------
@classmethod
def from_path(cls, file_path: str) -> CPIOContextCopyPasteItemFile:
"""Initialize a new object based on a file path.
:param file_path: The path to the source file.
:return: A new object based on the file path.
"""
file_name = os.path.splitext(os.path.basename(file_path))[0]
context, name = file_name.split(_CONTEXT_SEP)
name = cls.unpack_name(name)
return cls(file_path, context, name)
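    # For example (illustrative, using the non-Windows ":" separator):
    # from_path(".../copypaste/Sop:My--Setup.cpio") yields an item with
    # context "Sop" and name "My Setup".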
# -------------------------------------------------------------------------
# STATIC METHODS
# -------------------------------------------------------------------------
@staticmethod
def pack_name(name: str) -> str:
"""Take a source name and convert it to a name suitable for a file.
This method will replace all spaces with '--'.
:param name: The name to pack.
:return: The packed name.
"""
return name.replace(" ", "--")
@staticmethod
def unpack_name(name: str) -> str:
"""Unpack a file name into a source name.
This method will replace all '--' with spaces.
:param name: The name to unpack.
:return: The unpacked name.
"""
return name.replace("--", " ")
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
def _init_metadata(self):
"""Initialize data based on the sidecar file.
:return:
"""
# No sidecar path so assume the default one.
if self._sidecar_path is None:
self._sidecar_path = self.file_path.replace(self._extension, ".json")
# If the file exists, read it.
if os.path.exists(self._sidecar_path):
with open(self._sidecar_path, encoding="utf-8") as handle:
sidecar_data = json.load(handle)
else:
sidecar_data = None
# If data was found then set various properties based on it.
if sidecar_data is not None:
self._author = sidecar_data.get("author")
date = sidecar_data.get("date")
self._date = utils.date_from_string(date)
description = sidecar_data.get("description")
if description is not None:
self._description = description
# Need to stat file for info.
else:
stat_data = os.stat(self.file_path)
if getpwuid is not None:
self._author = getpwuid(stat_data.st_uid).pw_name
else:
self._author = "unknown"
self._date = datetime.datetime.fromtimestamp(stat_data.st_mtime)
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@property
def author(self) -> Optional[str]:
"""The name of the item author."""
return self._author
@property
def date(self) -> Optional[datetime.datetime]:
"""The date of creation."""
return self._date
@property
def description(self) -> Optional[str]:
"""The item description."""
return self._description
@property
def file_path(self) -> str:
"""The path to the source file."""
return self._file_path
@property
def name(self) -> str:
"""The item name."""
return self._name
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def destroy(self):
"""Remove this item and all associated files.
:return:
"""
if self._sidecar_path is not None and os.path.exists(self._sidecar_path):
os.remove(self._sidecar_path)
if os.path.exists(self.file_path):
os.remove(self.file_path)
def load_items(self, parent: hou.Node):
"""Load this source's items under the parent node.
:param parent: The node to load the items under.
"""
parent.loadItemsFromFile(self.file_path)
def save_items(self, parent: hou.Node, items: Tuple[hou.NetworkItem]):
"""Save the supplied items under the parent node.
:param parent: The parent node of the items to save.
:param items: The items to save.
:return:
"""
target_folder = os.path.dirname(self.file_path)
# Ensure the path exists.
if not os.path.exists(target_folder):
os.mkdir(target_folder)
parent.saveItemsToFile(items, self.file_path)
# ===== GothenburgBitFactory/timewarrior :: test/test_extensions/debug.py (Python, permissive: MIT) =====
#!/usr/bin/env python3
import sys
for line in sys.stdin:
# skip configuration
if line == "\n":
break
for line in sys.stdin:
print(line.strip())
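# Note (added for clarity; based on the documented extension API): timewarrior
# feeds an extension its configuration as "key: value" lines, then a blank
# line, then the report body (a JSON array of intervals). The first loop
# consumes the configuration block; the second echoes the body line by line,
# stripped of surrounding whitespace.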
# ===== widdowquinn/pyani :: pyani/aniblastall.py (Python, permissive: MIT) =====
# -*- coding: utf-8 -*-
# (c) University of Strathclyde 2021
# Author: Leighton Pritchard
#
# Contact: leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the ANIblastall average nucleotide identity method."""
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path
from . import pyani_config
from . import PyaniException
class PyaniblastallException(PyaniException):
"""ANIblastall-specific exception for pyani."""
def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
r"""Return BLAST blastall version as a string.
:param blast_exe: path to blastall executable
We expect blastall to return a string as, for example
.. code-block:: bash
$ blastall -version
[blastall 2.2.26] ERROR: Number of database sequences to show \
one-line descriptions for (V) [ersion] is bad or out of range [? to ?]
This is concatenated with the OS name.
The following circumstances are explicitly reported as strings
- no executable at passed path
- non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
- no version info returned
- executable cannot be run on this OS
"""
logger = logging.getLogger(__name__)
try:
blastall_path = Path(shutil.which(blast_exe)) # type:ignore
except TypeError:
return f"{blast_exe} is not found in $PATH"
if not blastall_path.is_file(): # no executable
return f"No blastall at {blastall_path}"
# This should catch cases when the file can't be executed by the user
if not os.access(blastall_path, os.X_OK): # file exists but not executable
return f"blastall exists at {blastall_path} but not executable"
if platform.system() == "Darwin":
cmdline = [blast_exe, "-version"]
else:
cmdline = [blast_exe]
try:
result = subprocess.run(
cmdline, # type: ignore
shell=False,
stdout=subprocess.PIPE, # type: ignore
stderr=subprocess.PIPE,
check=False, # blastall doesn't return 0
)
except OSError:
logger.warning("blastall executable will not run", exc_info=True)
return f"blastall exists at {blastall_path} but could not be executed"
version = re.search( # type: ignore
r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
).group()
if 0 == len(version.strip()):
return f"blastall exists at {blastall_path} but could not retrieve version"
return f"{platform.system()}_{version} ({blastall_path})"
# ===== materialsproject/fireworks :: fireworks/features/stats.py (Python, permissive: BSD-2-Clause and others) =====
"""Important: this class is out-of-date and deprecated. It will be replaced by the FWReport() class."""
from collections import defaultdict
from datetime import datetime, timedelta
from bson.son import SON
from dateutil import parser
from fireworks import LaunchPad
__author__ = "Wei Chen"
__copyright__ = "Copyright 2014, The Material Project"
__maintainer__ = "Wei Chen"
__email__ = "weichen@lbl.gov"
__date__ = "Sep 8, 2014"
RUNTIME_STATS = {
"max_runtime(s)": {"$max": "$runtime_secs"},
"min_runtime(s)": {"$min": "$runtime_secs"},
"avg_runtime(s)": {"$avg": "$runtime_secs"},
}
class FWStats:
def __init__(self, lpad):
"""
Object to get Fireworks running stats from a LaunchPad.
Args:
lpad (LaunchPad): A LaunchPad object that manages the Fireworks database
"""
if isinstance(lpad, LaunchPad):
self._lpad = lpad
else:
raise TypeError("LaunchPad cannot be loaded!")
self._fireworks = lpad.db.fireworks
self._launches = lpad.db.launches
self._workflows = lpad.db.workflows
def get_fireworks_summary(self, query_start=None, query_end=None, query=None, time_field="updated_on", **args):
"""
Get fireworks summary for a specified time range.
Args:
query_start (str): The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
query_end (str): The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
query (dict): Additional Pymongo queries to filter entries for process.
time_field (str): The field to query time range. Default is "updated_on".
args (dict): Time difference to calculate query_start from query_end.
Accepts arguments in python datetime.timedelta function. args and query_start can
not be given at the same time. Default is 30 days.
Returns:
(list) A summary of fireworks stats for the specified time range.
"""
return self._get_summary(
coll=self._fireworks,
query_start=query_start,
query_end=query_end,
query=query,
time_field=time_field,
**args
)
def get_launch_summary(
self,
query_start=None,
query_end=None,
time_field="time_end",
query=None,
runtime_stats=False,
include_ids=False,
**args
):
"""
Get launch summary for a specified time range.
Args:
query_start (str): The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
query_end (str): The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
time_field (str): The field to query time range. Default is "time_end".
query (dict): Additional Pymongo queries to filter entries for process.
runtime_stats (bool): If return runtime stats. Default is False.
include_ids (bool): If return fw_ids. Default is False.
args (dict): Time difference to calculate query_start from query_end.
Accepts arguments in python datetime.timedelta function. args and query_start can
not be given at the same time. Default is 30 days.
Returns:
(list) A summary of launch stats for the specified time range.
"""
launch_id = self._get_launch_id_from_fireworks(query=query)
if launch_id:
match_launch_id = {"launch_id": {"$in": launch_id}}
results = self._get_summary(
coll=self._launches,
query_start=query_start,
query_end=query_end,
time_field=time_field,
query=match_launch_id,
runtime_stats=runtime_stats,
include_ids=include_ids,
**args
)
return results
def get_workflow_summary(self, query_start=None, query_end=None, query=None, time_field="updated_on", **args):
"""
Get workflow summary for a specified time range.
:param query_start: (str) The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param query_end: (str) The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param query: (dict) Additional Pymongo queries to filter entries for process.
:param time_field: (str) The field to query time range. Default is "updated_on".
:param args: (dict) Time difference to calculate query_start from query_end. Accepts arguments in python
datetime.timedelta function. args and query_start can not be given at the same time. Default is 30 days.
:return: (list) A summary of workflow stats for the specified time range.
"""
return self._get_summary(
coll=self._workflows,
query_start=query_start,
query_end=query_end,
query=query,
time_field=time_field,
runtime_stats=False,
allow_null_time=False,
isoformat=False,
**args
)
def get_daily_completion_summary(self, query_start=None, query_end=None, query=None, time_field="time_end", **args):
"""
Get daily summary of fireworks for a specified time range
:param query_start: (str) The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param query_end: (str) The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param query: (dict) Additional Pymongo queries to filter entries for process.
:param time_field: (str) The field to query time range. Default is "time_end".
:param args: (dict) Time difference to calculate query_start from query_end. Accepts arguments in python
datetime.timedelta function. args and query_start can not be given at the same time. Default is 30 days.
:return: (list) A summary of daily fireworks stats for the specified time range.
"""
launch_id = self._get_launch_id_from_fireworks(query=query)
if launch_id:
match_launch_id = {"launch_id": {"$in": launch_id}}
summary_query = self._get_summary(
coll=self._launches,
query_start=query_start,
query_end=query_end,
query=match_launch_id,
return_query_only=True,
**args
)
summary_query[1]["$project"][time_field] = {"$substr": ["$" + time_field, 0, 10]}
summary_query[2]["$group"]["_id"] = {time_field: "$" + time_field, "state": "$state"}
re_aggregate_query = [
summary_query[0],
summary_query[1],
summary_query[2],
{
"$group": {
"_id": "$_id." + time_field,
"run_counts": {"$push": {"state": "$_id.state", "count": "$count"}},
}
},
summary_query[-1],
]
results = self._launches.aggregate(re_aggregate_query)
return results["result"] if results["ok"] else None
def group_fizzled_fireworks(
self, group_by, query_start=None, query_end=None, query=None, include_ids=False, **args
):
"""
Group fizzled fireworks for a specified time range by a specified key.
:param group_by: (str) Database field used to group fireworks items.
:param query_start: (str) The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param query_end: (str) The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param query: (dict) Additional Pymongo queries to filter entries for process.
:param include_ids: (bool) If return fw_ids. Default is False.
:param args: (dict) Time difference to calculate query_start from query_end. Accepts arguments in python
datetime.timedelta function. args and query_start can not be given at the same time. Default is 30 days.
:return: (list) A summary of fizzled fireworks for group by the specified key.
"""
project_query = {"key": "$" + group_by, "_id": 0}
group_query = {"_id": "$key", "count": {"$sum": 1}}
match_query = {
"state": "FIZZLED",
"created_on": self._query_datetime_range(start_time=query_start, end_time=query_end, **args),
}
if include_ids:
project_query.update({"fw_id": 1})
group_query.update({"fw_id": {"$push": "$fw_id"}})
if query:
match_query.update(query)
return self._aggregate(
coll=self._fireworks, match=match_query, project=project_query, group_op=group_query, group_by="key"
)
def identify_catastrophes(
self,
error_ratio=0.01,
query_start=None,
query_end=None,
query=None,
time_field="time_end",
include_ids=True,
**args
):
"""
Get days with higher failure ratio
:param error_ratio: (float) Threshold of error ratio to define as a catastrophic day
:param query_start: (str) The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param query_end: (str) The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param query: (dict) Additional Pymongo queries to filter entries for process.
:param time_field: (str) The field to query time range. Default is "time_end".
        :param include_ids: (bool) If return fw_ids. Default is True.
:param args: (dict) Time difference to calculate query_start from query_end. Accepts arguments in python
datetime.timedelta function. args and query_start can not be given at the same time. Default is 30 days.
:return: (list) Dates with higher failure ratio with optional failed fw_ids.
"""
results = self.get_daily_completion_summary(
query_start=query_start, query_end=query_end, query=query, time_field=time_field, **args
)
bad_dates = []
        for dates in results:
            total, fizzled_counts = 0.0, 0.0
            for counts in dates["run_counts"]:
                if counts["state"] == "FIZZLED":
                    fizzled_counts += counts["count"]
                total += counts["count"]
            if fizzled_counts / total >= error_ratio:
bad_dates.append(dates["_id"])
if not include_ids:
return bad_dates
id_dict = defaultdict(list)
for d in bad_dates:
for fizzled_id in self._launches.find({time_field: {"$regex": d}, "state": "FIZZLED"}, {"fw_id": 1}):
id_dict[d].append(fizzled_id["fw_id"])
return id_dict
def _get_summary(
self,
coll,
query_start=None,
query_end=None,
query=None,
time_field="time_end",
runtime_stats=False,
include_ids=False,
id_field="fw_id",
return_query_only=False,
allow_null_time=True,
isoformat=True,
**args
):
"""
Get a summary of Fireworks stats with a specified time range.
:param coll: (Pymongo Collection) A PyMongo Collection instance.
:param query_start: (str) The start time (inclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param query_end: (str) The end time (exclusive) to query in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param query: (dict) Additional Pymongo queries to filter entries for process.
:param time_field: (str) The field to query time range. Default is "time_end".
:param runtime_stats: (bool) If return runtime stats. Default is False.
:param include_ids: (bool) If return ids. Default is False.
:param id_field: (str) The ids returned when include_ids is True. Default is "fw_id".
:param return_query_only: (bool) If only return the query expression for aggregation. Default is False.
:param allow_null_time: (bool) If count entries with time_field is null. Default is True.
:param isoformat:(bool) If use isoformat time for query. Default is True.
:param args: (dict) Time difference to calculate query_start from query_end. Accepts arguments in python
datetime.timedelta function. args and query_start can not be given at the same time. Default is 30 days.
:return: (list) A summary of Fireworks stats with the specified time range.
"""
if query is None:
query = {}
project_query = {"state": 1, "_id": 0}
group_query = {"count": {"$sum": 1}}
if allow_null_time:
match_query = {
"$or": [
{
time_field: self._query_datetime_range(
start_time=query_start, end_time=query_end, isoformat=isoformat, **args
)
},
{time_field: None},
]
}
else:
match_query = {
time_field: self._query_datetime_range(
start_time=query_start, end_time=query_end, isoformat=isoformat, **args
)
}
match_query.update(query)
if runtime_stats:
project_query.update({"runtime_secs": 1})
group_query.update(RUNTIME_STATS)
if include_ids:
project_query.update({id_field: 1})
group_query.update({"ids": {"$push": "$" + id_field}})
return self._aggregate(
coll=coll,
match=match_query,
project=project_query,
group_op=group_query,
return_query_only=return_query_only,
)
def _get_launch_id_from_fireworks(self, query=None):
"""
Get a list of launch_ids from the fireworks collection.
:param query: (dict) PyMongo query expression to filter fireworks. Default is None
:return: (list) A list of launch_ids.
"""
if query is None:
query = {}
results = self._aggregate(
coll=self._fireworks,
match=query,
project={"launches": 1, "_id": 0},
unwind="launches",
group_op={"launch_id": {"$push": "$launches"}},
)
return results[0].get("launch_id")
@staticmethod
def _aggregate(
coll, group_by="state", match=None, project=None, unwind=None, group_op=None, sort=None, return_query_only=False
):
"""
Method to run aggregation in the Mongodb aggregation framework.
:param coll: (Pymongo Collection) A PyMongo Collection instance.
:param group_by: (str) Field to be used as key in the group step in Mongodb aggregation framework.
Default is the"state" field.
:param match: (dict) Query for the match step in Mongodb aggregation framework.
:param project: (dict) Query for the project step in Mongodb aggregation framework
:param unwind: (dict) Query for the unwind step in Mongodb aggregation framework
:param group_op: (dict) Additional operations to generate values in the group step in Mongodb aggregation
framework.
:param sort: (tuple) Defines how to sort the aggregation results. Default is to sort by field in group_by.
:param return_query_only: (bool) If only return the query expression for aggregation. Default is False.
:return: (list) Aggregation results if the operation is successful.
"""
        # Default the dict-valued pipeline arguments so the stages below can
        # safely update and embed them.
        if match is None:
            match = {}
        if project is None:
            project = {}
        if group_op is None:
            group_op = {}
group_op.update({"_id": "$" + group_by})
if sort is None:
sort_query = ("_id", 1)
query = [{"$match": match}, {"$project": project}, {"$group": group_op}, {"$sort": SON([sort_query])}]
if unwind:
query.insert(2, {"$unwind": "$" + unwind})
if return_query_only:
return query
return list(coll.aggregate(query))
@staticmethod
def _query_datetime_range(start_time=None, end_time=None, isoformat=True, **time_delta):
"""
Get a PyMongo query expression for datetime
:param start_time: (str) Query start time (inclusive) in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is 30 days before current time.
:param end_time: (str) Query end time (exclusive) in isoformat (YYYY-MM-DDTHH:MM:SS.mmmmmm).
Default is current time.
:param isoformat: (bool) If returned Pymongo query uses isoformat for datetime. Default is True.
:param time_delta: (dict) Time difference to calculate start_time from end_time. Accepts arguments in python
datetime.timedelta function. time_delta and start_time can not be given at the same time. Default is 30 days.
:return: (dict) A Mongodb query expression for a datetime range.
"""
if start_time and time_delta:
raise SyntaxError("Can't specify start_time and time_delta at the same time!")
end_time = parser.parse(end_time) if end_time else datetime.utcnow()
if not start_time:
if not time_delta:
time_delta = {"days": 30}
start_time = end_time - timedelta(**time_delta)
else:
start_time = parser.parse(start_time)
if start_time > end_time:
raise ValueError("query_start should be earlier than query_end!")
if isoformat:
return {"$gte": start_time.isoformat(), "$lt": end_time.isoformat()}
return {"$gte": start_time, "$lt": end_time}
|
b20ecdac0b397f076349bd69d8f2b09bf12e52a4
|
5b7c2feb27a71837edf526315d413706a6bf82ff
|
/mmf/utils/configuration.py
|
6f23a81c418e1beb7143aac3aa27d4610334143f
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/mmf
|
df675223566dc8fb2359aa3e1a2d49db5e3c2b9a
|
63f76fbcfe2d056b88734fc41a983251d20e6c61
|
refs/heads/main
| 2023-08-23T23:40:46.827046
| 2023-07-11T06:18:50
| 2023-07-11T06:18:50
| 138,831,170
| 2,432
| 592
|
NOASSERTION
| 2023-08-11T20:26:11
| 2018-06-27T04:52:40
|
Python
|
UTF-8
|
Python
| false
| false
| 20,095
|
py
|
configuration.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import json
import logging
import os
import warnings
from ast import literal_eval
from typing import List
import torch
from mmf.common.registry import registry
from mmf.utils.env import import_user_module
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path, get_mmf_root
from omegaconf import DictConfig, errors as OCErrors, OmegaConf
logger = logging.getLogger(__name__)
def load_yaml(f):
# Convert to absolute path for loading includes
abs_f = get_absolute_path(f)
try:
mapping = OmegaConf.load(PathManager.get_local_path(abs_f))
f = abs_f
except FileNotFoundError as e:
# Check if this file might be relative to root?
# TODO: Later test if this can be removed
relative = os.path.abspath(os.path.join(get_mmf_root(), f))
if not PathManager.isfile(relative):
raise e
else:
f = relative
mapping = OmegaConf.load(PathManager.get_local_path(f))
if mapping is None:
mapping = OmegaConf.create()
includes = mapping.get("includes", [])
if not isinstance(includes, collections.abc.Sequence):
raise AttributeError(
"Includes must be a list, {} provided".format(type(includes))
)
include_mapping = OmegaConf.create()
mmf_root_dir = get_mmf_root()
for include in includes:
original_include_path = include
include = os.path.join(mmf_root_dir, include)
# If path doesn't exist relative to MMF root, try relative to current file
if not PathManager.exists(include):
include = os.path.join(os.path.dirname(f), original_include_path)
current_include_mapping = load_yaml(include)
include_mapping = OmegaConf.merge(include_mapping, current_include_mapping)
mapping.pop("includes", None)
mapping = OmegaConf.merge(include_mapping, mapping)
return mapping
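# --- Editor's sketch (illustrative only, not part of the original module). ---
# The "includes" handling above means a (hypothetical) config such as
#
#   includes:
#     - configs/datasets/vqa2/defaults.yaml
#   model_config:
#     visual_bert:
#       losses: [logit_bce]
#
# is loaded by first merging every included file (resolved relative to the MMF
# root, then relative to the including file) and then overlaying the current
# mapping on top of that merged result, so local keys win over included ones.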
def get_default_config_path():
directory = os.path.dirname(os.path.abspath(__file__))
configs_dir = os.path.join(directory, "..", "configs")
# Check for fb defaults
fb_defaults = os.path.join(configs_dir, "fb_defaults.yaml")
if PathManager.exists(fb_defaults):
return fb_defaults
else:
return os.path.join(configs_dir, "defaults.yaml")
def load_yaml_with_defaults(f):
default_config = get_default_config_path()
return OmegaConf.merge(load_yaml(default_config), load_yaml(f))
def get_zoo_config(
key, variation="defaults", zoo_config_path=None, zoo_type="datasets"
):
version = None
resources = None
if zoo_config_path is None:
zoo_config_path = os.path.join("configs", "zoo", f"{zoo_type}.yaml")
zoo = load_yaml(zoo_config_path)
# Set struct on zoo so that unidentified access is not allowed
OmegaConf.set_struct(zoo, True)
try:
item = OmegaConf.select(zoo, key)
except Exception:
# Key wasn't present or something else happened, return None, None
return version, resources
if not item:
return version, resources
if variation not in item:
# If variation is not present, then key value should
# be directly returned if "defaults" was selected as the variation
assert (
variation == "defaults"
), f"'{variation}' variation not present in zoo config"
return _get_version_and_resources(item)
elif "resources" in item:
# Case where full key is directly passed
return _get_version_and_resources(item)
else:
return _get_version_and_resources(item[variation])
def _get_version_and_resources(item):
assert "version" in item, "'version' key should be present in zoo config {}".format(
item._get_full_key("")
)
assert (
"resources" in item
), "'resources' key should be present in zoo config {}".format(
item._get_full_key("")
)
return item.version, item.resources
def get_global_config(key=None):
config = registry.get("config")
if config is None:
configuration = Configuration()
config = configuration.get_config()
registry.register("config", config)
if key:
config = OmegaConf.select(config, key)
return config
def get_mmf_cache_dir():
config = get_global_config()
cache_dir = config.env.cache_dir
# If cache_dir path exists do not join to mmf root
if not os.path.exists(cache_dir):
cache_dir = os.path.join(get_mmf_root(), cache_dir)
return cache_dir
def get_mmf_env(key=None):
config = get_global_config()
if key:
return OmegaConf.select(config.env, key)
else:
return config.env
def _merge_with_dotlist(
config: DictConfig,
opts: List[str],
skip_missing: bool = False,
log_info: bool = True,
):
# TODO: To remove technical debt, a possible solution is to use
# struct mode to update with dotlist OmegaConf node. Look into this
# in next iteration
# TODO: Simplify this function
if opts is None:
opts = []
if len(opts) == 0:
return config
# Support equal e.g. model=visual_bert for better future hydra support
has_equal = opts[0].find("=") != -1
if has_equal:
opt_values = [opt.split("=", maxsplit=1) for opt in opts]
if not all(len(opt) == 2 for opt in opt_values):
for opt in opt_values:
assert len(opt) == 2, f"{opt} has no value"
else:
assert len(opts) % 2 == 0, "Number of opts should be multiple of 2"
opt_values = zip(opts[0::2], opts[1::2])
for opt, value in opt_values:
if opt == "dataset":
opt = "datasets"
splits = opt.split(".")
current = config
for idx, field in enumerate(splits):
array_index = -1
if field.find("[") != -1 and field.find("]") != -1:
stripped_field = field[: field.find("[")]
array_index = int(field[field.find("[") + 1 : field.find("]")])
else:
stripped_field = field
if stripped_field not in current:
if skip_missing is True:
break
raise AttributeError(
"While updating configuration"
" option {} is missing from"
" configuration at field {}".format(opt, stripped_field)
)
if isinstance(current[stripped_field], collections.abc.Mapping):
current = current[stripped_field]
elif (
isinstance(current[stripped_field], collections.abc.Sequence)
and array_index != -1
):
try:
current_value = current[stripped_field][array_index]
except OCErrors.ConfigIndexError:
if skip_missing:
break
raise
# Case where array element to be updated is last element
if (
not isinstance(
current_value,
(collections.abc.Mapping, collections.abc.Sequence),
)
or idx == len(splits) - 1
):
if log_info:
logger.info(f"Overriding option {opt} to {value}")
current[stripped_field][array_index] = _decode_value(value)
else:
# Otherwise move on down the chain
current = current_value
else:
if idx == len(splits) - 1:
if log_info:
logger.info(f"Overriding option {opt} to {value}")
current[stripped_field] = _decode_value(value)
else:
if skip_missing:
break
                    raise AttributeError(
                        "While updating configuration"
                        " option {} is not present "
                        "after field {}".format(opt, stripped_field)
                    )
return config
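def _example_merge_with_dotlist():
    # Editor's sketch, not part of the original module: a minimal illustration of
    # the dotlist override behaviour implemented above, using hypothetical keys.
    cfg = OmegaConf.create({"optimizer": {"params": {"lr": 1e-4}}})
    cfg = _merge_with_dotlist(cfg, ["optimizer.params.lr=5e-5"], log_info=False)
    assert cfg.optimizer.params.lr == 5e-5
    return cfg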
def _decode_value(value):
# https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L400
if not isinstance(value, str):
return value
if value == "None":
value = None
try:
value = literal_eval(value)
except ValueError:
pass
except SyntaxError:
pass
return value
def resolve_cache_dir(env_variable="MMF_CACHE_DIR", default="mmf"):
    # Some of this follows what "transformers" does for their cache resolving
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME",
os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"),
)
)
default_cache_path = os.path.join(torch_cache_home, default)
cache_path = os.getenv(env_variable, default_cache_path)
if not PathManager.exists(cache_path):
try:
PathManager.mkdirs(cache_path)
except PermissionError:
cache_path = os.path.join(get_mmf_root(), ".mmf_cache")
PathManager.mkdirs(cache_path)
return cache_path
def resolve_dir(env_variable, default="data"):
default_dir = os.path.join(resolve_cache_dir(), default)
dir_path = os.getenv(env_variable, default_dir)
if not PathManager.exists(dir_path):
PathManager.mkdirs(dir_path)
return dir_path
class Configuration:
def __init__(self, args=None, default_only=False):
self.config = {}
if not args:
import argparse
args = argparse.Namespace(opts=[])
default_only = True
self.args = args
self._register_resolvers()
self._default_config = self._build_default_config()
# Initially, silently add opts so that some of the overrides for the defaults
# from command line required for setup can be honored
self._default_config = _merge_with_dotlist(
self._default_config, args.opts, skip_missing=True, log_info=False
)
# Register the config and configuration for setup
registry.register("config", self._default_config)
registry.register("configuration", self)
if default_only:
other_configs = {}
else:
other_configs = self._build_other_configs()
self.config = OmegaConf.merge(self._default_config, other_configs)
self.config = _merge_with_dotlist(self.config, args.opts)
self._update_specific(self.config)
self.upgrade(self.config)
# Resolve the config here itself after full creation so that spawned workers
# don't face any issues
self.config = OmegaConf.create(
OmegaConf.to_container(self.config, resolve=True)
)
# Update the registry with final config
registry.register("config", self.config)
def _build_default_config(self):
self.default_config_path = get_default_config_path()
default_config = load_yaml(self.default_config_path)
return default_config
def _build_other_configs(self):
opts_config = self._build_opt_list(self.args.opts)
user_config = self._build_user_config(opts_config)
self._opts_config = opts_config
self._user_config = user_config
self.import_user_dir()
model_config = self._build_model_config(opts_config)
dataset_config = self._build_dataset_config(opts_config)
args_overrides = self._build_demjson_config(self.args.config_override)
other_configs = OmegaConf.merge(
model_config, dataset_config, user_config, args_overrides
)
return other_configs
def _build_opt_list(self, opts):
opts_dot_list = self._convert_to_dot_list(opts)
return OmegaConf.from_dotlist(opts_dot_list)
def _build_user_config(self, opts):
user_config = {}
# Update user_config with opts if passed
self.config_path = opts.config
if self.config_path is not None:
user_config = load_yaml(self.config_path)
return user_config
def import_user_dir(self):
# Try user_dir options in order of MMF configuration hierarchy
# First try the default one, which can be set via environment as well
user_dir = self._default_config.env.user_dir
# Now, check user's config
user_config_user_dir = self._user_config.get("env", {}).get("user_dir", None)
if user_config_user_dir:
user_dir = user_config_user_dir
# Finally, check opts
opts_user_dir = self._opts_config.get("env", {}).get("user_dir", None)
if opts_user_dir:
user_dir = opts_user_dir
if user_dir:
import_user_module(user_dir)
def _build_model_config(self, config):
model = config.model
if model is None:
raise KeyError("Required argument 'model' not passed")
model_cls = registry.get_model_class(model)
if model_cls is None:
warning = f"No model named '{model}' has been registered"
warnings.warn(warning)
return OmegaConf.create()
default_model_config_path = model_cls.config_path()
if default_model_config_path is None:
warning = "Model {}'s class has no default configuration provided".format(
model
)
warnings.warn(warning)
return OmegaConf.create()
return load_yaml(default_model_config_path)
def _build_dataset_config(self, config):
dataset = config.get("dataset", None)
datasets = config.get("datasets", None)
if dataset is None and datasets is None:
raise KeyError("Required argument 'dataset|datasets' not passed")
if datasets is None:
config.datasets = dataset
datasets = dataset.split(",")
else:
datasets = datasets.split(",")
dataset_config = OmegaConf.create()
for dataset in datasets:
builder_cls = registry.get_builder_class(dataset)
if builder_cls is None:
warning = f"No dataset named '{dataset}' has been registered"
warnings.warn(warning)
continue
default_dataset_config_path = builder_cls.config_path()
if default_dataset_config_path is None:
warning = (
f"Dataset {dataset}'s builder class has no default configuration "
+ "provided"
)
warnings.warn(warning)
continue
dataset_config = OmegaConf.merge(
dataset_config, load_yaml(default_dataset_config_path)
)
return dataset_config
def get_config(self):
self._register_resolvers()
return self.config
def _build_demjson_config(self, demjson_string):
if demjson_string is None:
return OmegaConf.create()
try:
import demjson
except ImportError:
logger.warning("demjson is required to use config_override")
raise
demjson_dict = demjson.decode(demjson_string)
return OmegaConf.create(demjson_dict)
def _get_args_config(self, args):
args_dict = vars(args)
return OmegaConf.create(args_dict)
def _register_resolvers(self):
OmegaConf.clear_resolvers()
# Device count resolver
device_count = max(1, torch.cuda.device_count())
OmegaConf.register_new_resolver("device_count", lambda: device_count)
OmegaConf.register_new_resolver("resolve_cache_dir", resolve_cache_dir)
OmegaConf.register_new_resolver("resolve_dir", resolve_dir)
def freeze(self):
OmegaConf.set_struct(self.config, True)
def defrost(self):
OmegaConf.set_struct(self.config, False)
def _convert_to_dot_list(self, opts):
if opts is None:
opts = []
if len(opts) == 0:
return opts
# Support equal e.g. model=visual_bert for better future hydra support
has_equal = opts[0].find("=") != -1
if has_equal:
return opts
return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
def pretty_print(self):
if not self.config.training.log_detailed_config:
return
logger.info("===== Training Parameters =====")
logger.info(self._convert_node_to_json(self.config.training))
logger.info("====== Dataset Attributes ======")
datasets = self.config.datasets.split(",")
for dataset in datasets:
if dataset in self.config.dataset_config:
logger.info(f"======== {dataset} =======")
dataset_config = self.config.dataset_config[dataset]
logger.info(self._convert_node_to_json(dataset_config))
else:
logger.warning(f"No dataset named '{dataset}' in config. Skipping")
logger.info("====== Optimizer Attributes ======")
logger.info(self._convert_node_to_json(self.config.optimizer))
if self.config.model not in self.config.model_config:
raise ValueError(f"{self.config.model} not present in model attributes")
logger.info(f"====== Model ({self.config.model}) Attributes ======")
logger.info(
self._convert_node_to_json(self.config.model_config[self.config.model])
)
def _convert_node_to_json(self, node):
container = OmegaConf.to_container(node, resolve=True)
return json.dumps(container, indent=4, sort_keys=True)
def _update_specific(self, config):
# tp = self.config.training
# if args["seed"] is not None or tp['seed'] is not None:
# print(
# "You have chosen to seed the training. This will turn on CUDNN "
# "deterministic setting which can slow down your training "
# "considerably! You may see unexpected behavior when restarting "
# "from checkpoints."
# )
# if args["seed"] == -1:
# self.config["training"]["seed"] = random.randint(1, 1000000)
if (
"learning_rate" in config
and "optimizer" in config
and "params" in config.optimizer
):
lr = config.learning_rate
config.optimizer.params.lr = lr
# TODO: Correct the following issue
# This check is triggered before the config override from
# commandline is effective even after setting
# training.device = 'xla', it gets triggered.
if not torch.cuda.is_available() and "cuda" in config.training.device:
warnings.warn(
"Device specified is 'cuda' but cuda is not present. "
+ "Switching to CPU version."
)
config.training.device = "cpu"
return config
def upgrade(self, config):
mapping = {
"training.resume_file": "checkpoint.resume_file",
"training.resume": "checkpoint.resume",
"training.resume_best": "checkpoint.resume_best",
"training.load_pretrained": "checkpoint.resume_pretrained",
"training.pretrained_state_mapping": "checkpoint.pretrained_state_mapping",
"training.run_type": "run_type",
}
for old, new in mapping.items():
value = OmegaConf.select(config, old)
if value:
OmegaConf.update(config, new, value)
# This is still here due to legacy reasons around
# older checkpoint loading from v0.3
class ConfigNode(collections.OrderedDict):
pass
|
24d330118591a1dcf2f2274a2f20fa1186786861
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/经典题/置换环/F - Guess The Number 2-置换环、孙子定理.py
|
2726fcc29f43445e02e886322d8d4967874c7033
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
F - Guess The Number 2-置换环、孙子定理.py
|
"""
置换环+中国剩余定理(孙子定理)
你需要猜到对方的n的大小,你仅可做一件事:
- 指定m,输出一个长度为 m的数组A。其中 1≤m≤110,1≤Ai≤m
- 对方根据数组 A,输出一个数组 B
- 你根据该数组 B,得出 n的值。
生成数组 B的方式为:
假想一个有m个点的图,其中点i连一条有向边到点ai。
Bi的值为:从i号点出发,走 n条边到达的点的编号。
n≤1e9
解:
# !多个置换环的大小 lcm要大于1e9
# 环的大小需要由两两互素的数构成
# 4,9,5,7,11,13,17,19,23,其和为108<110,其乘积为1338557220>1e9
通过该数组(每个元素是一个环的大小)可以构造出对应的数组a。
然后根据数组b计算得到每个余数 ri,然后由中国剩余定理解出 n。
"""
import sys
sys.setrecursionlimit(int(1e9))
input = lambda: sys.stdin.readline().rstrip("\r\n")
from typing import List, Optional, Tuple
def crt(remains: List[int], mods: List[int]) -> Optional[int]:
"""
    Smallest non-negative solution of a system of linear congruences whose moduli are pairwise coprime - Chinese Remainder Theorem (CRT)
    x ≡ remains_i (mod mods_i), with the mods_i pairwise coprime and Π mods_i <= 1e18
"""
modMul = 1
for m in mods:
modMul *= m
res = 0
for mod, remain in zip(mods, remains):
other = modMul // mod
inv = modInv(other, mod)
if inv is None:
return None
res = (res + remain * other * inv) % modMul
return res
def exgcd(a: int, b: int) -> Tuple[int, int, int]:
"""
    Compute gcd(a, b) together with one pair of Bezout coefficients x, y
    satisfying x*a + y*b = gcd(a, b).
    For ax + by = gcd_, returns `(gcd_, x, y)`.
"""
if b == 0:
return a, 1, 0
gcd_, x, y = exgcd(b, a % b)
return gcd_, y, x - a // b * y
def modInv(a: int, mod: int) -> Optional[int]:
"""
    Modular inverse of a modulo mod, computed with the extended gcd,
    i.e. find `inv` such that `a*inv ≡ 1 (mod m)`.
"""
gcd_, x, _ = exgcd(a, mod)
if gcd_ != 1:
return None
return x % mod
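def _sanity_check_mods():
    # Editor's sketch, not part of the original solution: verifies the claims made
    # in the module docstring about the chosen cycle sizes, and exercises crt().
    mods = [4, 9, 5, 7, 11, 13, 17, 19, 23]
    assert sum(mods) == 108 <= 110  # total array length fits the m <= 110 limit
    prod = 1
    for m in mods:
        prod *= m
    assert prod == 1338557220 > 10**9  # pairwise coprime, so lcm == product > 1e9
    assert crt([3, 4], [4, 9]) == 31  # 31 % 4 == 3 and 31 % 9 == 4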
if __name__ == "__main__":
MODS = [4, 9, 5, 7, 11, 13, 17, 19, 23]
A = []
start = 1
    starts = []  # starting node of each permutation cycle
for size in MODS:
cycle = list(range(start, start + size))
cycle = cycle[1:] + [cycle[0]]
A.extend(cycle)
starts.append(start)
start += size
print(len(A), flush=True)
print(*A, flush=True)
    B = list(map(int, input().split()))  # the integer sequence B = (B1, B2, ..., BM) of length M is given
    # recover n
remains = [B[start - 1] - start for start in starts]
res = crt(remains, MODS)
print(res, flush=True)
|
1c0d462fe531b78acb68fe2816f9ae7bf646dc56
|
bb12da8a0d637b68255b04b182fdd47558bec12e
|
/tests/test_differ_differconfig.py
|
4f35d6c1c45691f8db5ab35431aea9758025c32b
|
[
"ISC"
] |
permissive
|
wwkimball/yamlpath
|
03f1dffc0c5d5208d43fca33578de9ad0074c395
|
be7af7de60e920659b535aaae39046e84c85c248
|
refs/heads/master
| 2023-08-25T07:42:53.174510
| 2023-03-30T22:32:16
| 2023-03-30T22:32:16
| 184,194,939
| 102
| 20
|
ISC
| 2023-03-30T22:32:17
| 2019-04-30T05:05:17
|
Python
|
UTF-8
|
Python
| false
| false
| 11,959
|
py
|
test_differ_differconfig.py
|
import pytest
from types import SimpleNamespace
from yamlpath.func import get_yaml_editor, get_yaml_data
from yamlpath.differ.enums import (
AoHDiffOpts,
ArrayDiffOpts,
)
from yamlpath.wrappers import NodeCoords
from yamlpath.differ import DifferConfig
from yamlpath import YAMLPath
from tests.conftest import (
info_warn_logger,
quiet_logger,
create_temp_yaml_file
)
class Test_differ_DifferConfig():
"""Tests for the DifferConfig class."""
###
# array_diff_mode
###
def test_array_diff_mode_default(self, quiet_logger):
mc = DifferConfig(quiet_logger, SimpleNamespace(arrays=None))
assert mc.array_diff_mode(
NodeCoords(None, None, None)) == ArrayDiffOpts.POSITION
@pytest.mark.parametrize("setting, mode", [
("position", ArrayDiffOpts.POSITION),
("value", ArrayDiffOpts.VALUE),
])
def test_array_diff_mode_cli(self, quiet_logger, setting, mode):
mc = DifferConfig(quiet_logger, SimpleNamespace(arrays=setting))
assert mc.array_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("setting, mode", [
("position", ArrayDiffOpts.POSITION),
("value", ArrayDiffOpts.VALUE),
])
def test_array_diff_mode_ini(
self, quiet_logger, tmp_path_factory, setting, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
arrays = {}
""".format(setting))
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, arrays=None))
assert mc.array_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("cli, ini, mode", [
("position", "value", ArrayDiffOpts.POSITION),
("value", "position", ArrayDiffOpts.VALUE),
])
def test_array_diff_mode_cli_overrides_ini_defaults(
self, quiet_logger, tmp_path_factory, cli, ini, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
arrays = {}
""".format(ini))
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, arrays=cli))
assert mc.array_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("cli, ini_default, ini_rule, mode", [
("value", "value", "position", ArrayDiffOpts.POSITION),
("position", "position", "value", ArrayDiffOpts.VALUE),
])
def test_array_diff_mode_ini_rule_overrides_cli(
self, quiet_logger, tmp_path_factory, cli, ini_default, ini_rule, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
arrays = {}
[rules]
/hash/diff_targets/subarray = {}
""".format(ini_default, ini_rule))
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
(lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file)
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, arrays=cli))
mc.prepare(lhs_data)
node = lhs_data["hash"]["diff_targets"]["subarray"]
parent = lhs_data["hash"]["diff_targets"]
parentref = "subarray"
assert mc.array_diff_mode(
NodeCoords(node, parent, parentref)) == mode
###
# aoh_diff_mode
###
def test_aoh_diff_mode_default(self, quiet_logger):
mc = DifferConfig(quiet_logger, SimpleNamespace(aoh=None))
assert mc.aoh_diff_mode(
NodeCoords(None, None, None)) == AoHDiffOpts.POSITION
@pytest.mark.parametrize("setting, mode", [
("deep", AoHDiffOpts.DEEP),
("dpos", AoHDiffOpts.DPOS),
("key", AoHDiffOpts.KEY),
("position", AoHDiffOpts.POSITION),
("value", AoHDiffOpts.VALUE),
])
def test_aoh_diff_mode_cli(self, quiet_logger, setting, mode):
mc = DifferConfig(quiet_logger, SimpleNamespace(aoh=setting))
assert mc.aoh_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("setting, mode", [
("deep", AoHDiffOpts.DEEP),
("dpos", AoHDiffOpts.DPOS),
("key", AoHDiffOpts.KEY),
("position", AoHDiffOpts.POSITION),
("value", AoHDiffOpts.VALUE),
])
def test_aoh_diff_mode_ini(
self, quiet_logger, tmp_path_factory, setting, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
aoh = {}
""".format(setting))
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, aoh=None))
assert mc.aoh_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("cli, ini, mode", [
("deep", "dpos", AoHDiffOpts.DEEP),
("dpos", "key", AoHDiffOpts.DPOS),
("key", "position", AoHDiffOpts.KEY),
("position", "value", AoHDiffOpts.POSITION),
("value", "deep", AoHDiffOpts.VALUE),
])
def test_aoh_diff_mode_cli_overrides_ini_defaults(
self, quiet_logger, tmp_path_factory, cli, ini, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
aoh = {}
""".format(ini))
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, aoh=cli))
assert mc.aoh_diff_mode(
NodeCoords(None, None, None)) == mode
@pytest.mark.parametrize("cli, ini_default, ini_rule, mode", [
("deep", "dpos", "key", AoHDiffOpts.KEY),
("dpos", "key", "position", AoHDiffOpts.POSITION),
("key", "position", "value", AoHDiffOpts.VALUE),
("position", "value", "deep", AoHDiffOpts.DEEP),
("value", "deep", "dpos", AoHDiffOpts.DPOS),
])
def test_aoh_diff_mode_ini_rule_overrides_cli(
self, quiet_logger, tmp_path_factory, cli, ini_default, ini_rule, mode
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[defaults]
aoh = {}
[rules]
/array_of_hashes = {}
""".format(ini_default, ini_rule))
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
(lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file)
mc = DifferConfig(quiet_logger, SimpleNamespace(
config=config_file
, aoh=cli))
mc.prepare(lhs_data)
node = lhs_data["array_of_hashes"]
parent = lhs_data
parentref = "array_of_hashes"
assert mc.aoh_diff_mode(
NodeCoords(node, parent, parentref)) == mode
###
# aoh_diff_key
###
def test_aoh_diff_key_default(self, quiet_logger, tmp_path_factory):
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
(lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file)
mc = DifferConfig(quiet_logger, SimpleNamespace())
mc.prepare(lhs_data)
parent = lhs_data["array_of_hashes"]
parentref = 0
node = parent[parentref]
nc = NodeCoords(node, parent, parentref)
(key_attr, is_user_defined) = mc.aoh_diff_key(nc)
assert key_attr == "name" and is_user_defined == False
def test_aoh_diff_key_ini(self, quiet_logger, tmp_path_factory):
config_file = create_temp_yaml_file(tmp_path_factory, """
[keys]
/array_of_hashes = id
""")
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
(lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file)
mc = DifferConfig(quiet_logger, SimpleNamespace(config=config_file))
mc.prepare(lhs_data)
parent = lhs_data["array_of_hashes"]
parentref = 0
node = parent[parentref]
nc = NodeCoords(node, parent, parentref)
(key_attr, is_user_defined) = mc.aoh_diff_key(nc)
assert key_attr == "id" and is_user_defined == True
def test_aoh_diff_key_ini_inferred_parent(
self, quiet_logger, tmp_path_factory
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[keys]
/array_of_hashes = prop
""")
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
(lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, quiet_logger, lhs_yaml_file)
mc = DifferConfig(quiet_logger, SimpleNamespace(config=config_file))
mc.prepare(lhs_data)
parent = lhs_data["array_of_hashes"]
parentref = 1
node = parent[parentref]
nc = NodeCoords(node, parent, parentref)
(key_attr, is_user_defined) = mc.aoh_diff_key(nc)
assert key_attr == "prop" and is_user_defined == True
###
# Edge Cases
###
def test_warn_when_rules_matches_zero_nodes(
self, capsys, info_warn_logger, tmp_path_factory
):
config_file = create_temp_yaml_file(tmp_path_factory, """
[rules]
/does_not_exist = left
/array_of_hashes[name = "Does Not Compute"] = right
""")
lhs_yaml_file = create_temp_yaml_file(tmp_path_factory, """---
hash:
lhs_exclusive: lhs value 1
diff_targets:
subkey: lhs value 2
subarray:
- one
- two
array_of_hashes:
- name: LHS Record 1
id: 1
prop: LHS value AoH 1
- name: LHS Record 2
id: 2
prop: LHS value AoH 2
""")
lhs_yaml = get_yaml_editor()
        (lhs_data, lhs_loaded) = get_yaml_data(lhs_yaml, info_warn_logger, lhs_yaml_file)
mc = DifferConfig(info_warn_logger, SimpleNamespace(config=config_file))
mc.prepare(lhs_data)
console = capsys.readouterr()
assert "YAML Path matches no nodes" in console.out
|
614af475ac155f281e3b3b28c8b8f8c569c88601
|
e30874b3aa20804833dd11788176f839fcd08690
|
/python/cudf/cudf/core/buffer/utils.py
|
373be99ec96d8967f2a1f35d83214a14f94fb7f9
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cudf
|
eaba8948cddde8161c3b02b1b972dab3df8d95b3
|
c51633627ee7087542ad4c315c0e139dea58e408
|
refs/heads/branch-23.10
| 2023-09-04T07:18:27.194295
| 2023-09-03T06:20:33
| 2023-09-03T06:20:33
| 90,506,918
| 5,386
| 751
|
Apache-2.0
| 2023-09-14T00:27:03
| 2017-05-07T03:43:37
|
C++
|
UTF-8
|
Python
| false
| false
| 4,735
|
py
|
utils.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from __future__ import annotations
import threading
from contextlib import ContextDecorator
from typing import Any, Dict, Optional, Tuple, Union
from cudf.core.buffer.buffer import Buffer, cuda_array_interface_wrapper
from cudf.core.buffer.exposure_tracked_buffer import as_exposure_tracked_buffer
from cudf.core.buffer.spill_manager import get_global_manager
from cudf.core.buffer.spillable_buffer import SpillLock, as_spillable_buffer
from cudf.options import get_option
def as_buffer(
data: Union[int, Any],
*,
size: Optional[int] = None,
owner: Optional[object] = None,
exposed: bool = False,
) -> Buffer:
"""Factory function to wrap `data` in a Buffer object.
If `data` isn't a buffer already, a new buffer that points to the memory of
`data` is created. If `data` represents host memory, it is copied to a new
`rmm.DeviceBuffer` device allocation. Otherwise, the memory of `data` is
**not** copied, instead the new buffer keeps a reference to `data` in order
to retain its lifetime.
If `data` is an integer, it is assumed to point to device memory.
Raises ValueError if data isn't C-contiguous.
Parameters
----------
data : int or buffer-like or array-like
An integer representing a pointer to device memory or a buffer-like
or array-like object. When not an integer, `size` and `owner` must
be None.
size : int, optional
Size of device memory in bytes. Must be specified if `data` is an
integer.
owner : object, optional
Python object to which the lifetime of the memory allocation is tied.
A reference to this object is kept in the returned Buffer.
exposed : bool, optional
Mark the buffer as permanently exposed. This is used by
ExposureTrackedBuffer to determine when a deep copy is required and
by SpillableBuffer to mark the buffer unspillable.
Return
------
Buffer
A buffer instance that represents the device memory of `data`.
"""
if isinstance(data, Buffer):
return data
# We handle the integer argument in the factory function by wrapping
# the pointer in a `__cuda_array_interface__` exposing object so that
# the Buffer (and its sub-classes) do not have to.
if isinstance(data, int):
if size is None:
raise ValueError(
"size must be specified when `data` is an integer"
)
data = cuda_array_interface_wrapper(ptr=data, size=size, owner=owner)
elif size is not None or owner is not None:
raise ValueError(
"`size` and `owner` must be None when "
"`data` is a buffer-like or array-like object"
)
if get_option("copy_on_write"):
return as_exposure_tracked_buffer(data, exposed=exposed)
if get_global_manager() is not None:
return as_spillable_buffer(data, exposed=exposed)
if hasattr(data, "__cuda_array_interface__"):
return Buffer._from_device_memory(data)
return Buffer._from_host_memory(data)
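# --- Editor's sketch (illustrative only, not part of the original module). ---
# The two documented ways to call as_buffer, shown schematically (names such as
# `device_ptr` and `owner_obj` are hypothetical placeholders, not real objects):
#
#   as_buffer(np.zeros(4, dtype="int64"))             # array-like: host data is copied to device
#   as_buffer(device_ptr, size=32, owner=owner_obj)   # int pointer: size required, owner keeps it alive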
_thread_spill_locks: Dict[int, Tuple[Optional[SpillLock], int]] = {}
def _push_thread_spill_lock() -> None:
_id = threading.get_ident()
spill_lock, count = _thread_spill_locks.get(_id, (None, 0))
if spill_lock is None:
spill_lock = SpillLock()
_thread_spill_locks[_id] = (spill_lock, count + 1)
def _pop_thread_spill_lock() -> None:
_id = threading.get_ident()
spill_lock, count = _thread_spill_locks[_id]
if count == 1:
spill_lock = None
_thread_spill_locks[_id] = (spill_lock, count - 1)
class acquire_spill_lock(ContextDecorator):
"""Decorator and context to set spill lock automatically.
All calls to `get_spill_lock()` within the decorated function or context
will return a spill lock with a lifetime bound to the function or context.
Developer Notes
---------------
We use the global variable `_thread_spill_locks` to track the global spill
lock state. To support concurrency, each thread tracks its own state by
pushing and popping from `_thread_spill_locks` using its thread ID.
"""
def __enter__(self) -> Optional[SpillLock]:
_push_thread_spill_lock()
return get_spill_lock()
def __exit__(self, *exc):
_pop_thread_spill_lock()
def get_spill_lock() -> Union[SpillLock, None]:
"""Return a spill lock within the context of `acquire_spill_lock` or None
Returns None, if spilling is disabled.
"""
if get_global_manager() is None:
return None
_id = threading.get_ident()
spill_lock, _ = _thread_spill_locks.get(_id, (None, 0))
return spill_lock
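# --- Editor's sketch (illustrative only, not part of the original module). ---
# Typical use of the decorator/context documented above:
#
#   @acquire_spill_lock()
#   def kernel_launching_helper(buf):
#       lock = get_spill_lock()  # non-None only when a spill manager is active
#       ...
#
#   with acquire_spill_lock():
#       lock = get_spill_lock()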
|
d7d62520eff93ce0c7ffe393b9322e56d1ffe784
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/werkzeug/middleware/__init__.py
|
5e049f5ee97070ee39bcc46db88c37b4334d32d6
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
__init__.py
|
"""
Middleware
==========
A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.
.. toctree::
:maxdepth: 1
proxy_fix
shared_data
dispatcher
http_proxy
lint
profiler
The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
|
fc864b6e064f3aa0ac86132ef92cef3676d95311
|
7e1c4dd6a2cae0597b4f4e961063cf077acdfd4c
|
/txcouchbase/tests/conftest.py
|
6eead6539538adb5ca12b63cb0a4afc40deb151d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
couchbase/couchbase-python-client
|
753fa434db910d175bf9ea53a5829a40ba36e938
|
c7d80434be3f917d6f25439a918aed30273f63f4
|
refs/heads/master
| 2023-08-29T14:04:13.532717
| 2023-08-24T22:53:30
| 2023-08-25T03:35:21
| 2,122,194
| 223
| 87
|
Apache-2.0
| 2023-05-30T16:05:59
| 2011-07-29T04:24:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
conftest.py
|
# Copyright 2016-2022. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from twisted.internet import threads
from twisted.internet.error import ReactorAlreadyInstalledError
class TwistedObjects:
_REACTOR = None
_GREENLET = None
_TWISTED_THREAD = None
def pytest_configure(config):
run_tx = config.getoption('--txcouchbase')
if config and config.option and config.option.markexpr:
run_tx = config.getoption('--txcouchbase')
if 'txcouchbase' in config.option.markexpr:
run_tx = True
if run_tx is True:
init_reactor()
def run_in_reactor_thread(fn, *args, **kwargs):
result = threads.blockingCallFromThread(TwistedObjects._REACTOR, fn, *args, **kwargs)
return result
def run_reactor(reactor):
reactor.run()
def init_reactor():
from twisted.internet import asyncioreactor
from acouchbase import get_event_loop
try:
asyncioreactor.install(get_event_loop())
except ReactorAlreadyInstalledError as ex:
print(f'Twisted setup: {ex}')
finally:
import twisted.internet.reactor
TwistedObjects._REACTOR = twisted.internet.reactor
if not hasattr(TwistedObjects._REACTOR, '_asyncioEventloop'):
raise RuntimeError(
"Reactor installed is not the asyncioreactor.")
TwistedObjects._TWISTED_THREAD = threading.Thread(target=lambda: run_reactor(TwistedObjects._REACTOR))
TwistedObjects._REACTOR.suggestThreadPoolSize(10)
TwistedObjects._TWISTED_THREAD.start()
# hook to catch prior to running tests
# def pytest_runtest_call(item):
# pass
def pytest_unconfigure():
if TwistedObjects._TWISTED_THREAD:
threads.blockingCallFromThread(TwistedObjects._REACTOR, TwistedObjects._REACTOR.stop)
TwistedObjects._TWISTED_THREAD.join()
|
7ba334128b9dcbeb434e85dbdf7ec1199daf780b
|
88d555a009f9075e59177fac70036892f397b439
|
/bin/archive/basenji_map_seqs.py
|
a4dfd3f92d916b42772ab29eb792900cec2c42ff
|
[
"Apache-2.0"
] |
permissive
|
calico/basenji
|
f9f406971d355dda81821dcf274696a7d27e332d
|
615b9eec8a591783b16d959029ddad08edae853d
|
refs/heads/master
| 2023-09-04T11:14:15.620786
| 2023-07-27T00:05:13
| 2023-07-27T00:05:13
| 96,346,574
| 326
| 143
|
Apache-2.0
| 2023-08-16T00:36:32
| 2017-07-05T17:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,055
|
py
|
basenji_map_seqs.py
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser
import gc
import os
import pdb
import sys
import time
import h5py
import numpy as np
import pyBigWig
from scipy.stats import ttest_1samp
import tensorflow as tf
import basenji
from basenji_map import score_write
'''
basenji_map_seqs.py
Visualize the gradients of a sequence's predictions as a map of influence across
the genomic region.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <hdf5_file> <bed_file>'
parser = OptionParser(usage)
parser.add_option(
'-g',
dest='genome_file',
default='%s/data/human.hg19.genome'%os.environ['BASENJIDIR'],
help='Chromosome lengths file [Default: %default]')
parser.add_option(
'-l',
dest='gene_list',
help='Process only gene ids in the given file')
parser.add_option(
'--mc',
dest='mc_n',
default=0,
type='int',
help='Monte carlo test iterations [Default: %default]')
parser.add_option('-n',
dest='norm',
default=None,
type='int',
      help='Compute saliency norm [Default: %default]')
parser.add_option(
'-o',
dest='out_dir',
default='grad_map',
help='Output directory [Default: %default]')
parser.add_option(
'--rc',
dest='rc',
default=False,
action='store_true',
help='Average the forward and reverse complement predictions when testing [Default: %default]')
parser.add_option(
'--shifts',
dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option(
'-t',
dest='target_indexes',
default=None,
help='Target indexes to plot')
  (options, args) = parser.parse_args()
  if len(args) != 4:
    parser.error('Must provide parameters, model, HDF5 test data, and BED file')
else:
params_file = args[0]
model_file = args[1]
hdf5_file = args[2]
bed_file = args[3]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
#######################################################
# load data
data_open = h5py.File(hdf5_file)
test_seqs = data_open['test_in']
# extract sequence chrom and start
seqs_chrom = []
seqs_pos = []
for line in open(bed_file):
a = line.split()
if a[3] == 'test':
seqs_chrom.append(a[0])
seqs_pos.append(int(a[1]))
#######################################################
# model parameters and placeholders
job = basenji.dna_io.read_job_params(params_file)
job['seq_length'] = test_seqs.shape[1]
job['seq_depth'] = test_seqs.shape[2]
job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))
if 'num_targets' not in job:
print("Must specify number of targets (num_targets) in the parameters file.",
file=sys.stderr)
exit(1)
# set target indexes
if options.target_indexes is not None:
options.target_indexes = [int(ti) for ti in options.target_indexes.split(',')]
target_subset = options.target_indexes
else:
options.target_indexes = list(range(job['num_targets']))
target_subset = None
# build model
model = basenji.seqnn.SeqNN()
model.build(job, target_subset=target_subset)
# determine latest pre-dilated layer
dilated_mask = np.array(model.cnn_dilation) > 1
dilated_indexes = np.where(dilated_mask)[0]
pre_dilated_layer = np.min(dilated_indexes)
print('Pre-dilated layer: %d' % pre_dilated_layer)
# build gradients ops
t0 = time.time()
print('Building target/position-specific gradient ops.', end='')
model.build_grads(layers=[pre_dilated_layer])
print(' Done in %ds' % (time.time()-t0), flush=True)
#######################################################
# acquire gradients
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# score sequences and write bigwigs
score_write(sess, model, options, test_seqs, seqs_chrom, seqs_pos)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
8ad9099fcf40314b05b5bcc78ca47b4e33db1c5d
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/custom/abt/tests/test_custom_recipients.py
|
05c625560d1499a1b6603287d4a38ab14649464e
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,019
|
py
|
test_custom_recipients.py
|
from django.test import TestCase
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import SQLLocation, LocationType
from corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.util.test_utils import create_test_case
class CustomRecipientTest(TestCase):
def setUp(self):
self.domain = 'custom-recipient-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.parent_location_type = LocationType.objects.create(
domain=self.domain,
name='parent type',
code='parent'
)
self.child_location_type = LocationType.objects.create(
domain=self.domain,
name='child type',
code='child',
parent_type=self.parent_location_type
)
self.parent_location = SQLLocation.objects.create(
domain=self.domain,
name='parent test',
site_code='parent',
location_type=self.parent_location_type
)
self.child_location = SQLLocation.objects.create(
domain=self.domain,
name='child test',
site_code='child',
location_type=self.child_location_type,
parent=self.parent_location
)
self.user = CommCareUser.create(self.domain, 'test', 'test', None, None)
self.user.set_location(self.child_location)
def tearDown(self):
self.user.delete(self.domain, deleted_by=None)
self.child_location.delete()
self.parent_location.delete()
self.child_location_type.delete()
self.parent_location_type.delete()
self.domain_obj.delete()
def test_recipient_mobile_worker_case_owner_location_parent(self):
with create_test_case(self.domain, 'test-case', 'test-name', owner_id=self.user.get_id) as case:
self.assertEqual(case.owner_id, self.user.get_id)
def instance(case_id=''):
# recipient is memoized
return CaseTimedScheduleInstance(
domain=self.domain,
case_id=case_id or case.case_id,
recipient_type='CustomRecipient',
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
# Test the recipient is returned correctly
self.assertTrue(isinstance(instance().recipient, SQLLocation))
self.assertEqual(instance().recipient.pk, self.parent_location.pk)
# Test when the user's location has no parent location
self.user.set_location(self.parent_location)
self.assertIsNone(instance().recipient)
# Remove child location
self.user.unset_location()
self.assertIsNone(instance().recipient)
# Remove case
self.assertIsNone(instance(case_id='does-not-exist').recipient)
def test_recipient_location_case_owner_parent_location(self):
with create_test_case(
self.domain,
'test-case',
'test-name',
owner_id=self.child_location.location_id
) as case:
self.assertEqual(case.owner_id, self.child_location.location_id)
def instance(case_id=''):
# recipient is memoized
return CaseTimedScheduleInstance(
domain=self.domain,
case_id=case_id or case.case_id,
recipient_type='CustomRecipient',
recipient_id='LOCATION_CASE_OWNER_PARENT_LOCATION'
)
# Test the recipient is returned correctly
self.assertTrue(isinstance(instance().recipient, SQLLocation))
self.assertEqual(instance().recipient.pk, self.parent_location.pk)
# Remove case
self.assertIsNone(instance(case_id='does-not-exist').recipient)
|
fbce7b0eefe0c65a773e9710881873aba7184455
|
8cb7399499d582efbc900b530cd7075dd82ec0bd
|
/tests/plugins/core/test_taskmanager.py
|
1eb179f2fa1b6d00f5ae44d04d46e45c0b41bd5c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SpockBotMC/SpockBot
|
115334d3ebb6db806e859a9ea20ea476761702b3
|
f89911551f18357720034fbaa52837a0d09f66ea
|
refs/heads/master
| 2021-01-15T15:33:00.003492
| 2016-05-01T14:57:55
| 2016-05-01T14:57:55
| 7,111,791
| 188
| 61
| null | 2016-04-25T00:06:56
| 2012-12-11T13:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,381
|
py
|
test_taskmanager.py
|
import random
from collections import defaultdict
from unittest import TestCase
from spockbot.plugins.core.taskmanager import TaskManager
from spockbot.plugins.tools.task import Task, TaskFailed
class EventMock(object):
def __init__(self):
self.handlers = defaultdict(list)
def reg_event_handler(self, evt, handler):
self.handlers[evt].append(handler)
def emit(self, event, data):
to_remove = []
for handler in reversed(self.handlers[event]):
if handler(event, data):
to_remove.append(handler)
else:
raise AssertionError('%s %s' % (handler.__name__, handler))
for handler in to_remove:
self.handlers[event].remove(handler)
class PluginLoaderMock(object):
def provides(self, ident, obj):
self.provides_ident = ident
self.provides_obj = obj
def requires(self, plug_name):
assert plug_name == 'Event', 'Unexpected requirement %s' % plug_name
return EventMock()
class TaskManagerTest(TestCase):
def setUp(self):
ploader = PluginLoaderMock()
self.task_manager = TaskManager(ploader, {})
assert ploader.provides_ident == 'TaskManager'
assert isinstance(ploader.provides_obj, TaskManager)
def test_run_task(self):
def some_task(level):
if level <= 0:
raise StopIteration('We are done!', level)
# one event name
event, data = yield 'some_event'
self.assertEqual('some_event', event)
self.assertDictEqual({'some': 'data'}, data)
# multiple event names
event, data = yield 'other_event', 'never_emitted', 'this_neither'
self.assertEqual('other_event', event)
self.assertDictEqual({'other': 'data'}, data)
# event name + check
event, data = yield 'cool_event', lambda e, d: True
self.assertEqual('cool_event', event)
self.assertDictEqual({'more': 'data'}, data)
# subtask
event, data = yield some_task(level - 1)
self.assertEqual(None, event)
self.assertTupleEqual(('We are done!', level - 1), data)
raise StopIteration('We are done!', level)
def emit_them(level):
"""Recursively emit and check the events waited for by the task"""
if level <= 0:
return
self.assertIn('some_event', self.task_manager.event.handlers,
'Event not registered')
self.task_manager.event.emit('some_event', {'some': 'data'})
self.assertFalse(self.task_manager.event.handlers['some_event'],
'Emitted event not unregistered')
self.assertIn('other_event', self.task_manager.event.handlers,
'Event not registered')
self.assertIn('never_emitted', self.task_manager.event.handlers,
'Event not registered')
self.assertIn('this_neither', self.task_manager.event.handlers,
'Event not registered')
self.task_manager.event.emit('other_event', {'other': 'data'})
            self.assertFalse(self.task_manager.event.handlers['other_event'],
'Emitted event not unregistered')
self.assertIn('cool_event', self.task_manager.event.handlers,
'Event not registered')
self.task_manager.event.emit('cool_event', {'more': 'data'})
self.assertFalse(self.task_manager.event.handlers['cool_event'],
'Emitted event not unregistered')
emit_them(level - 1)
done = [False] # because we can't use nonlocal in Python 2.7
test_case = self # to make flake8 happy
class SomeParent(object):
def on_success(self, data):
done[0] = True
def on_error(self, e):
test_case.fail('Task failed with %s: %s'
% (e.__class__.__name__, e.args))
levels = 3
task = some_task(levels)
parent = SomeParent()
ret = self.task_manager.run_task(task, parent)
self.assertIsInstance(ret, Task)
emit_them(levels)
assert done[0]
def test_failure(self):
last_data = ['started']
def task_with_failure():
last_data[0] = yield 'bbbb'
raise TaskFailed('Some error!')
def task_deep_failure():
def task_deepest_failure():
last_data[0] = yield 'cccc'
raise TaskFailed('Low level error!')
def task_deeper_failure():
yield task_deepest_failure() # error falls through
try:
yield task_deeper_failure()
except TaskFailed as error:
raise TaskFailed('High level error!').with_error(error)
def task_with_exception():
last_data[0] = yield 'dddd'
1/0
def top_task():
try:
yield task_with_failure()
except TaskFailed as e:
self.assertEqual('Some error!', e.message)
self.assertEqual(None, e.prev_error)
self.assertEqual(1, len(e.tasktrace))
self.assertEqual('task_with_failure', e.tasktrace[0].name)
self.assertEqual(1, len(e.full_tasktrace))
self.assertEqual('task_with_failure',
e.full_tasktrace[0].name)
else:
self.fail('TaskFailed not passed into task')
try:
yield task_deep_failure()
except TaskFailed as e:
self.assertEqual('High level error!', e.message)
self.assertEqual('Low level error!', e.prev_error.message)
self.assertEqual(None, e.prev_error.prev_error)
self.assertEqual(1, len(e.tasktrace))
self.assertEqual('task_deep_failure', e.tasktrace[0].name)
self.assertEqual(3, len(e.full_tasktrace))
self.assertEqual('task_deepest_failure',
e.full_tasktrace[0].name)
self.assertEqual('task_deeper_failure',
e.full_tasktrace[1].name)
self.assertEqual('task_deep_failure',
e.full_tasktrace[2].name)
else:
self.fail('TaskFailed not passed through tasks')
try:
yield task_with_exception()
except ZeroDivisionError as e:
pass # everything OK
else:
self.fail('Exception not passed into task')
task = top_task()
ret = self.task_manager.run_task(task)
self.assertIsInstance(ret, Task)
self.assertEqual('started', last_data[0])
self.emit_and_check('bbbb', last_data)
self.emit_and_check('cccc', last_data)
self.emit_and_check('dddd', last_data)
def emit_and_check(self, event, last_data):
data = random.random()
self.task_manager.event.emit(event, data)
self.assertEqual(event, last_data[0][0])
self.assertEqual(data, last_data[0][1])
|
96eac2c0bee121a0318f0934e3cc1ec7a3701a9b
|
39fd37bc43089b5b7e025b4f18ddbb45fea33f05
|
/models/proto_norm.py
|
4efbdfb51792cbee56875f6528020d21ab8da88c
|
[
"MIT"
] |
permissive
|
thunlp/FewRel
|
48e385970543cece1a837a7da3584dfa5dca4d5b
|
278a2315d2138810a379cd8d5718914dc56e2582
|
refs/heads/master
| 2023-08-11T02:17:32.738965
| 2022-05-04T14:41:14
| 2022-05-04T14:41:14
| 146,135,239
| 752
| 155
|
MIT
| 2019-12-19T07:10:18
| 2018-08-25T22:55:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
proto_norm.py
|
import sys
sys.path.append('..')
import fewshot_re_kit
import torch
from torch import autograd, optim, nn
from torch.autograd import Variable
from torch.nn import functional as F
def l2norm(X):
norm = torch.pow(X, 2).sum(dim=-1, keepdim=True).sqrt()
X = torch.div(X, norm)
return X
class ProtoNorm(fewshot_re_kit.framework.FewShotREModel):
def __init__(self, sentence_encoder, hidden_size=230):
fewshot_re_kit.framework.FewShotREModel.__init__(self, sentence_encoder)
self.hidden_size = hidden_size
# self.fc = nn.Linear(hidden_size, hidden_size)
self.drop = nn.Dropout()
def __dist__(self, x, y, dim):
return (torch.pow(x - y, 2)).sum(dim)
def __batch_dist__(self, S, Q):
return self.__dist__(S.unsqueeze(1), Q.unsqueeze(2), 3)
def forward(self, support, query, N, K, total_Q):
'''
support: Inputs of the support set.
query: Inputs of the query set.
N: Num of classes
K: Num of instances for each class in the support set
        total_Q: Num of instances in the query set
'''
support = self.sentence_encoder(support) # (B * N * K, D), where D is the hidden size
query = self.sentence_encoder(query) # (B * total_Q, D)
support = l2norm(support)
query = l2norm(query)
support = self.drop(support)
query = self.drop(query)
support = support.view(-1, N, K, self.hidden_size) # (B, N, K, D)
query = query.view(-1, total_Q, self.hidden_size) # (B, total_Q, D)
# Prototypical Networks
# Ignore NA policy
support = torch.mean(support, 2) # Calculate prototype for each class
logits = -self.__batch_dist__(support, query) # (B, total_Q, N)
minn, _ = logits.min(-1)
logits = torch.cat([logits, minn.unsqueeze(2) - 1], 2) # (B, total_Q, N + 1)
_, pred = torch.max(logits.view(-1, N + 1), 1)
return logits, pred
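def _example_batch_dist_shapes():
    # Editor's sketch, not part of the original model code: a shape check for the
    # broadcasting used in ProtoNorm.__batch_dist__, using plain tensors.
    S = torch.randn(2, 5, 230)   # (B, N, D) class prototypes
    Q = torch.randn(2, 7, 230)   # (B, total_Q, D) query embeddings
    d = torch.pow(S.unsqueeze(1) - Q.unsqueeze(2), 2).sum(3)
    assert d.shape == (2, 7, 5)  # (B, total_Q, N) squared distances
    return d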
|
85911cc8210b627a50ab89c2e7ce4ef9048d6ff5
|
c1b77c0b1630c2e319e7ba7782a744f4ac867f7d
|
/test/lazy/test_lazy_evaluated_kernel_tensor.py
|
5a35287044d4c9f67d719d07247d98605bf146bb
|
[
"MIT",
"Python-2.0"
] |
permissive
|
cornellius-gp/gpytorch
|
6b9ab969b2888fa7f27f236a1b20041f00cc0253
|
5e93d2c04ac0634a7aeea9fd964be529bb250888
|
refs/heads/master
| 2023-08-31T21:13:02.741585
| 2023-08-25T19:24:53
| 2023-08-25T19:24:53
| 93,868,719
| 3,182
| 578
|
MIT
| 2023-09-13T01:06:00
| 2017-06-09T14:48:20
|
Python
|
UTF-8
|
Python
| false
| false
| 8,539
|
py
|
test_lazy_evaluated_kernel_tensor.py
|
#!/usr/bin/env python3
import math
import unittest
from unittest.mock import MagicMock, patch
import linear_operator
import torch
from linear_operator import to_dense
from linear_operator.test.linear_operator_test_case import LinearOperatorTestCase
import gpytorch
class TestLazyEvaluatedKernelTensorBatch(LinearOperatorTestCase, unittest.TestCase):
seed = 0
def create_linear_op(self):
kern = gpytorch.kernels.RBFKernel()
mat1 = torch.randn(2, 5, 6)
mat2 = mat1.detach().clone()
return kern(mat1, mat2)
def evaluate_linear_op(self, lazy_tensor):
with gpytorch.settings.lazily_evaluate_kernels(False):
return to_dense(lazy_tensor.kernel(lazy_tensor.x1, lazy_tensor.x2))
def _test_matmul(self, rhs):
lazy_tensor = self.create_linear_op().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_linear_op(lazy_tensor_copy)
rhs_evaluated = to_dense(rhs)
res = lazy_tensor.matmul(rhs)
actual = evaluated.matmul(rhs_evaluated)
res_evaluated = to_dense(res)
self.assertAllClose(res_evaluated, actual)
grad = torch.randn_like(res_evaluated)
res_evaluated.backward(gradient=grad)
actual.backward(gradient=grad)
for param, param_copy in zip(lazy_tensor.kernel.parameters(), lazy_tensor_copy.kernel.parameters()):
self.assertAllClose(param.grad, param_copy.grad, rtol=1e-3)
self.assertAllClose(
lazy_tensor.x1.grad + lazy_tensor.x2.grad, lazy_tensor_copy.x1.grad + lazy_tensor_copy.x2.grad, rtol=1e-3
)
def _test_rmatmul(self, lhs):
lazy_tensor = self.create_linear_op().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_linear_op(lazy_tensor_copy)
res = lhs @ lazy_tensor
actual = lhs @ evaluated
self.assertAllClose(res, actual)
grad = torch.randn_like(res)
res.backward(gradient=grad)
actual.backward(gradient=grad)
for param, param_copy in zip(lazy_tensor.kernel.parameters(), lazy_tensor_copy.kernel.parameters()):
self.assertAllClose(param.grad, param_copy.grad, rtol=1e-3)
self.assertAllClose(
lazy_tensor.x1.grad + lazy_tensor.x2.grad,
lazy_tensor_copy.x1.grad + lazy_tensor_copy.x2.grad,
rtol=1e-3,
atol=1e-4,
)
def _test_inv_matmul(self, rhs, lhs=None, cholesky=False):
lazy_tensor = self.create_linear_op().requires_grad_(True)
lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
evaluated = self.evaluate_linear_op(lazy_tensor_copy)
evaluated.register_hook(self._ensure_symmetric_grad)
# Create a test right hand side and left hand side
rhs.requires_grad_(True)
rhs_copy = rhs.clone().detach().requires_grad_(True)
if lhs is not None:
lhs.requires_grad_(True)
lhs_copy = lhs.clone().detach().requires_grad_(True)
_wrapped_cg = MagicMock(wraps=linear_operator.utils.linear_cg)
with patch("linear_operator.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
with gpytorch.settings.max_cholesky_size(math.inf if cholesky else 0), gpytorch.settings.cg_tolerance(1e-4):
# Perform the inv_matmul
if lhs is not None:
res = lazy_tensor.solve(rhs, lhs)
actual = lhs_copy @ evaluated.inverse() @ rhs_copy
else:
res = lazy_tensor.solve(rhs)
actual = evaluated.inverse().matmul(rhs_copy)
self.assertAllClose(res, actual, rtol=0.02, atol=1e-5)
# Perform backward pass
grad = torch.randn_like(res)
res.backward(gradient=grad)
actual.backward(gradient=grad)
for param, param_copy in zip(lazy_tensor.kernel.parameters(), lazy_tensor_copy.kernel.parameters()):
self.assertAllClose(param.grad, param_copy.grad, rtol=1e-3)
self.assertAllClose(
lazy_tensor.x1.grad + lazy_tensor.x2.grad,
lazy_tensor_copy.x1.grad + lazy_tensor_copy.x2.grad,
rtol=1e-3,
)
self.assertAllClose(rhs.grad, rhs_copy.grad, rtol=0.03, atol=1e-5)
if lhs is not None:
self.assertAllClose(lhs.grad, lhs_copy.grad, rtol=0.03, atol=1e-5)
# Determine if we've called CG or not
if not cholesky and self.__class__.should_call_cg:
self.assertTrue(linear_cg_mock.called)
else:
self.assertFalse(linear_cg_mock.called)
def test_batch_getitem(self):
"""Indexing was wrong when the kernel had more batch dimensions than the
data"""
x1 = torch.randn(5, 6)
x2 = torch.randn(5, 6)
kern = gpytorch.kernels.RBFKernel(batch_shape=torch.Size([2]))
k = kern(x1, x2)
self.assertEqual(k.size(), torch.Size([2, 5, 5]))
self.assertEqual(k[..., :4, :3].size(), torch.Size([2, 4, 3]))
def test_batch_getitem_multioutput(self):
"""Ensure slicing is efficient when using a multioutput kernel"""
x1 = torch.randn(5, 6)
x2 = torch.randn(5, 6)
kern = gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2]))
k = kern(x1, x2)
k.evaluate_kernel = MagicMock(name="evaluate_kernel")
k_sliced = k[..., :7, :14]
self.assertFalse(k.evaluate_kernel.called)
self.assertEqual(k.size(), torch.Size([2, 35, 35]))
self.assertEqual(k_sliced.size(), torch.Size([2, 7, 14]))
def test_getitem_tensor_index(self):
        # Not supported at the moment with LazyEvaluatedKernelTensors
pass
def test_bilinear_derivative(self):
pass
def test_t_matmul_matrix(self):
pass
def test_half(self):
# many transform operations aren't supported in half so we overwrite
# this test
lazy_tensor = self.create_linear_op()
lazy_tensor.kernel.raw_lengthscale_constraint.transform = lambda x: x + 0.1
self._test_half(lazy_tensor)
def test_grad_state(self):
k = gpytorch.kernels.RBFKernel()
X = torch.randn(2, 3)
X.requires_grad = True
lazy_tensor = k(X)
self.assertTrue(lazy_tensor.to_dense().requires_grad)
with torch.no_grad():
lazy_tensor = k(X)
self.assertFalse(lazy_tensor.to_dense().requires_grad)
class TestLazyEvaluatedKernelTensorMultitaskBatch(TestLazyEvaluatedKernelTensorBatch):
seed = 0
skip_slq_tests = True # we skip these because of the kronecker structure
def create_linear_op(self):
kern = gpytorch.kernels.MultitaskKernel(gpytorch.kernels.RBFKernel(), num_tasks=3, rank=2)
mat1 = torch.randn(2, 5, 6)
mat2 = mat1.detach().clone()
return kern(mat1, mat2)
def test_inv_matmul_matrix_with_checkpointing(self):
pass
def test_half(self):
# many transform operations aren't supported in half so we overwrite
# this test
lazy_tensor = self.create_linear_op()
lazy_tensor.kernel.data_covar_module.raw_lengthscale_constraint.transform = lambda x: x + 0.1
self._test_half(lazy_tensor)
class TestLazyEvaluatedKernelTensorAdditive(TestLazyEvaluatedKernelTensorBatch):
seed = 0
def create_linear_op(self):
kern = gpytorch.kernels.AdditiveStructureKernel(gpytorch.kernels.RBFKernel(), num_dims=6)
mat1 = torch.randn(5, 6)
mat2 = mat1.detach().clone()
return kern(mat1, mat2)
def evaluate_linear_op(self, lazy_tensor):
res = to_dense(
gpytorch.Module.__call__(
lazy_tensor.kernel.base_kernel,
lazy_tensor.x1.transpose(-1, -2).unsqueeze(-1),
lazy_tensor.x2.transpose(-1, -2).unsqueeze(-1),
)
).sum(0)
return res
def test_inv_matmul_matrix_with_checkpointing(self):
pass
def test_half(self):
# many transform operations aren't supported in half so we overwrite
# this test
lazy_tensor = self.create_linear_op()
lazy_tensor.kernel.base_kernel.raw_lengthscale_constraint.transform = lambda x: x + 0.1
self._test_half(lazy_tensor)
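# A small standalone sketch (not part of the test classes above) of the
# lazy evaluation these tests exercise: calling a kernel yields a lazy
# tensor that is only materialized on to_dense(); slicing first avoids
# evaluating the full matrix. Assumes a recent gpytorch/linear_operator.
import torch
import gpytorch

x1, x2 = torch.randn(5, 3), torch.randn(4, 3)
kern = gpytorch.kernels.RBFKernel()
lazy_K = kern(x1, x2)        # lazy: no 5x4 kernel matrix computed yet
sub = lazy_K[:2, :3]         # slicing stays lazy
assert lazy_K.to_dense().shape == (5, 4)
assert sub.to_dense().shape == (2, 3)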
|
0897247aac03222e040717aa52e02075ad5d2f07
|
3ac3806457ca4340ac93f925ec866ca7f53c96c9
|
/pyExamples/paralleltruss.py
|
4b8e448ebe9436979d16c96588b3a70a87f3126b
|
[] |
permissive
|
zhuminjie/OpenSeesPyDoc
|
7e4be3e3c7adceefbfd90c97653064f351a6c7bb
|
4d2af110c392ad388b34b362a3e8ecd9ab106313
|
refs/heads/master
| 2023-08-02T04:43:11.881919
| 2023-07-25T16:38:54
| 2023-07-25T16:38:54
| 104,932,114
| 115
| 122
|
MIT
| 2023-03-23T03:08:34
| 2017-09-26T20:03:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
paralleltruss.py
|
import openseespy.opensees as ops
pid = ops.getPID()
np = ops.getNP()
ops.start()
if np != 2:
exit()
ops.model('basic', '-ndm', 2, '-ndf', 2)
ops.uniaxialMaterial('Elastic', 1, 3000.0)
if pid == 0:
ops.node(1, 0.0, 0.0)
ops.node(4, 72.0, 96.0)
ops.fix(1, 1, 1)
ops.element('Truss', 1, 1, 4, 10.0, 1)
ops.timeSeries('Linear', 1)
ops.pattern('Plain', 1, 1)
ops.load(4, 100.0, -50.0)
else:
ops.node(2, 144.0, 0.0)
ops.node(3, 168.0, 0.0)
ops.node(4, 72.0, 96.0)
ops.fix(2, 1, 1)
ops.fix(3, 1, 1)
ops.element('Truss', 2, 2, 4, 5.0, 1)
ops.element('Truss', 3, 3, 4, 5.0, 1)
ops.constraints('Transformation')
ops.numberer('ParallelPlain')
ops.system('Mumps')
ops.test('NormDispIncr', 1e-6, 6, 2)
ops.algorithm('Newton')
ops.integrator('LoadControl', 0.1)
ops.analysis('Static')
ops.analyze(10)
print('Node 4: ', [ops.nodeCoord(4), ops.nodeDisp(4)])
ops.loadConst('-time', 0.0)
if pid == 0:
ops.pattern('Plain', 2, 1)
ops.load(4, 1.0, 0.0)
ops.domainChange()
ops.integrator('ParallelDisplacementControl', 4, 1, 0.1)
ops.analyze(10)
print('Node 4: ', [ops.nodeCoord(4), ops.nodeDisp(4)])
ops.stop()
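# Usage note (an assumption, not part of the script): parallel OpenSeesPy
# scripts like this one are typically launched under MPI with exactly the
# process count the script checks for, e.g.
#   mpiexec -np 2 python paralleltruss.py
# Each rank defines its own subdomain; the 'ParallelPlain' numberer and the
# 'Mumps' solver handle the shared node 4 across processes.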
|
a1108bdc8e611e2e046f3b9c0df35fbbfe52c14f
|
efe44bbc64cf788768d73e35cfb1baa2098181f6
|
/networks/scitile/storti_integrals/torus.py
|
aa06ee44b56b1e1948b1b7b3b49c47f7e1710328
|
[
"Apache-2.0"
] |
permissive
|
plaidml/plaidml
|
d2dd18f8228d3959ec875b66aa4ff31f6c29ef00
|
49fbaa5ac387e621f11ba0b81f49461e4b2d02ef
|
refs/heads/plaidml-v1
| 2023-08-09T18:38:07.681422
| 2023-07-23T20:15:07
| 2023-07-23T20:15:07
| 100,326,126
| 4,779
| 516
|
Apache-2.0
| 2023-02-14T21:33:05
| 2017-08-15T01:43:24
|
C++
|
UTF-8
|
Python
| false
| false
| 3,561
|
py
|
torus.py
|
import math
import matplotlib.pyplot as plt
from op import *
def toroidal_shell_integral_moment_of_innertia_exact(R, r):
return 2 * (math.pi**2) * r * R * ((2 * (R**2)) + (3 * (r**2)))
def torus_volume_exact(R, r):
return 2 * (math.pi**2) * (r**2) * R
def torus_surface_area_exact(R, r):
return 4 * (math.pi**2) * r * R
def torus(X, Y, Z, vars):
R = vars[0] # major radius
r = vars[1] # minor radius
return sq(edsl.sqrt(sq(X) + sq(Y)) - R) + sq(Z) - sq(r)
def integrand_inertia(X, Y, Z):
return sq(X) + sq(Y)
def generate_plot_data(min_N, max_N, jump, R, r, check_integral, type):
error_chart = np.array([])
error_chart_N_by_delta = np.array([])
for N in range(min_N, max_N, jump):
print("evaluating for N: {}".format(N))
minval = -1.25 * R
maxval = 1.25 * R
delta = (maxval - minval) / (N - 1) # grid spacing
eps = 1.0e-8
error = check_integral(type, N, minval, maxval, eps, torus, [R, r])
error_chart = np.append(error_chart, math.log(error, 10))
error_chart_N_by_delta = np.append(error_chart_N_by_delta, math.log((R / delta), 10))
return [error_chart_N_by_delta, error_chart]
def get_line_y(x, y):
den = x.dot(x) - x.mean() * x.sum()
m = (x.dot(y) - y.mean() * x.sum()) / den
b = (y.mean() * x.dot(x) - x.mean() * x.dot(y)) / den
y_line = (m * x + b)
res = y - y_line
tot = y - y.mean()
R_sq = 1 - res.dot(res) / tot.dot(tot)
label = "f(x) = " + '%.2f' % m + "x +" + '%.2f' % b + "| R^2 = " + '%.2f' % R_sq
return [y_line, label]
def main():  # TODO: work in progress to generate graphs; will clean up after documentation is completed
R = 10.0 # major radius
r = 2.0 # minor radius
min_N = 32
max_N = 513
interval = 32
def check_integral(type, N, minval, maxval, eps, frep, vars):
if type == 'volume':
exact_value = torus_volume_exact(R, r)
result = integral_volume(N, minval, maxval, eps, frep, vars)
if type == 'surface_area':
exact_value = torus_surface_area_exact(R, r)
result = integral_surface_area(N, minval, maxval, eps, frep, vars, integrand_empty)
if type == 'inertia':
exact_value = toroidal_shell_integral_moment_of_innertia_exact(R, r)
result = integral_surface_area(N, minval, maxval, eps, frep, vars, integrand_inertia)
print("Exact value: {}".format(exact_value))
print("computed result using integral: {}".format(result))
return (abs(result - exact_value) / exact_value) * 100
fig = plt.figure()
fig.suptitle("convergence study Genus 1 : Torus ", fontsize=14)
ax1 = fig.add_subplot(111)
ax1.set(xlabel='log(R/delta)', ylabel='log(error percentage)')
x1, y1 = generate_plot_data(min_N, max_N, interval, R, r, check_integral, 'volume')
x2, y2 = generate_plot_data(min_N, max_N, interval, R, r, check_integral, 'surface_area')
x3, y3 = generate_plot_data(min_N, max_N, interval, R, r, check_integral, 'inertia')
ax1.scatter(x1, y1, label='Volume')
ax1.scatter(x2, y2, label='SurfaceArea')
ax1.scatter(x3, y3, label='Inertia')
y_line1, label1 = get_line_y(x1, y1)
ax1.plot(x1, y_line1, label=label1)
y_line2, label2 = get_line_y(x2, y2)
ax1.plot(x2, y_line2, label=label2)
y_line3, label3 = get_line_y(x3, y3)
ax1.plot(x3, y_line3, label=label3)
plt.legend(loc='lower left')
plt.savefig('torus_integral_graph.pdf')
if __name__ == '__main__':
main()
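# A quick standalone cross-check (illustrative, numpy only) of the
# closed-form least-squares slope/intercept used in get_line_y above,
# compared against np.polyfit on made-up data:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = 2.0 * x + 1.0 + np.array([0.05, -0.02, 0.01, -0.04])
den = x.dot(x) - x.mean() * x.sum()
m = (x.dot(y) - y.mean() * x.sum()) / den
b = (y.mean() * x.dot(x) - x.mean() * x.dot(y)) / den
m_ref, b_ref = np.polyfit(x, y, 1)
assert abs(m - m_ref) < 1e-9 and abs(b - b_ref) < 1e-9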
|
157345db6c9968fd887547afef44c5d122b65966
|
72293b4650b92019f9c046133f7de13ea6f69644
|
/zulip_bots/zulip_bots/bots/monkeytestit/test_monkeytestit.py
|
1ee4fc103e31198cbd88222de8460f5e7f98b583
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/python-zulip-api
|
820978c36689db4872abf21730b25ce8abb5fbcf
|
35a8ff8839ac39cff0638f533fea59665cb9aff3
|
refs/heads/main
| 2023-09-03T14:04:46.920347
| 2023-06-12T21:03:10
| 2023-08-11T19:36:11
| 96,455,158
| 387
| 437
|
Apache-2.0
| 2023-08-11T19:36:12
| 2017-07-06T17:25:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,750
|
py
|
test_monkeytestit.py
|
from importlib import import_module
from unittest.mock import patch
from zulip_bots.test_lib import BotTestCase, DefaultTests
class TestMonkeyTestitBot(BotTestCase, DefaultTests):
bot_name = "monkeytestit"
def setUp(self):
self.monkeytestit_class = import_module(
"zulip_bots.bots.monkeytestit.monkeytestit"
).MonkeyTestitBot
def test_bot_responds_to_empty_message(self):
message = dict(
content="",
type="stream",
)
with patch.object(self.monkeytestit_class, "initialize", return_value=None):
with self.mock_config_info({"api_key": "magic"}):
res = self.get_response(message)
self.assertTrue("Unknown command" in res["content"])
def test_website_fail(self):
message = dict(
content="check https://website.com",
type="stream",
)
with patch.object(self.monkeytestit_class, "initialize", return_value=None):
with self.mock_config_info({"api_key": "magic"}):
with self.mock_http_conversation("website_result_fail"):
res = self.get_response(message)
self.assertTrue("Status: tests_failed" in res["content"])
def test_website_success(self):
message = dict(
content="check https://website.com",
type="stream",
)
with patch.object(self.monkeytestit_class, "initialize", return_value=None):
with self.mock_config_info({"api_key": "magic"}):
with self.mock_http_conversation("website_result_success"):
res = self.get_response(message)
self.assertTrue("success" in res["content"])
|
d84e79c07f590c190b6b6ab86638da6182f0cc67
|
1aeb564072899cc48596af5c3066fc720ea741a7
|
/testing_neuraxle/hyperparams/test_distributions.py
|
86b389a9b80e970cc26f2b5e799f1f948285c2ac
|
[
"Apache-2.0"
] |
permissive
|
Neuraxio/Neuraxle
|
f8dfb8d6f6c8ce295dcb5f7c37b98545c11976e3
|
af917c984241178436a759be3b830e6d8b03245f
|
refs/heads/master
| 2023-08-07T03:36:01.680863
| 2022-08-16T17:43:49
| 2022-08-16T17:43:49
| 177,868,131
| 597
| 62
|
Apache-2.0
| 2023-07-09T06:12:47
| 2019-03-26T21:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 24,942
|
py
|
test_distributions.py
|
"""
Tests for Hyperparameters Distributions
========================================
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from collections import Counter
import numpy as np
import pytest
from neuraxle.hyperparams.distributions import *
NUM_TRIALS = 5000
def hd_rvs_many(hd: HyperparameterDistribution):
random.seed(111)
np.random.seed(111)
return hd.rvs_many(NUM_TRIALS)
def test_boolean_distribution():
hd = Boolean()
samples = hd_rvs_many(hd)
falses = Counter(samples).get(False)
trues = Counter(samples).get(True)
# You'd need to win the lotto for this test to fail. Or a broken random sampler. Or a bug.
assert trues > NUM_TRIALS * 0.4
assert falses > NUM_TRIALS * 0.4
assert hd.pdf(False) == 0.5
assert hd.pdf(0.) == 0.5
assert hd.pdf(True) == 0.5
assert hd.pdf(1.) == 0.5
assert hd.pdf(-0.1) == 0.
assert hd.pdf(1.1) == 0.
assert hd.cdf(False) == 0.5
assert hd.cdf(0.) == 0.5
assert hd.cdf(True) == 1.
assert hd.cdf(1.) == 1.
assert hd.cdf(-0.1) == 0.
assert hd.cdf(1.1) == 1.
assert hd.min() == 0
assert hd.max() == 1
assert abs(hd.mean() - 0.5) < 1e-6
assert abs(hd.std() - 0.5) < 1e-6
assert abs(hd.var() - 0.25) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-2
assert abs(hd.var() - np.var(samples)) < 1e-2
def test_boolean_distribution_with_proba():
proba_is_true = 0.7
hd = Boolean(proba_is_true=proba_is_true)
samples = hd_rvs_many(hd)
falses = Counter(samples).get(False)
trues = Counter(samples).get(True)
# You'd need to win the lotto for this test to fail. Or a broken random sampler. Or a bug.
assert trues > NUM_TRIALS * (proba_is_true - 0.1)
assert falses > NUM_TRIALS * (1 - proba_is_true - 0.1)
assert abs(hd.pdf(False) - (1 - proba_is_true)) < 1e-6
assert abs(hd.pdf(0.) - (1 - proba_is_true)) < 1e-6
assert abs(hd.pdf(True) - proba_is_true) < 1e-6
assert abs(hd.pdf(1.) - proba_is_true) < 1e-6
assert abs(hd.pdf(-0.1) - 0.) < 1e-6
assert abs(hd.pdf(1.1) - 0.) < 1e-6
assert abs(hd.cdf(False) - (1 - proba_is_true)) < 1e-6
assert abs(hd.cdf(0.) - (1 - proba_is_true)) < 1e-6
assert abs(hd.cdf(True) - 1.) < 1e-6
assert abs(hd.cdf(1.) - 1.) < 1e-6
assert abs(hd.cdf(-0.1) - 0.) < 1e-6
assert abs(hd.cdf(1.1) - 1.) < 1e-6
assert hd.min() == 0
assert hd.max() == 1
assert abs(hd.mean() - proba_is_true) < 1e-6
assert abs(hd.std() - math.sqrt(proba_is_true * (1 - proba_is_true))) < 1e-6
assert abs(hd.var() - proba_is_true * (1 - proba_is_true)) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-2
assert abs(hd.var() - np.var(samples)) < 1e-2
@pytest.mark.parametrize("ctor", [Choice, PriorityChoice])
def test_choice_and_priority_choice(ctor):
choice_list = [0, 1, False, "Test"]
hd = ctor(choice_list)
samples = hd_rvs_many(hd)
z0 = Counter(samples).get(0)
z1 = Counter(samples).get(1)
zNone = Counter(samples).get(False)
zTest = Counter(samples).get("Test")
# You'd need to win the lotto for this test to fail. Or a broken random sampler. Or a bug.
assert z0 > NUM_TRIALS * 0.2
assert z1 > NUM_TRIALS * 0.2
assert zNone > NUM_TRIALS * 0.2
assert zTest > NUM_TRIALS * 0.2
assert abs(hd.pdf(0) - 1 / 4) < 1e-6
assert abs(hd.pdf(1) - 1 / 4) < 1e-6
assert abs(hd.pdf(False) - 1 / 4) < 1e-6
assert abs(hd.pdf("Test") - 1 / 4) < 1e-6
assert abs(hd.cdf(0) - 1 / 4) < 1e-6
assert abs(hd.cdf(1) - 2 / 4) < 1e-6
assert abs(hd.cdf(False) - 3 / 4) < 1e-6
assert hd.cdf("Test") == 1.
with pytest.raises(ValueError):
assert hd.pdf(3) == 0.
assert hd.cdf(3) == 0.
assert hd.min() == 0
assert hd.max() == len(choice_list) - 1
assert abs(hd.mean() - (len(choice_list) - 1) / 2) < 1e-6
assert abs(hd.var() - (len(choice_list) ** 2 - 1) / 12) < 1e-6
assert abs(hd.std() - math.sqrt((len(choice_list) ** 2 - 1) / 12)) < 1e-6
# Convert samples in sample index
samples_index = [get_index_in_list_with_bool(choice_list, sample) for sample in samples]
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs((hd.mean() - np.mean(samples_index)) / hd.mean()) < 1e-1
assert abs((hd.var() - np.var(samples_index)) / hd.var()) < 1e-1
@pytest.mark.parametrize("ctor", [Choice, PriorityChoice])
def test_choice_and_priority_choice_with_probas(ctor):
probas = [0.1, 0.4, 0.3, 0.2]
probas_array = np.array(probas)
choice_list = [0, 1, False, "Test"]
hd = ctor(choice_list, probas=probas)
samples = hd_rvs_many(hd)
z0 = Counter(samples).get(0)
z1 = Counter(samples).get(1)
zNone = Counter(samples).get(False)
zTest = Counter(samples).get("Test")
# You'd need to win the lotto for this test to fail. Or a broken random sampler. Or a bug.
assert z0 > NUM_TRIALS * (probas[0] - 0.05)
assert z1 > NUM_TRIALS * (probas[1] - 0.05)
assert zNone > NUM_TRIALS * (probas[2] - 0.05)
assert zTest > NUM_TRIALS * (probas[3] - 0.05)
assert abs(hd.pdf(0) - probas[0]) < 1e-6
assert abs(hd.pdf(1) - probas[1]) < 1e-6
assert abs(hd.pdf(False) - probas[2]) < 1e-6
assert abs(hd.pdf("Test") - probas[3]) < 1e-6
assert abs(hd.cdf(0) - probas_array[0]) < 1e-6
assert abs(hd.cdf(1) - np.sum(probas_array[0:2])) < 1e-6
assert abs(hd.cdf(False) - np.sum(probas_array[0:3])) < 1e-6
assert abs(hd.cdf("Test") - 1.) < 1e-6
with pytest.raises(ValueError):
assert hd.pdf(3) == 0.
assert hd.cdf(3) == 0.
assert hd.min() == 0
assert hd.max() == len(choice_list) - 1
assert abs(hd.mean() - 1.6) < 1e-6
assert abs(hd.var() - 0.84) < 1e-6
assert abs(hd.std() - 0.9165151389911679) < 1e-6
# Convert samples in sample index
samples_index = [get_index_in_list_with_bool(choice_list, sample) for sample in samples]
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs((hd.mean() - np.mean(samples_index)) / hd.mean()) < 1e-1
assert abs((hd.var() - np.var(samples_index)) / hd.var()) < 1e-1
def test_quantized_uniform():
low = -10
high = 10
hd = Quantized(Uniform(low, high))
samples = hd_rvs_many(hd)
for s in samples:
assert type(s) == int
samples_mean = np.abs(np.mean(samples))
assert samples_mean < 1.0
assert min(samples) >= -10.0
assert max(samples) <= 10.0
assert abs(hd.pdf(-10) - 1 / 40) < 1e-6
assert abs(hd.pdf(-9) - 1 / 20) < 1e-6
assert abs(hd.pdf(0) - 1 / 20) < 1e-6
assert abs(hd.pdf(9) - 1 / 20) < 1e-6
assert abs(hd.pdf(10) - 1 / 40) < 1e-6
assert abs(hd.cdf(-10) - 1 / 40) < 1e-6
assert abs(hd.cdf(-9) - 1.5 / 20) < 1e-6
assert abs(hd.cdf(0) - 10.5 / 20) < 1e-6
assert abs(hd.cdf(9) - 19.5 / 20) < 1e-6
assert abs(hd.cdf(9.2) - 19.5 / 20) < 1e-6
assert hd.cdf(10) == 1.
assert hd.min() == low
assert hd.max() == high
assert abs(hd.mean() - 0.0) < 1e-6
assert abs(hd.var() - 33.50000000000001) < 1e-6
assert abs(hd.std() - 5.787918451395114) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 1e-1
def test_randint():
low = -10
high = 10
hd = RandInt(low, high)
samples = hd_rvs_many(hd)
for s in samples:
assert type(s) == int
samples_mean = np.abs(np.mean(samples))
assert samples_mean < 1.0
assert min(samples) >= -10.0
assert max(samples) <= 10.0
assert hd.pdf(-11) == 0.
assert abs(hd.pdf(-10) - 1 / (10 + 10 + 1)) < 1e-6
assert abs(hd.pdf(0) - 1 / (10 + 10 + 1)) < 1e-6
assert hd.pdf(0.5) == 0.
assert abs(hd.pdf(10) - 1 / (10 + 10 + 1)) < 1e-6
assert hd.pdf(11) == 0.
assert hd.cdf(-10.1) == 0.
assert abs(hd.cdf(-10) - 1 / (10 + 10 + 1)) < 1e-6
assert abs(hd.cdf(0) - (0 + 10 + 1) / (10 + 10 + 1)) < 1e-6
assert abs(hd.cdf(5) - (5 + 10 + 1) / (10 + 10 + 1)) < 1e-6
assert abs(hd.cdf(10) - 1.) < 1e-6
assert hd.cdf(10.1) == 1.
assert hd.min() == low
assert hd.max() == high
assert abs(hd.mean() - (10 - 10) / 2) < 1e-6
assert abs(hd.var() - ((high - low + 1) ** 2 - 1) / 12) < 1e-6
assert abs(hd.std() - math.sqrt(((high - low + 1) ** 2 - 1) / 12)) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 1e-1
def test_uniform():
low = -10
high = 10
hd = Uniform(low, high)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert samples_mean < 1.0
assert min(samples) >= -10.0
assert max(samples) <= 10.0
assert hd.pdf(-10.1) == 0.
assert abs(hd.pdf(0) - 1 / (10 + 10)) < 1e-6
assert hd.pdf(10.1) == 0.
assert hd.cdf(-10.1) == 0.
assert abs(hd.cdf(0) - (0 + 10) / (10 + 10)) < 1e-6
assert hd.cdf(10.1) == 1.
assert hd.min() == low
assert hd.max() == high
assert abs(hd.mean() - (10 - 10) / 2) < 1e-6
assert abs(hd.var() - 1 / 12 * (high - low) ** 2) < 1e-6
assert abs(hd.std() - math.sqrt(1 / 12 * (high - low) ** 2)) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 1e-1
def test_loguniform():
min_included = 0.001
max_included = 10
hd = LogUniform(min_included, max_included)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert samples_mean < 1.15 # if it was just uniform, this assert would break.
assert min(samples) >= 0.001
assert max(samples) <= 10.0
assert hd.pdf(0.0001) == 0.
assert abs(hd.pdf(2) - 0.054286810237906484) < 1e-6
assert hd.pdf(10.1) == 0.
assert hd.cdf(0.0001) == 0.
assert abs(hd.cdf(2) - (math.log2(2) - math.log2(0.001)) / (math.log2(10) - math.log2(0.001))) < 1e-6
assert hd.cdf(10.1) == 1.
assert hd.min() == min_included
assert hd.max() == max_included
assert abs(hd.mean() - (max_included - min_included) / (
math.log(2) * (math.log2(max_included) - math.log2(min_included)))) < 1e-6
esperance_squared = (max_included ** 2 - min_included ** 2) / (
2 * math.log(2) * (math.log2(max_included) - math.log2(min_included)))
assert abs(hd.var() - (esperance_squared - hd.mean() ** 2)) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 5e-2
assert abs(hd.var() - np.var(samples)) < 2.5e-1
def test_normal():
hd_mean = 0.0
hd_std = 1.0
hd = Normal(hd_mean, hd_std)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert samples_mean < 0.1
samples_std = np.std(samples)
assert 0.9 < samples_std < 1.1
assert abs(hd.pdf(-1.) - 0.24197072451914337) < 1e-6
assert abs(hd.pdf(0.) - 0.3989422804014327) < 1e-6
assert abs(hd.pdf(1.) - 0.24197072451914337) < 1e-6
assert abs(hd.cdf(-1.) - 0.15865525393145707) < 1e-6
assert abs(hd.cdf(0.) - 0.5) < 1e-6
assert abs(hd.cdf(1.) - 0.8413447460685429) < 1e-6
assert hd.min() == -np.inf
assert hd.max() == np.inf
assert abs(hd.mean() - hd_mean) < 1e-6
assert abs(hd.var() - hd_std ** 2) < 1e-6
assert abs(hd.std() - hd_std) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 5e-2
assert abs(hd.var() - np.var(samples)) < 5e-2
def test_normal_truncated():
hd_mean = 2.0
hd_std = 1.0
hard_clip_min = 1.8
hard_clip_max = 2.5
hd = Normal(hd_mean, hd_std, hard_clip_min=hard_clip_min, hard_clip_max=hard_clip_max)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert 2.0 < samples_mean < 2.2
samples_std = np.std(samples)
assert 0 < samples_std < 0.4
assert abs(hd.pdf(1.7) - 0.) < 1e-6
assert abs(hd.pdf(1.8) - 1.4444428136247596) < 1e-6
assert abs(hd.pdf(2.) - 1.473622494051997) < 1e-6
assert abs(hd.pdf(2.25) - 1.4282838963071145) < 1e-6
assert abs(hd.pdf(2.5) - 1.3004672865798739) < 1e-6
assert abs(hd.pdf(2.6) - 0.) < 1e-6
assert abs(hd.cdf(1.7) - 0.) < 1e-6
assert abs(hd.cdf(1.8) - 0.) < 1e-6
assert abs(hd.cdf(2.) - 0.2927714018778846) < 1e-6
assert abs(hd.cdf(2.25) - 0.65737517785574) < 1e-6
assert abs(hd.cdf(2.5) - 1.) < 1e-6
assert abs(hd.cdf(2.6) - 1.) < 1e-6
assert np.all((np.array(samples) >= hard_clip_min) & (np.array(samples) <= hard_clip_max))
assert hd.min() == hard_clip_min
assert hd.max() == hard_clip_max
assert abs(hd.mean() - 2.1439755270448857) < 1e-6
assert abs(hd.var() - 0.04014884159725845) < 1e-6
assert abs(hd.std() - 0.20037175848222336) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-2
assert abs(hd.var() - np.var(samples)) < 1e-2
def test_normal_onside_lower_tail_truncated():
hd_mean = 2.0
hd_std = 1.0
hard_clip_min = 1.8
hard_clip_max = None
hd = Normal(hd_mean, hd_std, hard_clip_min=hard_clip_min, hard_clip_max=hard_clip_max)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert 2.5 < samples_mean < 2.8
samples_std = np.std(samples)
assert 0.5 < samples_std < 0.7
assert abs(hd.pdf(1.7) - 0.) < 1e-6
assert abs(hd.pdf(1.8) - 0.6750731797902921) < 1e-6
assert abs(hd.pdf(2.) - 0.688710562638179) < 1e-6
assert abs(hd.pdf(2.5) - 0.607784938305487) < 1e-6
assert abs(hd.pdf(5.) - 0.007650883256198442) < 1e-6
assert abs(hd.cdf(1.7) - 0.) < 1e-6
assert abs(hd.cdf(1.8) - 0.) < 1e-6
assert abs(hd.cdf(2.) - 0.13682931532705794) < 1e-6
assert abs(hd.cdf(2.5) - 0.46735888290117106) < 1e-6
assert abs(hd.cdf(5.) - 0.9976696151835984) < 1e-6
assert np.all(np.array(samples) >= hard_clip_min)
assert hd.min() == hard_clip_min
assert hd.max() == np.inf
assert abs(hd.mean() - 2.6750731797902922) < 1e-6
assert abs(hd.var() - 0.4092615659697655) < 1e-6
assert abs(hd.std() - 0.6397355437755241) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-2
assert abs(hd.var() - np.var(samples)) < 1e-2
def test_normal_onside_upper_tail_truncated():
hd_mean = 2.0
hd_std = 1.0
hard_clip_min = None
hard_clip_max = 2.5
hd = Normal(hd_mean, hd_std, hard_clip_min=hard_clip_min, hard_clip_max=hard_clip_max)
samples = hd_rvs_many(hd)
samples_mean = np.abs(np.mean(samples))
assert 1.4 < samples_mean < 1.6
samples_std = np.std(samples)
assert 0.6 < samples_std < 0.8
assert abs(hd.pdf(-1.) - 0.006409383965359982) < 1e-6
assert abs(hd.pdf(1.8) - 0.5655298962361072) < 1e-6
assert abs(hd.pdf(2.) - 0.5769543579652687) < 1e-6
assert abs(hd.pdf(2.5) - 0.5091604338370336) < 1e-6
assert abs(hd.pdf(2.6) - 0.) < 1e-6
assert abs(hd.cdf(-1.) - 0.0019522361765567414) < 1e-6
assert abs(hd.cdf(1.8) - 0.6084788605670465) < 1e-6
assert abs(hd.cdf(2.) - 0.723105053423659) < 1e-6
assert abs(hd.cdf(2.5) - 1.) < 1e-6
assert abs(hd.cdf(2.6) - 1.) < 1e-6
assert np.all(np.array(samples) <= hard_clip_max)
assert hd.min() == -np.inf
assert hd.max() == hard_clip_max
assert abs(hd.mean() - 1.4908395661629665) < 1e-6
assert abs(hd.var() - 0.486175435696367) < 1e-6
assert abs(hd.std() - 0.6972628168032245) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-2
assert abs(hd.var() - np.var(samples)) < 1e-2
@pytest.mark.parametrize("seed", (15, 20, 32, 40, 50))
def test_lognormal(seed):
np.random.seed(seed)
log2_space_mean = 0.0
log2_space_std = 2.0
hd = LogNormal(log2_space_mean, log2_space_std)
samples = hd_rvs_many(hd)
samples_median = np.median(samples)
assert 0.9 < samples_median < 1.1
samples_std = np.std(samples)
assert 5 < samples_std < 8
assert hd.pdf(0.) == 0.
assert abs(hd.pdf(1.) - 0.28777602476804065) < 1e-6
assert abs(hd.pdf(5.) - 0.029336304593386688) < 1e-6
assert hd.cdf(0.) == 0.
assert hd.cdf(1.) == 0.5
assert abs(hd.cdf(5.) - 0.8771717397015799) < 1e-6
assert hd.min() == 0
assert hd.max() == np.inf
assert abs(hd.mean() - 2.614063815405198) < 1e-6
assert abs(hd.var() - 39.86106421503915) < 1e-6
assert abs(hd.std() - 6.313561927710787) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 2.5e-1
def test_lognormal_clipped():
log2_space_mean = 10.0
log2_space_std = 5.0
hard_clip_min = 5
hard_clip_max = 100
hd = LogNormal(log2_space_mean, log2_space_std, hard_clip_min=hard_clip_min, hard_clip_max=hard_clip_max)
samples = hd_rvs_many(hd)
samples_median = np.median(samples)
assert 25 < samples_median < 35
samples_std = np.std(samples)
assert 20 < samples_std < 30
assert hd.pdf(0.) == 0.
assert abs(hd.pdf(6.) - 0.03385080142719004) < 1e-6
assert abs(hd.pdf(10.) - 0.024999599033243936) < 1e-6
assert hd.cdf(0.) == 0.
assert abs(hd.cdf(6.) - 0.03560663481768936) < 1e-6
assert abs(hd.cdf(10.) - 0.15112888563249155) < 1e-6
assert hd.min() == hard_clip_min
assert hd.max() == hard_clip_max
assert abs(hd.mean() - 38.110960930274594) < 1e-6
assert abs(hd.var() - 728.7599668053633) < 1e-6
assert abs(hd.std() - 26.995554574880718) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs((hd.mean() - np.mean(samples)) / hd.mean()) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 1e-1
def test_gaussian_distribution_mixture():
distribution_amplitudes = [1, 1, 1]
means = [-2, 0, 2]
stds = [1, 1, 1]
distribution_mins = [None for _ in range(len(means))]
distribution_max = [None for _ in range(len(means))]
hd = DistributionMixture.build_gaussian_mixture(distribution_amplitudes, means, stds, distribution_mins,
distribution_max)
samples = hd_rvs_many(hd)
samples_median = np.median(samples)
assert -0.5 < samples_median < 0.5
samples_std = np.std(samples)
assert 1 < samples_std < 4
assert abs(hd.pdf(-2.) - 0.1510223590467952) < 1e-6
assert abs(hd.pdf(0.) - 0.16897473780926958) < 1e-6
assert abs(hd.pdf(2.) - 0.1510223590467952) < 1e-6
assert abs(hd.cdf(-2.) - 0.17426060106333743) < 1e-6
assert abs(hd.cdf(0.) - 0.5) < 1e-6
assert abs(hd.cdf(2.) - 0.8257393989366625) < 1e-6
assert hd.min() == -np.inf
assert hd.max() == np.inf
assert abs(hd.mean() - 0.0) < 1e-6
assert abs(hd.var() - 11. / 3) < 1e-6
assert abs(hd.std() - math.sqrt(11. / 3)) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs(hd.var() - np.var(samples)) < 1e-1
def test_gaussian_distribution_mixture_truncated():
distribution_amplitudes = [1, 1, 1]
means = [-2, 0, 2]
stds = [1, 1, 1]
distribution_mins = [-2.5, -0.5, 1.5]
distribution_max = [-1.5, 0.5, 2.5]
hd = DistributionMixture.build_gaussian_mixture(distribution_amplitudes, means, stds, distribution_mins,
distribution_max)
samples = hd_rvs_many(hd)
assert np.all(np.logical_and(np.array(samples) >= distribution_mins[0], np.array(samples) <= distribution_max[0]) |
np.logical_and(np.array(samples) >= distribution_mins[1], np.array(samples) <= distribution_max[1]) |
np.logical_and(np.array(samples) >= distribution_mins[2], np.array(samples) <= distribution_max[2]))
samples_median = np.median(samples)
assert -0.5 < samples_median < 0.5
samples_std = np.std(samples)
assert 1 < samples_std < 4
assert abs(hd.pdf(-2.) - 0.3472763257323177) < 1e-6
assert abs(hd.pdf(0.) - 0.3472763257323177) < 1e-6
assert abs(hd.pdf(2.) - 0.3472763257323177) < 1e-6
assert abs(hd.cdf(-2.) - 0.16666666666666666) < 1e-6
assert abs(hd.cdf(0.) - 0.5) < 1e-6
assert abs(hd.cdf(2.) - 0.8333333333333333) < 1e-6
assert hd.min() == min(distribution_mins)
assert hd.max() == max(distribution_max)
assert abs(hd.mean() - 0.0) < 1e-6
assert abs(hd.var() - 2.747255821267478) < 1e-6
assert abs(hd.std() - 1.6574847876428545) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs(hd.var() - np.var(samples)) < 1e-1
def test_gaussian_distribution_mixture_log():
distribution_amplitudes = [1, 1, 1]
means = [1, 2, 3]
stds = [1, 1, 1]
distribution_mins = [None for _ in range(len(means))]
distribution_max = [None for _ in range(len(means))]
hd = DistributionMixture.build_gaussian_mixture(
distribution_amplitudes, means, stds,
distribution_mins, distribution_max, use_logs=True)
samples = hd_rvs_many(hd)
samples_median = np.median(samples)
assert 1.5 < samples_median < 2.5
samples_std = np.std(samples)
assert 1.5 < hd.mean() < 2.5
assert 1 < samples_std < 4
assert 1 < hd.std() < 2
assert abs(hd.pdf(-2.) - 0.) == 0
assert abs(hd.pdf(1) - 1.0 / 3.0) < 0.1
assert abs(hd.pdf(2) - 1.0 / 3.0) < 0.1
assert abs(hd.pdf(3) - 1.0 / 3.0) < 0.1
assert abs(hd.cdf(1) - 1.0 / 4.0) < 0.1
assert abs(hd.cdf(2) - 2.0 / 4.0) < 0.1
assert abs(hd.cdf(3) - 3.0 / 4.0) < 0.1
assert hd.pdf(5.) < 0.05
assert hd.min() == 0
assert hd.max() == np.inf
assert abs(hd.mean() - 2.15) < 0.015
assert abs(hd.std() ** 2 - hd.var()) < 0.001
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 0.05
assert abs(hd.var() - np.var(samples)) < 0.15
def test_gaussian_distribution_mixture_quantized():
distribution_amplitudes = [1, 1, 1]
means = [-2, 0, 2]
stds = [1, 1, 1]
distribution_mins = [None for _ in range(len(means))]
distribution_max = [None for _ in range(len(means))]
hd = DistributionMixture.build_gaussian_mixture(
distribution_amplitudes, means, stds, distribution_mins, distribution_max,
use_quantized_distributions=True
)
samples = hd_rvs_many(hd)
samples_median = np.median(samples)
assert -0.5 < samples_median < 0.5
samples_std = np.std(samples)
assert 1 < samples_std < 4
assert abs(hd.pdf(-2.) - 0.147917229965673) < 1e-6
assert abs(hd.pdf(-1.5) - 0.) < 1e-6
assert abs(hd.pdf(1.) - 0.16314590372033272) < 1e-6
assert abs(hd.pdf(5.) - 0.001993471656810324) < 1e-6
assert abs(hd.cdf(-2.) - 0.2528340972073022) < 1e-6
assert abs(hd.cdf(-1.5) - 0.2528340972073022) < 1e-6
assert abs(hd.cdf(1.) - 0.7471659027926978) < 1e-6
assert abs(hd.cdf(5.) - 0.99992245064379) < 1e-6
assert hd.min() == -np.inf
assert hd.max() == np.inf
assert abs(hd.mean() - 0.) < 1e-6
assert abs(hd.var() - 3.749999989027785) < 1e-6
assert abs(hd.std() - 1.9364916702706947) < 1e-6
# Verify that hd mean and variance also correspond to mean and variance of sampling.
assert abs(hd.mean() - np.mean(samples)) < 1e-1
assert abs((hd.var() - np.var(samples)) / hd.var()) < 1e-1
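# The recurring check pattern in these tests, as a standalone sketch using
# numpy's own normal sampler (no neuraxle dependency): draw many samples,
# then compare the empirical mean/variance to the analytic values, with
# generous tolerances so the fixed-seed run is deterministic and safe.
import numpy as np

rng = np.random.default_rng(111)
mu, sigma, n = 2.0, 1.0, 50000
samples = rng.normal(mu, sigma, size=n)
assert abs(samples.mean() - mu) < 5e-2
assert abs(samples.var() - sigma ** 2) < 1e-1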
|
3ba57795ba617fe2563200afadb5b9b0202e6290
|
9468849850c7c2b2040835eb9496bfb716a98c21
|
/cea/plots/optimization/b_parallel_coordinates.py
|
10bbc9a353941485e6e93e03d478b9818ef0e5be
|
[
"MIT"
] |
permissive
|
architecture-building-systems/CityEnergyAnalyst
|
e6532c0c794538dbb665366ccf6d783e0d9d1345
|
b84bcefdfdfc2bc0e009b5284b74391a957995ac
|
refs/heads/master
| 2023-08-30T19:57:47.445797
| 2023-08-25T13:30:28
| 2023-08-25T13:30:28
| 49,491,341
| 166
| 60
|
MIT
| 2023-09-11T11:10:00
| 2016-01-12T10:02:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,316
|
py
|
b_parallel_coordinates.py
|
"""
Show a Pareto curve plot for individuals in a given generation.
"""
import plotly.graph_objs as go
import cea.plots.optimization
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class ParallelCoordinatesForOneGenerationPlot(cea.plots.optimization.GenerationPlotBase):
"""Show a pareto curve for a single generation"""
name = "Parallel coordinates"
expected_parameters = {
'generation': 'plots-optimization:generation',
'normalization': 'plots-optimization:normalization',
'scenario-name': 'general:scenario-name',
}
def __init__(self, project, parameters, cache):
super(ParallelCoordinatesForOneGenerationPlot, self).__init__(project, parameters, cache)
self.analysis_fields = ['individual_name',
'GHG_sys_tonCO2',
'TAC_sys_USD',
'Opex_a_sys_USD',
'Capex_total_sys_USD',
]
self.objectives = ['GHG_sys_tonCO2', 'TAC_sys_USD', 'Opex_a_sys_USD', 'Capex_total_sys_USD']
self.normalization = self.parameters['normalization']
self.input_files = [(self.locator.get_optimization_generation_total_performance_pareto, [self.generation])]
self.titles = self.calc_titles()
def calc_titles(self):
title = 'System No.'
if self.normalization == "gross floor area":
titlex = 'Equivalent annual costs <br>[USD$(2015)/m2.yr]'
titley = 'GHG emissions <br>[kg CO2-eq/m2.yr]'
titlez = 'Investment costs <br>[USD$(2015)/m2]'
titlel = 'Operation costs <br>[USD$(2015)/m2.yr]'
elif self.normalization == "net floor area":
titlex = 'Equivalent annual costs <br>[USD$(2015)/m2.yr]'
titley = 'GHG emissions <br>[kg CO2-eq/m2.yr]'
titlez = 'Investment costs <br>[USD$(2015)/m2]'
titlel = 'Operation costs <br>[USD$(2015)/m2.yr]'
elif self.normalization == "air conditioned floor area":
titlex = 'Equivalent annual costs <br>[USD$(2015)/m2.yr]'
titley = 'GHG emissions <br>[kg CO2-eq/m2.yr]'
titlez = 'Investment costs <br>[USD$(2015)/m2.yr]'
titlel = 'Operation costs <br>[USD$(2015)/m2.yr]'
elif self.normalization == "building occupancy":
titlex = 'Equivalent annual costs <br>[USD$(2015)/p.yr]'
titley = 'GHG emissions <br>[kg CO2-eq/p.yr]'
titlez = 'Investment costs <br>[USD$(2015)/p]'
titlel = 'Operation costs <br>[USD$(2015)/p.yr]'
else:
titlex = 'Equivalent annual costs <br>[USD$(2015)/yr]'
titley = 'GHG emissions <br>[ton CO2-eq/yr]'
titlez = 'Investment costs <br>[USD$(2015)]'
titlel = 'Operation costs <br>[USD$(2015)/yr]'
return title, titley, titlex, titlel, titlez
@property
def layout(self):
return go.Layout()
@property
def title(self):
if self.normalization != "none":
return "Parallel Plot for generation {generation} normalized to {normalized}".format(
generation=self.generation, normalized=self.normalization)
else:
return "Parallel Plot for generation {generation}".format(generation=self.generation)
@property
def output_path(self):
return self.locator.get_timeseries_plots_file('gen{generation}_parallelplot'.format(generation=self.generation),
self.category_name)
def calc_graph(self):
graph = []
# PUT THE PARETO CURVE INSIDE
data = self.process_generation_total_performance_pareto()
data = self.normalize_data(data, self.normalization, self.objectives)
data = data.sort_values(['GHG_sys_tonCO2'])
dimensions = list([dict(label=label, values=data[field]) if field != 'individual_name' else dict(
ticktext=data[field], label=label, tickvals=list(range(data.shape[0])), values=list(range(data.shape[0])))
for field, label in zip(self.analysis_fields, self.titles)])
        line = dict(color=data['Capex_total_sys_USD'], colorscale='Jet', showscale=True)
trace = go.Parcoords(line=line, dimensions=dimensions, labelfont=dict(size=10), rangefont=dict(size=8), tickfont=dict(size=10))
graph.append(trace)
return graph
def main():
"""Test this plot"""
import cea.config
import cea.plots.cache
config = cea.config.Configuration()
cache = cea.plots.cache.NullPlotCache()
ParallelCoordinatesForOneGenerationPlot(config.project,
{'scenario-name': config.scenario_name,
'generation': config.plots_optimization.generation,
'normalization': config.plots_optimization.normalization},
cache).plot(auto_open=True)
if __name__ == '__main__':
main()
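# A minimal standalone Parcoords figure (made-up data, independent of the
# CEA plot class above) showing the dimensions/line structure that
# calc_graph assembles:
import plotly.graph_objs as go

dimensions = [
    dict(label='TAC [USD/yr]', values=[1.0, 2.0, 3.0]),
    dict(label='GHG [ton CO2/yr]', values=[30.0, 20.0, 10.0]),
]
line = dict(color=[30.0, 20.0, 10.0], colorscale='Jet', showscale=True)
fig = go.Figure(data=go.Parcoords(line=line, dimensions=dimensions))
# fig.show()  # would open the interactive plot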
|
55be6530921d093c19ef4c7441e0c8ec5c3b06f9
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/perf/test_ozone_ar_speed_order_350.py
|
661e4fbb2d5974c393cc23fcc30482964cc05a8d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 70
|
py
|
test_ozone_ar_speed_order_350.py
|
import tests.perf.test_ozone_ar_speed_many as gen
gen.run_test(350)
|
c7a923abbe93e48a0d1750ebdfaa6a6c398b9880
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/legacy_test/test_scatter_nd_op.py
|
ee6a2423e0d2002a66a4c69df2cd790bd1a881b9
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 18,016
|
py
|
test_scatter_nd_op.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.dygraph.base import switch_to_static_graph
def numpy_scatter_nd(ref, index, updates, fun):
ref_shape = ref.shape
index_shape = index.shape
end_size = index_shape[-1]
remain_numl = 1
for i in range(len(index_shape) - 1):
remain_numl *= index_shape[i]
slice_size = 1
for i in range(end_size, len(ref_shape)):
slice_size *= ref_shape[i]
flat_index = index.reshape([remain_numl] + list(index_shape[-1:]))
flat_updates = updates.reshape((remain_numl, slice_size))
flat_output = ref.reshape(list(ref_shape[:end_size]) + [slice_size])
for i_up, i_out in enumerate(flat_index):
i_out = tuple(i_out)
flat_output[i_out] = fun(flat_output[i_out], flat_updates[i_up])
return flat_output.reshape(ref.shape)
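# Worked example of the flattening above (illustrative values): with
# ref = [0, 0, 0, 0], index = [[1], [3]] (so end_size = 1, remain_numl = 2,
# slice_size = 1) and updates = [10, 20], fun is applied at positions 1 and
# 3, giving [0, 10, 0, 20] for an additive fun.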
def numpy_scatter_nd_add(ref, index, updates):
return numpy_scatter_nd(ref, index, updates, lambda x, y: x + y)
def judge_update_shape(ref, index):
ref_shape = ref.shape
index_shape = index.shape
update_shape = []
for i in range(len(index_shape) - 1):
update_shape.append(index_shape[i])
for i in range(index_shape[-1], len(ref_shape), 1):
update_shape.append(ref_shape[i])
return update_shape
class TestScatterNdAddSimpleOp(OpTest):
"""
A simple example
"""
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
self.public_python_api = paddle.scatter_nd_add
self.prim_op_type = "prim"
self._set_dtype()
if self.dtype == np.float64:
target_dtype = "float64"
elif self.dtype == np.float16:
target_dtype = "float16"
else:
target_dtype = "float32"
ref_np = np.random.random([100]).astype(target_dtype)
index_np = np.random.randint(0, 100, [100, 1]).astype("int32")
updates_np = np.random.random([100]).astype(target_dtype)
expect_np = numpy_scatter_nd_add(ref_np.copy(), index_np, updates_np)
if self.dtype == np.uint16:
ref_np = convert_float_to_uint16(ref_np)
updates_np = convert_float_to_uint16(updates_np)
expect_np = convert_float_to_uint16(expect_np)
self.inputs = {'X': ref_np, 'Index': index_np, 'Updates': updates_np}
self.outputs = {'Out': expect_np}
def _set_dtype(self):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_cinn=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_prim=True)
class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp):
"""
A simple example
"""
def _set_dtype(self):
self.dtype = np.float16
@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not complied with CUDA and not support the bfloat16",
)
class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp):
"""
A simple example
"""
def _set_dtype(self):
self.dtype = np.uint16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_prim=True
)
class TestScatterNdAddWithEmptyIndex(OpTest):
"""
Index has empty element
"""
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
self.public_python_api = paddle.scatter_nd_add
self.prim_op_type = "prim"
self._set_dtype()
if self.dtype == np.float64:
target_dtype = "float64"
elif self.dtype == np.float16:
target_dtype = "float16"
else:
target_dtype = "float32"
ref_np = np.random.random((10, 10)).astype(target_dtype)
index_np = np.array([[], []]).astype("int32")
updates_np = np.random.random((2, 10, 10)).astype(target_dtype)
expect_np = numpy_scatter_nd_add(ref_np.copy(), index_np, updates_np)
if self.dtype == np.uint16:
ref_np = convert_float_to_uint16(ref_np)
updates_np = convert_float_to_uint16(updates_np)
expect_np = convert_float_to_uint16(expect_np)
self.inputs = {'X': ref_np, 'Index': index_np, 'Updates': updates_np}
self.outputs = {'Out': expect_np}
def _set_dtype(self):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_cinn=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_prim=True)
class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex):
"""
Index has empty element
"""
def _set_dtype(self):
self.dtype = np.float16
@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not complied with CUDA and not support the bfloat16",
)
class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex):
"""
Index has empty element
"""
def _set_dtype(self):
self.dtype = np.uint16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_prim=True
)
class TestScatterNdAddWithHighRankSame(OpTest):
"""
Both Index and X have high rank, and Rank(Index) = Rank(X)
"""
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
self.public_python_api = paddle.scatter_nd_add
self.prim_op_type = "prim"
self._set_dtype()
if self.dtype == np.float64:
target_dtype = "float64"
elif self.dtype == np.float16:
target_dtype = "float16"
else:
target_dtype = "float32"
shape = (3, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype(target_dtype)
index_np = np.vstack(
[np.random.randint(0, s, size=100) for s in shape]
).T.astype("int32")
update_shape = judge_update_shape(ref_np, index_np)
updates_np = np.random.rand(*update_shape).astype(target_dtype)
expect_np = numpy_scatter_nd_add(ref_np.copy(), index_np, updates_np)
if self.dtype == np.uint16:
ref_np = convert_float_to_uint16(ref_np)
updates_np = convert_float_to_uint16(updates_np)
expect_np = convert_float_to_uint16(expect_np)
self.inputs = {'X': ref_np, 'Index': index_np, 'Updates': updates_np}
self.outputs = {'Out': expect_np}
def _set_dtype(self):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_cinn=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_prim=True)
class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame):
"""
Both Index and X have high rank, and Rank(Index) = Rank(X)
"""
def _set_dtype(self):
self.dtype = np.float16
@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not complied with CUDA and not support the bfloat16",
)
class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame):
"""
Both Index and X have high rank, and Rank(Index) = Rank(X)
"""
def _set_dtype(self):
self.dtype = np.uint16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_prim=True
)
class TestScatterNdAddWithHighRankDiff(OpTest):
"""
Both Index and X have high rank, and Rank(Index) < Rank(X)
"""
def setUp(self):
self.op_type = "scatter_nd_add"
self.python_api = paddle.scatter_nd_add
self.public_python_api = paddle.scatter_nd_add
self.prim_op_type = "prim"
shape = (8, 2, 2, 1, 10)
ref_np = np.random.rand(*shape).astype("double")
index = np.vstack([np.random.randint(0, s, size=500) for s in shape]).T
index_np = index.reshape([10, 5, 10, 5]).astype("int64")
update_shape = judge_update_shape(ref_np, index_np)
updates_np = np.random.rand(*update_shape).astype("double")
expect_np = numpy_scatter_nd_add(ref_np.copy(), index_np, updates_np)
self.inputs = {'X': ref_np, 'Index': index_np, 'Updates': updates_np}
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output(check_cinn=True)
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_prim=True)
# Test Python API
class TestScatterNdOpAPI(unittest.TestCase):
"""
test scatter_nd_add api and scatter_nd api
"""
def testcase1(self):
with paddle.fluid.framework._static_guard():
ref1 = paddle.static.data(
name='ref1',
shape=[10, 9, 8, 1, 3],
dtype='float32',
)
index1 = paddle.static.data(
name='index1',
shape=[5, 5, 8, 5],
dtype='int32',
)
updates1 = paddle.static.data(
name='update1',
shape=[5, 5, 8],
dtype='float32',
)
output1 = paddle.scatter_nd_add(ref1, index1, updates1)
def testcase2(self):
with paddle.fluid.framework._static_guard():
ref2 = paddle.static.data(
name='ref2',
shape=[10, 9, 8, 1, 3],
dtype='double',
)
index2 = paddle.static.data(
name='index2',
shape=[5, 8, 5],
dtype='int32',
)
updates2 = paddle.static.data(
name='update2',
shape=[5, 8],
dtype='double',
)
output2 = paddle.scatter_nd_add(
ref2, index2, updates2, name="scatter_nd_add"
)
def testcase3(self):
with paddle.fluid.framework._static_guard():
shape3 = [10, 9, 8, 1, 3]
index3 = paddle.static.data(
name='index3',
shape=[5, 5, 8, 5],
dtype='int32',
)
updates3 = paddle.static.data(
name='update3',
shape=[5, 5, 8],
dtype='float32',
)
output3 = paddle.scatter_nd(index3, updates3, shape3)
def testcase4(self):
with paddle.fluid.framework._static_guard():
shape4 = [10, 9, 8, 1, 3]
index4 = paddle.static.data(
name='index4',
shape=[5, 5, 8, 5],
dtype='int32',
)
updates4 = paddle.static.data(
name='update4',
shape=[5, 5, 8],
dtype='double',
)
output4 = paddle.scatter_nd(
index4, updates4, shape4, name='scatter_nd'
)
def testcase5(self):
if not fluid.core.is_compiled_with_cuda():
return
shape = [2, 3, 4]
x = np.arange(int(np.prod(shape))).reshape(shape)
index = np.array([[0, 0, 2], [0, 1, 2]])
val = np.array([-1, -3])
with fluid.dygraph.guard():
device = paddle.get_device()
paddle.set_device('gpu')
gpu_value = paddle.scatter_nd_add(
paddle.to_tensor(x),
paddle.to_tensor(index),
paddle.to_tensor(val),
)
paddle.set_device('cpu')
cpu_value = paddle.scatter_nd_add(
paddle.to_tensor(x),
paddle.to_tensor(index),
paddle.to_tensor(val),
)
np.testing.assert_array_equal(gpu_value.numpy(), cpu_value.numpy())
paddle.set_device(device)
@switch_to_static_graph
def test_static_graph():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x_t = paddle.static.data(name="x", dtype=x.dtype, shape=x.shape)
index_t = paddle.static.data(
name="index", dtype=index.dtype, shape=index.shape
)
val_t = paddle.static.data(
name="val", dtype=val.dtype, shape=val.shape
)
out_t = paddle.scatter_nd_add(x_t, index_t, val_t)
feed = {x_t.name: x, index_t.name: index, val_t.name: val}
fetch = [out_t]
gpu_exe = paddle.static.Executor(paddle.CUDAPlace(0))
gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0]
cpu_exe = paddle.static.Executor(paddle.CPUPlace())
cpu_value = cpu_exe.run(feed=feed, fetch_list=fetch)[0]
np.testing.assert_array_equal(gpu_value, cpu_value)
test_static_graph()
# Test Raise Error
class TestScatterNdOpRaise(unittest.TestCase):
def test_check_raise(self):
def check_raise_is_test():
with paddle.fluid.framework._static_guard():
try:
ref5 = paddle.static.data(
name='ref5', shape=[-1, 3, 4, 5], dtype='float32'
)
index5 = paddle.static.data(
name='index5', shape=[-1, 2, 10], dtype='int32'
)
updates5 = paddle.static.data(
name='updates5', shape=[-1, 2, 10], dtype='float32'
)
output5 = paddle.scatter_nd_add(ref5, index5, updates5)
except Exception as e:
t = "The last dimension of Input(Index)'s shape should be no greater "
if t in str(e):
raise IndexError
self.assertRaises(IndexError, check_raise_is_test)
def test_check_raise2(self):
with self.assertRaises(ValueError):
with paddle.fluid.framework._static_guard():
ref6 = paddle.static.data(
name='ref6',
shape=[10, 9, 8, 1, 3],
dtype='double',
)
index6 = paddle.static.data(
name='index6',
shape=[5, 8, 5],
dtype='int32',
)
updates6 = paddle.static.data(
name='update6',
shape=[5, 8],
dtype='float32',
)
output6 = paddle.scatter_nd_add(ref6, index6, updates6)
def test_check_raise3(self):
def check_raise_is_test():
with paddle.fluid.framework._static_guard():
try:
shape = [3, 4, 5]
index7 = paddle.static.data(
name='index7', shape=[-1, 2, 1], dtype='int32'
)
updates7 = paddle.static.data(
name='updates7',
shape=[-1, 2, 4, 5, 20],
dtype='float32',
)
output7 = paddle.scatter_nd(index7, updates7, shape)
except Exception as e:
t = "Updates has wrong shape"
if t in str(e):
raise ValueError
self.assertRaises(ValueError, check_raise_is_test)
class TestDygraph(unittest.TestCase):
def test_dygraph(self):
with fluid.dygraph.guard(fluid.CPUPlace()):
index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64)
index = fluid.dygraph.to_variable(index_data)
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
shape = [3, 5, 9, 10]
output = paddle.scatter_nd(index, updates, shape)
def test_dygraph_1(self):
with fluid.dygraph.guard(fluid.CPUPlace()):
x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64)
index = fluid.dygraph.to_variable(index_data)
output = paddle.scatter_nd_add(x, index, updates)
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
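# Standalone dygraph usage sketch of the op under test (CPU only; mirrors
# testcase5 above minus the GPU branch, and matches the worked example
# noted after numpy_scatter_nd). Assumes a paddle build where dynamic
# graph mode is the default:
import numpy as np
import paddle

x = paddle.to_tensor(np.zeros([4], dtype="float32"))
index = paddle.to_tensor(np.array([[1], [3]], dtype="int64"))
updates = paddle.to_tensor(np.array([10.0, 20.0], dtype="float32"))
out = paddle.scatter_nd_add(x, index, updates)
assert np.allclose(out.numpy(), [0.0, 10.0, 0.0, 20.0])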
|
769ec3448eb5eb5c4100aec665e7094f8a105786
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/cfitsio/all/conanfile.py
|
b5849b0ae29466397a6bac68ecc8c6bec4352465
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,730
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
from conan.tools.scm import Version
import glob
import os
required_conan_version = ">=1.54.0"
class CfitsioConan(ConanFile):
name = "cfitsio"
description = "C library for reading and writing data files in FITS " \
"(Flexible Image Transport System) data format"
license = "ISC"
topics = ("fits", "image", "nasa", "astronomy", "astrophysics", "space")
homepage = "https://heasarc.gsfc.nasa.gov/fitsio/"
url = "https://github.com/conan-io/conan-center-index"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"threadsafe": [True, False],
"simd_intrinsics": [None, "sse2", "ssse3"],
"with_bzip2": [True, False],
"with_curl": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"threadsafe": False,
"simd_intrinsics": None,
"with_bzip2": False,
"with_curl": False,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.with_curl
if self.settings.arch not in ["x86", "x86_64"]:
del self.options.simd_intrinsics
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("zlib/1.2.13")
if self.options.threadsafe and self.settings.os == "Windows" and \
self.settings.compiler.get_safe("threads") != "posix":
self.requires("pthreads4w/3.0.0")
if self.options.with_bzip2:
self.requires("bzip2/1.0.8")
if self.options.get_safe("with_curl"):
self.requires("libcurl/8.0.0")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["USE_PTHREADS"] = self.options.threadsafe
if Version(self.version) >= "4.1.0":
tc.variables["USE_SSE2"] = self.options.get_safe("simd_intrinsics") == "sse2"
tc.variables["USE_SSSE3"] = self.options.get_safe("simd_intrinsics") == "ssse3"
tc.variables["USE_BZIP2"] = self.options.with_bzip2
else:
tc.variables["CFITSIO_USE_SSE2"] = self.options.get_safe("simd_intrinsics") == "sse2"
tc.variables["CFITSIO_USE_SSSE3"] = self.options.get_safe("simd_intrinsics") == "ssse3"
tc.variables["CFITSIO_USE_BZIP2"] = self.options.with_bzip2
if Version(self.version) >= "4.0.0":
tc.variables["USE_CURL"] = self.options.get_safe("with_curl", False)
tc.variables["TESTS"] = False
tc.variables["UTILS"] = False
else:
tc.variables["UseCurl"] = self.options.get_safe("with_curl", False)
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
if Version(self.version) < "4.0.0":
# Remove embedded zlib files
for zlib_file in glob.glob(os.path.join(self.source_folder, "zlib", "*")):
if not zlib_file.endswith(("zcompress.c", "zuncompress.c")):
os.remove(zlib_file)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "License.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "lib", f"cfitsio-{self.version}"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "cfitsio")
self.cpp_info.set_property("cmake_target_name", "cfitsio::cfitsio")
self.cpp_info.set_property("pkg_config_name", "cfitsio")
self.cpp_info.libs = ["cfitsio"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
if self.options.threadsafe:
self.cpp_info.system_libs.append("pthread")
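# Hedged local-build sketch (the version number and option value below are
# illustrative, not pinned by this recipe):
# conan create . --version=4.2.0 -o "cfitsio/*:threadsafe=True"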
|
190f08b00e8aa79ca7b82f1b57f23fd937b20b08
|
a5cffc68c40887b34c298f98b3c684a84bbfe96e
|
/apps/radioss/get_radioss_bm_info.py
|
a12ffa7584d89b6e57a25e6033d771776cdfac16
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azurehpc
|
54e3a852b0ef739ec598cfd751929aa0b004d5ff
|
f9766c25d7084bbab463182dadf9ed48e58a09ba
|
refs/heads/master
| 2023-08-19T01:44:44.088508
| 2023-08-02T18:27:10
| 2023-08-02T18:27:10
| 196,273,513
| 112
| 71
|
MIT
| 2023-08-02T18:27:11
| 2019-07-10T20:52:18
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,381
|
py
|
get_radioss_bm_info.py
|
#!/bin/env python3
import argparse
from subprocess import Popen, PIPE
import json
import pprint
import statistics as stat
import os, sys
parser = argparse.ArgumentParser(prog='PROG', usage='%(prog)s [options]')
parser.add_argument("-m", type=str, help="-m <model name>")
parser.add_argument("-f", type=str, help="-f <file name>")
args = parser.parse_args()
filename=""
if args.m != None:
print("Model Name: {}".format(args.m))
filename="{}_00001.out".format(args.m)
if args.f != None:
# print("File Name: {}".format(args.f))
filename="{}".format(args.f)
# Read file and collect data
data = {}
data["procs_cpu"] = []
with open(filename) as rst:
lines=rst.readlines()
try:
data["Model"] = lines[0][:-10]
except:
sys.exit(-1)
for i,line in enumerate(lines):
if line.find("NUMBER OF SPMD DOMAINS") != -1:
sline = line.split()
# print("Sline: {}".format(sline))
data["NumOfSpmdDomains"] = sline[-1]
elif line.find("NUMBER OF THREADS PER DOMAIN") != -1:
sline = line.split()
# print("Sline: {}".format(sline))
data["NumOfThreadsPerDomain"] = sline[-1]
elif line.find("ELAPSED TIME") != -1:
sline = line.split()
# print("Sline: {}".format(sline))
data["ElapsedTime"] = sline[-2]
elif line.find("ESTIMATED SPEEDUP") != -1:
sline = line.split()
# print("Sline: {}".format(sline))
data["EstimatedSpeedUp"] = line.split()[-1]
elif line.find("** SPMD COMM. TIME **") != -1:
nlines = lines[i:]
for nline in nlines:
if nline.find("** CUMULATIVE CPU TIME SUMMARY **") == -1:
sline = nline.split()
if len(sline) > 0:
# print("Sline: {}".format(sline))
if len(sline) == 7:
data["procs_cpu"].append(float(sline[-1]))
else:
break
# print("Data: {}".format(data))
# Add json data
try:
jdata = {}
jdata["model"] = data["Model"]
jdata["total_time"] = data["ElapsedTime"]
jdata["num_of_spmd_domains"] = data["NumOfSpmdDomains"]
jdata["num_of_threads_per_domain"] = data["NumOfThreadsPerDomain"]
jdata["est_speed_up"] = data["EstimatedSpeedUp"]
jdata["cpu_mean"] = "{:2.3}".format(stat.mean(data["procs_cpu"]))
jdata["cpu_max"] = "{:2.3}".format(max(data["procs_cpu"]))
jdata["cpu_min"] = "{:2.3}".format(min(data["procs_cpu"]))
jdata["cpu_stdev"] = "{:2.3}".format(stat.stdev(data["procs_cpu"]))
#jdata = json.dumps(jdata)
# pprint.pprint(jdata)
except:
sys.exit(-1)
if args.m != None:
with open('{}_results.json'.format(data["Model"].strip()), 'w') as f:
json.dump(jdata, f, indent=4, separators=(',', ': '), sort_keys=True)
print(
"{:>20}".format(data["Model"]), \
"{:>10}".format(data["ElapsedTime"]), \
"{:>10}".format(data["NumOfSpmdDomains"]), \
"{:>10}".format(data["NumOfThreadsPerDomain"]), \
"{:>10}".format(data["EstimatedSpeedUp"]), \
"{:>10.2f}".format(stat.mean(data["procs_cpu"])), \
"{:>10.2f}".format(max(data["procs_cpu"])), \
"{:>10.2f}".format(min(data["procs_cpu"])), \
"{:>10.2f}".format(stat.stdev(data["procs_cpu"]))
)
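# Hedged example invocations (the model name below is hypothetical):
# ./get_radioss_bm_info.py -m NEON1M11
# ./get_radioss_bm_info.py -f results/NEON1M11_00001.out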
|
2f2b472b1f7dd38965842016a83c6e20402ad5e3
|
a935ec8dab61675b6a3e348511fb9f8c5d490026
|
/python/audioflux/dsp/resample.py
|
1d4a33144941912e372539589d02ff489fc14ec2
|
[
"MIT"
] |
permissive
|
libAudioFlux/audioFlux
|
d604c54941c17d90e1ef42f04ba353ef6e244926
|
3ae59434e2c1cacb6da43562677ed4899d7047c7
|
refs/heads/master
| 2023-04-28T05:17:22.649299
| 2023-04-25T09:45:59
| 2023-04-25T09:45:59
| 589,514,195
| 1,701
| 85
|
MIT
| 2023-04-25T09:00:05
| 2023-01-16T09:53:04
|
C
|
UTF-8
|
Python
| false
| false
| 6,620
|
py
|
resample.py
|
import numpy as np
from ctypes import Structure, POINTER, pointer, c_int, c_float, c_void_p
from audioflux.base import Base
from audioflux.type import ResampleQualityType, WindowType
from audioflux.utils import check_audio, format_channel, revoke_channel
__all__ = ["Resample", "WindowResample"]
class OpaqueResample(Structure):
_fields_ = []
def _get_quality_type(tp):
if isinstance(tp, ResampleQualityType):
return tp
if not isinstance(tp, str):
raise ValueError(f'ResampleQualityType[{tp}] not supported')
if tp in ('af_best', 'audio_best', 'best'):
return ResampleQualityType.BEST
elif tp in ('af_mid', 'audio_mid', 'mid'):
return ResampleQualityType.MID
elif tp in ('af_fast', 'audio_fast', 'fast'):
return ResampleQualityType.FAST
raise ValueError(f'ResampleQualityType[{tp}] not supported')
class ResampleBase(Base):
def __init__(self):
super(ResampleBase, self).__init__(pointer(OpaqueResample()))
self.source_rate = None
self.target_rate = None
def set_samplate(self, source_rate, target_rate):
"""
Set samplate
Parameters
----------
source_rate: int
target_rate: int
"""
fn = self._lib['resampleObj_setSamplate']
fn.argtypes = [POINTER(OpaqueResample), c_int, c_int]
fn(self._obj, c_int(source_rate), c_int(target_rate))
self.source_rate = source_rate
self.target_rate = target_rate
def cal_data_length(self, data_length):
"""
        Compute the output data length for a given input data length
Parameters
----------
data_length: int
Returns
-------
out: int
"""
fn = self._lib['resampleObj_calDataLength']
fn.argtypes = [POINTER(OpaqueResample), c_int]
fn.restype = c_int
        return fn(self._obj, c_int(data_length))
def resample(self, data_arr):
"""
Resample for audio data
Parameters
----------
data_arr: np.ndarray [shape=(..., n)]
Input audio array.
Returns
-------
out: np.ndarray [shape=(..., n)]
Audio data after resampling
"""
data_arr = np.asarray(data_arr, dtype=np.float32, order='C')
check_audio(data_arr, is_mono=False)
fn = self._lib['resampleObj_resample']
fn.argtypes = [POINTER(OpaqueResample),
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1, flags='C_CONTIGUOUS'),
c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1, flags='C_CONTIGUOUS')]
data_len = data_arr.shape[-1]
if data_arr.ndim == 1:
ret_arr = np.zeros(data_len * 5, dtype=np.float32)
new_arr_len = fn(self._obj, data_arr, c_int(data_len), ret_arr)
else:
data_arr, o_channel_shape = format_channel(data_arr, 1)
channel_num = data_arr.shape[0]
ret_arr = np.zeros((channel_num, data_len * 5), dtype=np.float32)
new_arr_len = 0
for i in range(channel_num):
_new_arr_len = fn(self._obj, data_arr[i], c_int(data_len), ret_arr[i])
new_arr_len = max(new_arr_len, _new_arr_len)
ret_arr = revoke_channel(ret_arr, o_channel_shape, 1)
return ret_arr[..., :new_arr_len]
def __del__(self):
if self._is_created:
free_fn = self._lib['resampleObj_free']
free_fn.argtypes = [POINTER(OpaqueResample)]
free_fn.restype = c_void_p
free_fn(self._obj)
class Resample(ResampleBase):
"""
Calculate resampling
Parameters
----------
qual_type: ResampleQualityType or str
Resample quality type
is_scale: bool
Whether to use scale
Examples
--------
>>> import audioflux as af
>>> from audioflux.type import ResampleQualityType
>>> audio_data, sr = af.read(af.utils.sample_path('220'))
>>>
>>> resample_obj = af.Resample(qual_type=ResampleQualityType.BEST, is_scale=False)
>>> resample_obj.set_samplate(sr, 16000)
>>> new_audio_data = resample_obj.resample(audio_data)
"""
def __init__(self, qual_type=ResampleQualityType.BEST, is_scale=False):
super(Resample, self).__init__()
self.qual_type = _get_quality_type(qual_type)
self.is_scale = is_scale
self.is_continue = False
fn = self._lib['resampleObj_new']
fn.argtypes = [POINTER(POINTER(OpaqueResample)),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
fn(self._obj,
None if self.qual_type is None else pointer(c_int(self.qual_type.value)),
pointer(c_int(int(self.is_scale))),
pointer(c_int(int(self.is_continue))))
self._is_created = True
class WindowResample(ResampleBase):
"""
Calculate window resample
Parameters
----------
zero_num: int
nbit: int
win_type: WindowType
value: float
roll_off: float
is_scale: bool
Whether to use scale
Examples
--------
>>> import audioflux as af
>>> audio_data, sr = af.read(af.utils.sample_path('220'))
>>>
>>> resample_obj = af.WindowResample()
>>> resample_obj.set_samplate(sr, 16000)
>>> new_audio_data = resample_obj.resample(audio_data)
"""
def __init__(self, zero_num=64, nbit=9, win_type=WindowType.HANN,
value=None, roll_off=0.945, is_scale=False):
super(WindowResample, self).__init__()
self.zero_num = zero_num
self.nbit = nbit
self.win_type = win_type
self.value = value
self.roll_off = roll_off
self.is_scale = is_scale
self.is_continue = False
fn = self._lib['resampleObj_newWithWindow']
fn.argtypes = [POINTER(POINTER(OpaqueResample)),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_float),
POINTER(c_float),
POINTER(c_int),
POINTER(c_int)]
fn(self._obj,
pointer(c_int(self.zero_num)),
pointer(c_int(self.nbit)),
pointer(c_int(self.win_type.value)),
None if self.value is None else pointer(c_float(self.value)),
pointer(c_float(self.roll_off)),
pointer(c_int(int(self.is_scale))),
pointer(c_int(int(self.is_continue))))
self._is_created = True
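# Minimal multi-channel sketch (assumes audioflux is importable and that
# `stereo_data` is a float32 array of shape (2, n) sampled at rate `sr`):
#
# import audioflux as af
# resample_obj = af.WindowResample()
# resample_obj.set_samplate(sr, 8000)
# out = resample_obj.resample(stereo_data) # leading channel axis is preserved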
|
75bc70e0e2651581cddb993ad1001841556fff84
|
91fe60d56e95c1824d52921b100c8b13a2770163
|
/duel/help.py
|
c41356336a11997e0e682aeb8420ad9b618404d7
|
[
"BSD-3-Clause"
] |
permissive
|
vuvova/gdb-tools
|
9b109db954c155caded6d121a92232bf9609f30c
|
c9dd6fa10576f155c401ee7b1c2c8c9d3d15cb2c
|
refs/heads/arpeggio
| 2023-07-09T23:50:29.174461
| 2023-07-02T21:12:31
| 2023-07-02T21:12:59
| 90,655,454
| 129
| 22
| null | 2017-06-20T19:38:08
| 2017-05-08T17:37:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
help.py
|
import re
INTRO = """\
Supported DUEL commands:
duel help - give basic help (shortcut: dl ?)
duel longhelp - give a longer help (dl ??)
duel examples - show useful usage examples
duel operators - operators summary
duel aliases - show current aliases
duel clear - clear all aliases
"""
with open(__file__.rstrip("pyc") + "md") as f: LONGHELP = ''.join(f)
HELP = """\
Duel - Debugging U (might) Even Like -- A high level data exploration language
Duel was designed to overcome problems with traditional debuggers' print
statement. It supports the C operators, many C constructs, and many new
operators for easy exploration of the program's space, e.g.
x[..100] >? 0 show positive x[i] for i=0 to 99
y[10..20].code !=? 0 show non-zero y[i].code for i=10 to 20
h-->next->code expand linked list h->next, h->next->next ...
head-->next.if(code>0) name show name for each element with code>0
x[i:=..100]=y[i]; array copy. i is an alias to vals 0..99
head-->next[[10..15]] the 10th to 15th element of a linked list
#/(head-->next->val==?4) count elements with val==4
head-->next->if(next) val >? next->val check if list is sorted by val
Duel was created by Michael Golan at Princeton University.
Duel.py is a pure-python Duel implementation by Sergei Golubchik.
Try "dl operators" or "dl longhelp"
"""
OPERATORS = re.sub(r'(?s)^.*\nOperators\n---------\n\n*(.*?\n)[^\n]+\n-----+\n.*$', r'\1', LONGHELP)
EXAMPLES = re.sub(r'(?s)^.*\nExamples\n--------\n\n*(.*?\n)[^\n]+\n-----+\n.*$', r'\1', LONGHELP)
EXAMPLES = re.sub(r'\n\n', r'\n', EXAMPLES)
|
af7539e1ca69ed58680d06bfe32f1a915624b4ea
|
8848d293343245920974f774e22f470cd99e2a86
|
/tests/test_visualization.py
|
40d67a228bb1f0a708bf961cfc9e8d9b97619d33
|
[
"BSD-3-Clause"
] |
permissive
|
UCSBarchlab/PyRTL
|
8765d4393cf39b6f5fec91821a4bf84ae5a03d8e
|
90646ed6adadfc82f7077754ff7c89b40c621fcc
|
refs/heads/development
| 2023-08-07T18:37:41.365915
| 2023-07-28T22:32:10
| 2023-07-28T22:32:10
| 17,123,539
| 215
| 82
|
BSD-3-Clause
| 2023-09-12T21:05:35
| 2014-02-24T02:58:06
|
Python
|
UTF-8
|
Python
| false
| false
| 17,399
|
py
|
test_visualization.py
|
import unittest
import random
import io
import pyrtl
graphviz_string_detailed = """\
digraph g {
graph [splines="spline", outputorder="edgesfirst"];
node [shape=circle, style=filled, fillcolor=lightblue1,
fontcolor=black, fontname=helvetica, penwidth=0,
fixedsize=shape];
edge [labelfloat=false, penwidth=2, color=deepskyblue, arrowsize=.5];
n0 [label="a", shape=invhouse, fillcolor=coral];
n1 [label="8", shape=circle, fillcolor=lightgrey];
n2 [label="0", shape=circle, fillcolor=lightgrey];
n3 [label="0", shape=circle, fillcolor=lightgrey];
n4 [label=" (Fanout: 0)", height=.1, width=.1];
n5 [label="d", shape=house, fillcolor=lawngreen];
n6 [label="[0]*2 (Fanout: 1)", fillcolor=azure1, height=.25, width=.25];
n7 [label="concat (Fanout: 1)", height=.1, width=.1];
n8 [label="* (Fanout: 1)"];
n9 [label="[7:2] (Fanout: 1)", fillcolor=azure1, height=.25, width=.25];
n10 [label="[0]*4 (Fanout: 1)", fillcolor=azure1, height=.25, width=.25];
n11 [label="concat (Fanout: 1)", height=.1, width=.1];
n0 -> n7 [label="a/2 (Delay: 0.00)", penwidth="6", arrowhead="none"];
n1 -> n8 [label="const_0_8/4 (Delay: 0.00)", penwidth="6", arrowhead="normal"];
n2 -> n6 [label="const_1_0/1 (Delay: 0.00)", penwidth="2", arrowhead="none"];
n3 -> n10 [label="const_2_0/1 (Delay: 0.00)", penwidth="2", arrowhead="none"];
n4 -> n5 [label="d/10 (Delay: 706.50)", penwidth="6", arrowhead="normal"];
n6 -> n7 [label="tmp0/2 (Delay: 0.00)", penwidth="6", arrowhead="none"];
n7 -> n8 [label="tmp1/4 (Delay: 0.00)", penwidth="6", arrowhead="normal"];
n8 -> n9 [label="tmp2/8 (Delay: 706.50)", penwidth="6", arrowhead="none"];
n9 -> n11 [label="tmp3/6 (Delay: 706.50)", penwidth="6", arrowhead="none"];
n10 -> n11 [label="tmp4/4 (Delay: 0.00)", penwidth="6", arrowhead="none"];
n11 -> n4 [label="tmp5/10 (Delay: 706.50)", penwidth="6", arrowhead="normal"];
{
rank=same;
edge[style=invis];
n6 -> n0;
rankdir=LR;
}
{
rank=same;
edge[style=invis];
n10 -> n9;
rankdir=LR;
}
}
"""
graphviz_string_arg_ordered = """\
digraph g {
graph [splines="spline", outputorder="edgesfirst"];
node [shape=circle, style=filled, fillcolor=lightblue1,
fontcolor=black, fontname=helvetica, penwidth=0,
fixedsize=shape];
edge [labelfloat=false, penwidth=2, color=deepskyblue, arrowsize=.5];
n0 [label="0", shape=circle, fillcolor=lightgrey];
n1 [label="0", shape=circle, fillcolor=lightgrey];
n2 [label="0", shape=circle, fillcolor=lightgrey];
n3 [label="i", shape=invhouse, fillcolor=coral];
n4 [label="j", shape=invhouse, fillcolor=coral];
n5 [label="", height=.1, width=.1];
n6 [label="o", shape=house, fillcolor=lawngreen];
n7 [label="", height=.1, width=.1];
n8 [label="q", shape=house, fillcolor=lawngreen];
n9 [label="[0]*4", fillcolor=azure1, height=.25, width=.25];
n10 [label="concat", height=.1, width=.1];
n11 [label="<"];
n12 [label="[0]*7", fillcolor=azure1, height=.25, width=.25];
n13 [label="concat", height=.1, width=.1];
n14 [label="[0]*4", fillcolor=azure1, height=.25, width=.25];
n15 [label="concat", height=.1, width=.1];
n16 [label=">"];
n0 -> n9 [label="", penwidth="2", arrowhead="none"];
n1 -> n12 [label="", penwidth="2", arrowhead="none"];
n2 -> n14 [label="", penwidth="2", arrowhead="none"];
n3 -> n11 [label="", penwidth="6", arrowhead="normal"];
n3 -> n16 [label="", penwidth="6", arrowhead="normal"];
n4 -> n10 [label="", penwidth="6", arrowhead="none"];
n4 -> n15 [label="", penwidth="6", arrowhead="none"];
n5 -> n6 [label="", penwidth="6", arrowhead="normal"];
n7 -> n8 [label="", penwidth="2", arrowhead="normal"];
n9 -> n10 [label="", penwidth="6", arrowhead="none"];
n10 -> n11 [label="", penwidth="6", arrowhead="normal"];
n11 -> n13 [label="", penwidth="2", arrowhead="none"];
n12 -> n13 [label="", penwidth="6", arrowhead="none"];
n13 -> n5 [label="", penwidth="6", arrowhead="normal"];
n14 -> n15 [label="", penwidth="6", arrowhead="none"];
n15 -> n16 [label="", penwidth="6", arrowhead="normal"];
n16 -> n7 [label="", penwidth="2", arrowhead="normal"];
{
rank=same;
edge[style=invis];
n9 -> n4;
rankdir=LR;
}
{
rank=same;
edge[style=invis];
n3 -> n10;
rankdir=LR;
}
{
rank=same;
edge[style=invis];
n12 -> n11;
rankdir=LR;
}
{
rank=same;
edge[style=invis];
n14 -> n4;
rankdir=LR;
}
{
rank=same;
edge[style=invis];
n15 -> n3;
rankdir=LR;
}
}
"""
graphviz_string_arg_unordered = """\
digraph g {
graph [splines="spline", outputorder="edgesfirst"];
node [shape=circle, style=filled, fillcolor=lightblue1,
fontcolor=black, fontname=helvetica, penwidth=0,
fixedsize=shape];
edge [labelfloat=false, penwidth=2, color=deepskyblue, arrowsize=.5];
n0 [label="0", shape=circle, fillcolor=lightgrey];
n1 [label="0", shape=circle, fillcolor=lightgrey];
n2 [label="0", shape=circle, fillcolor=lightgrey];
n3 [label="i", shape=invhouse, fillcolor=coral];
n4 [label="j", shape=invhouse, fillcolor=coral];
n5 [label="", height=.1, width=.1];
n6 [label="o", shape=house, fillcolor=lawngreen];
n7 [label="", height=.1, width=.1];
n8 [label="q", shape=house, fillcolor=lawngreen];
n9 [label="[0]*4", fillcolor=azure1, height=.25, width=.25];
n10 [label="concat", height=.1, width=.1];
n11 [label="<"];
n12 [label="[0]*7", fillcolor=azure1, height=.25, width=.25];
n13 [label="concat", height=.1, width=.1];
n14 [label="[0]*4", fillcolor=azure1, height=.25, width=.25];
n15 [label="concat", height=.1, width=.1];
n16 [label=">"];
n0 -> n9 [label="", penwidth="2", arrowhead="none"];
n1 -> n12 [label="", penwidth="2", arrowhead="none"];
n2 -> n14 [label="", penwidth="2", arrowhead="none"];
n3 -> n11 [label="", penwidth="6", arrowhead="normal"];
n3 -> n16 [label="", penwidth="6", arrowhead="normal"];
n4 -> n10 [label="", penwidth="6", arrowhead="none"];
n4 -> n15 [label="", penwidth="6", arrowhead="none"];
n5 -> n6 [label="", penwidth="6", arrowhead="normal"];
n7 -> n8 [label="", penwidth="2", arrowhead="normal"];
n9 -> n10 [label="", penwidth="6", arrowhead="none"];
n10 -> n11 [label="", penwidth="6", arrowhead="normal"];
n11 -> n13 [label="", penwidth="2", arrowhead="none"];
n12 -> n13 [label="", penwidth="6", arrowhead="none"];
n13 -> n5 [label="", penwidth="6", arrowhead="normal"];
n14 -> n15 [label="", penwidth="6", arrowhead="none"];
n15 -> n16 [label="", penwidth="6", arrowhead="normal"];
n16 -> n7 [label="", penwidth="2", arrowhead="normal"];
}
"""
class TestOutputGraphs(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
def test_output_to_tgf_does_not_throw_error(self):
from .test_importexport import full_adder_blif
with io.StringIO() as vfile:
pyrtl.input_from_blif(full_adder_blif)
pyrtl.output_to_trivialgraph(vfile)
def test_output_to_graphviz_does_not_throw_error(self):
from .test_importexport import full_adder_blif
with io.StringIO() as vfile:
pyrtl.input_from_blif(full_adder_blif)
pyrtl.output_to_graphviz(vfile)
def test_output_to_graphviz_with_custom_namer_does_not_throw_error(self):
from .test_importexport import full_adder_blif
with io.StringIO() as vfile:
pyrtl.input_from_blif(full_adder_blif)
timing = pyrtl.TimingAnalysis()
node_fan_in = {net: len(net.args) for net in pyrtl.working_block()}
graph_namer = pyrtl.graphviz_detailed_namer(
extra_node_info=node_fan_in,
extra_edge_info=timing.timing_map
)
pyrtl.output_to_graphviz(vfile, namer=graph_namer)
def test_output_to_graphviz_correct_detailed_output(self):
pyrtl.wire._reset_wire_indexers()
a = pyrtl.Input(2, 'a')
b = a * 8
c = b[2:]
d = pyrtl.Output(10, 'd')
d <<= c
analysis = pyrtl.TimingAnalysis()
_, dst_map = pyrtl.working_block().net_connections()
def get_fanout(n):
if isinstance(n, pyrtl.LogicNet):
if n.op == '@':
return 0
w = n.dests[0]
else:
w = n
if isinstance(w, pyrtl.Output):
return 0
else:
return len(dst_map[w])
node_fanout = {n: "Fanout: %d" % get_fanout(n) for n in pyrtl.working_block().logic}
wire_delay = {
w: "Delay: %.2f" % analysis.timing_map[w] for w in pyrtl.working_block().wirevector_set
}
with io.StringIO() as vfile:
pyrtl.output_to_graphviz(
file=vfile,
namer=pyrtl.graphviz_detailed_namer(node_fanout, wire_delay),
maintain_arg_order=True
)
self.assertEqual(vfile.getvalue(), graphviz_string_detailed)
def test_output_to_graphviz_correct_output_with_arg_ordering(self):
i = pyrtl.Input(8, 'i')
j = pyrtl.Input(4, 'j')
o = pyrtl.Output(8, 'o')
q = pyrtl.Output(1, 'q')
o <<= i < j
q <<= j > i
with io.StringIO() as vfile:
pyrtl.output_to_graphviz(file=vfile, maintain_arg_order=True)
self.assertEqual(vfile.getvalue(), graphviz_string_arg_ordered)
def test_output_to_graphviz_correct_output_without_arg_ordering(self):
i = pyrtl.Input(8, 'i')
j = pyrtl.Input(4, 'j')
o = pyrtl.Output(8, 'o')
q = pyrtl.Output(1, 'q')
o <<= i < j
q <<= j > i
with io.StringIO() as vfile:
pyrtl.output_to_graphviz(file=vfile)
self.assertEqual(vfile.getvalue(), graphviz_string_arg_unordered)
class TestNetGraph(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
def test_as_graph(self):
inwire = pyrtl.Input(bitwidth=1, name="inwire1")
inwire2 = pyrtl.Input(bitwidth=1)
inwire3 = pyrtl.Input(bitwidth=1)
tempwire = pyrtl.WireVector()
tempwire2 = pyrtl.WireVector()
outwire = pyrtl.Output()
tempwire <<= inwire | inwire2
tempwire2 <<= ~tempwire
outwire <<= tempwire2 & inwire3
g = pyrtl.net_graph()
# note for future: this might fail if we change
# the way that temp wires are inserted, but that
# should not matter for this test and so the number
# can be safely updated.
self.assertEqual(len(g), 10)
self.assertEqual(len(g[inwire]), 1)
self.assertEqual(list(g[inwire].keys())[0].op, '|')
self.assertEqual(len(g[inwire].values()), 1)
edges = list(g[inwire].values())[0]
self.assertEqual(len(edges), 1)
self.assertIs(edges[0], inwire)
def test_netgraph_unused_wires(self):
genwire = pyrtl.WireVector(8, "genwire")
inwire = pyrtl.Input(8, "inwire")
outwire = pyrtl.Output(8, "outwire")
constwire = pyrtl.Const(8, 8)
reg = pyrtl.Register(8, "reg")
g = pyrtl.net_graph()
self.assertEqual(len(g), 0)
def test_netgraph_same_wire_multiple_edges_to_same_net(self):
c = pyrtl.Const(1, 1)
w = pyrtl.concat(c, c, c)
g = pyrtl.net_graph()
self.assertEqual(len(g[c]), 1)
edges = list(g[c].values())[0]
self.assertEqual(len(edges), 3)
for w in edges:
self.assertIs(w, c)
class TestOutputIPynb(unittest.TestCase):
def setUp(self):
pyrtl.reset_working_block()
self.maxDiff = None
def test_one_bit_adder_matches_expected(self):
temp1 = pyrtl.WireVector(bitwidth=1, name='temp1')
temp2 = pyrtl.WireVector()
a, b, c = pyrtl.Input(1, 'a'), pyrtl.Input(1, 'b'), pyrtl.Input(1, 'c')
sum, carry_out = pyrtl.Output(1, 'sum'), pyrtl.Output(1, 'carry_out')
sum <<= a ^ b ^ c
temp1 <<= a & b # connect the result of a & b to the pre-allocated wirevector
temp2 <<= a & c
temp3 = b & c # temp3 IS the result of b & c (this is the first mention of temp3)
carry_out <<= temp1 | temp2 | temp3
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(15):
sim.step({
'a': random.choice([0, 1]),
'b': random.choice([0, 1]),
'c': random.choice([0, 1])
})
htmlstring = pyrtl.trace_to_html(sim_trace) # tests if it compiles or not
def test_trace_to_html(self):
i = pyrtl.Input(1, 'i')
o = pyrtl.Output(2, 'o')
o <<= i + 1
sim = pyrtl.Simulation()
sim.step_multiple({'i': '0100110'})
htmlstring = pyrtl.trace_to_html(sim.tracer)
expected = (
'<script type="WaveDrom">\n'
'{\n'
' signal : [\n'
' { name: "i", wave: "010.1.0" },\n'
' { name: "o", wave: "===.=.=", data: ["0x1", "0x2", "0x1", "0x2", "0x1"] },\n'
' ],\n'
' config: { hscale: 1 }\n'
'}\n'
'</script>\n'
)
self.assertEqual(htmlstring, expected)
def test_trace_to_html_repr_func(self):
i = pyrtl.Input(1, 'i')
o = pyrtl.Output(2, 'o')
o <<= i + 1
sim = pyrtl.Simulation()
sim.step_multiple({'i': '0100110'})
htmlstring = pyrtl.trace_to_html(sim.tracer, repr_func=bin)
expected = (
'<script type="WaveDrom">\n'
'{\n'
' signal : [\n'
' { name: "i", wave: "010.1.0" },\n'
' { name: "o", wave: "===.=.=", data: ["0b1", "0b10", "0b1", "0b10", "0b1"] },\n'
' ],\n'
' config: { hscale: 1 }\n'
'}\n'
'</script>\n'
)
self.assertEqual(htmlstring, expected)
def test_trace_to_html_repr_per_name(self):
from enum import IntEnum
class Foo(IntEnum):
A = 0
B = 1
C = 2
D = 3
def __str__(self):
'''Changed in version 3.11: __str__() is now int.__str__()'''
cls_name = self.__class__.__name__
return f'{cls_name}.{self.name}'
i = pyrtl.Input(4, 'i')
state = pyrtl.Register(max(Foo).bit_length(), name='state')
o = pyrtl.Output(name='o')
o <<= state
with pyrtl.conditional_assignment:
with i == 0b0001:
state.next |= Foo.A
with i == 0b0010:
state.next |= Foo.B
with i == 0b0100:
state.next |= Foo.C
with i == 0b1000:
state.next |= Foo.D
sim = pyrtl.Simulation()
sim.step_multiple({
'i': [1, 2, 4, 8, 0]
})
htmlstring = pyrtl.trace_to_html(sim.tracer, repr_per_name={'state': Foo})
expected = (
'<script type="WaveDrom">\n'
'{\n'
' signal : [\n'
' { name: "i", wave: "=====", data: ["0x1", "0x2", "0x4", "0x8", "0x0"] },\n'
' { name: "o", wave: "=.===", data: ["0x0", "0x1", "0x2", "0x3"] },\n'
' { name: "state", wave: "=.===", data: ["Foo.A", "Foo.B", "Foo.C", "Foo.D"] },\n'
' ],\n'
' config: { hscale: 2 }\n'
'}\n'
'</script>\n'
)
self.assertEqual(htmlstring, expected)
def test_trace_to_html_repr_per_name_enum_is_bool(self):
from enum import IntEnum
class Foo(IntEnum):
A = 0
B = 1
def __str__(self):
'''Changed in version 3.11: __str__() is now int.__str__()'''
cls_name = self.__class__.__name__
return f'{cls_name}.{self.name}'
i = pyrtl.Input(2, 'i')
state = pyrtl.Register(max(Foo).bit_length(), name='state')
o = pyrtl.Output(name='o')
o <<= state
with pyrtl.conditional_assignment:
with i == 0b01:
state.next |= Foo.A
with i == 0b10:
state.next |= Foo.B
sim = pyrtl.Simulation()
sim.step_multiple({
'i': [1, 2, 1, 2, 2]
})
htmlstring = pyrtl.trace_to_html(sim.tracer, repr_per_name={'state': Foo})
expected = (
'<script type="WaveDrom">\n'
'{\n'
' signal : [\n'
' { name: "i", wave: "====.", data: ["0x1", "0x2", "0x1", "0x2"] },\n'
' { name: "o", wave: "0.101" },\n'
' { name: "state", wave: "=.===", data: ["Foo.A", "Foo.B", "Foo.A", "Foo.B"] },\n'
' ],\n'
' config: { hscale: 2 }\n'
'}\n'
'</script>\n'
)
self.assertEqual(htmlstring, expected)
if __name__ == "__main__":
unittest.main()
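# Outside these tests, the emitted Graphviz source can be rendered with the
# standard Graphviz CLI (hedged sketch; file names are illustrative):
# with open('design.dot', 'w') as f:
# pyrtl.output_to_graphviz(f)
# then: dot -Tpng design.dot -o design.png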
|
c4ea8ab38ffc5a660e06893b91f83cfba67e8318
|
885d3e4017d96ed9fd56545d95ad63895e6dc01d
|
/rootpy/plotting/style/atlas/labels.py
|
8d946b83a94c4d03d3ff73e9058f8a5bc2e39a3a
|
[
"BSD-3-Clause",
"MPL-1.1"
] |
permissive
|
rootpy/rootpy
|
c3eb7f70d29e4779a0bda8356fb96922bb95537f
|
3926935e1f2100d8ba68070c2ab44055d4800f73
|
refs/heads/master
| 2021-01-17T04:08:51.330059
| 2019-01-05T17:05:50
| 2019-01-05T17:05:50
| 3,276,014
| 159
| 60
|
BSD-3-Clause
| 2019-12-08T12:35:08
| 2012-01-26T18:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
labels.py
|
from __future__ import absolute_import
from .... import ROOT
from ....context import preserve_current_canvas
from ....memory.keepalive import keepalive
__all__ = [
'ATLAS_label',
]
def ATLAS_label(x, y, text="Preliminary 20XX", sqrts=8,
pad=None,
expfont=73, labelfont=43,
textsize=20, sep=None):
if pad is None:
pad = ROOT.gPad
with preserve_current_canvas():
pad.cd()
l = ROOT.TLatex(x, y, "ATLAS")
#l.SetTextAlign(12)
#l.SetTextSize(tsize)
l.SetNDC()
l.SetTextFont(expfont)
l.SetTextSize(textsize)
l.SetTextColor(1)
l.Draw()
keepalive(pad, l)
if sep is None:
# guess
sep = 0.115 * 696 * pad.GetWh() / (472 * pad.GetWw())
if text is not None:
if sqrts is not None:
text = text + " #sqrt{{s}}={0:d}TeV".format(sqrts)
p = ROOT.TLatex(x + sep, y, text)
p.SetNDC()
p.SetTextFont(labelfont)
p.SetTextSize(textsize)
p.SetTextColor(1)
p.Draw()
keepalive(pad, p)
else:
p = None
pad.Modified()
pad.Update()
return l, p
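# Hedged usage sketch (assumes an active ROOT pad; coordinates and label text
# are illustrative):
# from rootpy.plotting.style.atlas.labels import ATLAS_label
# ATLAS_label(0.2, 0.88, text="Internal", sqrts=13)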
|
3eccdd5ac4b98f26678016f9ac0bdbe1adbc75ac
|
b25e0126f671c6ddca46a4329e388e14d99c2220
|
/iotdb-client/client-py/iotdb/tsfile/common/constant/TsFileConstant.py
|
0baad6a5fb8709c89061aa77679d22fc525832ff
|
[
"Apache-2.0",
"BSD-3-Clause",
"EPL-1.0",
"CDDL-1.1",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/iotdb
|
fd59940125c905537ffedad3b0963f7170ba67d9
|
d5450a1e5648699409e1c793035204989d78cfbb
|
refs/heads/master
| 2023-09-01T01:15:59.527322
| 2023-08-31T11:43:29
| 2023-08-31T11:43:29
| 158,975,124
| 2,882
| 893
|
Apache-2.0
| 2023-09-14T14:16:58
| 2018-11-24T21:29:17
|
Java
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
TsFileConstant.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class TsFileConstant:
TSFILE_SUFFIX = ".tsfile"
TSFILE_HOME = "TSFILE_HOME"
TSFILE_CONF = "TSFILE_CONF"
PATH_ROOT = "root"
TMP_SUFFIX = "tmp"
PATH_SEPARATOR = "."
PATH_SEPARATOR_CHAR = "."
PATH_SEPARATER_NO_REGEX = "\\."
DOUBLE_QUOTE = '"'
TIME_COLUMN_MASK = 0x80
VALUE_COLUMN_MASK = 0x40
def __ts_file_constant(self):
...
|
7ae72e02037057b98081de59185cbc9e3d7ec78b
|
264f392530710b287ac54f40ea805638c6348cc3
|
/google/scripts/run_onpolicy_saddle.py
|
9a2235019bc65e7a4182de4c452908f332edb8aa
|
[
"Apache-2.0"
] |
permissive
|
google-research/dice_rl
|
b26dd2231b0a664f11e0ede08d8209a4ace1cd2f
|
6551950608ad0472ddf6e8f4075f51793c9d2763
|
refs/heads/master
| 2023-08-06T21:35:15.690175
| 2023-01-30T19:26:12
| 2023-01-30T19:27:38
| 285,369,787
| 106
| 14
|
Apache-2.0
| 2023-01-30T19:27:44
| 2020-08-05T18:15:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
run_onpolicy_saddle.py
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.tree as tree
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.google.rl_algos.tabular_saddle_point import TabularSaddlePoint
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_agents_onpolicy_dataset import TFAgentsOnpolicyDataset
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'grid', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 1,
'Number of trajectories to collect at each iteration.')
flags.DEFINE_integer('max_trajectory_length', 20,
'Cutoff trajectory at this step.')
flags.DEFINE_bool('tabular_obs', True,
'Whether to use tabular observations.')
flags.DEFINE_float('gamma', 0.95,
'Discount factor.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate.')
flags.DEFINE_integer('num_steps', 100000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size.')
def get_onpolicy_dataset(env_name, tabular_obs, policy_fn, policy_info_spec):
"""Gets target policy."""
if env_name == 'taxi':
env = taxi.Taxi(tabular_obs=tabular_obs)
elif env_name == 'grid':
env = navigation.GridWalk(tabular_obs=tabular_obs)
elif env_name == 'tree':
env = tree.Tree(branching=2, depth=10)
else:
raise ValueError('Unknown environment: %s.' % env_name)
tf_env = tf_py_environment.TFPyEnvironment(
gym_wrapper.GymWrapper(env))
tf_policy = common_utils.TFAgentsWrappedPolicy(
tf_env.time_step_spec(), tf_env.action_spec(),
policy_fn, policy_info_spec,
emit_log_probability=True)
return TFAgentsOnpolicyDataset(tf_env, tf_policy)
def get_random_policy(env_name, tabular_obs):
if env_name == 'taxi':
env = taxi.Taxi(tabular_obs=tabular_obs)
policy_fn, policy_info_spec = taxi.get_taxi_policy(
env, env, alpha=0.0, py=False)
elif env_name == 'grid':
env = navigation.GridWalk(tabular_obs=tabular_obs)
policy_fn, policy_info_spec = navigation.get_navigation_policy(
env, epsilon_explore=1.0, py=False)
elif env_name == 'tree':
env = tree.Tree(branching=2, depth=10)
policy_fn, policy_info_spec = tree.get_tree_policy(
env, epsilon_explore=1.0, py=False)
else:
raise ValueError('Unknown environment: %s.' % env_name)
return policy_fn, policy_info_spec
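  # NOTE: the statements below are unreachable as written; they follow the
  # return above.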
tf_env = tf_py_environment.TFPyEnvironment(
gym_wrapper.GymWrapper(env))
tf_policy = common_utils.TFAgentsWrappedPolicy(
tf_env.time_step_spec(), tf_env.action_spec(),
policy_fn, policy_info_spec,
emit_log_probability=True)
return tf_policy, policy_info_spec
def add_episodes_to_dataset(episodes, valid_ids, write_dataset):
num_episodes = 1 if tf.rank(valid_ids) == 1 else tf.shape(valid_ids)[0]
for ep_id in range(num_episodes):
if tf.rank(valid_ids) == 1:
this_valid_ids = valid_ids
this_episode = episodes
else:
this_valid_ids = valid_ids[ep_id, ...]
this_episode = tf.nest.map_structure(
lambda t: t[ep_id, ...], episodes)
episode_length = tf.shape(this_valid_ids)[0]
for step_id in range(episode_length):
this_valid_id = this_valid_ids[step_id]
this_step = tf.nest.map_structure(
lambda t: t[step_id, ...], this_episode)
if this_valid_id:
write_dataset.add_step(this_step)
def main(argv):
env_name = FLAGS.env_name
seed = FLAGS.seed
tabular_obs = FLAGS.tabular_obs
num_trajectory = FLAGS.num_trajectory
max_trajectory_length = FLAGS.max_trajectory_length
gamma = FLAGS.gamma
assert 0 <= gamma < 1.
learning_rate = FLAGS.learning_rate
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
optimizer = tf.keras.optimizers.Adam(learning_rate)
init_policy_fn, init_policy_info_spec = get_random_policy(
env_name, tabular_obs)
onpolicy_data = get_onpolicy_dataset(
env_name, tabular_obs,
init_policy_fn, init_policy_info_spec)
onpolicy_episodes, valid_steps = onpolicy_data.get_episode(
num_trajectory * 100, truncate_episode_at=max_trajectory_length)
dataset = TFOffpolicyDataset(onpolicy_data.spec)
add_episodes_to_dataset(onpolicy_episodes, valid_steps, dataset)
algo = TabularSaddlePoint(
dataset.spec, optimizer,
gamma=gamma)
losses = []
for step in range(num_steps):
init_batch, _ = dataset.get_episode(batch_size, truncate_episode_at=1)
init_batch = tf.nest.map_structure(lambda t: t[:, 0, ...], init_batch)
batch = dataset.get_step(batch_size, num_steps=2)
loss, policy_loss = algo.train_step(init_batch, batch)
losses.append(loss)
if step % 100 == 0 or step == num_steps - 1:
print('step', step, 'loss', np.mean(losses, 0))
losses = []
policy_fn, policy_info_spec = algo.get_policy()
onpolicy_data = get_onpolicy_dataset(env_name, tabular_obs,
policy_fn, policy_info_spec)
onpolicy_episodes, valid_steps = onpolicy_data.get_episode(
num_trajectory, truncate_episode_at=max_trajectory_length)
add_episodes_to_dataset(onpolicy_episodes, valid_steps, dataset)
print('estimated per step avg', np.mean(onpolicy_episodes.reward))
print('Done!')
if __name__ == '__main__':
app.run(main)
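# Hedged example invocation (flag values are illustrative only):
# python run_onpolicy_saddle.py --env_name=grid --num_trajectory=5 \
# --max_trajectory_length=50 --num_steps=10000 --batch_size=256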
|
f06b3d31e51a098ab3dff64ba9c9c574246a4d38
|
cd99fe697ce43e30a64df9cc1df9470d1b0712ab
|
/forge/trinity/ann.py
|
1e47d436a372b18c7bf6d60fa6b5df2a03493bc5
|
[
"MIT"
] |
permissive
|
openai/neural-mmo
|
ecde7382aa06123b9951cd78712e5eb1497204aa
|
38fd0310bc784de9b86e5144d0e78f4d31005e6b
|
refs/heads/v1.0
| 2023-08-05T02:48:18.988444
| 2019-09-13T23:46:13
| 2019-09-13T23:46:13
| 173,154,826
| 1,607
| 203
|
MIT
| 2023-07-21T13:04:40
| 2019-02-28T17:17:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,664
|
py
|
ann.py
|
from pdb import set_trace as T
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions import Categorical
from forge.blade.action.tree import ActionTree
from forge.blade.action.v2 import ActionV2
from forge.blade.lib.enums import Neon
from forge.blade.lib import enums
from forge.ethyr import torch as torchlib
from forge.blade import entity
def classify(logits):
if len(logits.shape) == 1:
logits = logits.view(1, -1)
distribution = Categorical(1e-3+F.softmax(logits, dim=1))
atn = distribution.sample()
return atn
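#e.g. classify(torch.zeros(5)) samples uniformly over the five actions and
#returns a LongTensor of shape (1,) holding the chosen index.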
####### Network Modules
class ConstDiscrete(nn.Module):
def __init__(self, config, h, nattn):
super().__init__()
self.fc1 = torch.nn.Linear(h, nattn)
self.config = config
def forward(self, env, ent, action, stim):
leaves = action.args(env, ent, self.config)
x = self.fc1(stim)
xIdx = classify(x)
leaf = leaves[int(xIdx)]
return leaf, x, xIdx
class VariableDiscrete(nn.Module):
def __init__(self, config, xdim, h):
super().__init__()
self.attn = AttnCat(xdim, h)
self.config = config
#Arguments: stim, action/argument embedding
def forward(self, env, ent, action, key, vals):
leaves = action.args(env, ent, self.config)
x = self.attn(key, vals)
xIdx = classify(x)
leaf = leaves[int(xIdx)]
return leaf, x, xIdx
class AttnCat(nn.Module):
def __init__(self, xdim, h):
super().__init__()
#self.fc1 = torch.nn.Linear(xdim, h)
#self.fc2 = torch.nn.Linear(h, 1)
self.fc = torch.nn.Linear(xdim, 1)
self.h = h
def forward(self, x, args):
n = args.shape[0]
x = x.expand(n, self.h)
xargs = torch.cat((x, args), dim=1)
x = self.fc(xargs)
#x = F.relu(self.fc1(xargs))
#x = self.fc2(x)
return x.view(1, -1)
####### End network modules
class ValNet(nn.Module):
def __init__(self, config):
super().__init__()
self.fc = torch.nn.Linear(config.HIDDEN, 1)
self.envNet = Env(config)
def forward(self, conv, flat, ent):
stim = self.envNet(conv, flat, ent)
x = self.fc(stim)
x = x.view(1, -1)
return x
class Ent(nn.Module):
def __init__(self, entDim, h):
super().__init__()
self.ent = torch.nn.Linear(entDim, h)
def forward(self, ents):
ents = self.ent(ents)
ents, _ = torch.max(ents, 0)
return ents
class Env(nn.Module):
def __init__(self, config):
super().__init__()
h = config.HIDDEN
entDim = 11 # + 225
self.fc1 = torch.nn.Linear(3*h, h)
self.embed = torch.nn.Embedding(7, 7)
self.conv = torch.nn.Linear(1800, h)
self.flat = torch.nn.Linear(entDim, h)
self.ents = Ent(entDim, h)
def forward(self, conv, flat, ents):
tiles, nents = conv[0], conv[1]
nents = nents.view(-1)
tiles = self.embed(tiles.view(-1).long()).view(-1)
conv = torch.cat((tiles, nents))
conv = self.conv(conv)
ents = self.ents(ents)
flat = self.flat(flat)
x = torch.cat((conv, flat, ents)).view(1, -1)
x = self.fc1(x)
#Removed relu (easier training, lower policy cap)
#x = torch.nn.functional.relu(self.fc1(x))
return x
class MoveNet(nn.Module):
def __init__(self, config):
super().__init__()
self.moveNet = ConstDiscrete(config, config.HIDDEN, 5)
self.envNet = Env(config)
def forward(self, env, ent, action, s):
stim = self.envNet(s.conv, s.flat, s.ents)
action, arg, argIdx = self.moveNet(env, ent, action, stim)
return action, (arg, argIdx)
#Network that selects an attack style
class StyleAttackNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config, h = config, config.HIDDEN
self.h = h
self.envNet = Env(config)
self.targNet = ConstDiscrete(config, h, 3)
def target(self, ent, arguments):
if len(arguments) == 1:
return arguments[0]
arguments = [e for e in arguments if e.entID != ent.entID]
arguments = sorted(arguments, key=lambda a: a.health.val)
return arguments[0]
def forward(self, env, ent, action, s):
stim = self.envNet(s.conv, s.flat, s.ents)
action, atn, atnIdx = self.targNet(env, ent, action, stim)
#Hardcoded targeting
arguments = action.args(env, ent, self.config)
argument = self.target(ent, arguments)
attkOuts = [(atn, atnIdx)]
return action, [argument], attkOuts
#Network that selects an attack and target (In progress,
#for learned targeting experiments)
class AttackNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config, h = config, config.HIDDEN
entDim = 11
self.styleEmbed = torch.nn.Embedding(3, h)
self.targEmbed = Ent(entDim, h)
self.h = h
self.envNet = Env(config)
self.styleNet = ConstDiscrete(config, h, 3)
self.targNet = VariableDiscrete(config, 3*h, h)
def forward(self, env, ent, action, s):
stim = self.envNet(s.conv, s.flat, s.ents)
action, atn, atnIdx = self.styleNet(env, ent, action, stim)
#Embed targets
targets = action.args(env, ent, self.config)
targets = torch.tensor([e.stim for e in targets]).float()
targets = self.targEmbed(targets).unsqueeze(0)
nTargs = len(targets)
atns = self.styleEmbed(atnIdx).expand(nTargs, self.h)
vals = torch.cat((atns, targets), 1)
argument, arg, argIdx = self.targNet(
env, ent, action, stim, vals)
attkOuts = ((atn, atnIdx), (arg, argIdx))
return action, [argument], attkOuts
class ANN(nn.Module):
def __init__(self, config):
super().__init__()
self.valNet = ValNet(config)
self.config = config
self.moveNet = MoveNet(config)
self.attackNet = (StyleAttackNet(config) if
config.AUTO_TARGET else AttackNet(config))
def forward(self, ent, env):
s = torchlib.Stim(ent, env, self.config)
val = self.valNet(s.conv, s.flat, s.ents)
actions = ActionTree(env, ent, ActionV2).actions()
_, move, attk = actions
#Actions
moveArg, moveOuts = self.moveNet(
env, ent, move, s)
attk, attkArg, attkOuts = self.attackNet(
env, ent, attk, s)
action = (move, attk)
arguments = (moveArg, attkArg)
outs = (moveOuts, *attkOuts)
return action, arguments, outs, val
#Messy hooks for visualizers
def visDeps(self):
from forge.blade.core import realm
from forge.blade.core.tile import Tile
colorInd = int(12*np.random.rand())
color = Neon.color12()[colorInd]
color = (colorInd, color)
ent = realm.Desciple(-1, self.config, color).server
targ = realm.Desciple(-1, self.config, color).server
sz = 15
tiles = np.zeros((sz, sz), dtype=object)
for r in range(sz):
for c in range(sz):
tiles[r, c] = Tile(enums.Grass, r, c, 1, None)
targ.pos = (7, 7)
tiles[7, 7].addEnt(0, targ)
posList, vals = [], []
for r in range(sz):
for c in range(sz):
ent.pos = (r, c)
tiles[r, c].addEnt(1, ent)
s = torchlib.Stim(ent, tiles, self.config)
conv, flat, ents = s.conv, s.flat, s.ents
val = self.valNet(conv, s.flat, s.ents)
vals.append(float(val))
tiles[r, c].delEnt(1)
posList.append((r, c))
vals = list(zip(posList, vals))
return vals
def visVals(self, food='max', water='max'):
from forge.blade.core import realm
posList, vals = [], []
R, C = self.world.shape
for r in range(self.config.BORDER, R-self.config.BORDER):
for c in range(self.config.BORDER, C-self.config.BORDER):
colorInd = int(12*np.random.rand())
color = Neon.color12()[colorInd]
color = (colorInd, color)
ent = entity.Player(-1, color, self.config)
ent._pos = (r, c)
if food != 'max':
ent._food = food
if water != 'max':
ent._water = water
posList.append(ent.pos)
self.world.env.tiles[r, c].addEnt(ent.entID, ent)
stim = self.world.env.stim(ent.pos, self.config.STIM)
s = torchlib.Stim(ent, stim, self.config)
val = self.valNet(s.conv, s.flat, s.ents).detach()
self.world.env.tiles[r, c].delEnt(ent.entID)
vals.append(float(val))
vals = list(zip(posList, vals))
return vals
|
02f2057a8fa754124a4194df44cc3bbb20a63e3d
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/sdks/python/apache_beam/io/parquetio.py
|
734dfa0bfe8e0236d2d6d315e535dde4958a0a2f
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 26,641
|
py
|
parquetio.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""``PTransforms`` for reading from and writing to Parquet files.
Provides two read ``PTransform``\\s, ``ReadFromParquet`` and
``ReadAllFromParquet``, that produces a ``PCollection`` of records.
Each record of this ``PCollection`` will contain a single record read from
a Parquet file. Records that are of simple types will be mapped into
corresponding Python types. The actual parquet file operations are done by
pyarrow. Source splitting is supported at row group granularity.
Additionally, this module provides a write ``PTransform`` ``WriteToParquet``
that can be used to write a given ``PCollection`` of Python objects to a
Parquet file.
"""
# pytype: skip-file
from functools import partial
from packaging import version
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import RangeTracker
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import window
try:
import pyarrow as pa
import pyarrow.parquet as pq
except ImportError:
pa = None
pq = None
ARROW_MAJOR_VERSION = None
else:
base_pa_version = version.parse(pa.__version__).base_version
ARROW_MAJOR_VERSION, _, _ = map(int, base_pa_version.split('.'))
__all__ = [
'ReadFromParquet',
'ReadAllFromParquet',
'ReadFromParquetBatched',
'ReadAllFromParquetBatched',
'WriteToParquet',
'WriteToParquetBatched'
]
class _ArrowTableToRowDictionaries(DoFn):
""" A DoFn that consumes an Arrow table and yields a python dictionary for
each row in the table."""
def process(self, table, with_filename=False):
if with_filename:
file_name = table[0]
table = table[1]
num_rows = table.num_rows
data_items = table.to_pydict().items()
for n in range(num_rows):
row = {}
for column, values in data_items:
row[column] = values[n]
if with_filename:
yield (file_name, row)
else:
yield row
class _RowDictionariesToArrowTable(DoFn):
""" A DoFn that consumes python dictionarys and yields a pyarrow table."""
def __init__(
self,
schema,
row_group_buffer_size=64 * 1024 * 1024,
record_batch_size=1000):
self._schema = schema
self._row_group_buffer_size = row_group_buffer_size
self._buffer = [[] for _ in range(len(schema.names))]
self._buffer_size = record_batch_size
self._record_batches = []
self._record_batches_byte_size = 0
def process(self, row):
if len(self._buffer[0]) >= self._buffer_size:
self._flush_buffer()
if self._record_batches_byte_size >= self._row_group_buffer_size:
table = self._create_table()
yield table
# reorder the data in columnar format.
for i, n in enumerate(self._schema.names):
self._buffer[i].append(row[n])
def finish_bundle(self):
if len(self._buffer[0]) > 0:
self._flush_buffer()
if self._record_batches_byte_size > 0:
table = self._create_table()
yield window.GlobalWindows.windowed_value_at_end_of_window(table)
def display_data(self):
res = super().display_data()
res['row_group_buffer_size'] = str(self._row_group_buffer_size)
res['buffer_size'] = str(self._buffer_size)
return res
def _create_table(self):
table = pa.Table.from_batches(self._record_batches, schema=self._schema)
self._record_batches = []
self._record_batches_byte_size = 0
return table
def _flush_buffer(self):
arrays = [[] for _ in range(len(self._schema.names))]
for x, y in enumerate(self._buffer):
arrays[x] = pa.array(y, type=self._schema.types[x])
self._buffer[x] = []
rb = pa.RecordBatch.from_arrays(arrays, schema=self._schema)
self._record_batches.append(rb)
size = 0
for x in arrays:
for b in x.buffers():
if b is not None:
size = size + b.size
self._record_batches_byte_size = self._record_batches_byte_size + size
class ReadFromParquetBatched(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading
Parquet files as a `PCollection` of `pyarrow.Table`. This `PTransform` is
currently experimental. No backward-compatibility guarantees."""
def __init__(
self, file_pattern=None, min_bundle_size=0, validate=True, columns=None):
""" Initializes :class:`~ReadFromParquetBatched`
An alternative to :class:`~ReadFromParquet` that yields each row group from
the Parquet file as a `pyarrow.Table`. These Table instances can be
processed directly, or converted to a pandas DataFrame for processing. For
more information on supported types and schema, please see the pyarrow
documentation.
.. testcode::
with beam.Pipeline() as p:
dataframes = p \\
| 'Read' >> beam.io.ReadFromParquetBatched('/mypath/mypqfiles*') \\
| 'Convert to pandas' >> beam.Map(lambda table: table.to_pandas())
.. NOTE: We're not actually interested in this error; but if we get here,
it means that the way of calling this transform hasn't changed.
.. testoutput::
:hide:
Traceback (most recent call last):
...
OSError: No files found based on the file pattern
See also: :class:`~ReadFromParquet`.
Args:
file_pattern (str): the file glob to read
min_bundle_size (int): the minimum size in bytes, to be considered when
splitting the input into bundles.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
columns (List[str]): list of columns that will be read from files.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
"""
super().__init__()
self._source = _create_parquet_source(
file_pattern,
min_bundle_size,
validate=validate,
columns=columns,
)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
def display_data(self):
return {'source_dd': self._source}
class ReadFromParquet(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading
Parquet files as a `PCollection` of dictionaries. This `PTransform` is
currently experimental. No backward-compatibility guarantees."""
def __init__(
self, file_pattern=None, min_bundle_size=0, validate=True, columns=None):
"""Initializes :class:`ReadFromParquet`.
Uses source ``_ParquetSource`` to read a set of Parquet files defined by
a given file pattern.
If ``/mypath/myparquetfiles*`` is a file-pattern that points to a set of
Parquet files, a :class:`~apache_beam.pvalue.PCollection` for the records in
these Parquet files can be created in the following manner.
.. testcode::
with beam.Pipeline() as p:
records = p | 'Read' >> beam.io.ReadFromParquet('/mypath/mypqfiles*')
.. NOTE: We're not actually interested in this error; but if we get here,
it means that the way of calling this transform hasn't changed.
.. testoutput::
:hide:
Traceback (most recent call last):
...
OSError: No files found based on the file pattern
Each element of this :class:`~apache_beam.pvalue.PCollection` will contain
a Python dictionary representing a single record. The keys will be of type
:class:`str` and named after their corresponding column names. The values
will be of the type defined in the corresponding Parquet schema. Records
that are of simple types will be mapped into corresponding Python types.
Records that are of complex types like list and struct will be mapped to
Python list and dictionary respectively. For more information on supported
types and schema, please see the pyarrow documentation.
See also: :class:`~ReadFromParquetBatched`.
Args:
file_pattern (str): the file glob to read
min_bundle_size (int): the minimum size in bytes, to be considered when
splitting the input into bundles.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
columns (List[str]): list of columns that will be read from files.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
"""
super().__init__()
self._source = _create_parquet_source(
file_pattern,
min_bundle_size,
validate=validate,
columns=columns,
)
def expand(self, pvalue):
return pvalue | Read(self._source) | ParDo(_ArrowTableToRowDictionaries())
def display_data(self):
return {'source_dd': self._source}
class ReadAllFromParquetBatched(PTransform):
"""A ``PTransform`` for reading ``PCollection`` of Parquet files.
Uses source ``_ParquetSource`` to read a ``PCollection`` of Parquet files or
file patterns and produce a ``PCollection`` of ``pyarrow.Table``, one for
each Parquet file row group. This ``PTransform`` is currently experimental.
No backward-compatibility guarantees.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
columns=None,
with_filename=False,
label='ReadAllFiles'):
"""Initializes ``ReadAllFromParquet``.
Args:
min_bundle_size: the minimum size in bytes, to be considered when
splitting the input into bundles.
desired_bundle_size: the desired size in bytes, to be considered when
splitting the input into bundles.
columns: list of columns that will be read from files. A column name
may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
with_filename: If True, returns a Key Value with the key being the file
name and the value being the actual data. If False, it only returns
the data.
"""
super().__init__()
source_from_file = partial(
_create_parquet_source,
min_bundle_size=min_bundle_size,
columns=columns)
self._read_all_files = filebasedsource.ReadAllFiles(
True,
CompressionTypes.UNCOMPRESSED,
desired_bundle_size,
min_bundle_size,
source_from_file,
with_filename)
self.label = label
def expand(self, pvalue):
return pvalue | self.label >> self._read_all_files
class ReadAllFromParquet(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of Parquet files.
Like :class:`ReadAllFromParquetBatched`, but emits one Python dictionary per
record instead of one ``pyarrow.Table`` per row group. This ``PTransform`` is
currently experimental. No backward-compatibility guarantees.
"""
def __init__(self, with_filename=False, **kwargs):
self._with_filename = with_filename
self._read_batches = ReadAllFromParquetBatched(
with_filename=self._with_filename, **kwargs)
def expand(self, pvalue):
return pvalue | self._read_batches | ParDo(
_ArrowTableToRowDictionaries(), with_filename=self._with_filename)
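# --- Editor's example (not part of the upstream Beam source) ---
# A minimal sketch of reading file patterns that only become known at
# pipeline runtime with ``ReadAllFromParquet``. The paths are hypothetical
# placeholders.
def _example_read_all_from_parquet():
  import apache_beam as beam

  with beam.Pipeline() as p:
    _ = (
        p
        | 'Patterns' >> beam.Create(['/mypath/a*.parquet', '/mypath/b*.parquet'])
        | 'ReadEach' >> beam.io.ReadAllFromParquet()
        | 'Print' >> beam.Map(print))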
def _create_parquet_source(
file_pattern=None, min_bundle_size=0, validate=False, columns=None):
return \
_ParquetSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
validate=validate,
columns=columns,
)
class _ParquetUtils(object):
@staticmethod
def find_first_row_group_index(pf, start_offset):
for i in range(_ParquetUtils.get_number_of_row_groups(pf)):
row_group_start_offset = _ParquetUtils.get_offset(pf, i)
if row_group_start_offset >= start_offset:
return i
return -1
@staticmethod
def get_offset(pf, row_group_index):
first_column_metadata =\
pf.metadata.row_group(row_group_index).column(0)
if first_column_metadata.has_dictionary_page:
return first_column_metadata.dictionary_page_offset
else:
return first_column_metadata.data_page_offset
@staticmethod
def get_number_of_row_groups(pf):
return pf.metadata.num_row_groups
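# --- Editor's example (not part of the upstream Beam source) ---
# A small sketch, assuming a local file 'example.parquet' exists, of how the
# helpers above locate row-group boundaries: each row group is anchored at
# the offset of its first dictionary page (or first data page).
def _example_row_group_offsets():
  with open('example.parquet', 'rb') as f:
    pf = pq.ParquetFile(f)
    for i in range(_ParquetUtils.get_number_of_row_groups(pf)):
      print(i, _ParquetUtils.get_offset(pf, i))
    # find_first_row_group_index(pf, 0) then returns the index of the first
    # row group whose start offset is >= the requested byte offset.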
class _ParquetSource(filebasedsource.FileBasedSource):
"""A source for reading Parquet files.
"""
def __init__(self, file_pattern, min_bundle_size, validate, columns):
super().__init__(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
validate=validate)
self._columns = columns
def read_records(self, file_name, range_tracker):
next_block_start = -1
def split_points_unclaimed(stop_position):
if next_block_start >= stop_position:
# Next block starts at or after the suggested stop position. Hence
# there will not be split points to be claimed for the range ending at
# suggested stop position.
return 0
return RangeTracker.SPLIT_POINTS_UNKNOWN
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
start_offset = range_tracker.start_position()
if start_offset is None:
start_offset = 0
with self.open_file(file_name) as f:
pf = pq.ParquetFile(f)
# find the first dictionary page (or data page if there's no dictionary
# page available) offset after the given start_offset. This offset is also
# the starting offset of any row group since the Parquet specification
# requires that the data pages come before the metadata in
# each row group.
index = _ParquetUtils.find_first_row_group_index(pf, start_offset)
if index != -1:
next_block_start = _ParquetUtils.get_offset(pf, index)
else:
next_block_start = range_tracker.stop_position()
number_of_row_groups = _ParquetUtils.get_number_of_row_groups(pf)
while range_tracker.try_claim(next_block_start):
table = pf.read_row_group(index, self._columns)
if index + 1 < number_of_row_groups:
index = index + 1
next_block_start = _ParquetUtils.get_offset(pf, index)
else:
next_block_start = range_tracker.stop_position()
yield table
class WriteToParquet(PTransform):
"""A ``PTransform`` for writing parquet files.
This ``PTransform`` is currently experimental. No backward-compatibility
guarantees.
"""
def __init__(
self,
file_path_prefix,
schema,
row_group_buffer_size=64 * 1024 * 1024,
record_batch_size=1000,
codec='none',
use_deprecated_int96_timestamps=False,
use_compliant_nested_type=False,
file_name_suffix='',
num_shards=0,
shard_name_template=None,
mime_type='application/x-parquet'):
"""Initialize a WriteToParquet transform.
Writes parquet files from a :class:`~apache_beam.pvalue.PCollection` of
records. Each record is a dictionary with keys of a string type that
represent column names. Schema must be specified like the example below.
.. testsetup::
from tempfile import NamedTemporaryFile
import glob
import os
import pyarrow
filename = NamedTemporaryFile(delete=False).name
.. testcode::
with beam.Pipeline() as p:
records = p | 'Read' >> beam.Create(
[{'name': 'foo', 'age': 10}, {'name': 'bar', 'age': 20}]
)
_ = records | 'Write' >> beam.io.WriteToParquet(filename,
pyarrow.schema(
[('name', pyarrow.binary()), ('age', pyarrow.int64())]
)
)
.. testcleanup::
for output in glob.glob('{}*'.format(filename)):
os.remove(output)
For more information on supported types and schema, please see the pyarrow
documentation.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
schema: The schema to use, as type of ``pyarrow.Schema``.
row_group_buffer_size: The byte size of the row group buffer. Note that
this size is for uncompressed data in memory and is normally much
bigger than the actual row group size written to a file.
record_batch_size: The number of records in each record batch. A record
batch is the basic unit used for storing data in the row group buffer.
A higher record batch size implies coarser control over the row group
buffer size. To configure the row group size based on the number of
records, set ``row_group_buffer_size`` to 1 and use ``record_batch_size``
to adjust the value.
codec: The codec to use for block-level compression. Any string supported
by the pyarrow specification is accepted.
use_deprecated_int96_timestamps: Write nanosecond resolution timestamps to
INT96 Parquet format. Defaults to False.
use_compliant_nested_type: Write compliant Parquet nested type (lists).
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
mime_type: The MIME type to use for the produced files, if the filesystem
supports specifying MIME types.
Returns:
A WriteToParquet transform usable for writing.
"""
super().__init__()
self._schema = schema
self._row_group_buffer_size = row_group_buffer_size
self._record_batch_size = record_batch_size
self._sink = \
_create_parquet_sink(
file_path_prefix,
schema,
codec,
use_deprecated_int96_timestamps,
use_compliant_nested_type,
file_name_suffix,
num_shards,
shard_name_template,
mime_type
)
def expand(self, pcoll):
return pcoll | ParDo(
_RowDictionariesToArrowTable(
self._schema, self._row_group_buffer_size,
self._record_batch_size)) | Write(self._sink)
def display_data(self):
return {
'sink_dd': self._sink,
'row_group_buffer_size': str(self._row_group_buffer_size)
}
class WriteToParquetBatched(PTransform):
"""A ``PTransform`` for writing parquet files from a `PCollection` of
`pyarrow.Table`.
This ``PTransform`` is currently experimental. No backward-compatibility
guarantees.
"""
def __init__(
self,
file_path_prefix,
schema=None,
codec='none',
use_deprecated_int96_timestamps=False,
use_compliant_nested_type=False,
file_name_suffix='',
num_shards=0,
shard_name_template=None,
mime_type='application/x-parquet',
):
"""Initialize a WriteToParquetBatched transform.
Writes parquet files from a :class:`~apache_beam.pvalue.PCollection` of
records. Each record is a ``pyarrow.Table``. The schema must be specified
as in the example below.
.. testsetup:: batched
from tempfile import NamedTemporaryFile
import glob
import os
import pyarrow
filename = NamedTemporaryFile(delete=False).name
.. testcode:: batched
table = pyarrow.Table.from_pylist([{'name': 'foo', 'age': 10},
{'name': 'bar', 'age': 20}])
with beam.Pipeline() as p:
records = p | 'Read' >> beam.Create([table])
_ = records | 'Write' >> beam.io.WriteToParquetBatched(filename,
pyarrow.schema(
[('name', pyarrow.string()), ('age', pyarrow.int64())]
)
)
.. testcleanup:: batched
for output in glob.glob('{}*'.format(filename)):
os.remove(output)
For more information on supported types and schema, please see the pyarrow
documentation.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
schema: The schema to use, as type of ``pyarrow.Schema``.
codec: The codec to use for block-level compression. Any string supported
by the pyarrow specification is accepted.
use_deprecated_int96_timestamps: Write nanosecond resolution timestamps to
INT96 Parquet format. Defaults to False.
use_compliant_nested_type: Write compliant Parquet nested type (lists).
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
mime_type: The MIME type to use for the produced files, if the filesystem
supports specifying MIME types.
Returns:
A WriteToParquetBatched transform usable for writing.
"""
super().__init__()
self._sink = \
_create_parquet_sink(
file_path_prefix,
schema,
codec,
use_deprecated_int96_timestamps,
use_compliant_nested_type,
file_name_suffix,
num_shards,
shard_name_template,
mime_type
)
def expand(self, pcoll):
return pcoll | Write(self._sink)
def display_data(self):
return {'sink_dd': self._sink}
def _create_parquet_sink(
file_path_prefix,
schema,
codec,
use_deprecated_int96_timestamps,
use_compliant_nested_type,
file_name_suffix,
num_shards,
shard_name_template,
mime_type):
return \
_ParquetSink(
file_path_prefix,
schema,
codec,
use_deprecated_int96_timestamps,
use_compliant_nested_type,
file_name_suffix,
num_shards,
shard_name_template,
mime_type
)
class _ParquetSink(filebasedsink.FileBasedSink):
"""A sink for parquet files from batches."""
def __init__(
self,
file_path_prefix,
schema,
codec,
use_deprecated_int96_timestamps,
use_compliant_nested_type,
file_name_suffix,
num_shards,
shard_name_template,
mime_type):
super().__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=None,
mime_type=mime_type,
# Compression happens at the block level using the supplied codec, and
# not at the file level.
compression_type=CompressionTypes.UNCOMPRESSED)
self._schema = schema
self._codec = codec
if ARROW_MAJOR_VERSION == 1 and self._codec.lower() == "lz4":
raise ValueError(
"Due to ARROW-9424, writing with LZ4 compression is not supported in "
"pyarrow 1.x, please use a different pyarrow version or a different "
f"codec. Your pyarrow version: {pa.__version__}")
self._use_deprecated_int96_timestamps = use_deprecated_int96_timestamps
if use_compliant_nested_type and ARROW_MAJOR_VERSION < 4:
raise ValueError(
"With ARROW-11497, use_compliant_nested_type is only supported in "
"pyarrow version >= 4.x, please use a different pyarrow version. "
f"Your pyarrow version: {pa.__version__}")
self._use_compliant_nested_type = use_compliant_nested_type
self._file_handle = None
def open(self, temp_path):
self._file_handle = super().open(temp_path)
if ARROW_MAJOR_VERSION < 4:
return pq.ParquetWriter(
self._file_handle,
self._schema,
compression=self._codec,
use_deprecated_int96_timestamps=self._use_deprecated_int96_timestamps)
return pq.ParquetWriter(
self._file_handle,
self._schema,
compression=self._codec,
use_deprecated_int96_timestamps=self._use_deprecated_int96_timestamps,
use_compliant_nested_type=self._use_compliant_nested_type)
def write_record(self, writer, table: pa.Table):
writer.write_table(table)
def close(self, writer):
writer.close()
if self._file_handle:
self._file_handle.close()
self._file_handle = None
def display_data(self):
res = super().display_data()
res['codec'] = str(self._codec)
res['schema'] = str(self._schema)
return res
|
f0082253f8e7e41cf494f3e7860041a9a6f0cf61
|
ae07b42c9e0e5d0d06b42bdb9a13f62b6358bcb1
|
/src/result_parser.py
|
d4c413b09f2f002efdb65c190d08bc64203aa4cb
|
[
"CC-BY-4.0",
"LicenseRef-scancode-generic-cla",
"MIT",
"ODbL-1.0"
] |
permissive
|
microsoft/P.808
|
67c3b263e5339373de10386fdc5c39f2c2376ba3
|
c489ab4963946fa5adca5830c8785f82a2db4f2d
|
refs/heads/master
| 2023-06-30T00:59:33.335852
| 2023-04-17T11:50:04
| 2023-04-17T11:50:04
| 233,724,186
| 163
| 56
|
MIT
| 2023-04-17T11:50:05
| 2020-01-14T00:44:58
|
HTML
|
UTF-8
|
Python
| false
| false
| 42,339
|
py
|
result_parser.py
|
"""
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
@author: Babak Naderi
"""
import csv
import statistics
import math
import pandas as pd
import argparse
import os
import re
import numpy as np
import sys
from scipy import stats
from scipy.stats import spearmanr
import time
import itertools
import configparser as CP
import collections
max_found_per_file = -1
def outliers_modified_z_score(votes):
"""
remove outliers using the modified z-score and return the remaining votes
:param votes:
:return:
"""
threshold = 3.5
median_v = np.median(votes)
median_absolute_deviation_v = np.median([np.abs(v - median_v) for v in votes])
if median_absolute_deviation_v == 0:
median_absolute_deviation_v = sys.float_info.min
modified_z_scores = [np.abs(0.6745 * (v - median_v) / median_absolute_deviation_v)
for v in votes]
x = np.array(modified_z_scores)
v = np.array(votes)
v = v[(x < threshold)]
return v.tolist()
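# --- Editor's example (not part of the original source) ---
# A hand-checked sketch of the filter above: for votes [1, 2, 2, 2, 3, 50]
# the median is 2 and the MAD is 0.5, so the score for 50 is
# 0.6745 * 48 / 0.5 ~ 64.8 > 3.5 and only that vote is dropped.
def _example_modified_z_score():
    assert outliers_modified_z_score([1, 2, 2, 2, 3, 50]) == [1, 2, 2, 2, 3]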
def outliers_z_score(votes):
"""
remove outliers using the z-score and return the remaining votes
:param votes:
:return:
"""
if len(votes) == 0:
return votes
threshold = 3.29
z = np.abs(stats.zscore(votes))
x = np.array(z)
v = np.array(votes)
v = v[x < threshold]
return v.tolist()
def check_if_session_accepted(data):
"""
Check if the session can be accepted given the criteria in the config and the calculations
:param data:
:return:
"""
msg = "Make sure you follow the instruction:"
accept = True
if data['all_audio_played'] != int(config['acceptance_criteria']['all_audio_played_equal']):
accept = False
msg += "All clips should be played until the end;"
if data['correct_math'] is not None and data['correct_math'] < \
int(config['acceptance_criteria']['correct_math_bigger_equal']):
accept = False
msg += "Gold or trapping clips question are answered wrongly;"
if data['correct_tps'] < int(config['acceptance_criteria']['correct_tps_bigger_equal']):
accept = False
msg += "Gold or trapping clips question are answered wrongly;"
if data['qualification'] is not None and data['qualification'] != 1:
accept = False
msg += "Qualification (bandwidth check) is not passed;"
if not accept:
data['Reject'] = msg
else:
data['Reject'] = ""
return accept
def check_if_session_should_be_used(data):
if data['accept'] != 1:
return False, []
should_be_used = True
failures = []
if data['variance_in_ratings'] < float(config['accept_and_use']['variance_bigger_equal']):
should_be_used = False
failures.append('variance')
if 'correct_gold_question' in data and data['correct_gold_question'] < int(config['accept_and_use']['gold_standard_bigger_equal']):
should_be_used = False
failures.append('gold')
if data['correct_cmps'] is not None and data['correct_cmps'] < int(config['accept_and_use']['correct_cmp_bigger_equal']):
should_be_used = False
failures.append('comparisons')
return should_be_used, failures
def check_audio_played(row, method):
"""
check if the audio of every question was played until the end
:param row:
:param method: acr, dcr, or ccr
:return:
"""
question_played = 0
try:
if method == 'acr':
for q_name in question_names:
if int(row[f'answer.audio_n_finish_{q_name}']) > 0:
question_played += 1
elif method in ['p835', 'echo_impairment_test']:
for q_name in question_names:
if int(row[f'answer.audio_n_finish_{q_name}{question_name_suffix}_audio']) > 0:
question_played += 1
elif method == "ccr":
for q_name in question_names:
if int(row[f'answer.audio_n_finish_q_{q_name[1:]}']) > 0:
question_played += 1
else:
for q_name in question_names:
if int(row[f'answer.audio_n_finish_q_a{q_name[1:]}']) > 0 and int(row[f'answer.audio_n_finish_q_b{q_name[1:]}']) > 0:
question_played += 1
except:
return False
return question_played == len(question_names)
def check_tps(row, method):
"""
Check if the trapping clips questions are answered correctly
:param row:
:param method: acr, dcr, or ccr
:return:
"""
correct_tps = 0
tp_url = row[config['trapping']['url_found_in']]
if method in ['acr', 'p835', 'echo_impairment_test']:
tp_correct_ans = [int(float(row[config['trapping']['ans_found_in']]))]
elif method == "dcr":
tp_correct_ans = [4, 5]
elif method == "ccr":
tp_correct_ans = [-1, 0, 1]
else:
return -1
try:
suffix = ''
if method == 'p835':
# only look at the ovrl for tps.
suffix = "_ovrl"
if method == 'echo_impairment_test':
suffix = '_echo'
for q_name in question_names:
if tp_url in row[f'answer.{q_name}_url']:
# found a trapping clips question
if int(row[f'answer.{q_name}{suffix}']) in tp_correct_ans:
correct_tps = 1
return correct_tps
except:
pass
return correct_tps
def check_variance(row):
"""
Check the variance of the ratings in the session (i.e., whether the worker just clicked the same value)
:param row:
:return:
"""
r = []
for q_name in question_names:
if 'gold_question' in config and row[config['gold_question']['url_found_in']] in row[f'answer.{q_name}_url']:
continue
if row[config['trapping']['url_found_in']] in row[f'answer.{q_name}_url']:
continue
try:
r.append(int(row[f'answer.{q_name}{question_name_suffix}']))
except:
pass
try:
v = statistics.variance(r)
return v
except:
pass
return -1
def check_gold_question(method, row):
"""
Check if the gold_question is answered correctly
:param row:
:return:
"""
correct_gq = 0
try:
gq_url = row[config['gold_question']['url_found_in']]
# gq_correct_ans = int(float(row[config['gold_question']['ans_found_in']]))
# tp_correct_ans = int(float(row[config['trapping']['ans_found_in']]))
gq_correct_ans = -1
# check if it is a hardcoded correct answer or a dynamic one
if config.has_option('gold_question', 'correct_ans'):
gq_correct_ans = int(config['gold_question']['correct_ans'])
elif config.has_option('gold_question', 'ans_found_in'):
gq_correct_ans = int(float(row[config['gold_question']['ans_found_in']]))
else:
return -1
gq_var = int(config['gold_question']['variance'])
suffix = ''
if method == 'p835':
# only look at the ovrl scale for the gold question.
suffix = "_ovrl"
if method == 'echo_impairment_test':
suffix = '_echo'
for q_name in question_names:
if gq_url in row[f'answer.{q_name}_url']:
# found a gold standard question
if int(row[f'answer.{q_name}{suffix}']) in range(gq_correct_ans-gq_var, gq_correct_ans+gq_var+1):
correct_gq = 1
return correct_gq
except:
return None
return correct_gq
def digitsum(x):
"""
sum of the digits in a string
:param x:
:return:
"""
total = 0
for letter in str(x):
total += int(letter)
return total
def check_math(input, output, audio_played):
"""
check if the math question is answered correctly
:param input:
:param output:
:param audio_played:
:return:
"""
if audio_played == 0:
return False
keys = list(config['math'].keys())
ans = int(float(output))
# it could be the case that the participant typed in the 2 or 3 numbers that they heard rather than their sum.
if ans > 9:
ans = digitsum(ans)
try:
for key in keys:
if key in input and int(config['math'][key]) == ans:
return True
except:
return False
return False
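# --- Editor's example (not part of the original source) ---
# A sketch of the intended behavior, assuming ``config`` has been loaded and
# contains a hypothetical math entry mapping a clip name to its correct sum.
def _example_check_math():
    config['math'] = {'sum_3_5': '8'}  # hypothetical config entry
    assert check_math('sum_3_5.wav', '8', audio_played=1)
    # typing the heard digits (35) instead of their sum is also accepted,
    # because digitsum(35) == 8:
    assert check_math('sum_3_5.wav', '35', audio_played=1)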
def check_qualification_answer(row):
checked = True
# TODO hearing test - correct answer should be added in the inputs; an update in the master script is needed
# check bandwidth control
if "answer.comb_bw1" not in row:
return checked, ''
bw_v2_test_data = {"comb_bw1": 'dq', "comb_bw2": 'dq', "comb_bw3": 'dq', "comb_bw4": 'sq', "comb_bw5": 'sq'}
bw_messages = {"comb_bw1": 'BW TP failed', "comb_bw2": 'SWB failed', "comb_bw3": 'FB failed', "comb_bw4": 'BW TP failed', "comb_bw5": 'BW TP failed'}
ans_array = [0, 0, 0, 0, 0]
msg = ''
for i in range(1, 6):
if row[f'answer.comb_bw{i}'] != bw_v2_test_data[f'comb_bw{i}']:
msg += bw_messages[f'comb_bw{i}'] + ', '
ans_array[i-1] = 0
else:
ans_array[i-1] = 1
bw_min = config['acceptance_criteria']['bw_min'].upper()
bw_max = config['acceptance_criteria']['bw_max'].upper()
if ans_array[0] + ans_array[3] + ans_array[4] != 3:
# failed the trapping or obvious questions
return False, msg
if (bw_min == 'SWB' and ans_array[1] != 1) or (bw_min == 'FB' and ans_array[1]+ans_array[2] != 2):
return False, msg
if (bw_max == 'NB-WB' and ans_array[1]+ans_array[2] != 0) or (bw_max == 'SWB' and ans_array[2] != 0):
return False, msg
return checked, msg
def check_a_cmp(file_a, file_b, ans, audio_a_played, audio_b_played):
"""
check if the pair comparison was answered correctly
:param file_a:
:param file_b:
:param ans:
:param audio_a_played:
:param audio_b_played:
:return:
"""
if (audio_a_played == 0 or
audio_b_played == 0):
return False
a = int((file_a.rsplit('/', 1)[-1])[:2])
b = int((file_b.rsplit('/', 1)[-1])[:2])
# one is 50 and one is 42, the one with bigger number (higher SNR) has to have a better quality
answer_is_correct = False
if a > b and ans.strip() == 'a':
answer_is_correct = True
elif b > a and ans.strip() == 'b':
answer_is_correct = True
elif a == b and ans.strip() == 'o':
answer_is_correct = True
return answer_is_correct
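# --- Editor's example (not part of the original source) ---
# The first two characters of each file name encode the SNR, so in this
# hypothetical pair the higher-SNR clip 'a' is the correct choice.
def _example_check_a_cmp():
    assert check_a_cmp('clips/50_ref.wav', 'clips/42_deg.wav', 'a', 1, 1)
    assert not check_a_cmp('clips/50_ref.wav', 'clips/42_deg.wav', 'b', 1, 1)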
# p835
def data_cleaning(filename, method):
"""
Data screening process
:param filename:
:param method: acr, dcr, or ccr
:return:
"""
print('Start by Data Cleaning...')
with open(filename, encoding="utf8") as csvfile:
reader = csv.DictReader(csvfile)
# lowercase the fieldnames
reader.fieldnames = [field.strip().lower() for field in reader.fieldnames]
# ----------- pair comparison
# Input.CMP1_A Input.CMP1_B Input.CMP2_A Input.CMP2_B Input.CMP3_A Input.CMP3_B Input.CMP4_A Input.CMP4_B
# Answer.cmp1 Answer.cmp2 Answer.cmp3 Answer.cmp4
# Answer.audio_n_play_CMP1_A Answer.audio_n_play_CMP1_B Answer.audio_n_play_CMP2_A Answer.audio_n_play_CMP2_B
# Answer.audio_n_play_CMP3_A Answer.audio_n_play_CMP3_B Answer.audio_n_play_CMP4_A Answer.audio_n_play_CMP4_B
# WorkerId
# ---------------- math
# Input.math, Answer.Math,Answer.audio_n_play_math1
worker_list = []
use_sessions = []
count_sig_bak = 0
not_using_further_reasons = []
for row in reader:
correct_cmp_ans = 0
qualification_was_hidden = (row['answer.8_hearing'] is None) or len(row['answer.8_hearing'].strip()) == 0
setup_was_hidden = row['answer.cmp1'] is None or len(row['answer.cmp1'].strip()) == 0
d = dict()
d['worker_id'] = row['workerid']
d['HITId'] = row['hitid']
d['assignment'] = row['assignmentid']
d['status'] = row['assignmentstatus']
# step1. check if audio of all X questions are played at least once
d['all_audio_played'] = 1 if check_audio_played(row, method) else 0
# check qualification if it was shown
if not qualification_was_hidden:
check_qualification, msg = check_qualification_answer(row)
d['qualification'] = 1 if check_qualification else 0
d['qualification_msg'] = msg
else:
d['qualification'] = None
d['qualification_msg'] = 'no qualification was shown'
# check if setup was shown
if setup_was_hidden:
# the setup is not shown
d['correct_cmps'] = None
d['correct_math'] = None
else:
# step2. check math
d['correct_math'] = 1 if check_math(row['input.math'], row['answer.math'],
row['answer.audio_n_play_math1']) else 0
# step3. check pair comparision
for i in range(1, 5):
if check_a_cmp(row[f'input.cmp{i}_a'], row[f'input.cmp{i}_b'], row[f'answer.cmp{i}'],
row[f'answer.audio_n_play_cmp{i}_a'],
row[f'answer.audio_n_play_cmp{i}_b']):
correct_cmp_ans += 1
d['correct_cmps'] = correct_cmp_ans
# step 4. check tps
d['correct_tps'] = check_tps(row, method)
# step5. check gold_standard, just for acr
if method in ['acr','p835', 'echo_impairment_test']:
d['correct_gold_question'] = check_gold_question(method, row)
# step6. check variance in a session rating
d['variance_in_ratings'] = check_variance(row)
if check_if_session_accepted(d):
d['accept'] = 1
d['Approve'] = 'x'
else:
d['accept'] = 0
d['Approve'] = ''
should_be_used, failures = check_if_session_should_be_used(d)
not_using_further_reasons.extend(failures)
if should_be_used:
d['accept_and_use'] = 1
use_sessions.append(row)
if method == 'p835' and row['answer.p835_order'] == 'sig_bak':
count_sig_bak += 1
else:
d['accept_and_use'] = 0
worker_list.append(d)
report_file = os.path.splitext(filename)[0] + '_data_cleaning_report.csv'
approved_file = os.path.splitext(filename)[0] + '_accept.csv'
rejected_file = os.path.splitext(filename)[0] + '_rejection.csv'
accept_reject_gui_file = os.path.splitext(filename)[0] + '_accept_reject_gui.csv'
extending_hits_file = os.path.splitext(filename)[0] + '_extending.csv'
# reject hits when the user performed more than the limit
worker_list = evaluate_maximum_hits(worker_list)
accept_and_use_sessions = [d for d in worker_list if d['accept_and_use'] == 1]
write_dict_as_csv(worker_list, report_file)
save_approved_ones(worker_list, approved_file)
save_rejected_ones(worker_list, rejected_file)
save_approve_rejected_ones_for_gui(worker_list, accept_reject_gui_file)
save_hits_to_be_extended(worker_list, extending_hits_file)
print(f" {len(accept_and_use_sessions)} answers are good to be used further {list(collections.Counter(not_using_further_reasons).items())}")
print(f" Data cleaning report is saved in: {report_file}")
if method == 'p835':
print(f" percentage of 'sig_bak': {round(count_sig_bak/len(accept_and_use_sessions),4)*100} %")
return worker_list, use_sessions
#p835
def evaluate_maximum_hits(data):
df = pd.DataFrame(data)
small_df = df[['worker_id']].copy()
grouped = small_df.groupby(['worker_id']).size().reset_index(name='counts')
grouped = grouped[grouped.counts > int(config['acceptance_criteria']['allowedMaxHITsInProject'])]
# grouped.to_csv('out.csv')
print(f"{len(grouped.index)} workers answered more than the allowedMaxHITsInProject"
f"(>{config['acceptance_criteria']['allowedMaxHITsInProject']})")
cheater_workers_list = list(grouped['worker_id'])
cheater_workers_work_count = dict.fromkeys(cheater_workers_list, 0)
result = []
for d in data:
if d['worker_id'] in cheater_workers_work_count:
if cheater_workers_work_count[d['worker_id']] >= int(config['acceptance_criteria']['allowedMaxHITsInProject']):
d['accept'] = 0
d['Reject'] += f"More than allowed limit of {config['acceptance_criteria']['allowedMaxHITsInProject']}"
d['accept_and_use'] = 0
d['Approve'] = ''
else:
cheater_workers_work_count[d['worker_id']] += 1
result.append(d)
return result
def save_approve_rejected_ones_for_gui(data, path):
"""
save approved/rejected in file t be used in GUI
:param data:
:param path:
:return:
"""
df = pd.DataFrame(data)
df = df[df.status == 'Submitted']
small_df = df[['assignment', 'HITId', 'Approve', 'Reject']].copy()
small_df.rename(columns={'assignment': 'assignmentId'}, inplace=True)
small_df.to_csv(path, index=False)
def save_approved_ones(data, path):
"""
save approved results in the given path
:param data:
:param path:
:return:
"""
df = pd.DataFrame(data)
df = df[df.accept == 1]
c_accepted = df.shape[0]
df = df[df.status == 'Submitted']
if df.shape[0] == c_accepted:
print(f' {c_accepted} answers are accepted')
else:
print(f' overall {c_accepted} answers are accepted, from them {df.shape[0]} were in submitted status')
small_df = df[['assignment']].copy()
small_df.rename(columns={'assignment': 'assignmentId'}, inplace=True)
small_df.to_csv(path, index=False)
def save_rejected_ones(data, path):
"""
Save the rejected ones in the path
:param data:
:param path:
:return:
"""
df = pd.DataFrame(data)
df = df[df.accept == 0]
c_rejected = df.shape[0]
df = df[df.status == 'Submitted']
if df.shape[0] == c_rejected:
print(f' {c_rejected} answers are rejected')
else:
print(f' overall {c_rejected} answers are rejected, from them {df.shape[0]} were in submitted status')
small_df = df[['assignment']].copy()
small_df.rename(columns={'assignment': 'assignmentId'}, inplace=True)
small_df = small_df.assign(feedback= config['acceptance_criteria']['rejection_feedback'])
small_df.to_csv(path, index=False)
def save_hits_to_be_extended(data, path):
"""
Save the list of HITs that are accepted but not to be used. The list can be used to extend those hits
:param data:
:param path:
:return:
"""
df = pd.DataFrame(data)
df = df[(df.accept == 1) & (df.accept_and_use == 0)]
small_df = df[['HITId']].copy()
grouped = small_df.groupby(['HITId']).size().reset_index(name='counts')
grouped.rename(columns={'counts': 'n_extended_assignments'}, inplace=True)
grouped.to_csv(path, index=False)
def filter_answer_by_status_and_workers(answer_df, all_time_worker_id_in, new_worker_id_in, status_in):
"""
return the answers from the given workers, filtered by assignment status
:param answer_df:
:param all_time_worker_id_in:
:param new_worker_id_in:
:param status_in:
:return:
"""
frames = []
if 'all' in status_in:
# new_worker_id_in.extend(old_worker_id_in)
answer_df = answer_df[answer_df['worker_id'].isin(all_time_worker_id_in)]
return answer_df
if 'submitted' in status_in:
d1 = answer_df[answer_df['status'] == "Submitted"]
d1 = d1[d1['worker_id'].isin(all_time_worker_id_in)]
frames.append(d1)
d2 = answer_df[answer_df['status'] != "Submitted"]
d2 = d2[d2['worker_id'].isin(new_worker_id_in)]
frames.append(d2)
return pd.concat(frames)
#p835
def calc_quantity_bonuses(answer_list, conf, path):
"""
Calculate the quantity bonuses given the configurations
:param answer_list:
:param conf:
:param path:
:return:
"""
if path is not None:
print('Calculate the quantity bonuses...')
df = pd.DataFrame(answer_list)
old_answers = df[df['status'] != "Submitted"]
grouped = df.groupby(['worker_id'], as_index=False)['accept'].sum()
old_answers_grouped = old_answers.groupby(['worker_id'], as_index=False)['accept'].sum()
# condition: more than the configured minimum number of accepted HITs
grouped = grouped[grouped.accept >= int(config['bonus']['quantity_hits_more_than'])]
old_answers_grouped = old_answers_grouped[old_answers_grouped.accept >= int(config['bonus']['quantity_hits_more_than'])]
old_eligible = list(old_answers_grouped['worker_id'])
eligible_all = list(grouped['worker_id'])
new_eligible = list(set(eligible_all)-set(old_eligible))
# the bonus should be given to the tasks that are either automatically accepted or submitted. The ones with status
# accepted should have already been paid.
filtered_answers = filter_answer_by_status_and_workers(df, eligible_all, new_eligible, conf)
# could be also accept_and_use
grouped = filtered_answers.groupby(['worker_id'], as_index=False)['accept'].sum()
grouped['bonusAmount'] = grouped['accept']*float(config['bonus']['quantity_bonus'])
# now find an assignment id
df.drop_duplicates('worker_id', keep='first', inplace=True)
w_ids = list(dict(grouped['worker_id']).values())
df = df[df.isin(w_ids).worker_id]
small_df = df[['worker_id', 'assignment']].copy()
merged = pd.merge(grouped, small_df, how='inner', left_on='worker_id', right_on='worker_id')
merged.rename(columns={'worker_id': 'workerId', 'assignment': 'assignmentId'}, inplace=True)
merged['reason'] = f'Well done! More than {config["bonus"]["quantity_hits_more_than"]} accepted submissions.'
merged = merged.round({'bonusAmount': 2})
if path is not None:
merged.to_csv(path, index=False)
print(f' Quantity bonuses report is saved in: {path}')
return merged
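# --- Editor's note (not part of the original source) ---
# A minimal sketch of the quantity-bonus arithmetic above, with hypothetical
# config values: quantity_hits_more_than = 30, quantity_bonus = 0.02.
def _example_quantity_bonus_amount():
    accepted_submissions = 40  # worker is over the 30-HIT threshold
    quantity_bonus = 0.02      # hypothetical per-answer bonus
    assert round(accepted_submissions * quantity_bonus, 2) == 0.8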
#p835
def calc_quality_bonuses(quantity_bonus_result, answer_list, overall_mos, conf, path, n_workers, test_method, use_condition_level):
"""
Calculate the bonuses given the configurations
:param quantity_bonus_result:
:param answer_list:
:param overall_mos:
:param conf:
:param path:
:param test_method:
:param use_condition_level: if true the condition level aggregation will be used otherwise file level
:return:
"""
print('Calculate the quality bonuses...')
mos_name = method_to_mos[f"{test_method}{question_name_suffix}"]
eligible_list = []
df = pd.DataFrame(answer_list)
tmp = pd.DataFrame(overall_mos)
if use_condition_level:
aggregate_on = 'condition_name'
else:
aggregate_on = 'file_url'
c_df = tmp[[aggregate_on, mos_name]].copy()
c_df.rename(columns={mos_name: 'mos'}, inplace=True)
candidates = quantity_bonus_result['workerId'].tolist()
max_workers = int(n_workers * int(conf['bonus']['quality_top_percentage']) / 100)
for worker in candidates:
# select answers
worker_answers = df[df['workerid'] == worker]
votes_p_file, votes_per_condition, _ = transform(test_method, worker_answers.to_dict('records'),
use_condition_level, True)
if use_condition_level:
aggregated_data = pd.DataFrame(votes_per_condition)
else:
aggregated_data = pd.DataFrame(votes_p_file)
if len(aggregated_data) > 0:
merged = pd.merge(aggregated_data, c_df, how='inner', left_on=aggregate_on, right_on=aggregate_on)
r = calc_correlation(merged["mos"].tolist(), merged[mos_name].tolist())
else:
r = 0
entry = {'workerId': worker, 'r': r}
eligible_list.append(entry)
if len(eligible_list) > 0:
eligible_df = pd.DataFrame(eligible_list)
eligible_df = eligible_df[eligible_df['r'] >= float(conf['bonus']['quality_min_pcc'])]
eligible_df = eligible_df.sort_values(by=['r'], ascending=False)
merged = pd.merge(eligible_df, quantity_bonus_result, how='inner', left_on='workerId', right_on='workerId')
smaller_df = merged[['workerId', 'r', 'accept', 'assignmentId']].copy()
smaller_df['bonusAmount'] = smaller_df['accept'] * float(config['bonus']['quality_bonus'])
smaller_df = smaller_df.round({'bonusAmount': 2})
smaller_df['reason'] = f'Well done! You belong to the top {conf["bonus"]["quality_top_percentage"]}%.'
else:
smaller_df = pd.DataFrame(columns=['workerId', 'r', 'accept', 'assignmentId', 'bonusAmount', 'reason'])
smaller_df.head(max_workers).to_csv(path, index=False)
print(f' Quality bonuses report is saved in: {path}')
def write_dict_as_csv(dic_to_write, file_name, *args, **kwargs):
"""
write the dict object in a file
:param dic_to_write:
:param file_name:
:return:
"""
headers = kwargs.get('headers', None)
with open(file_name, 'w', newline='') as output_file:
if headers is None:
if len(dic_to_write) > 0:
headers = list(dic_to_write[0].keys())
else:
headers = []
writer = csv.DictWriter(output_file, fieldnames=headers)
writer.writeheader()
for d in dic_to_write:
writer.writerow(d)
file_to_condition_map = {}
def conv_filename_to_condition(f_name):
"""
extract the condition name from filename given the mask in the config
:param f_name:
:return:
"""
if f_name in file_to_condition_map:
return file_to_condition_map[f_name]
file_to_condition_map[f_name] = {'Unknown': 'NoCondition' }
pattern = ''
if config.has_option('general','condition_pattern'):
pattern = config['general']['condition_pattern']
m = re.match(pattern, f_name)
if m:
file_to_condition_map[f_name] = m.groupdict('')
return file_to_condition_map[f_name]
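# --- Editor's example (not part of the original source) ---
# A sketch with a hypothetical "condition_pattern": the named groups of the
# regex become the condition keys returned by conv_filename_to_condition.
def _example_condition_pattern():
    pattern = r'.*_c(?P<condition_num>\d+)_.*\.wav'  # hypothetical mask
    m = re.match(pattern, 'clip_c07_talker3.wav')
    assert m.groupdict('') == {'condition_num': '07'}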
def dict_value_to_key(d, value):
for k, v in d.items():
if v == value:
return k
return None
method_to_mos = {
"acr": 'MOS',
"ccr": 'CMOS',
"dcr": 'DMOS',
"p835_bak": 'MOS_BAK',
"p835_sig": 'MOS_SIG',
"p835_ovrl": 'MOS_OVRL',
"echo_impairment_test_echo": 'MOS_ECHO',
"echo_impairment_test_other": 'MOS_OTHER'
}
p835_columns = ['condition_name', 'n', 'MOS_BAK', 'MOS_SIG', 'MOS_OVRL', 'std_bak', 'std_sig', 'std_ovrl',
'95%CI_bak', '95%CI_sig', '95%CI_ovrl']
echo_impairment_test_columns = ['condition_name', 'n', 'MOS_ECHO', 'MOS_OTHER', 'std_echo', 'std_other',
'95%CI_echo', '95%CI_other']
question_names = []
question_name_suffix = ''
p835_suffixes = ['_bak', '_sig', '_ovrl']
echo_impairment_test_suffixes = ['_echo', '_other']
create_per_worker = True
def transform(test_method, sessions, aggregate_on_condition, is_worker_specific):
"""
Given the valid sessions from answer.csv, group votes per file and per condition.
Assumption: the file name contains the condition name/number, which can be extracted using "condition_pattern".
:param sessions:
:return:
"""
data_per_file = {}
global max_found_per_file
global file_to_condition_map
file_to_condition_map = {}
data_per_condition = {}
data_per_worker = []
mos_name = method_to_mos[f"{test_method}{question_name_suffix}"]
for session in sessions:
found_gold_question = False
for question in question_names:
# is it a trapping clips question
if session[config['trapping']['url_found_in']] == session[f'answer.{question}_url']:
continue
# is it a gold clips
if test_method in ['acr', 'p835', 'echo_impairment_test'] and not found_gold_question and\
session[config['gold_question']['url_found_in']] == session[f'answer.{question}_url']:
found_gold_question = True
continue
short_file_name = session[f'answer.{question}_url'].rsplit('/', 1)[-1]
file_name = session[f'answer.{question}_url']
if file_name not in data_per_file:
data_per_file[file_name] = []
votes = data_per_file[file_name]
try:
votes.append(int(session[f'answer.{question}{question_name_suffix}']))
cond = conv_filename_to_condition(file_name)
tmp = {'HITId': session['hitid'],
'workerid': session['workerid'],
'file': file_name,
'short_file_name': file_name.rsplit('/', 1)[-1],
'vote': int(session[f'answer.{question}{question_name_suffix}'])}
tmp.update(cond)
data_per_worker.append(tmp)
except Exception as err:
print(err)
pass
# convert the format: one row per file
group_per_file = []
condition_detail = {}
for key in data_per_file.keys():
tmp = dict()
votes = data_per_file[key]
vote_counter = 1
# extra step: add votes to the per-condition dict
tmp_n = conv_filename_to_condition(key)
if aggregate_on_condition:
condition_keys = config['general']['condition_keys'].split(',')
condition_keys.append('Unknown')
condition_dict = {k: tmp_n[k] for k in tmp_n.keys() & condition_keys}
tmp = condition_dict.copy()
condition_dict = collections.OrderedDict(sorted(condition_dict.items()))
for num_combinations in range(len(condition_dict)):
combinations = list(itertools.combinations(condition_dict.values(), num_combinations + 1))
for combination in combinations:
condition = '____'.join(combination).strip('_')
if condition not in data_per_condition:
data_per_condition[condition] = []
pattern_dic = {dict_value_to_key(condition_dict, v): v for v in combination}
for k in condition_dict.keys():
if k not in pattern_dic:
pattern_dic[k] = ""
condition_detail[condition] = pattern_dic
data_per_condition[condition].extend(votes)
else:
condition = 'Overall'
if condition not in data_per_condition:
data_per_condition[condition] = []
data_per_condition[condition].extend(votes)
tmp['file_url'] = key
tmp['short_file_name'] = key.rsplit('/', 1)[-1]
for vote in votes:
tmp[f'vote_{vote_counter}'] = vote
vote_counter += 1
count = vote_counter
tmp['n'] = count-1
# tmp[mos_name] = abs(statistics.mean(votes))
tmp[mos_name] = statistics.mean(votes)
if tmp['n'] > 1:
tmp[f'std{question_name_suffix}'] = statistics.stdev(votes)
tmp[f'95%CI{question_name_suffix}'] = (1.96 * tmp[f'std{question_name_suffix}']) / math.sqrt(tmp['n'])
else:
tmp[f'std{question_name_suffix}'] = None
tmp[f'95%CI{question_name_suffix}'] = None
if tmp['n'] > max_found_per_file:
max_found_per_file = tmp['n']
group_per_file.append(tmp)
# convert the format: one row per condition
group_per_condition = []
if aggregate_on_condition:
for key in data_per_condition.keys():
tmp = dict()
tmp['condition_name'] = key
votes = data_per_condition[key]
# apply z-score outlier detection
if (not(is_worker_specific) and 'outlier_removal' in config['accept_and_use']) \
and (config['accept_and_use']['outlier_removal'].lower() in ['true', '1', 't', 'y', 'yes']):
v_len = len(votes)
votes = outliers_z_score(votes)
v_len_after = len(votes)
if v_len != v_len_after:
print(f'Condition{tmp["condition_name"]}: {v_len-v_len_after} votes are removed, remains {v_len_after}')
tmp = {**tmp, **condition_detail[key]}
tmp['n'] = len(votes)
if tmp['n'] > 0:
# tmp[mos_name] = abs(statistics.mean(votes))
tmp[mos_name] = statistics.mean(votes)
else:
tmp[mos_name] = None
if tmp['n'] > 1:
tmp[f'std{question_name_suffix}'] = statistics.stdev(votes)
tmp[f'95%CI{question_name_suffix}'] = (1.96 * tmp[f'std{question_name_suffix}']) / math.sqrt(tmp['n'])
else:
tmp[f'std{question_name_suffix}'] = None
tmp[f'95%CI{question_name_suffix}'] = None
group_per_condition.append(tmp)
return group_per_file, group_per_condition, data_per_worker
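# --- Editor's example (not part of the original source) ---
# A hand-checked sketch of the per-file statistics computed in transform():
# for votes [3, 4, 4, 5, 5], MOS = 4.2 and 95%CI = 1.96 * std / sqrt(n).
def _example_mos_and_ci():
    votes = [3, 4, 4, 5, 5]
    mos = statistics.mean(votes)
    ci95 = 1.96 * statistics.stdev(votes) / math.sqrt(len(votes))
    assert abs(mos - 4.2) < 1e-9
    assert round(ci95, 3) == 0.733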
# p835
def create_headers_for_per_file_report(test_method, condition_keys):
"""
create the list of column headers for the per-file report
:param test_method:
:param condition_keys:
:return:
"""
mos_name = method_to_mos[f"{test_method}{question_name_suffix}"]
if test_method in ["p835", "echo_impairment_test"]:
header = ['file_url', 'n', mos_name, f'std{question_name_suffix}', f'95%CI{question_name_suffix}',
'short_file_name'] + condition_keys
else:
header = ['file_url', 'n', mos_name, 'std', '95%CI', 'short_file_name'] + condition_keys
max_votes = max_found_per_file
if max_votes == -1:
max_votes = int(config['general']['expected_votes_per_file'])
for i in range(1, max_votes+1):
header.append(f'vote_{i}')
return header
def calc_stats(input_file):
"""
calculate statistics about the time workers spent and the effective hourly payment
:param input_file:
:return:
"""
df = pd.read_csv(input_file, low_memory=False)
median_time_in_sec = df["WorkTimeInSeconds"].median()
payment_text = df['Reward'].values[0]
payment = re.findall(r"\d+\.\d+", payment_text)
avg_pay = 3600*float(payment[0])/median_time_in_sec
formatted_time = time.strftime("%M:%S", time.gmtime(median_time_in_sec))
print(f"Stats: work duration (median) {formatted_time} (MM:SS), payment per hour: ${avg_pay:.2f}")
def calc_correlation(cs, lab):
"""
calculate Spearman's rank correlation
:param cs:
:param lab:
:return:
"""
rho, pval = spearmanr(cs, lab)
return rho
def number_of_unique_workers(answers):
"""
return the number of unique workers
:param answers:
:return:
"""
df = pd.DataFrame(answers)
df.drop_duplicates('worker_id', keep='first', inplace=True)
return len(df)
def analyze_results(config, test_method, answer_path, list_of_req, quality_bonus):
"""
main method for calculating the results
:param config:
:param test_method:
:param answer_path:
:param list_of_req:
:param quality_bonus:
:return:
"""
global question_name_suffix
if test_method == 'p835':
question_name_suffix = p835_suffixes[2]
suffixes = p835_suffixes
elif test_method == 'echo_impairment_test':
question_name_suffix = echo_impairment_test_suffixes[1]
suffixes = echo_impairment_test_suffixes
else:
suffixes = ['']
full_data, accepted_sessions = data_cleaning(answer_path, test_method)
n_workers = number_of_unique_workers(full_data)
print(f"{n_workers} workers participated in this batch.")
calc_stats(answer_path)
# votes_per_file, votes_per_condition = transform(accepted_sessions)
if len(accepted_sessions) > 1:
condition_set = []
for suffix in suffixes:
question_name_suffix = suffix
print("Transforming data (the ones with 'accepted_and_use' ==1 --> group per clip")
use_condition_level = config.has_option('general', 'condition_pattern')
votes_per_file, vote_per_condition, data_per_worker = transform(test_method, accepted_sessions,
config.has_option('general', 'condition_pattern'), False)
votes_per_file_path = os.path.splitext(answer_path)[0] + f'_votes_per_clip{question_name_suffix}.csv'
votes_per_cond_path = os.path.splitext(answer_path)[0] + f'_votes_per_cond{question_name_suffix}.csv'
condition_keys = []
if config.has_option('general', 'condition_pattern'):
condition_keys = config['general']['condition_keys'].split(',')
votes_per_file = sorted(votes_per_file, key=lambda i: i[condition_keys[0]])
condition_keys.append('Unknown')
headers = create_headers_for_per_file_report(test_method, condition_keys)
write_dict_as_csv(votes_per_file, votes_per_file_path, headers=headers)
print(f' Votes per file are saved in: {votes_per_file_path}')
if use_condition_level:
vote_per_condition = sorted(vote_per_condition, key=lambda i: i['condition_name'])
write_dict_as_csv(vote_per_condition, votes_per_cond_path)
print(f' Votes per condition are saved in: {votes_per_cond_path}')
condition_set.append(pd.DataFrame(vote_per_condition))
if create_per_worker:
write_dict_as_csv(data_per_worker, os.path.splitext(answer_path)[0] + f'_votes_per_worker_{question_name_suffix}.csv')
if use_condition_level and len(suffixes) > 1:
# aggregate multiple conditions into one file for p.835
full_set_conditions = None
for df in condition_set:
if full_set_conditions is None:
full_set_conditions = df
else:
df = df.drop(columns='n')
full_set_conditions = pd.merge(full_set_conditions, df, left_on='condition_name', right_on='condition_name')
votes_per_all_cond_path = os.path.splitext(answer_path)[0] + '_votes_per_cond_all.csv'
column_names = p835_columns if test_method == 'p835' else echo_impairment_test_columns
full_set_conditions.to_csv(votes_per_all_cond_path, index=False, columns=column_names)
bonus_file = os.path.splitext(answer_path)[0] + '_quantity_bonus_report.csv'
quantity_bonus_df = calc_quantity_bonuses(full_data, list_of_req, bonus_file)
if quality_bonus:
quality_bonus_path = os.path.splitext(answer_path)[0] + '_quality_bonus_report.csv'
if 'all' not in list_of_req:
quantity_bonus_df = calc_quantity_bonuses(full_data, ['all'], None)
if use_condition_level:
votes_to_use = vote_per_condition
else:
votes_to_use = votes_per_file
calc_quality_bonuses(quantity_bonus_df, accepted_sessions, votes_to_use, config, quality_bonus_path,
n_workers, test_method, use_condition_level)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Utility script to evaluate answers to the acr batch')
# Configuration: read it from mturk.cfg
parser.add_argument("--cfg", required=True,
help="Contains the configurations see acr_result_parser.cfg as an example")
parser.add_argument("--method", required=True,
help="one of the test methods: 'acr', 'dcr', 'ccr', 'p835', or 'echo_impairment_test'")
parser.add_argument("--answers", required=True,
help="Answers csv file, path relative to current directory")
parser.add_argument('--quantity_bonus', help="specify status of answers which should be counted when calculating "
" the amount of quantity bonus. All answers will be used to check "
"eligibility of worker, but those with the selected status here will "
"be used to calculate the amount of bonus. A comma separated list:"
" all|submitted. Default: submitted",
default="submitted")
parser.add_argument('--quality_bonus', help="Quality bonus will be calculated. Just use it with your final download"
" of answers and when the project is completed", action="store_true")
args = parser.parse_args()
methods = ['acr', 'dcr', 'ccr', 'p835', 'echo_impairment_test']
test_method = args.method.lower()
assert test_method in methods, "No such method supported, please select one of: 'acr', 'dcr', 'ccr', 'p835', 'echo_impairment_test'"
cfg_path = args.cfg
assert os.path.exists(cfg_path), f"No configuration file at [{cfg_path}]"
config = CP.ConfigParser()
config.read(cfg_path)
assert (args.answers is not None), "--answers is required"
# answer_path = os.path.join(os.path.dirname(__file__), args.answers)
answer_path = args.answers
assert os.path.exists(answer_path), f"No input file found in [{answer_path}]"
list_of_possible_status = ['all', 'submitted']
list_of_req = args.quantity_bonus.lower().split(',')
for req in list_of_req:
assert req.strip() in list_of_possible_status, f"unknown status {req} used in --quantity_bonus"
np.seterr(divide='ignore', invalid='ignore')
question_names = [f"q{i}" for i in range(1, int(config['general']['number_of_questions_in_rating']) + 1)]
# start
analyze_results(config, test_method, answer_path, list_of_req, args.quality_bonus)
|
47f6c413df2ee2d6b2702649a32d1cbba090901e
|
f39fbc00e7584a3e93f92534ca016fd951110cf1
|
/tests/rptest/utils/rpk_config.py
|
35160801a12aa964b926abba3efee61997083524
|
[] |
no_license
|
redpanda-data/redpanda
|
93234ff70348d3ed131bdc2d121fc8dcb7bffca9
|
bee41676303c0e5fb161256b844bb14a135de8cd
|
refs/heads/dev
| 2023-08-27T21:40:47.107399
| 2023-08-26T11:45:30
| 2023-08-26T11:45:30
| 309,512,982
| 4,297
| 308
| null | 2023-09-14T20:36:21
| 2020-11-02T22:43:36
|
C++
|
UTF-8
|
Python
| false
| false
| 893
|
py
|
rpk_config.py
|
# Copyright 2023 Redpanda Data, Inc.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.md
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0
import os
import tempfile
import yaml
from rptest.services.redpanda import RedpandaService
def read_rpk_cfg(node):
with tempfile.TemporaryDirectory() as d:
node.account.copy_from(RedpandaService.RPK_CONFIG_FILE, d)
with open(os.path.join(d, 'rpk.yaml')) as f:
return yaml.full_load(f.read())
def read_redpanda_cfg(node):
with tempfile.TemporaryDirectory() as d:
node.account.copy_from(RedpandaService.NODE_CONFIG_FILE, d)
with open(os.path.join(d, 'redpanda.yaml')) as f:
return yaml.full_load(f.read())
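# --- Editor's note (not part of the original source) ---
# A sketch of how these helpers might be used inside a ducktape test; the
# service and node objects are assumed to come from the test framework:
#
#   redpanda = RedpandaService(test_context, num_brokers=3)  # hypothetical
#   rpk_cfg = read_rpk_cfg(redpanda.nodes[0])
#   node_cfg = read_redpanda_cfg(redpanda.nodes[0])
#   assert 'redpanda' in node_cfg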
|
dc3bf26741414e094ee74332a00f30713dbc9adc
|
85e27209a7df58f76ab0f9f2ed13b1c6ac31ffc9
|
/src_python/habitat_sim/simulator.py
|
3db49904f1ce9b76fec7d250b7c29e3ebb7c6e71
|
[
"CC-BY-4.0",
"CC-BY-3.0",
"MIT"
] |
permissive
|
facebookresearch/habitat-sim
|
72a78877c412fef1d42a553f896654c71c54d245
|
6f46bccc1733f4cec30b89d994ac55df2b46eb4a
|
refs/heads/main
| 2023-09-03T00:17:30.809849
| 2023-08-29T16:06:16
| 2023-08-29T16:06:16
| 169,164,539
| 1,924
| 432
|
MIT
| 2023-09-14T17:12:21
| 2019-02-04T23:14:28
|
C++
|
UTF-8
|
Python
| false
| false
| 28,891
|
py
|
simulator.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from collections import OrderedDict
from collections.abc import MutableMapping
from typing import Any, Dict, List
from typing import MutableMapping as MutableMapping_T
from typing import Optional, Union, cast, overload
import attr
import magnum as mn
import numpy as np
from magnum import Vector3
from numpy import ndarray
try:
import torch
from torch import Tensor
_HAS_TORCH = True
except ImportError:
_HAS_TORCH = False
import habitat_sim.errors
from habitat_sim.agent.agent import Agent, AgentConfiguration, AgentState
from habitat_sim.bindings import cuda_enabled
from habitat_sim.logging import LoggingContext, logger
from habitat_sim.metadata import MetadataMediator
from habitat_sim.nav import GreedyGeodesicFollower
from habitat_sim.sensor import SensorSpec, SensorType
from habitat_sim.sensors.noise_models import make_sensor_noise_model
from habitat_sim.sim import SimulatorBackend, SimulatorConfiguration
from habitat_sim.utils.common import quat_from_angle_axis
# TODO maybe clean up types with TypeVars
ObservationDict = Dict[str, Union[bool, np.ndarray, "Tensor"]]
@attr.s(auto_attribs=True, slots=True)
class Configuration:
r"""Specifies how to configure the simulator.
:property sim_cfg: The configuration of the backend of the simulator
:property agents: A list of agent configurations
:property metadata_mediator: (optional) The metadata mediator to build the simulator from.
Ties together a backend config, `sim_cfg` and a list of agent
configurations `agents`.
"""
sim_cfg: SimulatorConfiguration
agents: List[AgentConfiguration]
# An existing Metadata Mediator can also be used to construct a SimulatorBackend
metadata_mediator: Optional[MetadataMediator] = None
enable_batch_renderer: bool = False
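# --- Editor's example (not part of the upstream habitat-sim source) ---
# A minimal sketch of building a Configuration; the scene path is a
# hypothetical placeholder.
def _example_configuration() -> Configuration:
    sim_cfg = SimulatorConfiguration()
    sim_cfg.scene_id = "data/scene_datasets/example/scene.glb"  # hypothetical
    return Configuration(sim_cfg=sim_cfg, agents=[AgentConfiguration()])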
@attr.s(auto_attribs=True)
class Simulator(SimulatorBackend):
r"""The core class of habitat-sim
:property config: configuration for the simulator
The simulator ties together the backend, the agent, controls functions,
NavMesh collision checking/pathfinding, attribute template management,
object manipulation, and physics simulation.
"""
config: Configuration
agents: List[Agent] = attr.ib(factory=list, init=False)
_num_total_frames: int = attr.ib(default=0, init=False)
_default_agent_id: int = attr.ib(default=0, init=False)
__sensors: List[Dict[str, "Sensor"]] = attr.ib(factory=list, init=False)
_initialized: bool = attr.ib(default=False, init=False)
_previous_step_time: float = attr.ib(
default=0.0, init=False
) # track the compute time of each step
_async_draw_agent_ids: Optional[Union[int, List[int]]] = None
__last_state: Dict[int, AgentState] = attr.ib(factory=dict, init=False)
@staticmethod
def _sanitize_config(config: Configuration) -> None:
if len(config.agents) == 0:
raise RuntimeError(
"Config has not agents specified. Must specify at least 1 agent"
)
config.sim_cfg.create_renderer = not config.enable_batch_renderer and any(
len(cfg.sensor_specifications) > 0 for cfg in config.agents
)
config.sim_cfg.load_semantic_mesh |= any(
(
any(
sens_spec.sensor_type == SensorType.SEMANTIC
for sens_spec in cfg.sensor_specifications
)
for cfg in config.agents
)
)
config.sim_cfg.requires_textures = any(
(
any(
sens_spec.sensor_type == SensorType.COLOR
for sens_spec in cfg.sensor_specifications
)
for cfg in config.agents
)
)
def __attrs_post_init__(self) -> None:
LoggingContext.reinitialize_from_env()
self._sanitize_config(self.config)
self.__set_from_config(self.config)
def close(self, destroy: bool = True) -> None:
r"""Close the simulator instance.
:param destroy: Whether or not to force the OpenGL context to be
destroyed if async rendering was used. If async rendering wasn't used,
this has no effect.
"""
# NB: Python still calls __del__ (and thus closes) even if __init__
# fails. We don't have anything to close if we aren't initialized, so
# we can just return.
if not self._initialized:
return
if self.renderer is not None:
self.renderer.acquire_gl_context()
for agent_sensorsuite in self.__sensors:
for sensor in agent_sensorsuite.values():
sensor.close()
del sensor
self.__sensors = []
for agent in self.agents:
agent.close()
del agent
self.agents = []
self.__last_state.clear()
super().close(destroy)
def __enter__(self) -> "Simulator":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(destroy=True)
def seed(self, new_seed: int) -> None:
super().seed(new_seed)
self.pathfinder.seed(new_seed)
@overload
def reset(self, agent_ids: List[int]) -> Dict[int, ObservationDict]:
...
@overload
def reset(self, agent_ids: Optional[int] = None) -> ObservationDict:
...
def reset(
self, agent_ids: Union[Optional[int], List[int]] = None
) -> Union[ObservationDict, Dict[int, ObservationDict],]:
super().reset()
for i in range(len(self.agents)):
self.reset_agent(i)
if agent_ids is None:
agent_ids = [self._default_agent_id]
return_single = True
else:
agent_ids = cast(List[int], agent_ids)
return_single = False
obs = self.get_sensor_observations(agent_ids=agent_ids)
if return_single:
return obs[agent_ids[0]]
return obs
def reset_agent(self, agent_id: int) -> None:
agent = self.get_agent(agent_id)
initial_agent_state = agent.initial_state
if initial_agent_state is None:
raise RuntimeError("reset called before agent was given an initial state")
self.initialize_agent(agent_id, initial_agent_state)
def _config_backend(self, config: Configuration) -> None:
if not self._initialized:
super().__init__(config.sim_cfg, config.metadata_mediator)
self._initialized = True
else:
super().reconfigure(config.sim_cfg)
def _config_agents(self, config: Configuration) -> None:
self.agents = [
Agent(self.get_active_scene_graph().get_root_node().create_child(), cfg)
for cfg in config.agents
]
def _config_pathfinder(self, config: Configuration) -> None:
if self.pathfinder is None or not self.pathfinder.is_loaded:
logger.warning(
"Navmesh not loaded or computed, no collision checking will be done."
)
else:
self.pathfinder.seed(config.sim_cfg.random_seed)
def reconfigure(self, config: Configuration) -> None:
self._sanitize_config(config)
if self.config != config:
self.__set_from_config(config)
self.config = config
def __set_from_config(self, config: Configuration) -> None:
self._config_backend(config)
self._config_agents(config)
self._config_pathfinder(config)
self.frustum_culling = config.sim_cfg.frustum_culling
for i in range(len(self.agents)):
self.agents[i].controls.move_filter_fn = self.step_filter
self._default_agent_id = config.sim_cfg.default_agent_id
        self.__sensors: List[Dict[str, Sensor]] = [
            dict() for _ in range(len(config.agents))
        ]
self.__last_state = dict()
for agent_id, agent_cfg in enumerate(config.agents):
for spec in agent_cfg.sensor_specifications:
self._update_simulator_sensors(spec.uuid, agent_id=agent_id)
self.initialize_agent(agent_id)
def _update_simulator_sensors(self, uuid: str, agent_id: int) -> None:
self.__sensors[agent_id][uuid] = Sensor(
sim=self, agent=self.get_agent(agent_id), sensor_id=uuid
)
def add_sensor(
self, sensor_spec: SensorSpec, agent_id: Optional[int] = None
) -> None:
if (
(
not self.config.sim_cfg.load_semantic_mesh
and sensor_spec.sensor_type == SensorType.SEMANTIC
)
or (
not self.config.sim_cfg.requires_textures
and sensor_spec.sensor_type == SensorType.COLOR
)
or (
not self.config.sim_cfg.create_renderer
and sensor_spec.sensor_type == SensorType.DEPTH
)
):
sensor_type = sensor_spec.sensor_type
raise ValueError(
f"""Data for {sensor_type} sensor was not loaded during Simulator init.
Cannot dynamically add a {sensor_type} sensor unless one already exists.
"""
)
if agent_id is None:
agent_id = self._default_agent_id
agent = self.get_agent(agent_id=agent_id)
agent._add_sensor(sensor_spec)
self._update_simulator_sensors(sensor_spec.uuid, agent_id=agent_id)
def get_agent(self, agent_id: int) -> Agent:
return self.agents[agent_id]
def initialize_agent(
self, agent_id: int, initial_state: Optional[AgentState] = None
) -> Agent:
agent = self.get_agent(agent_id=agent_id)
if initial_state is None:
initial_state = AgentState()
if self.pathfinder.is_loaded:
initial_state.position = self.pathfinder.get_random_navigable_point()
initial_state.rotation = quat_from_angle_axis(
self.random.uniform_float(0, 2.0 * np.pi), np.array([0, 1, 0])
)
agent.set_state(initial_state, is_initial=True)
self.__last_state[agent_id] = agent.state
return agent
def start_async_render_and_step_physics(
self, dt: float, agent_ids: Union[int, List[int]] = 0
):
assert not self.config.enable_batch_renderer
if self._async_draw_agent_ids is not None:
raise RuntimeError(
"start_async_render_and_step_physics was already called. "
"Call get_sensor_observations_async_finish before calling this again. "
"Use step_physics to step physics additional times."
)
self._async_draw_agent_ids = agent_ids
if isinstance(agent_ids, int):
agent_ids = [agent_ids]
for agent_id in agent_ids:
agent_sensorsuite = self.__sensors[agent_id]
for sensor in agent_sensorsuite.values():
sensor._draw_observation_async()
self.renderer.start_draw_jobs()
self.step_physics(dt)
def start_async_render(self, agent_ids: Union[int, List[int]] = 0):
assert not self.config.enable_batch_renderer
if self._async_draw_agent_ids is not None:
raise RuntimeError(
"start_async_render_and_step_physics was already called. "
"Call get_sensor_observations_async_finish before calling this again. "
"Use step_physics to step physics additional times."
)
self._async_draw_agent_ids = agent_ids
if isinstance(agent_ids, int):
agent_ids = [agent_ids]
for agent_id in agent_ids:
agent_sensorsuite = self.__sensors[agent_id]
for sensor in agent_sensorsuite.values():
sensor._draw_observation_async()
self.renderer.start_draw_jobs()
def get_sensor_observations_async_finish(
self,
) -> Union[
Dict[str, Union[ndarray, "Tensor"]],
Dict[int, Dict[str, Union[ndarray, "Tensor"]]],
]:
assert not self.config.enable_batch_renderer
if self._async_draw_agent_ids is None:
raise RuntimeError(
"get_sensor_observations_async_finish was called before calling start_async_render_and_step_physics."
)
agent_ids = self._async_draw_agent_ids
self._async_draw_agent_ids = None
if isinstance(agent_ids, int):
agent_ids = [agent_ids]
return_single = True
else:
return_single = False
self.renderer.wait_draw_jobs()
        # OrderedDict kept only for backwards compatibility; all dicts preserve insertion order in Python >= 3.7.
observations: Dict[int, Dict[str, Union[ndarray, "Tensor"]]] = OrderedDict()
for agent_id in agent_ids:
agent_observations: Dict[str, Union[ndarray, "Tensor"]] = {}
for sensor_uuid, sensor in self.__sensors[agent_id].items():
agent_observations[sensor_uuid] = sensor._get_observation_async()
observations[agent_id] = agent_observations
if return_single:
return next(iter(observations.values()))
return observations
@overload
def get_sensor_observations(self, agent_ids: int = 0) -> ObservationDict:
...
@overload
def get_sensor_observations(
self, agent_ids: List[int]
) -> Dict[int, ObservationDict]:
...
def get_sensor_observations(
self, agent_ids: Union[int, List[int]] = 0
) -> Union[ObservationDict, Dict[int, ObservationDict],]:
if isinstance(agent_ids, int):
agent_ids = [agent_ids]
return_single = True
else:
return_single = False
        # OrderedDict kept only for backwards compatibility; all dicts preserve insertion order in Python >= 3.7.
observations: Dict[int, ObservationDict] = OrderedDict()
# Draw observations (for classic non-batched renderer).
if not self.config.enable_batch_renderer:
for agent_id in agent_ids:
agent_sensorsuite = self.__sensors[agent_id]
for _sensor_uuid, sensor in agent_sensorsuite.items():
sensor.draw_observation()
else:
# The batch renderer draws observations from external code.
# Sensors are only used as data containers.
pass
# Get observations.
for agent_id in agent_ids:
agent_observations: ObservationDict = {}
for sensor_uuid, sensor in self.__sensors[agent_id].items():
agent_observations[sensor_uuid] = sensor.get_observation()
observations[agent_id] = agent_observations
if return_single:
return next(iter(observations.values()))
return observations
@property
def _default_agent(self) -> Agent:
# TODO Deprecate and remove
return self.get_agent(agent_id=self._default_agent_id)
@property
def _last_state(self) -> AgentState:
# TODO Deprecate and remove
return self.__last_state[self._default_agent_id]
@_last_state.setter
def _last_state(self, state: AgentState) -> None:
# TODO Deprecate and remove
self.__last_state[self._default_agent_id] = state
@property
def _sensors(self) -> Dict[str, "Sensor"]:
# TODO Deprecate and remove
return self.__sensors[self._default_agent_id]
def last_state(self, agent_id: Optional[int] = None) -> AgentState:
if agent_id is None:
agent_id = self._default_agent_id
return self.__last_state[agent_id]
@overload
def step(self, action: Union[str, int], dt: float = 1.0 / 60.0) -> ObservationDict:
...
@overload
def step(
self, action: MutableMapping_T[int, Union[str, int]], dt: float = 1.0 / 60.0
) -> Dict[int, ObservationDict]:
...
def step(
self,
action: Union[str, int, MutableMapping_T[int, Union[str, int]]],
dt: float = 1.0 / 60.0,
) -> Union[ObservationDict, Dict[int, ObservationDict],]:
self._num_total_frames += 1
if isinstance(action, MutableMapping):
return_single = False
else:
action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action})
return_single = True
collided_dict: Dict[int, bool] = {}
for agent_id, agent_act in action.items():
agent = self.get_agent(agent_id)
collided_dict[agent_id] = agent.act(agent_act)
self.__last_state[agent_id] = agent.get_state()
# step physics by dt
        step_start_time = time.time()
        super().step_world(dt)
        self._previous_step_time = time.time() - step_start_time
multi_observations = self.get_sensor_observations(agent_ids=list(action.keys()))
for agent_id, agent_observation in multi_observations.items():
agent_observation["collided"] = collided_dict[agent_id]
if return_single:
return multi_observations[self._default_agent_id]
return multi_observations
def make_greedy_follower(
self,
agent_id: Optional[int] = None,
goal_radius: Optional[float] = None,
*,
stop_key: Optional[Any] = None,
forward_key: Optional[Any] = None,
left_key: Optional[Any] = None,
right_key: Optional[Any] = None,
fix_thrashing: bool = True,
thrashing_threshold: int = 16,
):
if agent_id is None:
agent_id = self._default_agent_id
return GreedyGeodesicFollower(
self.pathfinder,
self.get_agent(agent_id),
goal_radius,
stop_key=stop_key,
forward_key=forward_key,
left_key=left_key,
right_key=right_key,
fix_thrashing=fix_thrashing,
thrashing_threshold=thrashing_threshold,
)
def step_filter(self, start_pos: Vector3, end_pos: Vector3) -> Vector3:
r"""Computes a valid navigable end point given a target translation on the NavMesh.
Uses the configured sliding flag.
:param start_pos: The valid initial position of a translation.
:param end_pos: The target end position of a translation.
"""
if self.pathfinder.is_loaded:
if self.config.sim_cfg.allow_sliding:
end_pos = self.pathfinder.try_step(start_pos, end_pos)
else:
end_pos = self.pathfinder.try_step_no_sliding(start_pos, end_pos)
return end_pos
def __del__(self) -> None:
self.close(destroy=True)
def step_physics(self, dt: float) -> None:
self.step_world(dt)
class Sensor:
r"""Wrapper around habitat_sim.Sensor
TODO(MS) define entire Sensor class in python, reducing complexity
"""
buffer = Union[np.ndarray, "Tensor"]
def __init__(self, sim: Simulator, agent: Agent, sensor_id: str) -> None:
self._sim = sim
self._agent = agent
# sensor is an attached object to the scene node
# store such "attached object" in _sensor_object
self._sensor_object = self._agent._sensors[sensor_id]
self._spec = self._sensor_object.specification()
# When using the batch renderer, no memory is allocated here.
if not self._sim.config.enable_batch_renderer:
self._initialize_sensor()
def _initialize_sensor(self):
r"""
Allocate buffers and initialize noise model in preparation for rendering.
"""
if self._spec.sensor_type == SensorType.AUDIO:
return
if self._sim.renderer is not None:
self._sim.renderer.bind_render_target(self._sensor_object)
if self._spec.gpu2gpu_transfer:
assert cuda_enabled, "Must build habitat sim with cuda for gpu2gpu-transfer"
assert _HAS_TORCH
device = torch.device("cuda", self._sim.gpu_device) # type: ignore[attr-defined]
torch.cuda.set_device(device)
resolution = self._spec.resolution
if self._spec.sensor_type == SensorType.SEMANTIC:
self._buffer: Union[np.ndarray, "Tensor"] = torch.empty(
resolution[0], resolution[1], dtype=torch.int32, device=device
)
elif self._spec.sensor_type == SensorType.DEPTH:
self._buffer = torch.empty(
resolution[0], resolution[1], dtype=torch.float32, device=device
)
else:
self._buffer = torch.empty(
resolution[0], resolution[1], 4, dtype=torch.uint8, device=device
)
else:
size = self._sensor_object.framebuffer_size
if self._spec.sensor_type == SensorType.SEMANTIC:
self._buffer = np.empty(
(self._spec.resolution[0], self._spec.resolution[1]),
dtype=np.uint32,
)
self.view = mn.MutableImageView2D(
mn.PixelFormat.R32UI, size, self._buffer
)
elif self._spec.sensor_type == SensorType.DEPTH:
self._buffer = np.empty(
(self._spec.resolution[0], self._spec.resolution[1]),
dtype=np.float32,
)
self.view = mn.MutableImageView2D(
mn.PixelFormat.R32F, size, self._buffer
)
else:
self._buffer = np.empty(
(
self._spec.resolution[0],
self._spec.resolution[1],
self._spec.channels,
),
dtype=np.uint8,
)
self.view = mn.MutableImageView2D(
mn.PixelFormat.RGBA8_UNORM,
size,
self._buffer.reshape(self._spec.resolution[0], -1),
)
noise_model_kwargs = self._spec.noise_model_kwargs
self._noise_model = make_sensor_noise_model(
self._spec.noise_model,
{"gpu_device_id": self._sim.gpu_device, **noise_model_kwargs},
)
assert self._noise_model.is_valid_sensor_type(
self._spec.sensor_type
), "Noise model '{}' is not valid for sensor '{}'".format(
self._spec.noise_model, self._spec.uuid
)
def draw_observation(self) -> None:
# Batch rendering happens elsewhere.
assert not self._sim.config.enable_batch_renderer
if self._spec.sensor_type == SensorType.AUDIO:
# do nothing in draw observation, get_observation will be called after this
# run the simulation there
return
assert self._sim.renderer is not None
# see if the sensor is attached to a scene graph, otherwise it is invalid,
# and cannot make any observation
if not self._sensor_object.object:
raise habitat_sim.errors.InvalidAttachedObject(
"Sensor observation requested but sensor is invalid.\
(has it been detached from a scene node?)"
)
self._sim.renderer.draw(self._sensor_object, self._sim)
def _draw_observation_async(self) -> None:
# Batch rendering happens elsewhere.
assert not self._sim.config.enable_batch_renderer
if self._spec.sensor_type == SensorType.AUDIO:
# do nothing in draw observation, get_observation will be called after this
# run the simulation there
return
assert self._sim.renderer is not None
if (
self._spec.sensor_type == SensorType.SEMANTIC
and self._sim.get_active_scene_graph()
is not self._sim.get_active_semantic_scene_graph()
):
raise RuntimeError(
"Async drawing doesn't support semantic rendering when there are multiple scene graphs"
)
# TODO: sync this path with renderer changes as above (render from sensor object)
# see if the sensor is attached to a scene graph, otherwise it is invalid,
# and cannot make any observation
if not self._sensor_object.object:
raise habitat_sim.errors.InvalidAttachedObject(
"Sensor observation requested but sensor is invalid.\
(has it been detached from a scene node?)"
)
# get the correct scene graph based on application
if self._spec.sensor_type == SensorType.SEMANTIC:
if self._sim.semantic_scene is None:
raise RuntimeError(
"SemanticSensor observation requested but no SemanticScene is loaded"
)
scene = self._sim.get_active_semantic_scene_graph()
else: # SensorType is DEPTH or any other type
scene = self._sim.get_active_scene_graph()
# now, connect the agent to the root node of the current scene graph
# sanity check is not needed on agent:
# because if a sensor is attached to a scene graph,
# it implies the agent is attached to the same scene graph
# (it assumes backend simulator will guarantee it.)
agent_node = self._agent.scene_node
agent_node.parent = scene.get_root_node()
# get the correct scene graph based on application
if self._spec.sensor_type == SensorType.SEMANTIC:
scene = self._sim.get_active_semantic_scene_graph()
else: # SensorType is DEPTH or any other type
scene = self._sim.get_active_scene_graph()
render_flags = habitat_sim.gfx.Camera.Flags.NONE
if self._sim.frustum_culling:
render_flags |= habitat_sim.gfx.Camera.Flags.FRUSTUM_CULLING
self._sim.renderer.enqueue_async_draw_job(
self._sensor_object, scene, self.view, render_flags
)
def get_observation(self) -> Union[ndarray, "Tensor"]:
if self._spec.sensor_type == SensorType.AUDIO:
return self._get_audio_observation()
# Placeholder until batch renderer emplaces the final value.
if self._sim.config.enable_batch_renderer:
return None
assert self._sim.renderer is not None
tgt = self._sensor_object.render_target
if self._spec.gpu2gpu_transfer:
with torch.cuda.device(self._buffer.device): # type: ignore[attr-defined, union-attr]
if self._spec.sensor_type == SensorType.SEMANTIC:
tgt.read_frame_object_id_gpu(self._buffer.data_ptr()) # type: ignore[attr-defined, union-attr]
elif self._spec.sensor_type == SensorType.DEPTH:
tgt.read_frame_depth_gpu(self._buffer.data_ptr()) # type: ignore[attr-defined, union-attr]
else:
tgt.read_frame_rgba_gpu(self._buffer.data_ptr()) # type: ignore[attr-defined, union-attr]
obs = self._buffer.flip(0) # type: ignore[union-attr]
else:
if self._spec.sensor_type == SensorType.SEMANTIC:
tgt.read_frame_object_id(self.view)
elif self._spec.sensor_type == SensorType.DEPTH:
tgt.read_frame_depth(self.view)
else:
tgt.read_frame_rgba(self.view)
obs = np.flip(self._buffer, axis=0)
return self._noise_model(obs)
def _get_observation_async(self) -> Union[ndarray, "Tensor"]:
if self._spec.sensor_type == SensorType.AUDIO:
return self._get_audio_observation()
if self._spec.gpu2gpu_transfer:
obs = self._buffer.flip(0) # type: ignore[union-attr]
else:
obs = np.flip(self._buffer, axis=0)
return self._noise_model(obs)
def _get_audio_observation(self) -> Union[ndarray, "Tensor"]:
assert self._spec.sensor_type == SensorType.AUDIO
audio_sensor = self._agent._sensors["audio_sensor"]
# tell the audio sensor about the agent location
rot = self._agent.state.rotation
audio_sensor.setAudioListenerTransform(
audio_sensor.node.absolute_translation, # set the listener position
np.array([rot.w, rot.x, rot.y, rot.z]), # set the listener orientation
)
# run the simulation
audio_sensor.runSimulation(self._sim)
obs = audio_sensor.getIR()
return obs
def close(self) -> None:
self._sim = None
self._agent = None
self._sensor_object = None
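# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal driver for the Simulator/Sensor classes above. It assumes the default
# agent action space; the scene path and attribute names follow the public
# habitat-sim configuration API, and the scene file itself is a placeholder.
import habitat_sim

def _make_example_sim():
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene_id = "path/to/scene.glb"  # hypothetical scene asset
    agent_cfg = habitat_sim.agent.AgentConfiguration()
    return habitat_sim.Simulator(habitat_sim.Configuration(backend_cfg, [agent_cfg]))

# sim = _make_example_sim()
# obs = sim.reset()               # ObservationDict for the default agent
# obs = sim.step("move_forward")  # act, step physics by dt, render sensors
# sim.close(destroy=True)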
--- file: /pandas/_libs/tslibs/offsets.pyi | repo: pandas-dev/pandas | license: BSD-3-Clause (permissive) | lang: Python | 8,208 bytes ---
from datetime import (
datetime,
time,
timedelta,
)
from typing import (
Any,
Collection,
Literal,
TypeVar,
overload,
)
import numpy as np
from pandas._libs.tslibs.nattype import NaTType
from pandas._typing import (
OffsetCalendar,
Self,
npt,
)
from .timedeltas import Timedelta
_BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset)
_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta)
_relativedelta_kwds: set[str]
prefix_mapping: dict[str, type]
class ApplyTypeError(TypeError): ...
class BaseOffset:
n: int
def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
def __eq__(self, other) -> bool: ...
def __ne__(self, other) -> bool: ...
def __hash__(self) -> int: ...
@property
def kwds(self) -> dict: ...
@property
def base(self) -> BaseOffset: ...
@overload
def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __add__(self, other: BaseOffset) -> Self: ...
@overload
def __add__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __radd__(self, other: BaseOffset) -> Self: ...
@overload
def __radd__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __radd__(self, other: NaTType) -> NaTType: ...
def __sub__(self, other: BaseOffset) -> Self: ...
@overload
def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __rsub__(self, other: BaseOffset): ...
@overload
def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __mul__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __mul__(self, other: int): ...
@overload
def __rmul__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __rmul__(self, other: int) -> Self: ...
def __neg__(self) -> Self: ...
def copy(self) -> Self: ...
@property
def name(self) -> str: ...
@property
def rule_code(self) -> str: ...
@property
def freqstr(self) -> str: ...
def _apply(self, other): ...
def _apply_array(self, dtarr) -> None: ...
def rollback(self, dt: datetime) -> datetime: ...
def rollforward(self, dt: datetime) -> datetime: ...
def is_on_offset(self, dt: datetime) -> bool: ...
def __setstate__(self, state) -> None: ...
def __getstate__(self): ...
@property
def nanos(self) -> int: ...
def is_anchored(self) -> bool: ...
def _get_offset(name: str) -> BaseOffset: ...
class SingleConstructorOffset(BaseOffset):
@classmethod
def _from_name(cls, suffix: None = ...): ...
def __reduce__(self): ...
@overload
def to_offset(freq: None) -> None: ...
@overload
def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ...
@overload
def to_offset(freq: timedelta | str) -> BaseOffset: ...
class Tick(SingleConstructorOffset):
_creso: int
_prefix: str
def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
@property
def delta(self) -> Timedelta: ...
@property
def nanos(self) -> int: ...
def delta_to_tick(delta: timedelta) -> Tick: ...
class Day(Tick): ...
class Hour(Tick): ...
class Minute(Tick): ...
class Second(Tick): ...
class Milli(Tick): ...
class Micro(Tick): ...
class Nano(Tick): ...
class RelativeDeltaOffset(BaseOffset):
def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ...
class BusinessMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
) -> None: ...
class BusinessDay(BusinessMixin): ...
class BusinessHour(BusinessMixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
start: str | time | Collection[str | time] = ...,
end: str | time | Collection[str | time] = ...,
offset: timedelta = ...,
) -> None: ...
class WeekOfMonthMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., weekday: int = ...
) -> None: ...
class YearOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., month: int | None = ...
) -> None: ...
class BYearEnd(YearOffset): ...
class BYearBegin(YearOffset): ...
class YearEnd(YearOffset): ...
class YearBegin(YearOffset): ...
class QuarterOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ...
) -> None: ...
class BQuarterEnd(QuarterOffset): ...
class BQuarterBegin(QuarterOffset): ...
class QuarterEnd(QuarterOffset): ...
class QuarterBegin(QuarterOffset): ...
class MonthOffset(SingleConstructorOffset): ...
class MonthEnd(MonthOffset): ...
class MonthBegin(MonthOffset): ...
class BusinessMonthEnd(MonthOffset): ...
class BusinessMonthBegin(MonthOffset): ...
class SemiMonthOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ...
) -> None: ...
class SemiMonthEnd(SemiMonthOffset): ...
class SemiMonthBegin(SemiMonthOffset): ...
class Week(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
) -> None: ...
class WeekOfMonth(WeekOfMonthMixin):
def __init__(
self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
) -> None: ...
class LastWeekOfMonth(WeekOfMonthMixin): ...
class FY5253Mixin(SingleConstructorOffset):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
variation: Literal["nearest", "last"] = ...,
) -> None: ...
class FY5253(FY5253Mixin): ...
class FY5253Quarter(FY5253Mixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
qtr_with_extra_week: int = ...,
variation: Literal["nearest", "last"] = ...,
) -> None: ...
class Easter(SingleConstructorOffset): ...
class _CustomBusinessMonth(BusinessMixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessDay(BusinessDay):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessHour(BusinessHour):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
start: str | time | Collection[str | time] = ...,
end: str | time | Collection[str | time] = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
class OffsetMeta(type): ...
class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ...
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def roll_qtrday(
other: datetime, n: int, month: int, day_opt: str, modby: int
) -> int: ...
INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"]
def shift_months(
dtindex: npt.NDArray[np.int64], months: int, day_opt: str | None = ...
) -> npt.NDArray[np.int64]: ...
_offset_map: dict[str, BaseOffset]
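# --- Illustrative usage sketch (editor's addition, not part of the stub) ---
# Runtime behaviour described by the stubs above: to_offset() parsing, offset
# arithmetic, and rollforward(). This code belongs in a script, not a .pyi file.
import pandas as pd
from pandas.tseries.frequencies import to_offset

ts = pd.Timestamp("2023-01-01")                 # a Sunday
print(ts + to_offset("2B"))                     # 2023-01-03: two business days, skipping the weekend
print(pd.offsets.MonthEnd().rollforward(ts))    # 2023-01-31
print(pd.offsets.MonthEnd().is_on_offset(ts))   # False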
--- file: /sponsors/migrations/0008_auto_20201028_1814.py | repo: python/pythondotorg | license: Apache-2.0 (permissive) | lang: Python | 8,190 bytes ---
# Generated by Django 2.0.13 on 2020-10-28 18:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("sponsors", "0007_auto_20201021_1410"),
]
operations = [
migrations.CreateModel(
name="SponsorBenefit",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(
help_text="For display in the statement of work and sponsor dashboard.",
max_length=1024,
verbose_name="Benefit Name",
),
),
(
"description",
models.TextField(
blank=True,
help_text="For display in the statement of work and sponsor dashboard.",
null=True,
verbose_name="Benefit Description",
),
),
(
"program",
models.ForeignKey(
help_text="Which sponsorship program the benefit is associated with.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="sponsors.SponsorshipProgram",
verbose_name="Sponsorship Program",
),
),
],
),
migrations.CreateModel(
name="SponsorContact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"primary",
models.BooleanField(
default=False,
help_text="If this is the primary contact for the sponsor",
),
),
(
"manager",
models.BooleanField(
default=False,
help_text="If this contact can manage sponsorship information on python.org",
),
),
("name", models.CharField(max_length=100)),
("email", models.EmailField(max_length=256)),
(
"phone",
models.CharField(max_length=32, verbose_name="Contact Phone"),
),
],
),
migrations.CreateModel(
name="SponsorInformation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(
help_text="Name of the sponsor, for public display.",
max_length=100,
verbose_name="Sponsor name",
),
),
(
"description",
models.TextField(
help_text="Brief description of the sponsor for public display.",
verbose_name="Sponsor description",
),
),
(
"landing_page_url",
models.URLField(
blank=True,
help_text="Sponsor landing page URL. This may be provided by the sponsor, however the linked page may not contain any sales or marketing information.",
null=True,
verbose_name="Sponsor landing page",
),
),
(
"web_logo",
models.ImageField(
help_text="For display on our sponsor webpage. High resolution PNG or JPG, smallest dimension no less than 256px",
upload_to="sponsor_web_logos",
verbose_name="Sponsor web logo",
),
),
(
"print_logo",
models.FileField(
blank=True,
help_text="For printed materials, signage, and projection. SVG or EPS",
null=True,
upload_to="sponsor_print_logos",
verbose_name="Sponsor print logo",
),
),
(
"primary_phone",
models.CharField(
max_length=32, verbose_name="Sponsor Primary Phone"
),
),
(
"mailing_address",
models.TextField(verbose_name="Sponsor Mailing/Billing Address"),
),
],
),
migrations.CreateModel(
name="Sponsorship",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("applied_on", models.DateField(auto_now_add=True)),
("approved_on", models.DateField(blank=True, null=True)),
("start_date", models.DateField(blank=True, null=True)),
("end_date", models.DateField(blank=True, null=True)),
("level_name", models.CharField(max_length=64)),
("sponsorship_fee", models.PositiveIntegerField(blank=True, null=True)),
(
"sponsor_info",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="sponsors.SponsorInformation",
),
),
],
),
migrations.AddField(
model_name="sponsorcontact",
name="sponsor",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="contacts",
to="sponsors.SponsorInformation",
),
),
migrations.AddField(
model_name="sponsorcontact",
name="user",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="sponsorbenefit",
name="sponsorship",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="benefits",
to="sponsors.Sponsorship",
),
),
migrations.AddField(
model_name="sponsorbenefit",
name="sponsorship_benefit",
field=models.ForeignKey(
help_text="Sponsorship Benefit this Sponsor Benefit came from",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="sponsors.SponsorshipBenefit",
),
),
]
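# --- Illustrative usage sketch (editor's addition, not part of the migration) ---
# Once this migration is applied, the reverse relations declared above
# (related_name="contacts" and related_name="benefits") allow queries such as
# the following. The model import path is assumed, not taken from this file.
#
#     from sponsors.models import SponsorInformation, Sponsorship
#
#     sponsor = SponsorInformation.objects.get(name="Example Sponsor")
#     primary_contacts = sponsor.contacts.filter(primary=True)
#     sponsorship = Sponsorship.objects.filter(sponsor_info=sponsor).first()
#     benefit_names = [b.name for b in sponsorship.benefits.all()]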
--- file: /telemetry/telemetry/page/traffic_setting.py | repo: catapult-project/catapult | license: BSD-3-Clause (permissive) | lang: Python | 1,386 bytes ---
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import collections
_Configs = collections.namedtuple(
'_Configs', ('download_bandwidth_kbps,'
'upload_bandwidth_kbps,'
'round_trip_latency_ms'))
# These presets are copied from devtool's:
# https://chromium.googlesource.com/chromium/src/+/3ff085d04100b20b8e33b6d72f9505d944046c6a/third_party/WebKit/Source/devtools/front_end/components/NetworkConditionsSelector.js#36
# (Note: devtools' presets are expressed in bytes/sec.)
NONE = 'none'
GPRS = 'GPRS'
REGULAR_2G = 'Regular-2G'
GOOD_2G = 'Good-2G'
REGULAR_3G = 'Regular-3G'
GOOD_3G = 'Good-3G'
REGULAR_4G = 'Regular-4G'
DSL = 'DSL'
WIFI = 'WiFi'
NETWORK_CONFIGS = {
NONE: _Configs(0, 0, 0),
GPRS: _Configs(50, 20, 500),
REGULAR_2G: _Configs(250, 50, 300),
GOOD_2G: _Configs(450, 150, 150),
REGULAR_3G: _Configs(750, 250, 100),
GOOD_3G: _Configs(1.5 * 1024, 750, 40),
REGULAR_4G: _Configs(4 * 1024, 3 * 1024, 20),
DSL: _Configs(2 * 1024, 1 * 1024, 5),
WIFI: _Configs(30 * 1024, 15 * 1024, 2),
}
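# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Looking up one of the presets defined above; fields are download kbps,
# upload kbps, and round-trip latency in ms.
if __name__ == '__main__':
  cfg = NETWORK_CONFIGS[GOOD_3G]
  print(cfg.download_bandwidth_kbps)  # 1536.0 (1.5 * 1024)
  print(cfg.upload_bandwidth_kbps)    # 750
  print(cfg.round_trip_latency_ms)    # 40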
--- file: /python/practise/learn-python/python_basic/dict_def.py | repo: anzhihe/learning | license: none | lang: Python | 3,764 bytes ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@FileName: dict_def.py
@Function: dict definition
@Author: Zhihe An
@Site: https://chegva.com
@Time: 2021/6/27
"""
"""一、为什么需要字典?"""
"""
电话簿中存储了姓名和电话号码:
张三 → 13333333333
李四 → 14444444444
王五 → 15555555555
赵六 → 16666666666
"""
# Names
names = ['张三', '李四', '王五', '赵六']
# Phone numbers
numbers = ['13333333333', '14444444444', '15555555555', '16666666666']
# To look up someone's phone number:
print(numbers[names.index('王五')]) # 15555555555
"""
最好能这样:把姓名和电话号码全部都存储在一个叫phonebook的数据结构中,指定姓名'王五'后
就能直接得到其对应的电话号码
print(phonebook['王五']) # 15555555555
使用字典就可以实现
"""
phonebook = {
'张三': '13333333333',
'李四': '14444444444',
'王五': '15555555555',
'赵六': '16666666666'
}
print(phonebook['王五']) # 15555555555
"""二、什么是字典?"""
"""
除了列表和元组,字典也是python语言提供的内置数据结构之一
1、字典的实现原理
字典的实现原理和查字典是类似的。当我们在字典中查找某个字时,一种办法是从字典的第一页开始
往后翻,直到找到我们要查找的字为止。这种办法就是在列表中查找元素的办法,其缺点是:
字典中的字数越多查找效率越低。第二种办法是先在字典的索引表里(比如部首表)查找这个字对应的页码,
然后直接翻到这个字对应的页,其优点是:查找效率不会随着字典中字数的增加而降低,无论查找哪个字,查找速度都非常快
"""
"""
2、字典的特点
(1) 字典中的所有元素都是一个key-value对,通过指定的key总能映射到唯一指定的value
字典中不可以存在重复的key,但是可以存在重复的value
(2) 字典中的元素是无序的
顺序不重要,重要的是key和value的映射关系
(3) 字典中的key必须是不可变对象
存取字典中的key-value对时,系统会调用内置函数hash根据指定的key计算出value的存储位置,也就是哈希值
对于指定的key,为了保证每次计算出的哈希值都是相同的,要求key必须是不可变对象
也就是说,只有不可变对象才存在哈希值
(4) 字典可以根据需要动态地伸缩
系统会根据需要动态地分配和回收内存,因此在使用前无须预先声明字典的容量
(5) 字典会浪费较大的内存
与列表相比,是用空间换取了时间
"""
"""三、字典的创建"""
"""
创建字典的常见方式有三种:
1、使用花括号
"""
print({'name': 'Jack', 'age': 18}) # {'name': 'Jack', 'age': 18}
# Empty dictionary
print({}) # {}
"""
2、调用内置函数dict(类dict的构造方法)
"""
print(dict({'name': 'Jack', 'age': 18})) # {'name': 'Jack', 'age': 18}
print(dict(name = 'Jack', age = 18)) # {'name': 'Jack', 'age': 18}
print(dict([('name', 'Jack'), ('age', 18)])) # {'name': 'Jack', 'age': 18}
print(dict(zip(range(3), 'ABC'))) # {0: 'A', 1: 'B', 2: 'C'}
# Empty dictionary
print(dict()) # {}
"""
3、调用dict的方法fromKeys
调用该方法时通过参数指定所有的key,所有的value的默认值为None
"""
print(dict.fromkeys(['name', 'age'])) # {'name': None, 'age': None}
print(dict.fromkeys(('name', 'age'))) # {'name': None, 'age': None}
# A second argument sets the value used for every key
print(dict.fromkeys(['name', 'age'], 'N/A')) # {'name': 'N/A', 'age': 'N/A'}
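"""
When a key might be missing, dict.get avoids a KeyError and can return a default.
(Editor's addition: '陈七' below is an example name that is not in the phone book.)
"""
print(phonebook.get('王五'))             # 15555555555
print(phonebook.get('陈七'))             # None
print(phonebook.get('陈七', 'unknown'))  # unknown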
--- file: /tests/sklearn/test_infairness.py | repo: Trusted-AI/AIF360 | license: Apache-2.0 (permissive) | lang: Python | 7,100 bytes ---
from contextlib import nullcontext
from functools import partial
from inFairness import fairalgo, distances
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from aif360.sklearn.inprocessing import SenSeI, SenSR
from aif360.sklearn.inprocessing.infairness import Dataset
def test_sensei_classification():
"""Tests whether SenSeI output matches original implementation."""
X, y = make_classification(n_features=10)
X = X.astype('float32')
ds = Dataset(X, y)
dx = distances.SVDSensitiveSubspaceDistance()
dx.fit(X, 2)
dy = distances.SquaredEuclideanDistance()
dy.fit(2)
torch.random.manual_seed(0)
mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 2))
aif360_sensei = SenSeI(
mlp, criterion=nn.CrossEntropyLoss, distance_x=dx, distance_y=dy,
rho=1., eps=0.1, auditor_nsteps=10, auditor_lr=0.01, max_epochs=5,
optimizer=optim.Adam, lr=1e-3, predict_nonlinearity=None, verbose=0)
y_pred = aif360_sensei.fit(X, y).predict_proba(X)
    assert aif360_sensei.regression_ is False
torch.random.manual_seed(0)
mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 2))
orig_sensei = fairalgo.SenSeI(mlp, dx, dy, F.cross_entropy, 1., 0.1, 10, 0.01)
orig_sensei.train()
opt = optim.Adam(orig_sensei.parameters(), lr=1e-3)
for _ in range(5):
opt.zero_grad()
batch = next(iter(torch.utils.data.DataLoader(ds, batch_size=len(ds))))
loss = orig_sensei(*batch).loss
loss.backward()
opt.step()
orig_sensei.eval()
y_pred_orig = orig_sensei(torch.as_tensor(X), torch.as_tensor(y)).y_pred.detach().numpy()
assert np.allclose(y_pred, y_pred_orig)
def test_sensr_regression():
"""Tests whether SenSR output matches original implementation."""
X, y = make_regression(n_features=10)
X, y = X.astype('float32'), y.astype('float32').reshape(-1, 1)
ds = Dataset(X, y)
dx = distances.MahalanobisDistances()
dx.fit(torch.eye(10))
torch.random.manual_seed(0)
mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 1))
aif360_sensei = SenSR(
mlp, criterion=nn.MSELoss, distance_x=dx, eps=0.1, lr_lamb=1.,
lr_param=1., auditor_nsteps=10, auditor_lr=0.01, max_epochs=5,
optimizer=optim.Adam, lr=1e-3, predict_nonlinearity=None, verbose=0)
y_pred = aif360_sensei.fit(X, y).predict(X)
    assert aif360_sensei.regression_ is True
torch.random.manual_seed(0)
mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 1))
orig_sensei = fairalgo.SenSR(mlp, dx, F.mse_loss, 0.1, 1., 1., 10, 0.01)
orig_sensei.train()
opt = optim.Adam(orig_sensei.parameters(), lr=1e-3)
for _ in range(5):
opt.zero_grad()
batch = next(iter(torch.utils.data.DataLoader(ds, batch_size=len(ds))))
loss = orig_sensei(*batch).loss
loss.backward()
opt.step()
orig_sensei.eval()
y_pred_orig = orig_sensei(torch.as_tensor(X), torch.as_tensor(y)).y_pred.detach().numpy()
assert np.allclose(y_pred, y_pred_orig)
@pytest.mark.parametrize(
"y, criterion, raises", [
# input unchanged
([1, 0, 1, 0], nn.CrossEntropyLoss, None),
# input unchanged (wrong dtype)
([1., 0, 1, 0], nn.CrossEntropyLoss, RuntimeError),
# input unchanged (must be 2D)
([[1.], [0], [1], [0]], nn.BCEWithLogitsLoss, None),
# binarized to [[0], [1], [0], [1]]
([-1, 1, -1, 1], nn.BCEWithLogitsLoss, None),
# binarized to [[0], [1], [0], [1]] (wrong shape)
([-1, 1, -1, 1], nn.CrossEntropyLoss, RuntimeError),
# binarized to [[0], [1], [0], [1]]
([0, 2, 0, 2], nn.BCEWithLogitsLoss, None),
# binarized to [[0], [1], [0], [1]] (wrong shape)
([0, 2, 0, 2], nn.CrossEntropyLoss, RuntimeError),
# binarize to one-hot
([0, 1, 0, 2], nn.CrossEntropyLoss, None),
# input unchanged -- detected regression (wrong shape)
([0.1, 1, 0, 2], nn.CrossEntropyLoss, RuntimeError),
# input unchanged -- detected regression
([[0.1], [1], [0], [2]], nn.MSELoss, None),
# binarized to [[0], [1], [0], [1]]
(['a', 'b', 'a', 'b'], nn.BCEWithLogitsLoss, None),
# input unchanged -- binary
([[0., 1], [1, 0], [0, 1], [1, 0]], nn.CrossEntropyLoss, None),
# input unchanged -- multilabel
([[0., 1], [1, 0], [0, 1], [1, 0]], nn.BCEWithLogitsLoss, None),
# multiclass-multioutput (not supported)
([[0, 1], [1, 2], [0, 1], [1, 0]], nn.CrossEntropyLoss, ValueError),
# input unchanged -- detected multioutput regression
([[0.1, 1], [1, 0], [0, 1], [1, 0]], nn.MSELoss, None),
])
def test_target_encoding(y, criterion, raises):
"""Tests the automatic type casting for classification problems."""
X = np.random.random((4, 2)).astype('float32')
y = np.array(y)
if criterion == nn.MSELoss:
y = np.array(y, dtype='float32')
classes = np.unique(y).tolist()
if criterion == nn.BCEWithLogitsLoss or criterion == nn.MSELoss:
ndim = 1 if y.ndim < 2 else y.shape[1]
else:
ndim = len(classes)
dx = distances.SquaredEuclideanDistance()
dx.fit(2)
dy = distances.SquaredEuclideanDistance()
dy.fit(ndim)
mlp = nn.Sequential(nn.Linear(2, 10), nn.ReLU(), nn.Linear(10, ndim))
sensei = SenSeI(mlp, criterion=criterion, distance_x=dx, distance_y=dy,
rho=1., eps=0.1, auditor_nsteps=1, auditor_lr=0.01, max_epochs=1,
optimizer=optim.Adam, lr=1e-3, verbose=0)
with pytest.raises(raises) if raises is not None else nullcontext():
sensei.fit(X, y)
assert sensei.regression_ == (criterion == nn.MSELoss)
if not sensei.regression_:
assert sensei.classes_.tolist() == classes
dy = distances.SquaredEuclideanDistance()
dy.fit(2)
@pytest.mark.parametrize(
"estimator_cls", [
partial(SenSeI, distance_y=dy, rho=1., eps=0.1, auditor_nsteps=10, auditor_lr=0.01),
partial(SenSR, eps=0.1, lr_lamb=1., lr_param=1., auditor_nsteps=10, auditor_lr=0.01),
])
def test_grid_pipe(estimator_cls):
"""Tests if SenSeI/SenSR works in a Pipeline and GridSearchCV."""
X, y = make_classification(n_features=10)
X = X.astype('float32')
dx = distances.SquaredEuclideanDistance()
dx.fit(10)
mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 2))
estimator = estimator_cls(mlp, criterion=nn.CrossEntropyLoss, distance_x=dx,
optimizer=optim.Adam, lr=1e-3, max_epochs=5, verbose=0)
pipe = Pipeline([('scaler', StandardScaler()), ('estimator', estimator)])
params = {'estimator__auditor_nsteps': [0, 10, 25]}
grid = GridSearchCV(pipe, params, scoring='accuracy')
grid.fit(X, y)
assert not pd.DataFrame(grid.cv_results_).isna().any(axis=None)
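# --- Illustrative usage sketch (editor's addition, not part of the test module) ---
# Standalone SenSeI training outside pytest, mirroring the constructor arguments
# exercised in test_sensei_classification above.
def _example_sensei_usage():
    X, y = make_classification(n_features=10)
    X = X.astype('float32')
    dx = distances.SVDSensitiveSubspaceDistance()
    dx.fit(X, 2)
    dy_ = distances.SquaredEuclideanDistance()
    dy_.fit(2)
    mlp = nn.Sequential(nn.Linear(10, 100), nn.ReLU(), nn.Linear(100, 2))
    clf = SenSeI(mlp, criterion=nn.CrossEntropyLoss, distance_x=dx, distance_y=dy_,
                 rho=1., eps=0.1, auditor_nsteps=10, auditor_lr=0.01, max_epochs=5,
                 optimizer=optim.Adam, lr=1e-3, verbose=0)
    return clf.fit(X, y).predict(X)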
--- file: /python/paddle/sparse/nn/layer/pooling.py | repo: PaddlePaddle/Paddle | license: Apache-2.0 (permissive) | lang: Python | 4,806 bytes ---
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.nn import Layer
from .. import functional as F
class MaxPool3D(Layer):
"""
This operation applies 3D max pooling over input features based on the sparse input,
and kernel_size, stride, padding parameters. Input(X) and Output(Out) are
in NDHWC format, where N is batch size, C is the number of channels,
H is the height of the feature, D is the depth of the feature, and W is the width of the feature.
Parameters:
kernel_size(int|list|tuple): The pool kernel size. If the kernel size
is a tuple or list, it must contain three integers,
(kernel_size_Depth, kernel_size_Height, kernel_size_Width).
Otherwise, the pool kernel size will be the cube of an int.
        stride(int|list|tuple, optional): The pool stride size. If the pool stride size is a tuple or list,
            it must contain three integers, (stride_Depth, stride_Height, stride_Width).
            Otherwise, the pool stride size will be a cube of an int.
            Default is None, in which case the stride equals the kernel_size.
padding(str|int|list|tuple, optional): The padding size. Padding could be in one of the following forms.
1. A string in ['valid', 'same'].
            2. An int, which means the feature map is zero-padded by size of `padding` on every side.
3. A list[int] or tuple(int) whose length is 3, [pad_depth, pad_height, pad_weight] whose value means the padding size of each dimension.
            4. A list[int] or tuple(int) whose length is 6: [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right], where each value is the padding size of the corresponding side.
            5. A list or tuple of pairs of integers. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that the batch dimension and channel dimension should be [0,0] or (0,0).
The default value is 0.
ceil_mode(bool, optional): ${ceil_mode_comment}
return_mask(bool, optional): Whether to return the max indices along with the outputs.
data_format(str, optional): The data format of the input and output data. An optional string from: `"NCDHW"`,
`"NDHWC"`. The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`. Currently, only "NDHWC" is supported.
name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually there is no need to set it, and it defaults to None.
Returns:
A callable object of MaxPool3D.
Shape:
- x(Tensor): The input SparseCooTensor of max pool3d operator, which is a 5-D tensor.
The data type can be float32, float64.
- output(Tensor): The output tensor of max pool3d operator, which is a 5-D tensor.
            The data type is the same as that of input x.
Examples:
.. code-block:: python
import paddle
dense_x = paddle.randn((2, 3, 6, 6, 3))
sparse_x = dense_x.to_sparse_coo(4)
max_pool3d = paddle.sparse.nn.MaxPool3D(
kernel_size=3, data_format='NDHWC')
out = max_pool3d(sparse_x)
#shape=[2, 1, 2, 2, 3]
"""
def __init__(
self,
kernel_size,
stride=None,
padding=0,
return_mask=False,
ceil_mode=False,
data_format="NDHWC",
name=None,
):
super().__init__()
self.ksize = kernel_size
self.stride = stride
self.padding = padding
self.return_mask = return_mask
self.ceil_mode = ceil_mode
self.data_format = data_format
self.name = name
def forward(self, x):
return F.max_pool3d(
x,
kernel_size=self.ksize,
stride=self.stride,
padding=self.padding,
ceil_mode=self.ceil_mode,
data_format=self.data_format,
name=self.name,
)
def extra_repr(self):
return 'kernel_size={ksize}, stride={stride}, padding={padding}'.format(
**self.__dict__
)
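# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# The layer above delegates to the functional API, so the docstring example can
# be written equivalently without constructing a Layer:
#
#     import paddle
#     dense_x = paddle.randn((2, 3, 6, 6, 3))
#     sparse_x = dense_x.to_sparse_coo(4)
#     out = paddle.sparse.nn.functional.max_pool3d(
#         sparse_x, kernel_size=3, data_format='NDHWC')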
--- file: /dev/run-tests-jenkins.py | repo: apache/spark | license: Apache-2.0 and others (permissive) | lang: Python | 9,116 bytes ---
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import functools
import subprocess
from urllib.request import urlopen
from urllib.request import Request
from urllib.error import HTTPError, URLError
from sparktestsupport import SPARK_HOME, ERROR_CODES
from sparktestsupport.shellutils import run_cmd
def print_err(msg):
"""
    Print the given message to the STDERR stream.
"""
print(msg, file=sys.stderr)
def post_message_to_github(msg, ghprb_pull_id):
print("Attempting to post to GitHub...")
api_url = os.getenv("GITHUB_API_BASE", "https://api.github.com/repos/apache/spark")
url = api_url + "/issues/" + ghprb_pull_id + "/comments"
github_oauth_key = os.environ["GITHUB_OAUTH_KEY"]
posted_message = json.dumps({"body": msg})
request = Request(
url,
headers={
"Authorization": "token %s" % github_oauth_key,
"Content-Type": "application/json",
},
data=posted_message.encode("utf-8"),
)
try:
response = urlopen(request)
if response.getcode() == 201:
print(" > Post successful.")
except HTTPError as http_e:
print_err("Failed to post message to GitHub.")
print_err(" > http_code: %s" % http_e.code)
print_err(" > api_response: %s" % http_e.read())
print_err(" > data: %s" % posted_message)
except URLError as url_e:
print_err("Failed to post message to GitHub.")
print_err(" > urllib_status: %s" % url_e.reason[1])
print_err(" > data: %s" % posted_message)
def pr_message(
build_display_name, build_url, ghprb_pull_id, short_commit_hash, commit_url, msg, post_msg=""
):
# align the arguments properly for string formatting
str_args = (
build_display_name,
msg,
build_url,
ghprb_pull_id,
short_commit_hash,
commit_url,
str(" " + post_msg + ".") if post_msg else ".",
)
return "**[Test build %s %s](%stestReport)** for PR %s at commit [`%s`](%s)%s" % str_args
def run_pr_checks(pr_tests, ghprb_actual_commit, sha1):
"""
Executes a set of pull request checks to ease development and report issues with various
components such as style, linting, dependencies, compatibilities, etc.
@return a list of messages to post back to GitHub
"""
# Ensure we save off the current HEAD to revert to
current_pr_head = run_cmd(["git", "rev-parse", "HEAD"], return_output=True).strip()
pr_results = list()
for pr_test in pr_tests:
test_name = pr_test + ".sh"
pr_results.append(
run_cmd(
[
"bash",
os.path.join(SPARK_HOME, "dev", "tests", test_name),
ghprb_actual_commit,
sha1,
],
return_output=True,
).rstrip()
)
# Ensure, after each test, that we're back on the current PR
run_cmd(["git", "checkout", "-f", current_pr_head])
return pr_results
def run_tests(tests_timeout):
"""
Runs the `dev/run-tests` script and responds with the correct error message
under the various failure scenarios.
@return a tuple containing the test result code and the result note to post to GitHub
"""
test_result_code = subprocess.Popen(
["timeout", tests_timeout, os.path.join(SPARK_HOME, "dev", "run-tests")]
).wait()
failure_note_by_errcode = {
# error to denote run-tests script failures:
1: "executing the `dev/run-tests` script",
ERROR_CODES["BLOCK_GENERAL"]: "some tests",
ERROR_CODES["BLOCK_RAT"]: "RAT tests",
ERROR_CODES["BLOCK_SCALA_STYLE"]: "Scala style tests",
ERROR_CODES["BLOCK_JAVA_STYLE"]: "Java style tests",
ERROR_CODES["BLOCK_PYTHON_STYLE"]: "Python style tests",
ERROR_CODES["BLOCK_R_STYLE"]: "R style tests",
ERROR_CODES["BLOCK_DOCUMENTATION"]: "to generate documentation",
ERROR_CODES["BLOCK_BUILD"]: "to build",
ERROR_CODES["BLOCK_BUILD_TESTS"]: "build dependency tests",
ERROR_CODES["BLOCK_MIMA"]: "MiMa tests",
ERROR_CODES["BLOCK_SPARK_UNIT_TESTS"]: "Spark unit tests",
ERROR_CODES["BLOCK_PYSPARK_UNIT_TESTS"]: "PySpark unit tests",
ERROR_CODES["BLOCK_PYSPARK_PIP_TESTS"]: "PySpark pip packaging tests",
ERROR_CODES["BLOCK_SPARKR_UNIT_TESTS"]: "SparkR unit tests",
ERROR_CODES["BLOCK_TIMEOUT"]: "from timeout after a configured wait of `%s`"
% (tests_timeout),
}
if test_result_code == 0:
test_result_note = " * This patch passes all tests."
else:
note = failure_note_by_errcode.get(
test_result_code, "due to an unknown error code, %s" % test_result_code
)
test_result_note = " * This patch **fails %s**." % note
return [test_result_code, test_result_note]
def main():
# Important Environment Variables
# ---
# $ghprbActualCommit
# This is the hash of the most recent commit in the PR.
# The merge-base of this and master is the commit from which the PR was branched.
# $sha1
# If the patch merges cleanly, this is a reference to the merge commit hash
# (e.g. "origin/pr/2606/merge").
# If the patch does not merge cleanly, it is equal to $ghprbActualCommit.
# The merge-base of this and master in the case of a clean merge is the most recent commit
# against master.
ghprb_pull_id = os.environ["ghprbPullId"]
ghprb_actual_commit = os.environ["ghprbActualCommit"]
ghprb_pull_title = os.environ["ghprbPullTitle"].lower()
sha1 = os.environ["sha1"]
# Marks this build as a pull request build.
os.environ["SPARK_JENKINS_PRB"] = "true"
# Switch to a Maven-based build if the PR title contains "test-maven":
if "test-maven" in ghprb_pull_title:
os.environ["SPARK_JENKINS_BUILD_TOOL"] = "maven"
if "test-hadoop3" in ghprb_pull_title:
os.environ["SPARK_JENKINS_BUILD_PROFILE"] = "hadoop3"
# Switch the Scala profile based on the PR title:
if "test-scala2.13" in ghprb_pull_title:
os.environ["SPARK_JENKINS_BUILD_SCALA_PROFILE"] = "scala2.13"
build_display_name = os.environ["BUILD_DISPLAY_NAME"]
build_url = os.environ["BUILD_URL"]
project_url = os.getenv("SPARK_PROJECT_URL", "https://github.com/apache/spark")
commit_url = project_url + "/commit/" + ghprb_actual_commit
# GitHub doesn't auto-link short hashes when submitted via the API, unfortunately. :(
short_commit_hash = ghprb_actual_commit[0:7]
# format: http://linux.die.net/man/1/timeout
# must be less than the timeout configured on Jenkins. Usually Jenkins's timeout is higher
    # than this. Please consult with the build manager or a committer if it needs to be increased.
tests_timeout = "500m"
# Array to capture all test names to run on the pull request. These tests are represented
# by their file equivalents in the dev/tests/ directory.
#
# To write a PR test:
# * the file must reside within the dev/tests directory
# * be an executable bash script
# * accept three arguments on the command line, the first being the GitHub PR long commit
# hash, the second the GitHub SHA1 hash, and the final the current PR hash
# * and, lastly, return string output to be included in the pr message output that will
# be posted to GitHub
pr_tests = ["pr_merge_ability", "pr_public_classes"]
    # `github_message` partially applies `pr_message` to generate messages for GitHub posting
github_message = functools.partial(
pr_message, build_display_name, build_url, ghprb_pull_id, short_commit_hash, commit_url
)
# post start message
post_message_to_github(github_message("has started"), ghprb_pull_id)
pr_check_results = run_pr_checks(pr_tests, ghprb_actual_commit, sha1)
test_result_code, test_result_note = run_tests(tests_timeout)
# post end message
result_message = github_message("has finished")
result_message += "\n" + test_result_note + "\n"
result_message += "\n".join(pr_check_results)
post_message_to_github(result_message, ghprb_pull_id)
sys.exit(test_result_code)
if __name__ == "__main__":
main()
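# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# What pr_message() renders for typical arguments (all values hypothetical):
#
#     pr_message("#12345", "https://jenkins.example/job/42/", "2606",
#                "60d8fc4", "https://github.com/apache/spark/commit/abc", "has started")
#
# returns:
#
#     **[Test build #12345 has started](https://jenkins.example/job/42/testReport)** for PR 2606 at commit [`60d8fc4`](https://github.com/apache/spark/commit/abc).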
--- file: /python/paddle/geometric/message_passing/utils.py | repo: PaddlePaddle/Paddle | license: Apache-2.0 (permissive) | lang: Python | 2,971 bytes ---
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from paddle.fluid.data_feeder import check_dtype, convert_dtype
from paddle.fluid.framework import Variable
def convert_out_size_to_list(out_size):
"""
Convert out_size(int, np.int32, np.int64, Variable) to list
in imperative mode.
"""
if out_size is None:
out_size = [0]
elif isinstance(out_size, (int, np.int32, np.int64)):
out_size = [out_size]
else:
out_size = [int(out_size)]
return out_size
def get_out_size_tensor_inputs(inputs, attrs, out_size, op_type):
"""
Convert out_size(int, np.int32, np.int64, Variable) to inputs
and attrs in static graph mode.
"""
if out_size is None:
attrs['out_size'] = [0]
elif isinstance(out_size, (int, np.int32, np.int64)):
attrs['out_size'] = [out_size]
elif isinstance(out_size, Variable):
out_size.stop_gradient = True
check_dtype(
out_size.dtype,
'out_size',
['int32', 'int64'],
'op_type',
'(When type of out_size in' + op_type + ' is Variable.)',
)
if convert_dtype(out_size.dtype) == 'int64':
out_size = paddle.cast(out_size, 'int32')
inputs["Out_size"] = out_size
else:
raise TypeError("Out_size only supports Variable or int.")
def reshape_lhs_rhs(x, y):
"""
Expand dims to ensure there will be no broadcasting issues with different
number of dimensions.
"""
if len(x.shape) == 1:
x = paddle.reshape(x, [-1, 1])
if len(y.shape) == 1:
y = paddle.reshape(y, [-1, 1])
x_shape = paddle.shape(x)
y_shape = paddle.shape(y)
if len(x.shape) != len(y.shape):
max_ndims = max(len(x.shape), len(y.shape))
x_pad_ndims = max_ndims - len(x.shape)
y_pad_ndims = max_ndims - len(y.shape)
new_x_shape = (
[
x_shape[0],
]
+ [
1,
]
* x_pad_ndims
+ list(x_shape[1:])
)
new_y_shape = (
[
y_shape[0],
]
+ [
1,
]
* y_pad_ndims
+ list(y_shape[1:])
)
x = paddle.reshape(x, new_x_shape)
y = paddle.reshape(y, new_y_shape)
return x, y
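# Minimal usage sketch (assumes a working paddle install; the shapes below are
# arbitrary examples, not values used by this module):
if __name__ == "__main__":
    lhs = paddle.ones([5])
    rhs = paddle.ones([5, 3, 4])
    lhs, rhs = reshape_lhs_rhs(lhs, rhs)
    # lhs is reshaped to [5, 1] and then padded to [5, 1, 1], so elementwise
    # ops against rhs ([5, 3, 4]) broadcast without ambiguity.
    print(lhs.shape, rhs.shape)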
|
26cb2fd8c979d2cf42d52b972c95dee035efe7a8
|
a08fe655bd0beed8433d386aec22b15a60916ebf
|
/utils/bits.py
|
95e61c968dc82df3b4e6586a98ce898e4b65b596
|
[] |
no_license
|
bastibl/gr-keyfob
|
bfd5bb8d95b2ec4ddc79682328474e52bfcb7a3a
|
e9b144e50dac0aeb81708dc9ae80fcb044c670d3
|
refs/heads/maint-3.9
| 2022-05-03T11:17:24.555464
| 2022-03-28T10:53:55
| 2022-03-28T10:53:55
| 37,908,760
| 168
| 37
| null | 2022-03-28T10:53:56
| 2015-06-23T09:09:52
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
bits.py
|
#!/usr/bin/env python
import os
import sys
# open() does not expand "~", so expand the home directory explicitly
f = open(os.path.expanduser("~/bits.bin"), 'rb')
try:
    byte = f.read(1)
    # in binary mode, read() returns an empty bytes object at end of file
    while byte != b"":
        sys.stdout.write(str(ord(byte)))
        byte = f.read(1)
finally:
    f.close()
print("")
|
a61d0cdda7e23d6a95e08fa34aac1301aea03ed6
|
226727e281e6ce17450fac3ea78d1a3c4a3999fc
|
/examples/MuscularFlagella/connection_flagella.py
|
e8e22c02ac0b31cec298449ee73682c598c0d94d
|
[
"MIT"
] |
permissive
|
GazzolaLab/PyElastica
|
20df23e97560d05ef50e60f2aeefb420968fb01d
|
49017d456aa10032e0ba1af23d5afd92cecedfa5
|
refs/heads/master
| 2023-08-31T14:28:48.056038
| 2023-08-18T16:54:51
| 2023-08-18T16:54:51
| 254,172,891
| 159
| 94
|
MIT
| 2023-09-09T04:11:01
| 2020-04-08T18:47:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,153
|
py
|
connection_flagella.py
|
__doc__ = """Muscular flagella connection class Numba implementation. """
__all__ = ["MuscularFlagellaConnection"]
import numpy as np
from numba import njit
from elastica.joint import FreeJoint
from elastica._linalg import _batch_matvec
class MuscularFlagellaConnection(FreeJoint):
"""
    This connection class is for Muscular Flagella and it is not generalizable, since our goal is to
    replicate the experimental data. We assume the muscular flagella does not move out of plane.
"""
def __init__(
self,
k,
normal,
):
"""
Parameters
----------
k : float
The spring constant at the connection.
normal : np.ndarray
1D array of floats. Normal direction of the rods.
"""
super().__init__(k, nu=0)
self.normal = normal
def apply_forces(self, rod_one, index_one, rod_two, index_two):
self.torque = self._apply_forces(
self.k,
self.normal,
index_one,
index_two,
rod_one.tangents,
rod_one.position_collection,
rod_two.position_collection,
rod_two.external_forces,
)
@staticmethod
@njit(cache=True)
def _apply_forces(
k,
normal,
index_one,
index_two,
rod_one_tangents,
rod_one_position_collection,
rod_two_position_collection,
rod_two_external_forces,
):
        # This connection routine is not generalizable. Our goal here is to replicate the experimental data.
        # Thus the code below is hard coded. Torques are computed along the centerline of the muscle
        # and transferred to the body.
start_idx = index_one[0]
end_idx = index_one[-1]
armlength = 0.0053
armdirection1 = np.cross(rod_one_tangents[..., start_idx], normal)
armposition1 = armlength * (armdirection1) / np.linalg.norm(armdirection1)
startposition = (
rod_one_position_collection[..., start_idx]
+ rod_one_position_collection[..., start_idx + 1]
) / 2 + armposition1
armdirection2 = np.cross(rod_one_tangents[..., end_idx], normal)
armposition2 = armlength * (armdirection2) / np.linalg.norm(armdirection2)
endposition = (
rod_one_position_collection[..., end_idx]
+ rod_one_position_collection[..., end_idx + 1]
) / 2 + armposition2
forcestart = k * (
rod_two_position_collection[..., index_two[0]] - startposition
)
forceend = k * (rod_two_position_collection[..., index_two[-1]] - endposition)
rod_two_external_forces[..., index_two[0]] -= forcestart
rod_two_external_forces[..., index_two[-1]] -= forceend
Torque1 = np.cross(
armposition1, (forcestart - np.array([0.0, 0.0, forcestart[2]]))
)
Torque2 = np.cross(armposition2, (forceend - np.array([0.0, 0.0, forceend[2]])))
# We are taking the average torques to prevent any numerical issues.
# Torque has to have only one component, thus remove the other components, because motion is in plane.
Torqueaverage2 = 0.5 * (Torque1[2] - Torque2[2])
Torqueaverage = np.array([0.0, 0.0, Torqueaverage2]).reshape(3, 1)
return Torqueaverage
def apply_torques(self, rod_one, index_one, rod_two, index_two):
self._apply_torques(
index_one,
self.torque,
rod_one.director_collection,
rod_one.external_torques,
)
@staticmethod
@njit(cache=True)
def _apply_torques(
index_one, torque, rod_one_director_collection, rod_one_external_torques
):
start_idx = index_one[0]
end_idx = index_one[-1]
rod_one_external_torques[..., start_idx] += 0.5 * _batch_matvec(
rod_one_director_collection[..., start_idx : start_idx + 1], torque
).reshape(3)
rod_one_external_torques[..., end_idx] -= 0.5 * _batch_matvec(
rod_one_director_collection[..., end_idx : end_idx + 1], torque
).reshape(3)
|
3c29634bdc27ce94bc9beb1d26a25e12fb7a2cb2
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/pipeline/external/sudachipy.py
|
7b142c7b149df84e9aaea69c512fc04b8dda8533
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,917
|
py
|
sudachipy.py
|
"""
Processors related to SudachiPy in the pipeline.
GitHub Home: https://github.com/WorksApplications/SudachiPy
"""
import re
from stanza.models.common import doc
from stanza.pipeline._constants import TOKENIZE
from stanza.pipeline.processor import ProcessorVariant, register_processor_variant
def check_sudachipy():
"""
Import necessary components from SudachiPy to perform tokenization.
"""
try:
import sudachipy
import sudachidict_core
except ImportError:
raise ImportError(
"Both sudachipy and sudachidict_core libraries are required. "
"Try install them with `pip install sudachipy sudachidict_core`. "
"Go to https://github.com/WorksApplications/SudachiPy for more information."
)
return True
@register_processor_variant(TOKENIZE, 'sudachipy')
class SudachiPyTokenizer(ProcessorVariant):
def __init__(self, config):
""" Construct a SudachiPy-based tokenizer.
Note that this tokenizer uses regex for sentence segmentation.
"""
if config['lang'] != 'ja':
raise Exception("SudachiPy tokenizer is only allowed in Japanese pipelines.")
check_sudachipy()
from sudachipy import tokenizer
from sudachipy import dictionary
self.tokenizer = dictionary.Dictionary().create()
self.no_ssplit = config.get('no_ssplit', False)
def process(self, document):
""" Tokenize a document with the SudachiPy tokenizer and wrap the results into a Doc object.
"""
if isinstance(document, doc.Document):
text = document.text
else:
text = document
if not isinstance(text, str):
raise Exception("Must supply a string or Stanza Document object to the SudachiPy tokenizer.")
# we use the default sudachipy tokenization mode (i.e., mode C)
# more config needs to be added to support other modes
tokens = self.tokenizer.tokenize(text)
sentences = []
current_sentence = []
for token in tokens:
token_text = token.surface()
# by default sudachipy will output whitespace as a token
# we need to skip these tokens to be consistent with other tokenizers
if token_text.isspace():
continue
start = token.begin()
end = token.end()
token_entry = {
doc.TEXT: token_text,
doc.MISC: f"{doc.START_CHAR}={start}|{doc.END_CHAR}={end}"
}
current_sentence.append(token_entry)
if not self.no_ssplit and token_text in ['。', '!', '?', '!', '?']:
sentences.append(current_sentence)
current_sentence = []
if len(current_sentence) > 0:
sentences.append(current_sentence)
return doc.Document(sentences, text)
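# Minimal usage sketch (assumes sudachipy and sudachidict_core are installed; the
# config keys below are the ones read in __init__, everything else is illustrative):
if __name__ == '__main__':
    demo_tokenizer = SudachiPyTokenizer({'lang': 'ja', 'no_ssplit': False})
    demo_doc = demo_tokenizer.process('これはテストです。今日は晴れです。')
    for demo_sentence in demo_doc.sentences:
        print([token.text for token in demo_sentence.tokens])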
|
466830adbf4b689a1e2fff13ed25277927b974a0
|
21590487701d2dcbe1a1c1dd81c6e983f7523cb6
|
/opentelemetry-sdk/tests/performance/benchmarks/metrics/test_benchmark_metrics_histogram,.py
|
2f9c4405418ebf40bb639144e077ed181f7b31df
|
[
"Apache-2.0"
] |
permissive
|
open-telemetry/opentelemetry-python
|
837199e541c03cff311cad075401791ee2a23583
|
d8490c5f557dd7005badeb800095cb51b553c98c
|
refs/heads/main
| 2023-08-26T06:47:23.837997
| 2023-08-17T22:35:13
| 2023-08-17T22:35:13
| 185,478,926
| 1,361
| 668
|
Apache-2.0
| 2023-09-14T20:48:40
| 2019-05-07T21:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,939
|
py
|
test_benchmark_metrics_histogram,.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import (
ExplicitBucketHistogramAggregation,
View,
)
MAX_BOUND_VALUE = 10000
def _generate_bounds(bound_count):
bounds = []
for i in range(bound_count):
bounds.append(i * MAX_BOUND_VALUE / bound_count)
return bounds
hist_view_10 = View(
instrument_name="test_histogram_10_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)),
)
hist_view_49 = View(
instrument_name="test_histogram_49_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)),
)
hist_view_50 = View(
instrument_name="test_histogram_50_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)),
)
hist_view_1000 = View(
instrument_name="test_histogram_1000_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)),
)
reader = InMemoryMetricReader()
provider = MeterProvider(
metric_readers=[reader],
views=[
hist_view_10,
hist_view_49,
hist_view_50,
hist_view_1000,
],
)
meter = provider.get_meter("sdk_meter_provider")
hist = meter.create_histogram("test_histogram_default")
hist10 = meter.create_histogram("test_histogram_10_bound")
hist49 = meter.create_histogram("test_histogram_49_bound")
hist50 = meter.create_histogram("test_histogram_50_bound")
hist1000 = meter.create_histogram("test_histogram_1000_bound")
@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)
def benchmark_histogram_record():
hist.record(random.random() * MAX_BOUND_VALUE)
benchmark(benchmark_histogram_record)
@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_10(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)
def benchmark_histogram_record_10():
hist10.record(random.random() * MAX_BOUND_VALUE)
benchmark(benchmark_histogram_record_10)
@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_49(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)
def benchmark_histogram_record_49():
hist49.record(random.random() * MAX_BOUND_VALUE)
benchmark(benchmark_histogram_record_49)
@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_50(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)
def benchmark_histogram_record_50():
hist50.record(random.random() * MAX_BOUND_VALUE)
benchmark(benchmark_histogram_record_50)
@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_1000(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)
def benchmark_histogram_record_1000():
hist1000.record(random.random() * MAX_BOUND_VALUE)
benchmark(benchmark_histogram_record_1000)
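# These micro-benchmarks use the `benchmark` fixture, so they are meant to be collected
# under pytest with the pytest-benchmark plugin installed, e.g. (illustrative):
#   pytest <this file> --benchmark-only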
|
e68e92b9ae27c86621e6dd843175a0e23960451b
|
a8ca3225e24c8b093056ce6baa1db6ba3aea8f97
|
/tests/pf/test_mag_inversion_linear.py
|
ff8adfdfd773d053207cfb349a0eb6523329b0fd
|
[
"MIT"
] |
permissive
|
simpeg/simpeg
|
3e8779392d7b26fe576a7a665205068989d8f4d8
|
ebde5856c318f7b4deb92d755b4fefe19012c48e
|
refs/heads/main
| 2023-09-03T18:49:03.545965
| 2023-08-27T15:45:50
| 2023-08-27T15:45:50
| 14,727,320
| 437
| 268
|
MIT
| 2023-09-10T18:16:22
| 2013-11-26T19:46:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,884
|
py
|
test_mag_inversion_linear.py
|
import unittest
import discretize
from discretize.utils import active_from_xyz
from SimPEG import (
utils,
maps,
regularization,
data_misfit,
optimization,
inverse_problem,
directives,
inversion,
)
import numpy as np
# import SimPEG.PF as PF
from SimPEG.potential_fields import magnetics as mag
import shutil
class MagInvLinProblemTest(unittest.TestCase):
def setUp(self):
np.random.seed(0)
# Define the inducing field parameter
H0 = (50000, 90, 0)
# Create a mesh
dx = 5.0
hxind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
hyind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
hzind = [(dx, 5, -1.3), (dx, 6)]
self.mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC")
# Get index of the center
midx = int(self.mesh.shape_cells[0] / 2)
midy = int(self.mesh.shape_cells[1] / 2)
        # Let's create a simple Gaussian topo and set the active cells
[xx, yy] = np.meshgrid(self.mesh.nodes_x, self.mesh.nodes_y)
zz = -np.exp((xx**2 + yy**2) / 75**2) + self.mesh.nodes_z[-1]
# Go from topo to actv cells
topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]
actv = active_from_xyz(self.mesh, topo, "N")
# Create active map to go from reduce space to full
self.actvMap = maps.InjectActiveCells(self.mesh, actv, -100)
nC = int(actv.sum())
# Create and array of observation points
xr = np.linspace(-20.0, 20.0, 20)
yr = np.linspace(-20.0, 20.0, 20)
X, Y = np.meshgrid(xr, yr)
# Move the observation points 5m above the topo
Z = -np.exp((X**2 + Y**2) / 75**2) + self.mesh.nodes_z[-1] + 5.0
# Create a MAGsurvey
rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
rxLoc = mag.Point(rxLoc)
srcField = mag.SourceField([rxLoc], parameters=H0)
survey = mag.Survey(srcField)
# We can now create a susceptibility model and generate data
# Here a simple block in half-space
model = np.zeros(self.mesh.shape_cells)
model[(midx - 2) : (midx + 2), (midy - 2) : (midy + 2), -6:-2] = 0.02
model = utils.mkvc(model)
self.model = model[actv]
# Create active map to go from reduce set to full
self.actvMap = maps.InjectActiveCells(self.mesh, actv, -100)
        # Create reduced identity map
idenMap = maps.IdentityMap(nP=nC)
# Create the forward model operator
sim = mag.Simulation3DIntegral(
self.mesh,
survey=survey,
chiMap=idenMap,
ind_active=actv,
store_sensitivities="disk",
n_processes=None,
)
self.sim = sim
# Compute linear forward operator and compute some data
data = sim.make_synthetic_data(
self.model, relative_error=0.0, noise_floor=1.0, add_noise=True
)
# Create a regularization
reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)
reg.norms = [0, 0, 0, 0]
reg.gradientType = "components"
# Data misfit function
dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)
# Add directives to the inversion
opt = optimization.ProjectedGNCG(
maxIter=100, lower=0.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
betaest = directives.BetaEstimate_ByEig()
# Here is where the norms are applied
IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=1)
update_Jacobi = directives.UpdatePreconditioner()
sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)
self.inv = inversion.BaseInversion(
invProb, directiveList=[IRLS, sensitivity_weights, betaest, update_Jacobi]
)
def test_mag_inverse(self):
# Run the inversion
mrec = self.inv.run(self.model)
residual = np.linalg.norm(mrec - self.model) / np.linalg.norm(self.model)
# plt.figure()
# ax = plt.subplot(1, 2, 1)
# midx = int(self.mesh.shape_cells[0]/2)
# self.mesh.plot_slice(self.actvMap*mrec, ax=ax, normal='Y', ind=midx,
# grid=True, clim=(0, 0.02))
# ax = plt.subplot(1, 2, 2)
# midx = int(self.mesh.shape_cells[0]/2)
# self.mesh.plot_slice(self.actvMap*self.model, ax=ax, normal='Y', ind=midx,
# grid=True, clim=(0, 0.02))
# plt.show()
self.assertTrue(residual < 0.05)
def tearDown(self):
# Clean up the working directory
if self.sim.store_sensitivities == "disk":
shutil.rmtree(self.sim.sensitivity_path)
if __name__ == "__main__":
unittest.main()
|
c6c9df075ac4ba3de72171d607b4917472f79225
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/crowdsourcing/projects/wizard_of_internet/worlds.py
|
7c919b4a4d4fd88b4413e6a01bac30092dc61bd9
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 51,244
|
py
|
worlds.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import random
import time
from datetime import datetime
from typing import Any, Dict, List, Union
from joblib import Parallel, delayed
from parlai.agents.rag.retrieve_api import SearchEngineRetriever
from parlai.crowdsourcing.utils.worlds import CrowdOnboardWorld, CrowdTaskWorld
from parlai.core.agents import Agent
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.worlds import validate
import parlai.utils.logging as logging
from parlai.crowdsourcing.projects.wizard_of_internet import constants
from parlai.crowdsourcing.projects.wizard_of_internet.acceptability import (
WizardOfInternetAcceptabilityChecker,
)
from mephisto.abstractions.blueprint import AgentState
from mephisto.abstractions.databases.local_database import LocalMephistoDB
mephisto_db = LocalMephistoDB()
ROLE_TALLY_CHACHE = {'data': None, 'last_update': None}
def sec_to_min_pretty(time_secs: int) -> str:
"""
Returns formatted string for converting secs to mins.
"""
if time_secs % 60 == 0:
return f'{time_secs // 60}'
m = time_secs / 60
return f'{m:.2g}'
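# For example, sec_to_min_pretty(120) returns '2' and sec_to_min_pretty(90) returns
# '1.5'; the result is only used below to build human-readable timeout messages.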
def get_worker_from_agent(agent: Agent):
"""
Returns Mephisto worker for a given ParlAI agent.
"""
return agent.mephisto_agent.get_worker()
def get_worker_by_name(worker_name: str):
"""
Returns the Mephisto worker from their worker name.
"""
workers = mephisto_db.find_workers(worker_name)
if len(workers) != 1:
        logging.warning(f'Found {len(workers)} workers for worker name {worker_name}')
if not workers:
return
return workers[0]
def _is_wiz(agent: Agent):
"""
Returns true if the `agent` is wizard.
"""
return agent.agent_id == 'Wizard'
def _is_query(act: Message):
"""
Checks if an agent action is a search query (only wizard can do this).
"""
k = 'is_search_query'
return k in act and act[k]
def _has_selected_sentence_from_search_results(action: Union[Dict, Message]):
"""
    Whether any knowledge was selected with this message.
"""
k_task = 'task_data'
k_selected = 'selected_text_candidates'
if (k_task in action) and (k_selected in action[k_task]):
        # The first element of the first candidate encodes the "Did not use search results"
        # option, so a False value there means at least one retrieved sentence was selected.
        return not action[k_task][k_selected][0][0]
return False
def create_search_agent(opt):
"""
    Creates an instance of the SearchEngineRetriever object.
"""
logging.info('Initializing the search engine API.')
search_api_opt = deepcopy(opt)
search_api_opt['skip_retrieval_token'] = None
return SearchEngineRetriever(search_api_opt)
def run_search_query(query: str, search_client: SearchEngineRetriever):
"""
    Conducts a search through the SearchEngineRetriever client and sorts the retrieved
    docs.
    This function runs two searches for each query:
    1- <query> + " news"
    2- <query>
    The combined results are de-duplicated and sorted so that Wikipedia pages come last.
"""
def _search(q: str, n: int):
"""
Sends the search query to the search API.
"""
return search_client.retrieve([q], n)[0]
def _dedupl_docs(docs_list):
uniq_docs = []
seen_urls = set()
for d in docs_list:
url = d['url']
if url in seen_urls:
continue
uniq_docs.append(d)
if len(uniq_docs) == constants.NUM_RETRIEVED_SEARCH_DOCS:
return uniq_docs
seen_urls.add(url)
logging.warning(
f'Only retrieved {len(uniq_docs)}, not {constants.NUM_RETRIEVED_SEARCH_DOCS}'
)
return uniq_docs
def _wiki_sort_key(doc):
"""
Helper function to put the Wikipedia pages last in ranking retrieved doc
results.
"""
url = doc['url']
return 1 if url.startswith('https://en.wikipedia') else -1
if not search_client:
logging.error('No search client; can not run search request.')
return
logging.info(f'Running search for query "{query}"')
# getting query with news
query_had_news = 'news' in query
if not query_had_news:
search_results = _search(f'{query} news', constants.NUM_RETRIEVED_SEARCH_NEWS)
else:
search_results = []
# getting web documents for the main search query
search_results.extend(_search(query, constants.NUM_RETRIEVED_SEARCH_DOCS))
# Remove a doc that was fetched by both news and regular search
    # and reduce the number of docs to NUM_RETRIEVED_SEARCH_DOCS
if not query_had_news:
# We did not have two separate queries if query_had_news was True.
search_results = _dedupl_docs(search_results)
# Sorting retrieved docs based on their URL: Wikipedia pages go last.
search_results.sort(key=_wiki_sort_key)
return Message(
{
'id': constants.SEARCH_AGENT,
'text': '*** SEARCH AGENT RESULTS (CHECK ACCOMPANIED DATA FOR RETRIEVED DOCS) ***',
'task_data': {'search_results': search_results},
}
)
def _coordinator_send_message(
agent, message: str = '', task_data: Dict = None, episode_done: bool = False
):
"""
Sends a message to 'agent' from the coordinator.
We use this to send a message to only one of the agents. It usually contains
specific instructions, alerts, or warnings for certain situations during the task.
"""
if not task_data:
task_data = dict()
agent.observe(
{
'id': constants.COORDINATOR_AGENT,
'text': message,
'episode_done': episode_done,
'task_data': task_data,
}
)
def persona_from_template_values(topic: str, topic_item: str, extra_details: str = ''):
"""
Generates a sentence stating the persona of the apprentice, given their selection.
"""
pers = f'My favorite {topic} is {topic_item}.'
if extra_details:
pers += f'\n{extra_details}'
return pers
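# For example, persona_from_template_values('book', 'The Hobbit', 'I reread it every year.')
# returns "My favorite book is The Hobbit.\nI reread it every year."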
def _form_response_get_field(form_response: Dict[str, Any], field_num: int):
    """
    Extracts the value of a certain field from the Mephisto response.
    """
    frd = form_response['task_data']
    k = 'form_responses'
    if k in frd and len(frd[k]) and (field_num < len(frd[k])):
        return frd[k][field_num]['response']
def _form_response_main_persona(form_response: Dict[str, Any]):
"""
Extracts the main selected persona from persona selection form response.
"""
topic = _form_response_get_field(form_response, 0)
entity = _form_response_get_field(form_response, 1)
return persona_from_template_values(topic, entity)
def _form_response_persona_expantion(form_response: Dict[str, Any]):
"""
Extracts the expanded details of persona from persona selection form response.
"""
return _form_response_get_field(form_response, -1)
def _send_persona_too_short_warning(agent: Agent, persona_expantion: str):
"""
    Sends a warning to the agent if the persona details are too short.
"""
_coordinator_send_message(
agent,
message=f'Your expansion on persona ("{persona_expantion}") was too short. '
'Please rewrite to make a more elaborate and refined persona.',
)
def _send_persona_overuse_warning(agent: Agent, main_persona: str):
"""
    Asks the agent to choose another persona if the selected one looks overused.
    For example, we don't want 200 personas that read "My favorite book is Harry
    Potter".
"""
_coordinator_send_message(
agent,
message=f'The character you chose for the persona ("{main_persona}")'
' has already been used by others. Please choose some other character.',
)
class SharedOnboardWorld(CrowdOnboardWorld):
"""
The parent (base) onboarding class for both agents.
"""
def __init__(self, opt: Opt, mturk_agent: Agent):
super().__init__(opt, mturk_agent)
self.agent.agent_id = 'Participant'
self.role_training_qname = opt[constants.ROLE_QUALIFICATION_NAME_KEY]
self._world_name = self._get_world_name()
self._num_rounds = 0
self.messages = []
def _get_world_name(self):
"""
Assigns a name to this world.
"""
dt = datetime.now()
return f'onboarding_world_{dt.strftime("%H-%M-%S")}'
def wait_for_response(self, message: str = None, delay_time: int = 0):
"""
Starts waiting for a response from the agent, after `delay_time` many seconds.
"""
self._num_rounds += 1
logging.info(
f'{self._world_name} waiting for response at round {self._num_rounds}'
)
if delay_time > 0:
time.sleep(delay_time)
self.agent.observe(
{'id': constants.ONBOARDING_AGENT, 'text': message, 'episode_done': False}
)
self.messages.append(self.agent.act(timeout=self.turn_timeout))
def send_message(
self,
message: str,
onboarding_step: int = None,
done: bool = False,
delay_time: int = 0,
):
"""
Sends the next onboarding instruction to the agent.
"""
task_data = dict()
if onboarding_step:
task_data['on_boarding_step'] = onboarding_step
act = {
'id': constants.ONBOARDING_AGENT,
'text': message,
'episode_done': done,
'task_data': task_data,
}
if delay_time > 0:
time.sleep(delay_time)
self.agent.observe(act)
def introduce_chat_interface(self):
"""
Showing the welcome onboard message to the agent, the first step during the
onboarding.
"""
self.send_message(
message=constants.ONBOARDING_WELCOME,
onboarding_step=constants.ONBOARDING_STEPS["CHAT_INTERFACE"],
done=True,
)
def go_for_start(self):
"""
The onboarding graduation message.
"""
self.send_message(message=constants.FINISHED_ONBOARDING, done=False)
# waiting for agent to read the final message
# then ending the onboarding (purging the onboarding world).
time.sleep(5)
self.send_message(
message="", onboarding_step=constants.ONBOARDING_STEPS["WAITING"], done=True
)
def parley(self):
"""
Provides a step by step scripted interactive onboarding for the agents.
In each step, we introduce the agent to one part of their experience and
expectations in this task (eg, persona, chat interface etc.). Then, after a
short delay (parametrized by TUTORIAL_WAIT_TIMES), we as ask them to send a
response to move to the next step. This method needs to be implemented for
Wizard and Apprentice separately, as they have different onboarding experiences.
"""
error_message = "Implement parley for each role individually."
raise NotImplementedError(error_message)
def get_worker(self):
return get_worker_from_agent(self.agent)
def get_worker_name(self):
return self.get_worker().worker_name
def grant_agent_training_qualification(self, role_id: int):
"""
Granting the onboarding qualification to the agent, based on their assigned
role.
"""
        role = constants.ROLE_NAMES[role_id]
logging.info(f'Granting worker qualification for {role} role.')
worker = self.get_worker()
worker.grant_qualification(self.role_training_qname, role_id)
def reason_to_reject(self):
"""
        Check for bad behavior or poor-quality work from the agent.
"""
if not self.episodeDone:
            return 'left/disconnected before the task was over.'
# messages were too short
messages_len = []
for msg in self.messages:
if self.agent.agent_id != msg['id']:
# Not from this agent
continue
messages_len.append(len(msg['text']))
msg_char_length_avg = sum(messages_len) / len(messages_len)
if msg_char_length_avg < constants.MIN_AVG_CHAR_LENGTH_UTTERANCES:
return (
                'messages were too short for meaningful conversations '
f'(average message length: {msg_char_length_avg:.2f} chars).'
)
        # how many times the agent talked about the persona
n_persona_keyword_mentions = 0
for msg in self.messages:
if self.agent.agent_id != msg['id']:
continue
for keyword in constants.ONBOARDING_PERSONA_KEYWORDS:
if keyword in msg['text'].lower():
n_persona_keyword_mentions += 1
if n_persona_keyword_mentions < 1:
return (
'Did not talk enough about the persona. '
f'Number of keyword overlaps: {n_persona_keyword_mentions}.'
)
# returning None means no reason to reject
return None
def shutdown(self):
logging.info(f'Shutting down {self._world_name}')
super().shutdown()
logging.info('Shutdown completed successfully.')
class WizardOnboardingWorld(SharedOnboardWorld):
"""
The onboarding world for the wizard agent.
"""
def __init__(self, opt: Opt, mturk_agent: Agent):
self.turn_timeout = opt['wizard_time_out']
self._search_client = create_search_agent(opt)
self.num_searches = 0
super().__init__(opt, mturk_agent)
def _get_world_name(self):
return f'wizard-{super()._get_world_name()}'
def introduce_knowledgeable_entity(self):
self.send_message(constants.WIZARD_INTRODUCE_KNOWLEDGE)
def introduce_search(self):
self.send_message(message=constants.WIZARD_INTRODUCE_SEARCH)
def try_search(self):
self.send_message(
message=constants.WIZARD_TRY_SEARCH,
onboarding_step=constants.ONBOARDING_STEPS['TRY_SEARCH'],
)
def introduce_persona(self):
self.send_message(
message=constants.WIZARD_INTRODUCE_APPRENTICE_PERSONA,
onboarding_step=constants.ONBOARDING_STEPS['PERSONA_WIZARD'],
)
def wait_for_response_with_search(self, message: str = None, delay_time: int = 0):
"""
Send a message to Wizard and waits for a search or response action.
"""
if message:
self.send_message(message=message, delay_time=delay_time)
time_out = self.turn_timeout
agent = self.agent
while time_out > 0:
start_time = time.time()
act = agent.act(timeout=time_out)
if _is_query(act):
self.num_searches += 1
search_query = act['text']
search_res = run_search_query(search_query, self._search_client)
n = len(search_res['task_data']['search_results'])
logging.info(
f'Retrieved {n} documents for search query "{search_query}".'
)
agent.observe(search_res)
else:
self.messages.append(act)
return
# subtracting the wait time from what was spent during search
spent_time = time.time() - start_time
time_out -= spent_time
def parley(self):
"""
The interactive onboarding for the Wizard.
"""
wait_times = constants.TUTORIAL_WAIT_TIMES
self.introduce_chat_interface()
self.wait_for_response(
message='Please type a greeting message to continue.',
delay_time=wait_times['chat-interface'],
)
self.introduce_knowledgeable_entity()
self.wait_for_response(
message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,
delay_time=wait_times['chat-interface'],
)
self.introduce_search()
self.wait_for_response(
message=constants.ONBOARDING_ACKNOWLEDGE_UNDERSTOOD,
delay_time=wait_times['knowledge'],
)
self.try_search()
self.wait_for_response_with_search()
self.introduce_persona()
self.wait_for_response_with_search()
self.go_for_start()
self.episodeDone = True
def reason_to_reject(self):
"""
        Check for bad behavior or poor-quality work from the wizard agent.
"""
# Has used search enough
if self.num_searches < constants.MIN_NUM_SEARCH_ONBOARDING:
return f'did not use search enough (number of use {self.num_searches}).'
        # Has selected enough sentences
num_selections = 0
for msg in self.messages:
task_data = msg.get('task_data')
if not (task_data and isinstance(task_data, dict)):
continue
sel_options = task_data.get('selected_text_candidates')
if not sel_options or len(sel_options) == 1: # No choices
continue
if not sel_options[0][0]:
                # sel_options[0][0] is the "Did not use ..." option
num_selections += 1
if num_selections < constants.MIN_NUM_SELECTED_SENTENCES_ONBOARDING:
return (
'did not use or select search results enough times '
f'(number of times used: {num_selections})'
)
return super().reason_to_reject()
def prep_save_data(self, agent: Agent):
"""
Saving session data after the world is closed.
"""
rejection_reason = self.reason_to_reject()
qualified_role = constants.WIZARD if self.episodeDone else constants.NO_ROLE
return {
constants.SAVED_DATA_IS_WIZARD_KEY: True,
constants.SAVED_DATA_WORKER_KEY: self.get_worker_name(),
constants.SAVED_DATA_ROLE_QUALIFICATION_DATA_KEY: (
self.role_training_qname,
qualified_role,
),
constants.WORKER_REJECT_REASON: rejection_reason,
}
class ApprenticeOnboardingWorld(SharedOnboardWorld):
def __init__(self, opt, mturk_agent):
self.turn_timeout = opt['apprentice_time_out']
super().__init__(opt, mturk_agent)
def _get_world_name(self):
return f'apprentice-{super()._get_world_name()}'
def introduce_persona(self):
self.send_message(
message=constants.APPRENTICE_INTRODUCE_PERSONA,
onboarding_step=constants.ONBOARDING_STEPS['PERSONA_APPRENTICE'],
)
def introduce_partner_entity(self):
self.send_message(message=constants.APPRENTICE_INTRODUCE_WIZARD)
def introduce_partner_knowledge(self):
self.send_message(message=constants.APPRENTICE_INTRODUCE_WIZARD_KNOWLEDGE)
def parley(self):
"""
The interactive onboarding for the Apprentice.
"""
wait_times = constants.TUTORIAL_WAIT_TIMES
self.introduce_chat_interface()
self.wait_for_response(
message='Please type a greeting message to continue.',
delay_time=wait_times['chat-interface'],
)
self.introduce_persona()
self.wait_for_response(
message=constants.APPRENTICE_PERSONA_ROLE_INSTRUCTION,
delay_time=wait_times['persona'],
)
self.introduce_partner_entity()
self.wait_for_response(
message=constants.APPRENTICE_CHITCHAT_INSTRUCTION,
delay_time=wait_times['persona'],
)
self.introduce_partner_knowledge()
self.wait_for_response(
message=constants.APPRENTICE_PERSONA_MSG_INSTRUCTION,
delay_time=wait_times['knowledge'],
)
self.go_for_start()
self.episodeDone = True
def prep_save_data(self, agent: Agent):
"""
Saving session data after the world is closed.
"""
rejection_reason = self.reason_to_reject()
qualified_role = constants.APPRENTICE if self.episodeDone else constants.NO_ROLE
return {
constants.SAVED_DATA_IS_WIZARD_KEY: False,
constants.SAVED_DATA_WORKER_KEY: self.get_worker_name(),
constants.SAVED_DATA_ROLE_QUALIFICATION_DATA_KEY: (
self.role_training_qname,
qualified_role,
),
constants.WORKER_REJECT_REASON: rejection_reason,
}
class MTurkMultiAgentDialogWorld(CrowdTaskWorld):
"""
The ParlAI world to run conversation, search, and flow.
Two agents (wizard, apprentice) chat. One agent (wizard) has access to a search bar
that they may use for seraching our knowledge source (common crawl here).
"""
def __init__(self, opt: Opt, agents: List[Agent] = None):
# Init world state
self.agents = agents
self._change_agents_order = False
self.messages = []
self.episodeDone = False
self.turn_idx = 0
self.num_search_queries = 0
self.num_times_search_resutls_selected = 0
self.world_tag = self._get_world_name()
# Get world parameters from opt
self.min_num_turns = opt['min_turns']
self.wizard_time_out = opt['wizard_time_out']
self.apprentice_time_out = opt['apprentice_time_out']
self.search_warning_turn = opt['search_warning_turn']
self.search_warning_threshold = opt['search_warning_threshold']
self.select_warning_turn = opt['select_warning_turn']
self.select_warning_threshold = opt['select_warning_threshold']
self.soft_block_qname = opt['soft_block_qname']
self.send_task_data = opt['send_task_data']
self.role_training_qname = opt[constants.ROLE_QUALIFICATION_NAME_KEY]
# The agent that checks the acceptability of the messages (quality and safety).
self.acceptability_checker = self._get_acceptability_checker()
# Number of pages to request for each wizard search
self.num_passages_to_retrieve = opt['num_passages_retrieved']
self._search_client = create_search_agent(opt)
# Information about personas and their availability.
self.personas_list = opt['personas']
self.prev_persona_count = opt['prev_persona_count']
self.max_times_persona_use = opt['max_times_persona_use']
self.locations_list = opt['locations']
self.persona_replacement = opt['pick_persona_with_replacement']
self.selected_persona = None
# Get worker names
self.worker_names = dict()
for a in self.agents:
self.worker_names[a] = get_worker_from_agent(a).worker_name
def _get_acceptability_checker(self):
"""
Instantiate an instance of WizardOfInternetAcceptabilityChecker to monitor the
world.
"""
acr = WizardOfInternetAcceptabilityChecker()
acr.min_words_violation_threshold = constants.MIN_AVG_WORD_LENGTH_UTTERANCES
return acr
def _get_world_name(self):
dt = datetime.now()
return f'cc_world_{dt.strftime("%H-%M-%S")}'
def get_agent_order_mask(self, agent_index: int):
"""
A mask for simulating rotation/reordering of agents.
        Use this method for accessing agents by a certain order. Do not use
self.agents[i] directly!
"""
assert agent_index in (0, 1), 'Invalid index for accessing agents.'
if self._change_agents_order:
# 0->1 and 1->0
agent_index = 1 - agent_index
return self.agents[agent_index]
def get_wizard_action(self, agent: Agent):
"""
Handles wizard message or search action.
"""
time_out = self.wizard_time_out
while time_out > 0:
start_time = time.time()
act = agent.act(timeout=time_out)
if _is_query(act):
self.num_search_queries += 1
search_res = run_search_query(act['text'], self._search_client)
n = len(search_res['task_data']['search_results'])
logging.info(f'{n} search results were retrieved.')
agent.observe(search_res)
else:
if _has_selected_sentence_from_search_results(act):
self.num_times_search_resutls_selected += 1
break
# subtracting the wait time from what was spent during search
spent_time = time.time() - start_time
time_out -= spent_time
return act
def _send_task_objective_reminders(self, agent: Agent):
"""
        Monitors the stats for target activities. If needed, sends goal reminders to
        the agent.
This is mostly for checking if wizard does enough search and knowledge
selection.
"""
agent_id = agent.agent_id
if agent_id == constants.ROLE_NAMES[constants.WIZARD]:
# Checks if wizard has used search enough so far
if (self.turn_idx >= self.search_warning_turn) and (
self.num_search_queries < self.search_warning_threshold
):
_coordinator_send_message(
agent, message=constants.USE_SEARCH_WARNING_MESSAGE
)
# Checks if wizard has selected search results enough times so far
elif (self.turn_idx >= self.select_warning_turn) and (
self.num_times_search_resutls_selected < self.select_warning_threshold
):
_coordinator_send_message(
agent, message=constants.USE_SEARCH_RESULTS_WARNING_MESSAGE
)
def next_utterance(self, agent: Agent):
"""
Handles receiving the next message from agent.
"""
agent_id = agent.agent_id
if agent_id == constants.ROLE_NAMES[constants.APPRENTICE]:
return agent.act(timeout=self.apprentice_time_out)
else: # It is wizard
return self.get_wizard_action(agent)
def end_onboarding_state(self):
"""
Sends a message to front-end app to announce transition from onboarding.
"""
onboard_state = constants.ONBOARDING_STEPS['NOT_ONBOARDING']
for agent in self.agents:
agent.observe(onboarding_mode_toggle_message(onboard_state))
def broadcast_apprentice_persona(self, persona: str):
"""
Sends the selected apprentice persona to the front-end app for display.
"""
for agent in self.agents:
persona_msg = {
'id': constants.PERSONA_AGENT,
'text': '',
'episode_done': False,
'task_data': {'apprentice_persona': persona},
}
agent.observe(persona_msg)
def shuffle_agents(self):
"""
Changes the starting order: who goes first.
"""
reorder = random.random() > 0.5
if reorder:
logging.info(f'Switching agents orders in {self.world_tag}')
self._change_agents_order = True
def sample_personas(self):
"""
Generates a list of sampled personas, apprentice will choose from this list.
"""
persona = self.personas_list
n = constants.CURATED_PERSONA_CHOICES
logging.info(
f'Randomly choosing {n} personas from {len(persona)} available ones.'
)
if self.persona_replacement:
return random.sample(persona, k=n)
else:
return [persona.pop() for _ in range(n)]
def random_location(self):
"""
Chooses a random location (only for personas that need one)
"""
return random.choice(self.locations_list)
def assign_roles(self):
"""
Determines the order and the role of the agents in the world.
Determines which agent goes first by random assignment. The agent roles are
based on their onboarding qualification.
"""
        # Rolling the dice for the starting order
self.shuffle_agents()
# The role and order assignment to the agents.
starting_role = None
for agent_index in range(len(self.agents)):
agent = self.get_agent_order_mask(agent_index)
worker = get_worker_from_agent(agent)
qual = worker.get_granted_qualification(self.role_training_qname)
assert qual
role_qual = qual.value
if role_qual == constants.WIZARD:
agent.agent_id = 'Wizard'
elif role_qual == constants.APPRENTICE:
agent.agent_id = 'Apprentice'
else:
                raise ValueError(f'Unrecognized role qualification {role_qual}.')
if not starting_role: # sets it the first time that loop runs
starting_role = role_qual
logging.info('Agent roles assigned.')
logging.info(f'Agent with {self.get_agent_order_mask(0).agent_id} role starts.')
return starting_role
def _get_apprentice(self):
if _is_wiz(self.agents[0]):
return self.agents[1]
else:
return self.agents[0]
def receive_form_response(self, agent: Agent, check_persona_overuse: bool = False):
"""
Extracts the selected persona from the response form and validates it.
"""
def generate_persona_key(persona_desc):
ret = persona_desc.strip().lower()
for sym in ('.', ',', ';', '!', '?'):
ret = ret.replace(sym, ' ')
return ' '.join([s for s in ret.split(' ') if s])
# Repeat asking for persona until having a valid one.
acceptable_response = False
while not acceptable_response:
agent_resp = agent.act(timeout=self.wizard_time_out)
pers_exp = _form_response_persona_expantion(agent_resp)
# Too short
if not pers_exp or len(pers_exp) < constants.PERSONA_EXPANSION_MIN_LEN_CHAR:
_send_persona_too_short_warning(agent, pers_exp)
continue
# Persona was selected before
if check_persona_overuse:
persona_key = generate_persona_key(
_form_response_main_persona(agent_resp)
)
if self.prev_persona_count[persona_key] >= self.max_times_persona_use:
_send_persona_overuse_warning(agent, persona_key)
continue
self.prev_persona_count[persona_key] += 1
acceptable_response = True
return agent_resp
def _update_curated_personas_use(self, persona: str):
"""
Updates the persona use count.
Increases the count for the number of times that the selected `persona` was
used, and removes it from available list of personas if it was selected too many
times.
"""
lower_persona = persona.lower()
self.prev_persona_count[lower_persona] += 1
if self.prev_persona_count[lower_persona] < self.max_times_persona_use:
return
logging.info(f'Trying to remove "{persona}" from list of personas.')
        if len(self.personas_list) - 1 < constants.CURATED_PERSONA_CHOICES:
logging.warning(
'Not enough personas may remain after removing, canceling removal.'
)
return
self.personas_list.remove(persona)
logging.info(
f'New number of available personas is "{len(self.personas_list)}".'
)
def _choose_curated_persona(self):
"""
Asks apprentice to choose a persona from the curated list of personas.
"""
persona_opts = self.sample_personas()
apprentice_agent = self._get_apprentice()
# Removing PERSONA_NEEDS_LOCATION_TOKEN from what agents will see
persona_opts_views = [
p.replace(constants.PERSONA_NEEDS_LOCATION_TOKEN, '') for p in persona_opts
]
persona_selection_form = [
{
'type': 'choices',
'question': 'Choose one of these personas to start:',
'choices': persona_opts_views,
},
{'type': 'text', 'question': 'Add something imaginative to refine it:'},
]
_coordinator_send_message(
apprentice_agent,
message=constants.APPRENTICE_CHOOSE_CURATED_PERSONA_REQUEST,
task_data={'respond_with_form': persona_selection_form},
)
agent_response = self.receive_form_response(apprentice_agent)
rs = [r['response'] for r in agent_response['task_data']['form_responses']]
assert len(rs) == 2, 'Persona response form length is not 2.'
selected_persona, added_persona = rs
apprentice_persona = f'{selected_persona}\n{added_persona}'
worker_name = self.worker_names[apprentice_agent]
logging.info(f'Agent ({worker_name}) selected a persona: {apprentice_persona}')
selected_persona_ind = persona_opts_views.index(selected_persona)
# Checking if persona needs location
if constants.PERSONA_NEEDS_LOCATION_TOKEN in persona_opts[selected_persona_ind]:
apprentice_location = self.random_location()
logging.info(f'Persona needs a location. {apprentice_location} selected.')
apprentice_persona = (
f'I live in {apprentice_location}.\n{apprentice_persona}'
)
# Checking if the persona was used too often and needs to be removed.
self._update_curated_personas_use(persona_opts[selected_persona_ind])
return apprentice_persona
def _choose_templated_topics_persona(self):
"""
Asks apprentice to choose a persona using the provided template.
"""
topic_bundles = random.sample(
constants.TEMPLATE_PERSONAS_TOPICS, k=constants.TEMPLATE_PERSONAS_CHOICES
        )  # Each topic bundle is a string of comma-separated related topics, e.g. "book,author"
topics = []
for tb in topic_bundles:
topics.extend(tb.split(','))
apprentice_agent = self._get_apprentice()
persona_selection_form = [
{
'type': 'choices',
'question': 'My character\'s favorite ',
'choices': topics,
},
{'type': 'text', 'question': 'is '},
{'type': 'text', 'question': 'Add something imaginative to refine it:'},
]
_coordinator_send_message(
apprentice_agent,
message=constants.APPRENTICE_CHOOSE_PERSONA_TEMPLATE_REQUEST,
task_data={'respond_with_form': persona_selection_form},
)
agent_response = self.receive_form_response(
apprentice_agent, check_persona_overuse=True
)
rs = [r['response'] for r in agent_response['task_data']['form_responses']]
assert len(rs) == 3, 'Template persona response form length is not 3.'
topic, topic_item, extra_details = rs
apprentice_persona = persona_from_template_values(
topic, topic_item, extra_details
)
worker_name = self.worker_names[apprentice_agent]
logging.info(f'Agent ({worker_name}) selected a persona: {apprentice_persona}')
return apprentice_persona
def _reset_to_text_response(self, agent):
"""
Returns Mephisto response from form to text.
"""
_coordinator_send_message(agent=agent, task_data={'respond_with_form': False})
def apprentice_choose_persona(self):
"""
Randomly selects a persona selection type (template, curated) and asks agent.
"""
logging.info('Randomly choosing persona selection type.')
choose_from_templates = (
random.random() < constants.PROBABILITY_CHOOSING_TEMPLATE_PERSONA
)
if choose_from_templates:
            logging.info('Choosing persona from template.')
resp = self._choose_templated_topics_persona()
else:
            logging.info('Choosing persona from curated cases.')
resp = self._choose_curated_persona()
self._reset_to_text_response(self._get_apprentice())
return resp
def send_time_length_info(self):
"""
Sends a message to agents informing them about the length of the task (turns,
and timeout).
"""
min_rounds = self.min_num_turns
wiz_time = sec_to_min_pretty(self.wizard_time_out)
app_time = sec_to_min_pretty(self.apprentice_time_out)
for agent in self.agents:
message = f'This conversation continues for at least {min_rounds} rounds.\n'
t = wiz_time if _is_wiz(agent) else app_time
message += (
f'In your turn, please send your message within {t} minutes. '
'Otherwise you may be disqualified. '
)
if not _is_wiz(agent):
message += (
f'Note that you might have to wait up to {wiz_time} '
                    'minutes to receive a response from the other person.'
)
agent.observe(
{
'id': constants.COORDINATOR_AGENT,
'text': message,
'episode_done': False,
}
)
def send_starter_instruction(self, role: int):
"""
Sends a reminder about the role and goals in the beginning of chat.
"""
message_text = None
if role == constants.WIZARD:
message_text = constants.WIZARD_STARTING_INSTRUCTION
else:
assert role == constants.APPRENTICE
message_text = constants.APPRENTICE_STARTING_INSTRUCTION
start_instruction_message = {
'id': constants.COORDINATOR_AGENT,
'text': message_text,
'episode_done': False,
}
self.get_agent_order_mask(0).observe(start_instruction_message)
def send_wizard_persona_emphasize_message(self):
"""
Sends a message to wizard emphasizing on main goal here (apprentice persona).
"""
for agent in self.agents:
if not _is_wiz(agent):
continue
agent.observe(
{
'id': constants.COORDINATOR_AGENT,
'text': constants.WIZARD_PERSONA_EMPHASIZE,
'episode_done': False,
}
)
def setup_roles_and_persona(self):
"""
Prepares the chat environment and states before starting agent interactions.
"""
logging.info('Setting up roles, orders, persona.')
self.end_onboarding_state()
self.broadcast_apprentice_persona('') # clear onboarding persona
starting_role = self.assign_roles()
self.send_wizard_persona_emphasize_message()
self.selected_persona = self.apprentice_choose_persona()
self.broadcast_apprentice_persona(self.selected_persona)
self.send_time_length_info()
self.send_starter_instruction(starting_role)
def parley(self):
"""
parley process for the agents: running the chat world.
"""
if self.turn_idx == 0:
self.setup_roles_and_persona()
self.turn_idx += 1
logging.info(
f'{self.world_tag} is at turn {self.turn_idx}...\n'
f'Wizard has searched {self.num_search_queries} times and '
f'selected results {self.num_times_search_resutls_selected} times.'
)
for idx in range(len(self.agents)):
agent = self.get_agent_order_mask(idx)
act = self.next_utterance(agent)
self.messages.append(deepcopy(act))
if self.send_task_data:
act.force_set(
'task_data',
{
'last_acting_agent': agent.agent_id,
'current_dialogue_turn': self.turn_idx,
'utterance_count': self.turn_idx + idx,
},
)
if 'requested_finish' in act and act['requested_finish']:
# One of the agents has requested for end of the chat.
self.episodeDone = True
break
for other_agent in self.agents:
if other_agent != agent:
other_agent.observe(validate(act))
# Reminds wizard about searching and selecting knowledge if needed.
self._send_task_objective_reminders(agent)
def _reason_to_disqualify(self, agent: Agent):
"""
Determining if agents had low quality work or had unsafe behaviour.
"""
        # Disconnect or timeout
mephisto_agent = agent.mephisto_agent
if mephisto_agent.get_status() in (
AgentState.STATUS_EXPIRED,
AgentState.STATUS_TIMEOUT,
):
return 'agent was disconnected.'
# Wizard not using search enough
if agent.agent_id == 'Wizard' and (
(self.num_search_queries < self.search_warning_threshold)
or (self.num_times_search_resutls_selected < self.select_warning_threshold)
):
return (
'blocked for not enough search activity '
f'({self.num_search_queries} searches; '
                f'{self.num_times_search_resutls_selected} selected sentences).'
)
acceptability_checker_results = self.acceptability_checker.check_messages(
agent.agent_id,
self.selected_persona,
messages=self.messages,
is_worker_0=False,
violation_types=constants.ACCEPTABILITY_VIOLATIONS,
)
if acceptability_checker_results:
return f'ParlAI acceptability checker found violations: "{acceptability_checker_results}"'
def _soft_block_agent(self, agent):
"""
        Soft-blocking the agent: they cannot participate in this task anymore.
"""
worker = get_worker_from_agent(agent)
logging.warning(f'Soft blocking {worker.worker_name}')
worker.grant_qualification(self.soft_block_qname)
def prep_save_data(self, agent_as_list):
"""
Saving the chat data, after checking its quality and safety.
"""
agent = agent_as_list[0]
agent_id = agent.agent_id
logging.info(f'Preparing saved data for {agent_id}')
ret = {'agent_id': agent_id, 'message_history_copy': self.messages}
disqualify_reason = self._reason_to_disqualify(agent)
if disqualify_reason:
            logging.info(f'Disqualified submission detected: "{disqualify_reason}"')
ret['disqualify_reason'] = disqualify_reason
self._soft_block_agent(agent)
return ret
def episode_done(self):
return self.episodeDone
def shutdown(self):
"""
Shutdown all mturk agents in parallel, otherwise if one mturk agent is
disconnected then it could prevent other mturk agents from completing.
"""
global shutdown_agent
def shutdown_agent(agent):
try:
agent.shutdown(timeout=None)
except Exception:
agent.shutdown() # not MTurkAgent
Parallel(n_jobs=len(self.agents), backend='threading')(
delayed(shutdown_agent)(agent) for agent in self.agents
)
def onboarding_mode_toggle_message(onboarding_step):
"""
    Formats a message to be sent to the front-end to determine the state of onboarding.
"""
return {
'id': constants.ONBOARDING_AGENT,
'text': '',
'episode_done': False,
'task_data': {'on_boarding_step': onboarding_step},
}
def _get_cached_roll_tally():
"""
Returns the role tally counts from cache, if the cache is not expired.
"""
utime = ROLE_TALLY_CHACHE['last_update']
if not utime:
logging.info('Initiated rolls tally cache.')
return None
dt = time.time() - utime
logging.info(f'The last rolls tally cached {dt:.2f} seconds ago.')
if dt > constants.TALLY_CACHE_TIMEOUT:
logging.info(
'Rolls tally cache is outdated '
f'(is greater than {constants.TALLY_CACHE_TIMEOUT} s).'
)
return None
logging.info(
'Rolls tally is fresh enough to use '
f'(is less than {constants.TALLY_CACHE_TIMEOUT} s).'
)
return ROLE_TALLY_CHACHE['data']
def _cache_roll_tally(rolls_tally: Dict[int, int]):
"""
Updates the content of the roles tally cache.
"""
logging.info('Setting rolls tally cache.')
ROLE_TALLY_CHACHE['last_update'] = time.time()
ROLE_TALLY_CHACHE['data'] = rolls_tally
def find_needed_role(agent, rqname: str):
"""
Determines the role that the agent starting the onboarding needs to go through.
Checks the number of agents who passed the onboarding and are waiting to be matched,
    and the agents who are currently in the onboarding. Based on the number of roles in the
    pool, it decides which role a newly arriving agent needs to be trained on.
    It caches the recent tally values to avoid heavy DB queries.
    The cached value is handled by the ROLE_TALLY_CHACHE global variable.
    To control the cache freshness and timeout, set TALLY_CACHE_TIMEOUT (in seconds).
"""
role_tally = _get_cached_roll_tally()
if not role_tally:
role_tally = {constants.WIZARD: 0, constants.APPRENTICE: 0}
db = agent.mephisto_agent.db
task_run_id = agent.mephisto_agent.task_run_id
agents_need_paring = db.find_onboarding_agents(
status=AgentState.STATUS_ONBOARDING, task_run_id=task_run_id
)
agents_need_paring.extend(
db.find_agents(status=AgentState.STATUS_WAITING, task_run_id=task_run_id)
)
no_qual = 0
unk_qual = 0
this_agent_id = agent.mephisto_agent.get_agent_id()
for ag in agents_need_paring:
if ag.get_agent_id() == this_agent_id:
continue
worker = ag.get_worker()
worker_qualification = worker.get_granted_qualification(rqname)
if not worker_qualification:
no_qual += 1
continue
qstatus = worker_qualification.value
if qstatus in (constants.WIZARD, constants.WIZARD_IN_TRAINING):
role_tally[constants.WIZARD] += 1
elif qstatus in (constants.APPRENTICE, constants.APPRENTICE_IN_TRAINING):
role_tally[constants.APPRENTICE] += 1
else:
unk_qual += 1
if no_qual or unk_qual:
logging.warning(
f'\tNo qualifications: {no_qual}\tUnknown qualifications: {unk_qual}'
)
_cache_roll_tally(role_tally)
logging.info(
f'Wizard: {role_tally[constants.WIZARD]}\tApprentices: {role_tally[constants.APPRENTICE]}'
)
if role_tally[constants.WIZARD] > role_tally[constants.APPRENTICE]:
logging.info('Onboarding a new Apprentice.')
role_tally[constants.APPRENTICE] += 1
return constants.APPRENTICE
else:
logging.info('Onboarding a new Wizard.')
role_tally[constants.WIZARD] += 1
return constants.WIZARD
def make_onboarding_world(opt, agent: Agent):
"""
    Assigns agents to appropriate onboarding worlds to balance the roles.
"""
role_qual_name = opt[constants.ROLE_QUALIFICATION_NAME_KEY]
def assign_role_based_on_ques(agent):
worker = get_worker_from_agent(agent)
needed_worker = find_needed_role(agent, role_qual_name)
if needed_worker == constants.WIZARD:
worker.grant_qualification(role_qual_name, constants.WIZARD_IN_TRAINING)
return WizardOnboardingWorld(opt, agent)
else:
worker.grant_qualification(role_qual_name, constants.APPRENTICE_IN_TRAINING)
return ApprenticeOnboardingWorld(opt, agent)
# sends a message to UI to set the onboarding step.
agent.observe(
onboarding_mode_toggle_message(constants.ONBOARDING_STEPS['CHAT_INTERFACE'])
)
worker_qualification = get_worker_from_agent(agent).get_granted_qualification(
role_qual_name
)
if not worker_qualification: # Has not started onboarding before
return assign_role_based_on_ques(agent)
else: # Had been in onboarding but didn't finish
qstatus = worker_qualification.value
if qstatus == constants.WIZARD_IN_TRAINING:
return WizardOnboardingWorld(opt, agent)
elif qstatus == constants.APPRENTICE_IN_TRAINING:
return ApprenticeOnboardingWorld(opt, agent)
else:
logging.warning(
                f'Unknown qualification status "{qstatus}" while creating onboarding worlds. '
                'Assigning the role based on the waiting and onboarding agents queue size.'
)
return assign_role_based_on_ques(agent)
def assign_role_training_qualification(
    worker, role_qualification_name: str, role_qualification_value: int
):
    """
    Syncs the training qualification of the agent (worker) with the DB.
    """
    if not role_qualification_value or role_qualification_value == constants.NO_ROLE:
        logging.warning('Agent did not qualify for a role.')
        return False
    role_name = constants.ROLE_NAMES[role_qualification_value]
    logging.info(f'Agent qualified for the {role_name} role. Granting worker qualification.')
    worker.grant_qualification(role_qualification_name, role_qualification_value)
    return True
def validate_onboarding(data: Dict):
"""
Check the contents of the data to ensure they are valid and safe.
"""
try:
saved_data = data['outputs']['messages'][-1]['data']['WORLD_DATA']
role = (
'Wizard' if saved_data[constants.SAVED_DATA_IS_WIZARD_KEY] else 'Apprentice'
)
logging.info(f'Validating {role} onboarding.')
except (IndexError, KeyError) as e:
logging.warning(
            'Incomplete data to validate agent onboarding. '
            f'Onboarding saved_data error: {e}'
)
return False
rejection_reason = saved_data[constants.WORKER_REJECT_REASON]
if rejection_reason:
logging.warning(f'Rejected: {rejection_reason}')
return False
# Role qualification
worker = get_worker_by_name(saved_data[constants.SAVED_DATA_WORKER_KEY])
qual_name, qual_val = saved_data[constants.SAVED_DATA_ROLE_QUALIFICATION_DATA_KEY]
if not assign_role_training_qualification(worker, qual_name, qual_val):
return False
logging.info('Onboarding work accepted.')
return True
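# Shape sketch of the `data` payload validate_onboarding expects (values are
# illustrative; the constants.* keys come from this task's constants module):
#
#     data = {'outputs': {'messages': [..., {'data': {'WORLD_DATA': {
#         constants.SAVED_DATA_IS_WIZARD_KEY: True,
#         constants.WORKER_REJECT_REASON: '',
#         constants.SAVED_DATA_WORKER_KEY: 'worker_name',
#         constants.SAVED_DATA_ROLE_QUALIFICATION_DATA_KEY: ('qual_name', 1),
#     }}}]}}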
def make_world(opt, agents: Agent):
return MTurkMultiAgentDialogWorld(opt, agents)
def get_world_params():
return {'agent_count': 2}
|
a46d04c1cb58fb70cb6da0841961323f3a2b1b4e
|
b4e36b2c71b41f8971b57bda977c29503aa4846d
|
/easCheck.py
|
623376d92b876e1a91cc678b0df0b03e920d8b63
|
[] |
no_license
|
3gstudent/Homework-of-Python
|
b79157eceb63f171e2a838479611bb9e5e85018a
|
d436661fbcb3d57021134f61e6c59f4d5a29b948
|
refs/heads/master
| 2023-04-09T15:55:16.360349
| 2023-03-31T03:02:43
| 2023-03-31T03:02:43
| 150,383,185
| 300
| 105
| null | 2022-11-28T02:49:10
| 2018-09-26T07:05:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
easCheck.py
|
import requests
import base64
import sys
import warnings
warnings.filterwarnings("ignore")
def test_options_https(ip, username, password):
    try:
        # base64 works on bytes in Python 3; decode back to str for the header
        credential = base64.b64encode((username + ':' + password).encode()).decode()
        url = 'https://' + ip + '/Microsoft-Server-ActiveSync'
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Content-Type': 'application/vnd.ms-sync.wbxml',
            'Authorization': 'Basic ' + credential
        }
        r = requests.options(url, headers=headers, verify=False)
        if r.status_code == 200:
            print('[+] Valid: %s %s' % (username, password))
            #print(r.headers)
        else:
            print('[!] Authentication failed')
    except Exception as e:
        print('[!] Error: %s' % e)
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print('[!] Wrong parameter')
print('easCheck')
print('Use to check the valid credential of eas(Exchange Server ActiveSync)')
print('Usage:')
print('%s <host> <user> <password>'%(sys.argv[0]))
print('Eg.')
print('%s 192.168.1.1 user1 password1'%(sys.argv[0]))
sys.exit(0)
else:
test_options_https(sys.argv[1], sys.argv[2], sys.argv[3])
|
897645c2c1080cf91730fb7ab89d7bc8f7422b2e
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/syslog/configure.py
|
11350d5b27459983a48c4d867a633efc3d6c8430
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
configure.py
|
# Unicon
from unicon.core.errors import SubCommandFailure
def configure_syslog_server(device, server):
""" Configure Syslog servers
Args:
device ('obj') : Device to be configured server
server ('str'): Syslog server to be configured
Returns:
None
Raises:
SubCommandFailure
"""
try:
device.configure("logging host {ip_address}".format(ip_address=server))
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to configure syslog server "
            "with ip address {ip} on device {dev}".format(
                ip=server, dev=device.name
            )
        ) from e
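# Minimal usage sketch (assumes a loaded pyATS testbed; the testbed file and
# device name below are hypothetical):
#
#     from pyats.topology import loader
#     testbed = loader.load('testbed.yaml')
#     device = testbed.devices['router1']
#     device.connect()
#     configure_syslog_server(device, '192.0.2.10')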
|
2aea090e316f9e75eb9a17bf32eee321a2993d1c
|
5624a2063891918855c7832d4f4bab2c3df27a28
|
/setup.py
|
820012f4181900685935125915176250a76d2ce6
|
[
"MIT"
] |
permissive
|
ealcobaca/pymfe
|
52908a9e54d83b431e8aed47b4ea7943e4875b31
|
50131572309dd92cfdf1eceb313be7408f3941b6
|
refs/heads/master
| 2023-05-12T12:47:08.060399
| 2023-01-03T20:04:24
| 2023-01-03T20:04:24
| 158,245,631
| 117
| 33
|
MIT
| 2023-05-02T20:00:34
| 2018-11-19T15:20:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
setup.py
|
"""Setup for pymfe package."""
import setuptools
import os
import pymfe
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
NAME = "pymfe"
VERSION = pymfe.__version__
DESCRIPTION = "Meta-feature Extractor"
LICENSE = "MIT"
URL = "https://github.com/ealcobaca/pymfe"
MAINTAINER = "Edesio Alcobaça, Felipe Alves Siqueira"
MAINTAINER_EMAIL = "edesio@usp.br, felipe.siqueira@usp.br"
DOWNLOAD_URL = "https://github.com/ealcobaca/pymfe/releases"
CLASSIFIERS = [
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
INSTALL_REQUIRES = [
"numpy",
"scipy",
"scikit-learn",
"patsy",
"pandas",
"statsmodels",
"texttable",
"tqdm",
"igraph>=0.10.1",
"gower",
]
EXTRAS_REQUIRE = {
"code-check": ["pytest", "mypy", "liac-arff", "flake8", "pylint"],
"tests": ["pytest", "pytest-cov", "pytest-xdist", "liac-arff"],
"docs": ["sphinx", "sphinx-gallery", "sphinx_rtd_theme", "numpydoc", "liac-arff"],
}
setuptools.setup(
name=NAME,
version=VERSION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
license=LICENSE,
url=URL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
download_url=DOWNLOAD_URL,
packages=setuptools.find_packages(),
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
)
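# Extras usage sketch (extra names taken from EXTRAS_REQUIRE above):
#
#     pip install pymfe           # runtime dependencies only
#     pip install -e ".[tests]"   # adds pytest, pytest-cov, ...
#     pip install -e ".[docs]"    # adds the Sphinx documentation toolchain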
|
81ab294e628549640d089b004a7f3457409f2c44
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/memorydb/outputs.py
|
7babc7e87682dbeb8de1d4a7dce40fe33907e067
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 29,750
|
py
|
outputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ClusterClusterEndpoint',
'ClusterShard',
'ClusterShardNode',
'ClusterShardNodeEndpoint',
'ParameterGroupParameter',
'SnapshotClusterConfiguration',
'UserAuthenticationMode',
'GetClusterClusterEndpointResult',
'GetClusterShardResult',
'GetClusterShardNodeResult',
'GetClusterShardNodeEndpointResult',
'GetParameterGroupParameterResult',
'GetSnapshotClusterConfigurationResult',
'GetUserAuthenticationModeResult',
]
@pulumi.output_type
class ClusterClusterEndpoint(dict):
def __init__(__self__, *,
address: Optional[str] = None,
port: Optional[int] = None):
"""
:param str address: DNS hostname of the node.
:param int port: The port number on which each of the nodes accepts connections. Defaults to `6379`.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> Optional[str]:
"""
DNS hostname of the node.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
The port number on which each of the nodes accepts connections. Defaults to `6379`.
"""
return pulumi.get(self, "port")
@pulumi.output_type
class ClusterShard(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "numNodes":
suggest = "num_nodes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterShard. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterShard.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterShard.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: Optional[str] = None,
nodes: Optional[Sequence['outputs.ClusterShardNode']] = None,
num_nodes: Optional[int] = None,
slots: Optional[str] = None):
"""
:param str name: Name of the cluster. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
:param Sequence['ClusterShardNodeArgs'] nodes: Set of nodes in this shard.
:param int num_nodes: Number of individual nodes in this shard.
:param str slots: Keyspace for this shard. Example: `0-16383`.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if nodes is not None:
pulumi.set(__self__, "nodes", nodes)
if num_nodes is not None:
pulumi.set(__self__, "num_nodes", num_nodes)
if slots is not None:
pulumi.set(__self__, "slots", slots)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the cluster. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def nodes(self) -> Optional[Sequence['outputs.ClusterShardNode']]:
"""
Set of nodes in this shard.
"""
return pulumi.get(self, "nodes")
@property
@pulumi.getter(name="numNodes")
def num_nodes(self) -> Optional[int]:
"""
Number of individual nodes in this shard.
"""
return pulumi.get(self, "num_nodes")
@property
@pulumi.getter
def slots(self) -> Optional[str]:
"""
Keyspace for this shard. Example: `0-16383`.
"""
return pulumi.get(self, "slots")
@pulumi.output_type
class ClusterShardNode(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "availabilityZone":
suggest = "availability_zone"
elif key == "createTime":
suggest = "create_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterShardNode. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterShardNode.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterShardNode.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
availability_zone: Optional[str] = None,
create_time: Optional[str] = None,
endpoints: Optional[Sequence['outputs.ClusterShardNodeEndpoint']] = None,
name: Optional[str] = None):
"""
:param str availability_zone: The Availability Zone in which the node resides.
:param str create_time: The date and time when the node was created. Example: `2022-01-01T21:00:00Z`.
:param str name: Name of the cluster. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
"""
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[str]:
"""
The Availability Zone in which the node resides.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[str]:
"""
The date and time when the node was created. Example: `2022-01-01T21:00:00Z`.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def endpoints(self) -> Optional[Sequence['outputs.ClusterShardNodeEndpoint']]:
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the cluster. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class ClusterShardNodeEndpoint(dict):
def __init__(__self__, *,
address: Optional[str] = None,
port: Optional[int] = None):
"""
:param str address: DNS hostname of the node.
:param int port: The port number on which each of the nodes accepts connections. Defaults to `6379`.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> Optional[str]:
"""
DNS hostname of the node.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
The port number on which each of the nodes accepts connections. Defaults to `6379`.
"""
return pulumi.get(self, "port")
@pulumi.output_type
class ParameterGroupParameter(dict):
def __init__(__self__, *,
name: str,
value: str):
"""
:param str name: The name of the parameter.
:param str value: The value of the parameter.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the parameter.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
The value of the parameter.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class SnapshotClusterConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "engineVersion":
suggest = "engine_version"
elif key == "maintenanceWindow":
suggest = "maintenance_window"
elif key == "nodeType":
suggest = "node_type"
elif key == "numShards":
suggest = "num_shards"
elif key == "parameterGroupName":
suggest = "parameter_group_name"
elif key == "snapshotRetentionLimit":
suggest = "snapshot_retention_limit"
elif key == "snapshotWindow":
suggest = "snapshot_window"
elif key == "subnetGroupName":
suggest = "subnet_group_name"
elif key == "topicArn":
suggest = "topic_arn"
elif key == "vpcId":
suggest = "vpc_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SnapshotClusterConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SnapshotClusterConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SnapshotClusterConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
description: Optional[str] = None,
engine_version: Optional[str] = None,
maintenance_window: Optional[str] = None,
name: Optional[str] = None,
node_type: Optional[str] = None,
num_shards: Optional[int] = None,
parameter_group_name: Optional[str] = None,
port: Optional[int] = None,
snapshot_retention_limit: Optional[int] = None,
snapshot_window: Optional[str] = None,
subnet_group_name: Optional[str] = None,
topic_arn: Optional[str] = None,
vpc_id: Optional[str] = None):
"""
:param str description: Description for the cluster.
:param str engine_version: Version number of the Redis engine used by the cluster.
:param str maintenance_window: The weekly time range during which maintenance on the cluster is performed.
:param str name: Name of the snapshot. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
:param str node_type: Compute and memory capacity of the nodes in the cluster.
:param int num_shards: Number of shards in the cluster.
:param str parameter_group_name: Name of the parameter group associated with the cluster.
:param int port: Port number on which the cluster accepts connections.
:param int snapshot_retention_limit: Number of days for which MemoryDB retains automatic snapshots before deleting them.
:param str snapshot_window: The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of the shard.
:param str subnet_group_name: Name of the subnet group used by the cluster.
:param str topic_arn: ARN of the SNS topic to which cluster notifications are sent.
:param str vpc_id: The VPC in which the cluster exists.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if engine_version is not None:
pulumi.set(__self__, "engine_version", engine_version)
if maintenance_window is not None:
pulumi.set(__self__, "maintenance_window", maintenance_window)
if name is not None:
pulumi.set(__self__, "name", name)
if node_type is not None:
pulumi.set(__self__, "node_type", node_type)
if num_shards is not None:
pulumi.set(__self__, "num_shards", num_shards)
if parameter_group_name is not None:
pulumi.set(__self__, "parameter_group_name", parameter_group_name)
if port is not None:
pulumi.set(__self__, "port", port)
if snapshot_retention_limit is not None:
pulumi.set(__self__, "snapshot_retention_limit", snapshot_retention_limit)
if snapshot_window is not None:
pulumi.set(__self__, "snapshot_window", snapshot_window)
if subnet_group_name is not None:
pulumi.set(__self__, "subnet_group_name", subnet_group_name)
if topic_arn is not None:
pulumi.set(__self__, "topic_arn", topic_arn)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the cluster.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> Optional[str]:
"""
Version number of the Redis engine used by the cluster.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> Optional[str]:
"""
The weekly time range during which maintenance on the cluster is performed.
"""
return pulumi.get(self, "maintenance_window")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the snapshot. If omitted, the provider will assign a random, unique name. Conflicts with `name_prefix`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodeType")
def node_type(self) -> Optional[str]:
"""
Compute and memory capacity of the nodes in the cluster.
"""
return pulumi.get(self, "node_type")
@property
@pulumi.getter(name="numShards")
def num_shards(self) -> Optional[int]:
"""
Number of shards in the cluster.
"""
return pulumi.get(self, "num_shards")
@property
@pulumi.getter(name="parameterGroupName")
def parameter_group_name(self) -> Optional[str]:
"""
Name of the parameter group associated with the cluster.
"""
return pulumi.get(self, "parameter_group_name")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
Port number on which the cluster accepts connections.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> Optional[int]:
"""
Number of days for which MemoryDB retains automatic snapshots before deleting them.
"""
return pulumi.get(self, "snapshot_retention_limit")
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> Optional[str]:
"""
The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of the shard.
"""
return pulumi.get(self, "snapshot_window")
@property
@pulumi.getter(name="subnetGroupName")
def subnet_group_name(self) -> Optional[str]:
"""
Name of the subnet group used by the cluster.
"""
return pulumi.get(self, "subnet_group_name")
@property
@pulumi.getter(name="topicArn")
def topic_arn(self) -> Optional[str]:
"""
ARN of the SNS topic to which cluster notifications are sent.
"""
return pulumi.get(self, "topic_arn")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[str]:
"""
The VPC in which the cluster exists.
"""
return pulumi.get(self, "vpc_id")
@pulumi.output_type
class UserAuthenticationMode(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "passwordCount":
suggest = "password_count"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserAuthenticationMode. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserAuthenticationMode.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserAuthenticationMode.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
passwords: Sequence[str],
type: str,
password_count: Optional[int] = None):
"""
:param Sequence[str] passwords: The set of passwords used for authentication. You can create up to two passwords for each user.
:param str type: Indicates whether the user requires a password to authenticate. Must be set to `password`.
:param int password_count: The number of passwords belonging to the user.
"""
pulumi.set(__self__, "passwords", passwords)
pulumi.set(__self__, "type", type)
if password_count is not None:
pulumi.set(__self__, "password_count", password_count)
@property
@pulumi.getter
def passwords(self) -> Sequence[str]:
"""
The set of passwords used for authentication. You can create up to two passwords for each user.
"""
return pulumi.get(self, "passwords")
@property
@pulumi.getter
def type(self) -> str:
"""
Indicates whether the user requires a password to authenticate. Must be set to `password`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="passwordCount")
def password_count(self) -> Optional[int]:
"""
The number of passwords belonging to the user.
"""
return pulumi.get(self, "password_count")
@pulumi.output_type
class GetClusterClusterEndpointResult(dict):
def __init__(__self__, *,
address: str,
port: int):
"""
:param str address: DNS hostname of the node.
:param int port: Port number that this node is listening on.
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> str:
"""
DNS hostname of the node.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def port(self) -> int:
"""
Port number that this node is listening on.
"""
return pulumi.get(self, "port")
@pulumi.output_type
class GetClusterShardResult(dict):
def __init__(__self__, *,
name: str,
nodes: Sequence['outputs.GetClusterShardNodeResult'],
num_nodes: int,
slots: str):
"""
:param str name: Name of the cluster.
:param Sequence['GetClusterShardNodeArgs'] nodes: Set of nodes in this shard.
:param int num_nodes: Number of individual nodes in this shard.
:param str slots: Keyspace for this shard. Example: `0-16383`.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "nodes", nodes)
pulumi.set(__self__, "num_nodes", num_nodes)
pulumi.set(__self__, "slots", slots)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the cluster.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def nodes(self) -> Sequence['outputs.GetClusterShardNodeResult']:
"""
Set of nodes in this shard.
"""
return pulumi.get(self, "nodes")
@property
@pulumi.getter(name="numNodes")
def num_nodes(self) -> int:
"""
Number of individual nodes in this shard.
"""
return pulumi.get(self, "num_nodes")
@property
@pulumi.getter
def slots(self) -> str:
"""
Keyspace for this shard. Example: `0-16383`.
"""
return pulumi.get(self, "slots")
@pulumi.output_type
class GetClusterShardNodeResult(dict):
def __init__(__self__, *,
availability_zone: str,
create_time: str,
endpoints: Sequence['outputs.GetClusterShardNodeEndpointResult'],
name: str):
"""
:param str availability_zone: The Availability Zone in which the node resides.
:param str create_time: The date and time when the node was created. Example: `2022-01-01T21:00:00Z`.
:param str name: Name of the cluster.
"""
pulumi.set(__self__, "availability_zone", availability_zone)
pulumi.set(__self__, "create_time", create_time)
pulumi.set(__self__, "endpoints", endpoints)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
The Availability Zone in which the node resides.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The date and time when the node was created. Example: `2022-01-01T21:00:00Z`.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def endpoints(self) -> Sequence['outputs.GetClusterShardNodeEndpointResult']:
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the cluster.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class GetClusterShardNodeEndpointResult(dict):
def __init__(__self__, *,
address: str,
port: int):
"""
:param str address: DNS hostname of the node.
:param int port: Port number that this node is listening on.
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> str:
"""
DNS hostname of the node.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def port(self) -> int:
"""
Port number that this node is listening on.
"""
return pulumi.get(self, "port")
@pulumi.output_type
class GetParameterGroupParameterResult(dict):
def __init__(__self__, *,
name: str,
value: str):
"""
:param str name: Name of the parameter group.
:param str value: Value of the parameter.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the parameter group.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
Value of the parameter.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class GetSnapshotClusterConfigurationResult(dict):
def __init__(__self__, *,
description: str,
engine_version: str,
maintenance_window: str,
name: str,
node_type: str,
num_shards: int,
parameter_group_name: str,
port: int,
snapshot_retention_limit: int,
snapshot_window: str,
subnet_group_name: str,
topic_arn: str,
vpc_id: str):
"""
:param str description: Description for the cluster.
:param str engine_version: Version number of the Redis engine used by the cluster.
:param str maintenance_window: The weekly time range during which maintenance on the cluster is performed.
:param str name: Name of the snapshot.
:param str node_type: Compute and memory capacity of the nodes in the cluster.
:param int num_shards: Number of shards in the cluster.
:param str parameter_group_name: Name of the parameter group associated with the cluster.
:param int port: Port number on which the cluster accepts connections.
:param int snapshot_retention_limit: Number of days for which MemoryDB retains automatic snapshots before deleting them.
:param str snapshot_window: The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of the shard.
:param str subnet_group_name: Name of the subnet group used by the cluster.
:param str topic_arn: ARN of the SNS topic to which cluster notifications are sent.
:param str vpc_id: The VPC in which the cluster exists.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "engine_version", engine_version)
pulumi.set(__self__, "maintenance_window", maintenance_window)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "node_type", node_type)
pulumi.set(__self__, "num_shards", num_shards)
pulumi.set(__self__, "parameter_group_name", parameter_group_name)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "snapshot_retention_limit", snapshot_retention_limit)
pulumi.set(__self__, "snapshot_window", snapshot_window)
pulumi.set(__self__, "subnet_group_name", subnet_group_name)
pulumi.set(__self__, "topic_arn", topic_arn)
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter
def description(self) -> str:
"""
Description for the cluster.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> str:
"""
Version number of the Redis engine used by the cluster.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> str:
"""
The weekly time range during which maintenance on the cluster is performed.
"""
return pulumi.get(self, "maintenance_window")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the snapshot.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodeType")
def node_type(self) -> str:
"""
Compute and memory capacity of the nodes in the cluster.
"""
return pulumi.get(self, "node_type")
@property
@pulumi.getter(name="numShards")
def num_shards(self) -> int:
"""
Number of shards in the cluster.
"""
return pulumi.get(self, "num_shards")
@property
@pulumi.getter(name="parameterGroupName")
def parameter_group_name(self) -> str:
"""
Name of the parameter group associated with the cluster.
"""
return pulumi.get(self, "parameter_group_name")
@property
@pulumi.getter
def port(self) -> int:
"""
Port number on which the cluster accepts connections.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> int:
"""
Number of days for which MemoryDB retains automatic snapshots before deleting them.
"""
return pulumi.get(self, "snapshot_retention_limit")
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> str:
"""
The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of the shard.
"""
return pulumi.get(self, "snapshot_window")
@property
@pulumi.getter(name="subnetGroupName")
def subnet_group_name(self) -> str:
"""
Name of the subnet group used by the cluster.
"""
return pulumi.get(self, "subnet_group_name")
@property
@pulumi.getter(name="topicArn")
def topic_arn(self) -> str:
"""
ARN of the SNS topic to which cluster notifications are sent.
"""
return pulumi.get(self, "topic_arn")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> str:
"""
The VPC in which the cluster exists.
"""
return pulumi.get(self, "vpc_id")
@pulumi.output_type
class GetUserAuthenticationModeResult(dict):
def __init__(__self__, *,
password_count: int,
type: str):
"""
:param int password_count: The number of passwords belonging to the user.
:param str type: Whether the user requires a password to authenticate.
"""
pulumi.set(__self__, "password_count", password_count)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="passwordCount")
def password_count(self) -> int:
"""
The number of passwords belonging to the user.
"""
return pulumi.get(self, "password_count")
@property
@pulumi.getter
def type(self) -> str:
"""
Whether the user requires a password to authenticate.
"""
return pulumi.get(self, "type")
|
f547747b8bd3f940c2035c80d10c7fc5b31d56ed
|
489c11c84d604c3363a326f98f931b09378be7e3
|
/oauth/tests.py
|
bb23b9baba8660c1703c60fb33ae42cc17f716ab
|
[
"MIT"
] |
permissive
|
liangliangyy/DjangoBlog
|
88441bb728d84f1a52f8122a09fd45fe8e1f14c3
|
6a708de228aaa07a2bf0de84f05eb1fb8f33a37d
|
refs/heads/master
| 2023-08-31T08:31:56.249077
| 2023-08-23T13:48:34
| 2023-08-23T13:48:34
| 72,640,835
| 6,499
| 3,157
|
MIT
| 2023-09-13T10:09:26
| 2016-11-02T13:07:55
|
Python
|
UTF-8
|
Python
| false
| false
| 9,781
|
py
|
tests.py
|
import json
from unittest.mock import patch
from django.conf import settings
from django.contrib import auth
from django.test import Client, RequestFactory, TestCase
from django.urls import reverse
from djangoblog.utils import get_sha256
from oauth.models import OAuthConfig
from oauth.oauthmanager import BaseOauthManager
# Create your tests here.
class OAuthConfigTest(TestCase):
def setUp(self):
self.client = Client()
self.factory = RequestFactory()
def test_oauth_login_test(self):
c = OAuthConfig()
c.type = 'weibo'
c.appkey = 'appkey'
c.appsecret = 'appsecret'
c.save()
response = self.client.get('/oauth/oauthlogin?type=weibo')
self.assertEqual(response.status_code, 302)
self.assertTrue("api.weibo.com" in response.url)
response = self.client.get('/oauth/authorize?type=weibo&code=code')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/')
class OauthLoginTest(TestCase):
def setUp(self) -> None:
self.client = Client()
self.factory = RequestFactory()
self.apps = self.init_apps()
def init_apps(self):
applications = [p() for p in BaseOauthManager.__subclasses__()]
for application in applications:
c = OAuthConfig()
c.type = application.ICON_NAME.lower()
c.appkey = 'appkey'
c.appsecret = 'appsecret'
c.save()
return applications
def get_app_by_type(self, type):
for app in self.apps:
if app.ICON_NAME.lower() == type:
return app
@patch("oauth.oauthmanager.WBOauthManager.do_post")
@patch("oauth.oauthmanager.WBOauthManager.do_get")
def test_weibo_login(self, mock_do_get, mock_do_post):
weibo_app = self.get_app_by_type('weibo')
assert weibo_app
url = weibo_app.get_authorization_url()
mock_do_post.return_value = json.dumps({"access_token": "access_token",
"uid": "uid"
})
mock_do_get.return_value = json.dumps({
"avatar_large": "avatar_large",
"screen_name": "screen_name",
"id": "id",
"email": "email",
})
userinfo = weibo_app.get_access_token_by_code('code')
self.assertEqual(userinfo.token, 'access_token')
self.assertEqual(userinfo.openid, 'id')
@patch("oauth.oauthmanager.GoogleOauthManager.do_post")
@patch("oauth.oauthmanager.GoogleOauthManager.do_get")
def test_google_login(self, mock_do_get, mock_do_post):
google_app = self.get_app_by_type('google')
assert google_app
url = google_app.get_authorization_url()
mock_do_post.return_value = json.dumps({
"access_token": "access_token",
"id_token": "id_token",
})
mock_do_get.return_value = json.dumps({
"picture": "picture",
"name": "name",
"sub": "sub",
"email": "email",
})
token = google_app.get_access_token_by_code('code')
userinfo = google_app.get_oauth_userinfo()
self.assertEqual(userinfo.token, 'access_token')
self.assertEqual(userinfo.openid, 'sub')
@patch("oauth.oauthmanager.GitHubOauthManager.do_post")
@patch("oauth.oauthmanager.GitHubOauthManager.do_get")
def test_github_login(self, mock_do_get, mock_do_post):
github_app = self.get_app_by_type('github')
assert github_app
url = github_app.get_authorization_url()
self.assertTrue("github.com" in url)
self.assertTrue("client_id" in url)
mock_do_post.return_value = "access_token=gho_16C7e42F292c6912E7710c838347Ae178B4a&scope=repo%2Cgist&token_type=bearer"
mock_do_get.return_value = json.dumps({
"avatar_url": "avatar_url",
"name": "name",
"id": "id",
"email": "email",
})
token = github_app.get_access_token_by_code('code')
userinfo = github_app.get_oauth_userinfo()
self.assertEqual(userinfo.token, 'gho_16C7e42F292c6912E7710c838347Ae178B4a')
self.assertEqual(userinfo.openid, 'id')
@patch("oauth.oauthmanager.FaceBookOauthManager.do_post")
@patch("oauth.oauthmanager.FaceBookOauthManager.do_get")
def test_facebook_login(self, mock_do_get, mock_do_post):
facebook_app = self.get_app_by_type('facebook')
assert facebook_app
url = facebook_app.get_authorization_url()
self.assertTrue("facebook.com" in url)
mock_do_post.return_value = json.dumps({
"access_token": "access_token",
})
mock_do_get.return_value = json.dumps({
"name": "name",
"id": "id",
"email": "email",
"picture": {
"data": {
"url": "url"
}
}
})
token = facebook_app.get_access_token_by_code('code')
userinfo = facebook_app.get_oauth_userinfo()
self.assertEqual(userinfo.token, 'access_token')
@patch("oauth.oauthmanager.QQOauthManager.do_get", side_effect=[
'access_token=access_token&expires_in=3600',
'callback({"client_id":"appid","openid":"openid"} );',
json.dumps({
"nickname": "nickname",
"email": "email",
"figureurl": "figureurl",
"openid": "openid",
})
])
def test_qq_login(self, mock_do_get):
qq_app = self.get_app_by_type('qq')
assert qq_app
url = qq_app.get_authorization_url()
self.assertTrue("qq.com" in url)
token = qq_app.get_access_token_by_code('code')
userinfo = qq_app.get_oauth_userinfo()
self.assertEqual(userinfo.token, 'access_token')
@patch("oauth.oauthmanager.WBOauthManager.do_post")
@patch("oauth.oauthmanager.WBOauthManager.do_get")
    def test_weibo_authorize_login_with_email(self, mock_do_get, mock_do_post):
mock_do_post.return_value = json.dumps({"access_token": "access_token",
"uid": "uid"
})
mock_user_info = {
"avatar_large": "avatar_large",
"screen_name": "screen_name1",
"id": "id",
"email": "email",
}
mock_do_get.return_value = json.dumps(mock_user_info)
response = self.client.get('/oauth/oauthlogin?type=weibo')
self.assertEqual(response.status_code, 302)
self.assertTrue("api.weibo.com" in response.url)
response = self.client.get('/oauth/authorize?type=weibo&code=code')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/')
user = auth.get_user(self.client)
        self.assertTrue(user.is_authenticated)
self.assertEqual(user.username, mock_user_info['screen_name'])
self.assertEqual(user.email, mock_user_info['email'])
self.client.logout()
response = self.client.get('/oauth/authorize?type=weibo&code=code')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/')
user = auth.get_user(self.client)
        self.assertTrue(user.is_authenticated)
self.assertEqual(user.username, mock_user_info['screen_name'])
self.assertEqual(user.email, mock_user_info['email'])
@patch("oauth.oauthmanager.WBOauthManager.do_post")
@patch("oauth.oauthmanager.WBOauthManager.do_get")
    def test_weibo_authorize_login_without_email(self, mock_do_get, mock_do_post):
mock_do_post.return_value = json.dumps({"access_token": "access_token",
"uid": "uid"
})
mock_user_info = {
"avatar_large": "avatar_large",
"screen_name": "screen_name1",
"id": "id",
}
mock_do_get.return_value = json.dumps(mock_user_info)
response = self.client.get('/oauth/oauthlogin?type=weibo')
self.assertEqual(response.status_code, 302)
self.assertTrue("api.weibo.com" in response.url)
response = self.client.get('/oauth/authorize?type=weibo&code=code')
self.assertEqual(response.status_code, 302)
oauth_user_id = int(response.url.split('/')[-1].split('.')[0])
self.assertEqual(response.url, f'/oauth/requireemail/{oauth_user_id}.html')
response = self.client.post(response.url, {'email': 'test@gmail.com', 'oauthid': oauth_user_id})
self.assertEqual(response.status_code, 302)
sign = get_sha256(settings.SECRET_KEY +
str(oauth_user_id) + settings.SECRET_KEY)
url = reverse('oauth:bindsuccess', kwargs={
'oauthid': oauth_user_id,
})
self.assertEqual(response.url, f'{url}?type=email')
path = reverse('oauth:email_confirm', kwargs={
'id': oauth_user_id,
'sign': sign
})
response = self.client.get(path)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, f'/oauth/bindsuccess/{oauth_user_id}.html?type=success')
user = auth.get_user(self.client)
from oauth.models import OAuthUser
oauth_user = OAuthUser.objects.get(author=user)
self.assertTrue(user.is_authenticated)
self.assertEqual(user.username, mock_user_info['screen_name'])
self.assertEqual(user.email, 'test@gmail.com')
self.assertEqual(oauth_user.pk, oauth_user_id)
|
c44c038fb6436bbdb54c0be536b3bf90bd19cb4b
|
026ba8593b3b364ec44797e345d25324be53f9cf
|
/pydantic_cli/tests/test_examples_simple_with_enum_by_name.py
|
8405fff2715e74051733e48b8bea4f201cbe8534
|
[
"MIT"
] |
permissive
|
mpkocher/pydantic-cli
|
e8d21dd094aabb18f39935a2351c0f297f4d163e
|
d67ab8b265fd9ed045d27ecd904a04b8185481fa
|
refs/heads/master
| 2023-07-10T16:13:15.750640
| 2022-04-06T23:14:34
| 2022-04-06T23:14:34
| 197,854,268
| 109
| 9
|
MIT
| 2023-06-30T23:53:36
| 2019-07-19T23:22:18
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
test_examples_simple_with_enum_by_name.py
|
from . import _TestHarness, HarnessConfig
from pydantic_cli.examples.simple_with_enum_by_name import Options, example_runner
class TestExamples(_TestHarness[Options]):
CONFIG = HarnessConfig(Options, example_runner)
def test_simple_01(self):
args = ["--states", "RUNNING", "FAILED", "--mode", "alpha"]
self.run_config(args)
def test_case_insensitive(self):
args = ["--states", "successful", "failed", "--mode", "ALPHA"]
self.run_config(args)
def test_bad_enum_by_value(self):
args = [
"--states",
"RUNNING",
"--mode",
"1",
]
self.run_config(args, exit_code=1)
def test_bad_enum_value(self):
args = [
"--states",
"RUNNING",
"--mode",
"DRAGON",
]
self.run_config(args, exit_code=1)
|
33f38b38ef80b162d7b4792b7e6d99cd51d8decd
|
e1cddfd754d952134e72dfd03522c5ea4fb6008e
|
/docs/_scripts/filter_h.py
|
31c4c5142831308d7b3561803f0c0144d20d2f23
|
[
"Apache-2.0"
] |
permissive
|
FDio/vpp
|
0ad30fa1bec2975ffa6b66b45c9f4f32163123b6
|
f234b0d4626d7e686422cc9dfd25958584f4931e
|
refs/heads/master
| 2023-08-31T16:09:04.068646
| 2022-03-14T09:49:15
| 2023-08-31T09:50:00
| 96,556,718
| 1,048
| 630
|
Apache-2.0
| 2023-06-21T05:39:17
| 2017-07-07T16:29:40
|
C
|
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
filter_h.py
|
#!/usr/bin/env python3
# Copyright (c) 2016 Comcast Cable Communications Management, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Filter for .c files to make various preprocessor tricks Doxygenish
import os
import re
import sys
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <filename>\n" % (sys.argv[0]))
sys.exit(1)
replace_patterns = [
# Search for CLIB_PAD_FROM_TO(...); and replace with padding
# #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
(
re.compile(
r"(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+)," r"\s*(?P<to>[^)]+)[)]"
),
r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]",
),
]
filename = sys.argv[1]
cwd = os.getcwd()
if filename[0 : len(cwd)] == cwd:
filename = filename[len(cwd) :]
if filename[0] == "/":
filename = filename[1:]
with open(filename) as fd:
    line_num = 0
    for line in fd:
        line_num += 1
        text = line[:-1]  # drop the trailing \n
        # Look for search/replace patterns
        for p in replace_patterns:
            text = p[0].sub(p[1], text)
        sys.stdout.write(text + "\n")
# All done
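# Substitution sketch (hypothetical input line):
#     CLIB_PAD_FROM_TO(0x10, 0x20);
# is rewritten to:
#     /** Padding. */ u8 pad_0x10[(0x20) - (0x10)];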
|
9d323f41ae3ff2dda802170a2d85c6e465cedb87
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/api/tasks.py
|
ac67926f30c7d85e2fd33dafadccc9eff0a20b48
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
tasks.py
|
import time
from celery.schedules import crontab
from corehq.apps.celery import periodic_task
from tastypie.models import ApiAccess
@periodic_task(run_every=crontab(minute=0, hour=0), queue='background_queue')
def clean_api_access():
accessed = int(time.time()) - 90 * 24 * 3600 # only keep last 90 days
ApiAccess.objects.filter(accessed__lt=accessed).delete()
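# Cutoff sketch: `accessed` is stored as a unix timestamp, so the nightly run
# deletes rows older than now - 90 * 24 * 3600 (7,776,000 seconds, ~90 days).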
|
3c3da9bed8ae5b4c451520fe12e4e7e3a33e8890
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/python/akg/ops/math/ascend/approximate_equal.py
|
87f473d87d4dc70cc6bc54bba0fbae40ff3de8f9
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,975
|
py
|
approximate_equal.py
|
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: approximate_equal"""
import akg.tvm
from akg.utils.kernel_exec import product_is_mini
from akg.utils import validation_check as utils
from akg.utils.format_transform import get_shape
from ..sub import sub
from ..abs import abs
from ..cast import cast
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor, (float, type(None)), (str, type(None)))
def approximate_equal(x, y, tolerance=1e-5, target=utils.CCE):
"""
    Element-wise check that abs(x - y) is less than or equal to the tolerance.
Args:
x (tvm.tensor.Tensor): Tensor of type float16, float32.
y (tvm.tensor.Tensor): Tensor of type float16, float32.
tolerance (float): default is 1e-5
Returns:
        tvm.tensor.Tensor of bool. True where abs(x - y) is less than or equal
        to the tolerance, False elsewhere.
Supported Platforms:
'Ascend'
"""
    if tolerance < 0:
        raise RuntimeError("tolerance must be >= 0")
# check shape
utils.check_shape(x)
utils.check_shape(y)
shape = get_shape(x)
    if shape != get_shape(y):
        raise RuntimeError("input shapes must be the same, but got %s vs %s"
                           % (shape, get_shape(y)))
# check input tensor data_type
utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
utils.ops_dtype_check(y.dtype, utils.DtypeForDavinci.ALL_FLOAT)
dtype = x.dtype
    if dtype != y.dtype:
        raise RuntimeError("input dtypes must be the same, but got %s vs %s"
                           % (dtype, y.dtype))
res_vsub = sub(x, y, target)
res_vabs = abs(res_vsub, target)
    # The vcmp_lt and vsel instructions don't support fp32 on mini,
    # so cast to fp16 first (this could be simplified with "auto cast")
if product_is_mini():
dtype = "float16"
res_vabs = cast(res_vabs, dtype, target)
t = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "t")
f = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "f")
res = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(
res_vabs[indice] <= akg.tvm.const(tolerance, dtype),
t[indice], f[indice]))
    # This could be simplified if the cast op supported fp16/fp32 to bool directly
res_fp16 = cast(res, "float16", target)
res_bool = akg.tvm.compute(shape, lambda *indice: res_fp16(*indice).astype("bool"))
return res_bool
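# Minimal usage sketch (assumes an Ascend build of akg; shapes and names are
# illustrative):
#
#     import akg.tvm
#     x = akg.tvm.placeholder((16, 16), name="x", dtype="float16")
#     y = akg.tvm.placeholder((16, 16), name="y", dtype="float16")
#     res = approximate_equal(x, y, tolerance=1e-3)  # bool tensor, True where
#                                                    # abs(x - y) <= 1e-3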
|
f6f46ee5c45044416c21c67fd39d2941958847f1
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/WindEngineeringApplication/tests/test_WindEngineeringApplication.py
|
2390d6b6f9cc47e9e8fe109965cc9769d5444907
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
test_WindEngineeringApplication.py
|
# Kratos imports
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
from KratosMultiphysics.WindEngineeringApplication.test_suite import SuiteFlags, TestSuite
import run_cpp_tests
# STL imports
import pathlib
class TestLoader(UnitTest.TestLoader):
@property
def suiteClass(self):
return TestSuite
def AssembleTestSuites(enable_mpi=False):
""" Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all".
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
"""
static_suites = UnitTest.KratosSuites
# Test cases will be organized into lists first, then loaded into their
# corresponding suites all at once
local_cases = {}
for key in static_suites.keys():
local_cases[key] = []
# Glob all test cases in this application
this_directory = pathlib.Path(__file__).absolute().parent
test_loader = TestLoader()
all_tests = test_loader.discover(this_directory)
# Sort globbed test cases into lists based on their suite flags
# flags correspond to entries in KratosUnittest.TestSuites
# (small, nightly, all, validation)
#
# Cases with the 'mpi' flag are added to mpi suites as well as their corresponding normal suites.
# Cases with the 'mpi_only' flag are not added to normal suites.
for test_case in all_tests:
suite_flags = set(test_case.suite_flags)
# Check whether the test case has a flag for mpi
mpi = SuiteFlags.MPI in suite_flags
mpi_only = SuiteFlags.MPI_ONLY in suite_flags
# Don't add the test if its mpi-exclusive and mpi is not enabled
if (not enable_mpi) and mpi_only:
continue
# Remove mpi flags
if mpi:
suite_flags.remove(SuiteFlags.MPI)
if mpi_only:
suite_flags.remove(SuiteFlags.MPI_ONLY)
# Add case to the corresponding suites
for suite_flag in suite_flags:
local_cases[suite_flag.name.lower()].append(test_case)
if mpi or mpi_only:
local_cases["mpi_" + suite_flag.name.lower()].append(test_case)
# Put test in 'all' if it isn't already there
        if SuiteFlags.ALL not in suite_flags:
if not mpi_only:
local_cases["all"].append(test_case)
if mpi or mpi_only:
local_cases["mpi_all"].append(test_case)
# Load all sorted cases into the global suites
for suite_name, test_cases in local_cases.items():
static_suites[suite_name].addTests(test_cases)
return static_suites
def Run(enable_mpi=False):
UnitTest.runTests(AssembleTestSuites(enable_mpi=enable_mpi))
if __name__ == "__main__":
Run(enable_mpi=False)
|
bbc3c5bde130c509fb252b316f20d489a41cfde2
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/oam/get_sink.py
|
3b0cb98d6fb19cfa015b691dd5456205724bdac3
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 5,065
|
py
|
get_sink.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSinkResult',
'AwaitableGetSinkResult',
'get_sink',
'get_sink_output',
]
@pulumi.output_type
class GetSinkResult:
"""
A collection of values returned by getSink.
"""
def __init__(__self__, arn=None, id=None, name=None, sink_id=None, sink_identifier=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sink_id and not isinstance(sink_id, str):
raise TypeError("Expected argument 'sink_id' to be a str")
pulumi.set(__self__, "sink_id", sink_id)
if sink_identifier and not isinstance(sink_identifier, str):
raise TypeError("Expected argument 'sink_identifier' to be a str")
pulumi.set(__self__, "sink_identifier", sink_identifier)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
"""
ARN of the sink.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the sink.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="sinkId")
def sink_id(self) -> str:
"""
Random ID string that AWS generated as part of the sink ARN.
"""
return pulumi.get(self, "sink_id")
@property
@pulumi.getter(name="sinkIdentifier")
def sink_identifier(self) -> str:
return pulumi.get(self, "sink_identifier")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Tags assigned to the sink.
"""
return pulumi.get(self, "tags")
class AwaitableGetSinkResult(GetSinkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSinkResult(
arn=self.arn,
id=self.id,
name=self.name,
sink_id=self.sink_id,
sink_identifier=self.sink_identifier,
tags=self.tags)
def get_sink(sink_identifier: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSinkResult:
"""
Data source for managing an AWS CloudWatch Observability Access Manager Sink.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.oam.get_sink(sink_identifier="arn:aws:oam:us-west-1:111111111111:sink/abcd1234-a123-456a-a12b-a123b456c789")
```
:param str sink_identifier: ARN of the sink.
:param Mapping[str, str] tags: Tags assigned to the sink.
"""
__args__ = dict()
__args__['sinkIdentifier'] = sink_identifier
__args__['tags'] = tags
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:oam/getSink:getSink', __args__, opts=opts, typ=GetSinkResult).value
return AwaitableGetSinkResult(
arn=pulumi.get(__ret__, 'arn'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
sink_id=pulumi.get(__ret__, 'sink_id'),
sink_identifier=pulumi.get(__ret__, 'sink_identifier'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_sink)
def get_sink_output(sink_identifier: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSinkResult]:
"""
Data source for managing an AWS CloudWatch Observability Access Manager Sink.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.oam.get_sink(sink_identifier="arn:aws:oam:us-west-1:111111111111:sink/abcd1234-a123-456a-a12b-a123b456c789")
```
:param str sink_identifier: ARN of the sink.
:param Mapping[str, str] tags: Tags assigned to the sink.
"""
...
|
105da23dbfa962850abc9d614601bc8da0677339
|
f0519485bd889fedc1f01e3d3f5bf8681cac163e
|
/panflute/tools.py
|
7cff301e944ab886b47bb953a6fe2800e8c62860
|
[
"BSD-3-Clause"
] |
permissive
|
sergiocorreia/panflute
|
55291ab38fcb63a162864faf6bf349e3e9aaaaa1
|
dd8b03a3f5b1eca13faf54289dae3e50d5323ca0
|
refs/heads/master
| 2023-07-08T15:28:00.102303
| 2023-03-07T03:14:02
| 2023-03-07T03:14:02
| 55,024,750
| 470
| 74
|
BSD-3-Clause
| 2023-02-08T11:01:41
| 2016-03-30T02:07:47
|
Python
|
UTF-8
|
Python
| false
| false
| 23,478
|
py
|
tools.py
|
"""
Useful (but not essential) functions for writing panflute filters
"""
# ---------------------------
# Imports
# ---------------------------
from .base import Element
from .elements import *
from .io import dump
import io
import os
import os.path as p
import re
import sys
import json
import yaml
import shlex
from typing import Tuple
from shutil import which
from subprocess import Popen, PIPE
from functools import partial
# yamlloader keeps dict ordering in yaml
try:
import yamlloader
except ImportError:
yamlloader = None
if yamlloader is None:
# property of pyyaml:
# C*Loader when compiled with C, else fallback to pure Python loader
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
else:
from yamlloader.ordereddict import CSafeLoader as Loader
# to be filled when the first time which('pandoc') is called
PANDOC_PATH = None
# ---------------------------
# Constants
# ---------------------------
HorizontalSpaces = (Space, LineBreak, SoftBreak)
VerticalSpaces = (Para, )
# ---------------------------
# Convenience classes
# ---------------------------
class PandocVersion:
'''
Get runtime Pandoc version
use PandocVersion().version for comparing versions
'''
def __init__(self):
pass
def __str__(self) -> str:
return self._repr.splitlines()[0].split(' ')[1]
def __repr__(self) -> str:
return self._repr
@property
def _repr(self):
# lazily call pandoc only once
if not hasattr(self, '__repr'):
self.__repr: str = run_pandoc(args=['--version'])
return self.__repr
@property
def version(self) -> Tuple[int, ...]:
return tuple(int(i) for i in str(self).split('.'))
@property
def data_dir(self):
info = self._repr.splitlines()
prefix = "User data directory: "
info = [row for row in info if row.startswith(prefix)]
assert len(info) == 1, info
data_dir = info[0][len(prefix):]
# data_dir might contain multiple folders:
# Default user data directory: /home/runner/.local/share/pandoc or /home/runner/.pandoc/filters
data_dir = data_dir.split(' or ')
data_dir = [p.normpath(p.expanduser(p.expandvars(p.join(d, 'filters')))) for d in data_dir]
return data_dir
pandoc_version = PandocVersion()
# ---------------------------
# Convenience functions
# ---------------------------
def yaml_filter(element, doc, tag=None, function=None, tags=None,
strict_yaml=False):
'''
Convenience function for parsing code blocks with YAML options
This function is useful to create a filter that applies to
code blocks that have specific classes.
It is used as an argument of ``run_filter``, with two additional options:
``tag`` and ``function``.
Using this is equivalent to having filter functions that:
1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
for ``...`` or ``---`` strings in a separate line)
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
to return a new or modified element
Instead, you just need to:
1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
(options, data, element, doc). Note that options is a dict and data
is a raw string. Notice that this is similar to the ``action``
functions of standard filters, but with *options* and *data* as the
new ones.
Note: if you want to apply multiple functions to separate classes,
you can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.
Note: use the ``strict_yaml=True`` option in order to allow for more verbose
but flexible YAML metadata: more than one YAML block is allowed, but
they all must start with ``---`` (even at the beginning) and end with
``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.
Example::
"""
Replace code blocks of class 'foo' with `count` horizontal rules
"""
import panflute as pf
def fenced_action(options, data, element, doc):
count = options.get('count', 1)
div = pf.Div(attributes={'count': str(count)})
div.content.extend([pf.HorizontalRule] * count)
return div
if __name__ == '__main__':
pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
'''
# Allow for either tag+function or a dict {tag: function}
assert (tag is None) + (tags is None) == 1 # XOR
if tags is None:
tags = {tag: function}
if type(element) == CodeBlock:
for tag in tags:
if tag in element.classes:
function = tags[tag]
if not strict_yaml:
# Split YAML and data parts (separated by ... or ---)
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 1, re.MULTILINE)
data = raw[2] if len(raw) > 2 else ''
data = data.lstrip('\n')
raw = raw[0]
try:
options = yaml.load(raw, Loader=Loader) # nosec # already using SafeLoader
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
debug("panflute: malformed YAML block:")
debug(repr(raw))
return
if options is None:
options = {}
else:
options = {}
data = []
raw = re.split("^([.]{3,}|[-]{3,})$",
element.text, 0, re.MULTILINE)
rawmode = True
for chunk in raw:
chunk = chunk.strip('\n')
if not chunk:
continue
if rawmode:
if chunk.startswith('---'):
rawmode = False
else:
data.append(chunk)
else:
if chunk.startswith('---') or chunk.startswith('...'):
rawmode = True
else:
try:
options.update(yaml.load(chunk, Loader=Loader)) # nosec # already using SafeLoader
except yaml.scanner.ScannerError:
debug("panflute: malformed YAML block")
return
data = '\n'.join(data)
return function(options=options, data=data,
element=element, doc=doc)
# ---------------------------
# Functions that extract content
# ---------------------------
def stringify(element, newlines=True):
"""
Return the raw text version of an element (and its children elements).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str`
"""
def stop_if(e):
return isinstance(e, (DefinitionList, Cite))
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == DefinitionList:
ans = []
for item in e.content:
term = ''.join(stringify(part) for part in item.term)
definitions = '; '.join(stringify(defn) for defn in item.definitions)
ans.append(f'- {term}: {definitions}')
ans = '\n'.join(ans)
elif type(e) == Cite:
ans = stringify(e.content)
else:
ans = ''
# Add quotes around the contents of Quoted()
if type(e.parent) == Quoted:
if e.index == 0:
ans = '"' + ans
if e.index == len(e.container) - 1:
ans += '"'
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f, stop_if=stop_if)
return ''.join(answer)
def _get_metadata(self, key='', default=None, builtin=True):
"""
get_metadata([key, default, simple])
Retrieve metadata with nested keys separated by dots.
This is useful to avoid repeatedly checking if a dict exists, as
the frontmatter might not have the keys that we expect.
With ``builtin=True`` (the default), it will convert the results to
built-in Python types, instead of :class:`.MetaValue` elements. E.g. instead of returning a MetaBool it will return True or False.
:param key: string with the keys separated by a dot (``key1.key2``). Default is an empty string (which returns the entire metadata dict)
:type key: ``str``
:param default: return value in case the key is not found (default is ``None``)
:param builtin: If True, return built-in Python types (default is ``True``)
:Example:
>>> doc.metadata['format']['show-frame'] = True
>>> # ...
>>> # afterwards:
>>> show_frame = doc.get_metadata('format.show-frame', False)
>>> stata_path = doc.get_metadata('media.path.figures', '.')
"""
# Retrieve metadata
assert isinstance(key, str)
meta = self.metadata
# Retrieve specific key
if key:
for k in key.split('.'):
if isinstance(meta, MetaMap) and k in meta.content:
meta = meta[k]
else:
return default
# Stringify contents
return meta2builtin(meta) if builtin else meta
def meta2builtin(meta):
if isinstance(meta, MetaBool):
return meta.boolean
elif isinstance(meta, MetaString):
return meta.text
elif isinstance(meta, MetaList):
return [meta2builtin(v) for v in meta.content.list]
elif isinstance(meta, MetaMap):
return {k: meta2builtin(v) for k, v in meta.content.dict.items()}
elif isinstance(meta, (MetaInlines, MetaBlocks)):
return stringify(meta)
else:
debug("MISSING", type(meta))
return meta
# Bind the method
Doc.get_metadata = _get_metadata
# ---------------------------
# Functions that rely on external calls
# ---------------------------
def shell(args, wait=True, msg=None):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
# Fix Windows error if passed a string
if isinstance(args, str):
args = shlex.split(args, posix=(os.name != "nt"))
if os.name == "nt":
args = [arg.replace('/', '\\') for arg in args]
if wait:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(input=msg)
exitcode = proc.returncode
if exitcode != 0:
debug('<<<< shell call failed; error message below >>>>')
debug(err.decode('utf-8'))
debug('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
raise IOError()
return out
else:
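# Fire-and-forget: spawn the process detached and return immediately without collecting output
# (DETACHED_PROCESS is a Windows-specific process-creation flag).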
DETACHED_PROCESS = 0x00000008
proc = Popen(args, creationflags=DETACHED_PROCESS)
def run_pandoc(text='', args=None, pandoc_path=None):
"""
Low level function that calls Pandoc with (optionally)
some input text and/or arguments
:param str pandoc_path: If specified, use the Pandoc at this path.
If None, default to that from PATH.
"""
if args is None:
args = []
if pandoc_path is None:
# initialize the global PANDOC_PATH
if PANDOC_PATH is None:
temp = which('pandoc')
if temp is None:
raise OSError("Path to pandoc executable does not exist")
sys.modules[__name__].PANDOC_PATH = temp
pandoc_path = PANDOC_PATH
try:
proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
raise OSError(f"Given pandoc_path {pandoc_path} is invalid")
out, err = proc.communicate(input=text.encode('utf-8'))
exitcode = proc.returncode
if err:
debug(err.decode('utf-8'))
if exitcode != 0:
raise IOError('')
return out.decode('utf-8')
def convert_text(text,
input_format='markdown',
output_format='panflute',
standalone=False,
extra_args=None,
pandoc_path=None):
r"""
Convert formatted text (usually markdown) by calling Pandoc internally
The default output format ('panflute') will return a tree
of Pandoc elements. When combined with 'standalone=True', the tree root
will be a 'Doc' element.
Example:
>>> from panflute import *
>>> md = 'Some *markdown* **text** ~xyz~'
>>> tex = r'Some $x^y$ or $x_n = \sqrt{a + b}$ \textit{a}'
>>> convert_text(md)
[Para(Str(Some) Space Emph(Str(markdown)) Space Strong(Str(text)) Space Subscript(Str(xyz)))]
>>> convert_text(tex)
[Para(Str(Some) Space Math(x^y; format='InlineMath') Space Str(or) Space Math(x_n = \sqrt{a + b}; format='InlineMath') Space RawInline(\textit{a}; format='tex'))]
:param text: text that will be converted
:type text: :class:`str` | :class:`.Element` | :class:`list` of :class:`.Element`
:param input_format: format of the text (default 'markdown').
Any Pandoc input format is valid, plus 'panflute' (a tree of Pandoc
elements)
:param output_format: format of the output
(default is 'panflute' which creates the tree of Pandoc elements).
Non-binary Pandoc formats are allowed (e.g. markdown, latex is allowed,
but docx and pdf are not).
:param standalone: whether the results will be a standalone document
or not.
:type standalone: :class:`bool`
:param extra_args: extra arguments passed to Pandoc
:type extra_args: :class:`list`
:param str pandoc_path: If specified, use the Pandoc at this path.
If None, default to that from PATH.
:rtype: :class:`list` | :class:`.Doc` | :class:`str`
Note: for a more general solution,
see `pyandoc <https://github.com/kennethreitz/pyandoc/>`_
by Kenneth Reitz.
"""
if input_format == 'panflute':
# Problem:
# We need a Doc element, but received a list of elements.
# So we wrap-up the list in a Doc, but with what pandoc-api version?
# (remember that Pandoc requires a matching api-version!)
# Workaround: call Pandoc with empty text to get its api-version
if not isinstance(text, Doc):
tmp_doc = convert_text('', standalone=True)
api_version = tmp_doc.api_version
if isinstance(text, Element):
text = [text]
text = Doc(*text, api_version=api_version)
# Dump the Doc into json
with io.StringIO() as f:
dump(text, f)
text = f.getvalue()
in_fmt = 'json' if input_format == 'panflute' else input_format
out_fmt = 'json' if output_format == 'panflute' else output_format
if extra_args is None:
extra_args = []
if standalone:
extra_args.append('--standalone')
out = inner_convert_text(text, in_fmt, out_fmt, extra_args, pandoc_path=pandoc_path)
if output_format == 'panflute':
out = json.loads(out, object_hook=from_json)
if standalone:
if not isinstance(out, Doc): # Pandoc 1.7.2 and earlier
metadata, items = out
out = Doc(*items, metadata=metadata)
else:
if isinstance(out, Doc): # Pandoc 1.8 and later
out = out.content.list
else:
out = out[1] # Pandoc 1.7.2 and earlier
return out
def inner_convert_text(text, input_format, output_format, extra_args, pandoc_path=None):
# like convert_text(), but does not support 'panflute' input/output
from_arg = '--from={}'.format(input_format)
to_arg = '--to={}'.format(output_format)
args = [from_arg, to_arg] + extra_args
out = run_pandoc(text, args, pandoc_path=pandoc_path)
out = "\n".join(out.splitlines()) # Replace \r\n with \n
return out
# ---------------------------
# Functions that modify content
# ---------------------------
def _replace_keyword(self, keyword, replacement, count=0):
"""
replace_keyword(keyword, replacement[, count])
Walk through the element and its children
and look for Str() objects that contains
exactly the keyword. Then, replace it.
Usually applied to an entire document (a :class:`.Doc` element)
Note: If the replacement is a block, it cannot be put in place of
a Str element. As a solution, the closest ancestor (e.g. the parent)
will be replaced instead, but only if possible
(if the parent only has one child).
Example:
>>> from panflute import *
>>> p1 = Para(Str('Spam'), Space, Emph(Str('and'), Space, Str('eggs')))
>>> p2 = Para(Str('eggs'))
>>> p3 = Plain(Emph(Str('eggs')))
>>> doc = Doc(p1, p2, p3)
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(eggs))) Para(Str(eggs)) Plain(Emph(Str(eggs))))
>>> doc.replace_keyword('eggs', Str('ham'))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(ham)) Plain(Emph(Str(ham))))
>>> doc.replace_keyword(keyword='ham', replacement=Para(Str('spam')))
>>> doc.content
ListContainer(Para(Str(Spam) Space Emph(Str(and) Space Str(ham))) Para(Str(spam)) Para(Str(spam)))
:param keyword: string that will be searched (cannot have spaces!)
:type keyword: :class:`str`
:param replacement: element that will be placed in turn of the ``Str``
element that contains the keyword.
:type replacement: :class:`.Element`
:param count: number of occurrences that will be replaced.
If count is not given or is set to zero, all occurrences
will be replaced.
:type count: :class:`int`
"""
def replace_with_inline(e, doc):
if type(e) == Str and e.text == keyword:
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
def replace_with_block(e, doc):
'''
It's difficult to replace a keyword with an entire Block element.
This is because the keyword is of type Str (an Inline) and the parent
object of a Str can only contain Inlines and not Blocks
(e.g. Para can contain Inlines, not Divs)
Implications:
1) If the Str that contains the keyword is inside another
Inline instead of a Block (e.g. Div -> Emph -> Str)
then we have to do a trick:
when .walk() touches an Emph that contains Str(keyword),
it replaces the Emph with Str(keyword).
2) If the element that contains the Str(keyword) has multiple children,
then we are in a bind as replacing it will destroy information.
Thus, we can't do it.
3) If the element that contains the Str(keyword) does so in a DictContainer
instead of a ListContainer, then we cannot retrieve the "first and only
element" easily, so we also abort (happens with metadata elements).
'''
# Here we can check that e.content is ListContainer (i.e. not DictContainer)
# or check that e is not a Metavalue ("not isinstance(e, MetaValue)")
if hasattr(e, 'content') and isinstance(e.content, ListContainer) and len(e.content) == 1:
ee = e.content[0]
if type(ee) == Str and ee.text == keyword:
if isinstance(e, Block):
doc.num_matches += 1
if not count or doc.num_matches <= count:
return replacement
elif isinstance(e, Inline):
return Str(keyword)
else:
pass # not implemented
doc = self.doc
if doc is None:
raise Exception('No root document')
doc.num_matches = 0
if isinstance(replacement, Inline):
return self.walk(replace_with_inline, doc)
elif isinstance(replacement, Block):
return self.walk(replace_with_block, doc)
else:
raise NotImplementedError(type(replacement))
# Bind the method
Element.replace_keyword = _replace_keyword
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
"""
Fetch an option variable from either a local (element) level option/attribute tag,
a document level metadata tag, or a default.
:type options: ``dict``
:type local_tag: ``str``
:type doc: :class:`Doc`
:type doc_tag: ``str``
:type default: ``any``
:type error_on_none: ``bool``
The order of preference is local > document > default,
although if a local or document tag returns None, then the next level down is used.
Also, if error_on_none=True and the final variable is None, then a ValueError will be raised
In this manner you can set global variables, which can be optionally overridden at a local level.
For example, the two files below show how to apply different styles to docx text:
**main.md:**
.. code-block:: none
:linenos:
------------------
style-div:
name: MyStyle
------------------
:::style
some text
:::
::: {.style name=MyOtherStyle}
some more text
:::
**style_filter.py:**
.. code-block:: python
:linenos:
import panflute as pf
def action(elem, doc):
if type(elem) == pf.Div:
style = pf.get_option(elem.attributes, "name", doc, "style-div.name")
elem.attributes["custom-style"] = style
def main(doc=None):
return run_filter(action, doc=doc)
if __name__ == "__main__":
main()
"""
variable = None
# element level
if options is not None and local_tag is not None:
if local_tag in options and options[local_tag] is not None:
variable = options[local_tag]
if variable is not None:
return variable
# doc level
if doc is not None and doc_tag is not None:
variable = doc.get_metadata(doc_tag, None)
if variable is not None:
return variable
# default level
variable = default
if variable is None and error_on_none:
raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
return variable
|
69ec86d53dce85e7256b27da0c41bcec7cdcd9c2
|
852b57a1a2a0fa6b0d23bef16c4a989d369936e9
|
/tests/sync/test_tracing.py
|
5c50842401bb2caf296d33d40bf49dfad10de768
|
[
"Apache-2.0"
] |
permissive
|
microsoft/playwright-python
|
e28badf23e20f948b4063a314e906006dcdff7fa
|
42c0bf19d7ae415552172d7c04cdb7afd9dad7fb
|
refs/heads/main
| 2023-08-22T17:49:04.645213
| 2023-08-14T12:52:46
| 2023-08-14T12:52:46
| 276,414,382
| 9,615
| 870
|
Apache-2.0
| 2023-09-05T17:07:48
| 2020-07-01T15:28:13
|
Python
|
UTF-8
|
Python
| false
| false
| 10,397
|
py
|
test_tracing.py
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import zipfile
from pathlib import Path
from typing import Any, Dict, List, Tuple
from playwright.sync_api import Browser, BrowserContext, BrowserType, Page
from tests.server import Server
def test_browser_context_output_trace(
browser: Browser, server: Server, tmp_path: Path
) -> None:
context = browser.new_context()
context.tracing.start(screenshots=True, snapshots=True)
page = context.new_page()
page.goto(server.PREFIX + "/grid.html")
context.tracing.stop(path=tmp_path / "trace.zip")
assert Path(tmp_path / "trace.zip").exists()
def test_browser_context_should_not_throw_when_stopping_without_start_but_not_exporting(
context: BrowserContext,
) -> None:
context.tracing.stop()
def test_browser_context_output_trace_chunk(
browser: Browser, server: Server, tmp_path: Path
) -> None:
context = browser.new_context()
context.tracing.start(screenshots=True, snapshots=True)
page = context.new_page()
page.goto(server.PREFIX + "/grid.html")
button = page.locator(".box").first
context.tracing.start_chunk(title="foo")
button.click()
context.tracing.stop_chunk(path=tmp_path / "trace1.zip")
assert Path(tmp_path / "trace1.zip").exists()
context.tracing.start_chunk(title="foo")
button.click()
context.tracing.stop_chunk(path=tmp_path / "trace2.zip")
assert Path(tmp_path / "trace2.zip").exists()
def test_should_collect_sources(
context: BrowserContext, page: Page, server: Server, tmp_path: Path
) -> None:
context.tracing.start(sources=True)
page.goto(server.EMPTY_PAGE)
page.set_content("<button>Click</button>")
page.click("button")
path = tmp_path / "trace.zip"
context.tracing.stop(path=path)
(resources, events) = parse_trace(path)
current_file_content = Path(__file__).read_bytes()
found_current_file = False
for name, resource in resources.items():
if resource == current_file_content:
found_current_file = True
break
assert found_current_file
def test_should_collect_trace_with_resources_but_no_js(
context: BrowserContext, page: Page, server: Server, tmpdir: Path
) -> None:
context.tracing.start(screenshots=True, snapshots=True)
page.goto(server.PREFIX + "/frames/frame.html")
page.set_content("<button>Click</button>")
page.click('"Click"')
page.mouse.move(20, 20)
page.mouse.dblclick(30, 30)
page.keyboard.insert_text("abc")
page.wait_for_timeout(2000) # Give it some time to produce screenshots.
page.route(
"**/empty.html", lambda route: route.continue_()
) # should produce a route.continue_ entry.
page.goto(server.EMPTY_PAGE)
page.goto(
server.PREFIX + "/one-style.html"
) # should not produce a route.continue_ entry since we continue all routes if no match.
page.close()
trace_file_path = tmpdir / "trace.zip"
context.tracing.stop(path=trace_file_path)
(_, events) = parse_trace(trace_file_path)
assert events[0]["type"] == "context-options"
assert get_actions(events) == [
"Page.goto",
"Page.set_content",
"Page.click",
"Mouse.move",
"Mouse.dblclick",
"Keyboard.insert_text",
"Page.wait_for_timeout",
"Page.route",
"Page.goto",
"Route.continue_",
"Page.goto",
"Page.close",
]
assert len(list(filter(lambda e: e["type"] == "frame-snapshot", events))) >= 1
assert len(list(filter(lambda e: e["type"] == "screencast-frame", events))) >= 1
style = list(
filter(
lambda e: e["type"] == "resource-snapshot"
and e["snapshot"]["request"]["url"].endswith("style.css"),
events,
)
)[0]
assert style
assert style["snapshot"]["response"]["content"]["_sha1"]
script = list(
filter(
lambda e: e["type"] == "resource-snapshot"
and e["snapshot"]["request"]["url"].endswith("script.js"),
events,
)
)[0]
assert script
assert script["snapshot"]["response"]["content"].get("_sha1") is None
def test_should_collect_two_traces(
context: BrowserContext, page: Page, server: Server, tmpdir: Path
) -> None:
context.tracing.start(screenshots=True, snapshots=True)
page.goto(server.EMPTY_PAGE)
page.set_content("<button>Click</button>")
page.click('"Click"')
tracing1_path = tmpdir / "trace1.zip"
context.tracing.stop(path=tracing1_path)
context.tracing.start(screenshots=True, snapshots=True)
page.dblclick('"Click"')
page.close()
tracing2_path = tmpdir / "trace2.zip"
context.tracing.stop(path=tracing2_path)
(_, events) = parse_trace(tracing1_path)
assert events[0]["type"] == "context-options"
assert get_actions(events) == [
"Page.goto",
"Page.set_content",
"Page.click",
]
(_, events) = parse_trace(tracing2_path)
assert events[0]["type"] == "context-options"
assert get_actions(events) == ["Page.dblclick", "Page.close"]
def test_should_not_throw_when_stopping_without_start_but_not_exporting(
context: BrowserContext,
) -> None:
context.tracing.stop()
def test_should_work_with_playwright_context_managers(
context: BrowserContext, page: Page, server: Server, tmpdir: Path
) -> None:
context.tracing.start(screenshots=True, snapshots=True)
page.goto(server.EMPTY_PAGE)
page.set_content("<button>Click</button>")
with page.expect_console_message() as message_info:
page.evaluate('() => console.log("hello")')
page.click('"Click"')
assert (message_info.value).text == "hello"
with page.expect_popup():
page.evaluate("window._popup = window.open(document.location.href)")
trace_file_path = tmpdir / "trace.zip"
context.tracing.stop(path=trace_file_path)
(_, events) = parse_trace(trace_file_path)
assert events[0]["type"] == "context-options"
assert get_actions(events) == [
"Page.goto",
"Page.set_content",
"Page.expect_console_message",
"Page.evaluate",
"Page.click",
"Page.expect_popup",
"Page.evaluate",
]
def test_should_display_wait_for_load_state_even_if_did_not_wait_for_it(
context: BrowserContext, page: Page, server: Server, tmpdir: Path
) -> None:
context.tracing.start(screenshots=True, snapshots=True)
page.goto(server.EMPTY_PAGE)
page.wait_for_load_state("load")
page.wait_for_load_state("load")
trace_file_path = tmpdir / "trace.zip"
context.tracing.stop(path=trace_file_path)
(_, events) = parse_trace(trace_file_path)
assert get_actions(events) == [
"Page.goto",
"Page.wait_for_load_state",
"Page.wait_for_load_state",
]
def test_should_respect_traces_dir_and_name(
browser_type: BrowserType,
server: Server,
tmpdir: Path,
launch_arguments: Any,
) -> None:
traces_dir = tmpdir / "traces"
browser = browser_type.launch(traces_dir=traces_dir, **launch_arguments)
context = browser.new_context()
page = context.new_page()
context.tracing.start(name="name1", snapshots=True)
page.goto(server.PREFIX + "/one-style.html")
context.tracing.stop_chunk(path=tmpdir / "trace1.zip")
assert (traces_dir / "name1.trace").exists()
assert (traces_dir / "name1.network").exists()
context.tracing.start_chunk(name="name2")
page.goto(server.PREFIX + "/har.html")
context.tracing.stop(path=tmpdir / "trace2.zip")
assert (traces_dir / "name2.trace").exists()
assert (traces_dir / "name2.network").exists()
browser.close()
def resource_names(resources: Dict[str, bytes]) -> List[str]:
return sorted(
[
re.sub(r"^resources/.*\.(html|css)$", r"resources/XXX.\g<1>", file)
for file in resources.keys()
]
)
(resources, events) = parse_trace(tmpdir / "trace1.zip")
assert get_actions(events) == ["Page.goto"]
assert resource_names(resources) == [
"resources/XXX.css",
"resources/XXX.html",
"trace.network",
"trace.stacks",
"trace.trace",
]
(resources, events) = parse_trace(tmpdir / "trace2.zip")
assert get_actions(events) == ["Page.goto"]
assert resource_names(resources) == [
"resources/XXX.css",
"resources/XXX.html",
"resources/XXX.html",
"trace.network",
"trace.stacks",
"trace.trace",
]
def parse_trace(path: Path) -> Tuple[Dict[str, bytes], List[Any]]:
resources: Dict[str, bytes] = {}
with zipfile.ZipFile(path, "r") as zip:
for name in zip.namelist():
resources[name] = zip.read(name)
action_map: Dict[str, Any] = {}
events: List[Any] = []
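# Each line of trace.trace / trace.network is a JSON event. A single API call is recorded as a
# "before" event plus (optionally) "input" and "after" events sharing the same callId, so fold
# them into one "action" event and copy over any error reported by the matching "after" record.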
for name in ["trace.trace", "trace.network"]:
for line in resources[name].decode().splitlines():
if not line:
continue
event = json.loads(line)
if event["type"] == "before":
event["type"] = "action"
action_map[event["callId"]] = event
events.append(event)
elif event["type"] == "input":
pass
elif event["type"] == "after":
existing = action_map[event["callId"]]
existing["error"] = event.get("error", None)
else:
events.append(event)
return (resources, events)
def get_actions(events: List[Any]) -> List[str]:
action_events = sorted(
list(
filter(
lambda e: e["type"] == "action",
events,
)
),
key=lambda e: e["startTime"],
)
return [e["apiName"] for e in action_events]
|
342727704b0e4a38266acdfe9a3fafbc2a3afa85
|
ab4e2883680fd0ab576d9b30be88c08f38e9562b
|
/cornac/eval_methods/propensity_stratified_evaluation.py
|
aa1751e06986c798ed481b76d8a59bdae832bed0
|
[
"Apache-2.0"
] |
permissive
|
PreferredAI/cornac
|
6b2ac4589dfe38a41128b0d00b7fc642a5ae41ba
|
b0d6fe83e9c99bfe41312059ae849ca1009a179f
|
refs/heads/master
| 2023-08-17T18:17:52.745292
| 2023-08-13T04:51:13
| 2023-08-13T04:51:13
| 141,242,285
| 806
| 150
|
Apache-2.0
| 2023-09-14T09:19:15
| 2018-07-17T06:31:35
|
Python
|
UTF-8
|
Python
| false
| false
| 14,383
|
py
|
propensity_stratified_evaluation.py
|
import time
from collections import defaultdict
from collections import OrderedDict
import powerlaw
import numpy as np
import tqdm.auto as tqdm
from ..utils.common import safe_indexing
from ..data import Dataset
from .base_method import BaseMethod, rating_eval
from .ratio_split import RatioSplit
from ..experiment.result import Result, PSTResult
def ranking_eval(
model,
metrics,
train_set,
test_set,
val_set=None,
rating_threshold=1.0,
exclude_unknowns=True,
verbose=False,
props=None,
):
"""Evaluate model on provided ranking metrics.
Parameters
----------
model: :obj:`cornac.models.Recommender`, required
Recommender model to be evaluated.
metrics: :obj:`iterable`, required
List of rating metrics :obj:`cornac.metrics.RankingMetric`.
train_set: :obj:`cornac.data.Dataset`, required
Dataset to be used for model training. This will be used to exclude
observations already appeared during training.
test_set: :obj:`cornac.data.Dataset`, required
Dataset to be used for evaluation.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
Dataset to be used for model selection. This will be used to exclude
observations already appeared during validation.
rating_threshold: float, optional, default: 1.0
The threshold to convert ratings into positive or negative feedback.
exclude_unknowns: bool, optional, default: True
Ignore unknown users and items during evaluation.
verbose: bool, optional, default: False
Output evaluation progress.
props: dictionary, optional, default: None
items propensity scores
Returns
-------
res: (List, List)
Tuple of two lists:
- average result for each of the metrics
- average result per user for each of the metrics
"""
if len(metrics) == 0:
return [], []
avg_results = []
user_results = [{} for _ in enumerate(metrics)]
gt_mat = test_set.csr_matrix
train_mat = train_set.csr_matrix
val_mat = None if val_set is None else val_set.csr_matrix
def pos_items(csr_row):
return [
item_idx
for (item_idx, rating) in zip(csr_row.indices, csr_row.data)
if rating >= rating_threshold
]
for user_idx in tqdm.tqdm(test_set.user_indices, disable=not verbose, miniters=100):
test_pos_items = pos_items(gt_mat.getrow(user_idx))
if len(test_pos_items) == 0:
continue
u_gt_pos = np.zeros(test_set.num_items, dtype='float')
u_gt_pos[test_pos_items] = 1
val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
train_pos_items = (
[]
if train_set.is_unk_user(user_idx)
else pos_items(train_mat.getrow(user_idx))
)
u_gt_neg = np.ones(test_set.num_items, dtype='int')
u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
item_indices = None if exclude_unknowns else np.arange(test_set.num_items)
item_rank, item_scores = model.rank(user_idx, item_indices)
total_pi = 0.0
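# Inverse Propensity Scoring: when propensity scores are supplied, each relevant item is
# re-weighted by 1 / propensity, so interactions with rarely observed items contribute more
# to the ranking metrics than interactions with frequently observed (popular) ones.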
if props is not None:
for idx, e in enumerate(u_gt_pos):
if e > 0 and props[str(idx)] > 0:
u_gt_pos[idx] /= props[str(idx)]
total_pi += 1 / props[str(idx)]
for i, mt in enumerate(metrics):
mt_score = mt.compute(
gt_pos=u_gt_pos,
gt_neg=u_gt_neg,
pd_rank=item_rank,
pd_scores=item_scores,
)
user_results[i][user_idx] = mt_score
# avg results of ranking metrics
for i, mt in enumerate(metrics):
avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
return avg_results, user_results
class PropensityStratifiedEvaluation(BaseMethod):
"""Propensity-based Stratified Evaluation Method proposed by Jadidinejad et al. (2021)
Parameters
----------
data: array-like, required
Raw preference data in the triplet format [(user_id, item_id, rating_value)].
test_size: float, optional, default: 0.2
The proportion of the test set,
if > 1 then it is treated as the size of the test set.
val_size: float, optional, default: 0.0
The proportion of the validation set, \
if > 1 then it is treated as the size of the validation set.
n_strata: int, optional, default: 2
The number of strata for propensity-based stratification.
rating_threshold: float, optional, default: 1.0
Threshold used to binarize rating values into positive or negative feedback for
model evaluation using ranking metrics (rating metrics are not affected).
seed: int, optional, default: None
Random seed for reproducibility.
exclude_unknowns: bool, optional, default: True
If `True`, unknown users and items will be ignored during model evaluation.
verbose: bool, optional, default: False
Output running log.
References
----------
Amir H. Jadidinejad, Craig Macdonald and Iadh Ounis,
The Simpson's Paradox in the Offline Evaluation of Recommendation Systems,
ACM Transactions on Information Systems (to appear)
https://arxiv.org/abs/2104.08912
"""
def __init__(
self,
data,
test_size=0.2,
val_size=0.0,
n_strata=2,
rating_threshold=1.0,
seed=None,
exclude_unknowns=True,
verbose=False,
**kwargs,
):
BaseMethod.__init__(
self,
data=data,
rating_threshold=rating_threshold,
seed=seed,
exclude_unknowns=exclude_unknowns,
verbose=verbose,
**kwargs,
)
self.n_strata = n_strata
# estimate propensities
self.props = self._estimate_propensities()
# split the data into train/valid/test sets
self.train_size, self.val_size, self.test_size = RatioSplit.validate_size(
val_size, test_size, len(self._data)
)
self._split()
def _eval(self, model, test_set, val_set, user_based, props=None):
metric_avg_results = OrderedDict()
metric_user_results = OrderedDict()
avg_results, user_results = rating_eval(
model=model,
metrics=self.rating_metrics,
test_set=test_set,
user_based=user_based,
)
for i, mt in enumerate(self.rating_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
avg_results, user_results = ranking_eval(
model=model,
metrics=self.ranking_metrics,
train_set=self.train_set,
test_set=test_set,
val_set=val_set,
rating_threshold=self.rating_threshold,
exclude_unknowns=self.exclude_unknowns,
verbose=self.verbose,
props=props,
)
for i, mt in enumerate(self.ranking_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
return Result(model.name, metric_avg_results, metric_user_results)
def _split(self):
data_idx = self.rng.permutation(len(self._data))
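# Split over a random permutation of the data: the first train_size indices become the training
# set, the last test_size the test set, and whatever remains in the middle the validation set.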
train_idx = data_idx[: self.train_size]
test_idx = data_idx[-self.test_size :]
val_idx = data_idx[self.train_size : -self.test_size]
train_data = safe_indexing(self._data, train_idx)
test_data = safe_indexing(self._data, test_idx)
val_data = safe_indexing(self._data, val_idx) if len(val_idx) > 0 else None
# build train/test/valid datasets
self._build_datasets(
train_data=train_data, test_data=test_data, val_data=val_data
)
# build stratified dataset
self._build_stratified_dataset(test_data=test_data)
def _estimate_propensities(self):
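# Propensities are approximated from item popularity: fit a discrete power law to the item
# frequency distribution, then use freq**alpha as the (user-independent) propensity for every
# item whose frequency exceeds the fitted xmin; less frequent items keep their raw frequency.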
# find the item's frequencies
item_freq = defaultdict(int)
for u, i, r in self._data:
item_freq[i] += 1
# fit the exponential param
data = np.array([e for e in item_freq.values()], dtype='float')
results = powerlaw.Fit(data, discrete=True, fit_method="Likelihood")
alpha = results.power_law.alpha
fmin = results.power_law.xmin
if self.verbose:
print("Powerlaw exponential estimates: %f, min=%d" % (alpha, fmin))
# replace raw frequencies with the estimated propensities
for k, v in item_freq.items():
if v > fmin:
item_freq[k] = pow(v, alpha)
return item_freq # user-independent propensity estimations
def _build_stratified_dataset(self, test_data):
# build stratified datasets
self.stratified_sets = {}
# match the corresponding propensity score for each feedback
test_props = np.array(
[self.props[i] for u, i, r in test_data], dtype='float'
)
# stratify
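# (equal-width strata: pad the observed propensity range by 1% on each side, cut it into
# n_strata slices, and let np.digitize assign every test interaction to a stratum Q1..Qn)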
minp = min(test_props) - 0.01 * min(test_props)
maxp = max(test_props) + 0.01 * max(test_props)
slice = (maxp - minp) / self.n_strata
strata = [
f"Q{idx}"
for idx in np.digitize(x=test_props, bins=np.arange(minp, maxp, slice))
]
for stratum in sorted(np.unique(strata)):
# sample the corresponding sub-population
qtest_data = []
for (u, i, r), q in zip(test_data, strata):
if q == stratum:
qtest_data.append((u, i, r))
# build a dataset
qtest_set = Dataset.build(
data=qtest_data,
fmt=self.fmt,
global_uid_map=self.global_uid_map,
global_iid_map=self.global_iid_map,
seed=self.seed,
exclude_unknowns=self.exclude_unknowns,
)
if self.verbose:
print("---")
print("Test data ({}):".format(stratum))
print("Number of users = {}".format(len(qtest_set.uid_map)))
print("Number of items = {}".format(len(qtest_set.iid_map)))
print("Number of ratings = {}".format(qtest_set.num_ratings))
print("Max rating = {:.1f}".format(qtest_set.max_rating))
print("Min rating = {:.1f}".format(qtest_set.min_rating))
print("Global mean = {:.1f}".format(qtest_set.global_mean))
print(
"Number of unknown users = {}".format(
qtest_set.num_users - self.train_set.num_users
)
)
print(
"Number of unknown items = {}".format(
self.test_set.num_items - self.train_set.num_items
)
)
self.stratified_sets[stratum] = qtest_set
def evaluate(self, model, metrics, user_based, show_validation=True):
"""Evaluate given models according to given metrics
Parameters
----------
model: :obj:`cornac.models.Recommender`
Recommender model to be evaluated.
metrics: :obj:`iterable`
List of metrics.
user_based: bool, required
Evaluation strategy for the rating metrics. Whether results
are averaging based on number of users or number of ratings.
show_validation: bool, optional, default: True
Whether to show the results on validation set (if exists).
Returns
-------
res: :obj:`cornac.experiment.Result`
"""
result = PSTResult(model.name)
if self.train_set is None:
raise ValueError("train_set is required but None!")
if self.test_set is None:
raise ValueError("test_set is required but None!")
self._reset()
self._organize_metrics(metrics)
###########
# FITTING #
###########
if self.verbose:
print("\n[{}] Training started!".format(model.name))
start = time.time()
model.fit(self.train_set, self.val_set)
train_time = time.time() - start
##############
# EVALUATION #
##############
if self.verbose:
print("\n[{}] Evaluation started!".format(model.name))
# evaluate on the sampled test set (closed-loop)
test_result = self._eval(
model=model,
test_set=self.test_set,
val_set=self.val_set,
user_based=user_based,
)
test_result.metric_avg_results["SIZE"] = self.test_set.num_ratings
result.append(test_result)
if self.verbose:
print("\n[{}] IPS Evaluation started!".format(model.name))
# evaluate based on Inverse Propensity Scoring
ips_result = self._eval(
model=model,
test_set=self.test_set,
val_set=self.val_set,
user_based=user_based,
props=self.props,
)
ips_result.metric_avg_results["SIZE"] = self.test_set.num_ratings
result.append(ips_result)
if self.verbose:
print("\n[{}] Stratified Evaluation started!".format(model.name))
# evaluate on different strata
start = time.time()
for _, qtest_set in self.stratified_sets.items():
qtest_result = self._eval(
model=model,
test_set=qtest_set,
val_set=self.val_set,
user_based=user_based,
)
test_time = time.time() - start
qtest_result.metric_avg_results["SIZE"] = qtest_set.num_ratings
result.append(qtest_result)
result.organize()
val_result = None
if show_validation and self.val_set is not None:
start = time.time()
val_result = self._eval(
model=model, test_set=self.val_set, val_set=None, user_based=user_based
)
val_time = time.time() - start
return result, val_result
|
816af407252416c7920e38e75c056ea91ed94104
|
ff443629c167f318d071f62886581167c51690c4
|
/test/lint/lint-python.py
|
6010c787cb9cdac97dc7691397138ada7611189b
|
[
"MIT"
] |
permissive
|
bitcoin/bitcoin
|
a618b2555d9fe5a2b613e5fec0f4b1eca3b4d86f
|
6f03c45f6bb5a6edaa3051968b6a1ca4f84d2ccb
|
refs/heads/master
| 2023-09-05T00:16:48.295861
| 2023-09-02T17:43:00
| 2023-09-02T17:46:33
| 1,181,927
| 77,104
| 33,708
|
MIT
| 2023-09-14T20:47:31
| 2010-12-19T15:16:43
|
C++
|
UTF-8
|
Python
| false
| false
| 5,818
|
py
|
lint-python.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Check for specified flake8 and mypy warnings in python files.
"""
import os
import subprocess
import sys
from importlib.metadata import metadata, PackageNotFoundError
DEPS = ['flake8', 'lief', 'mypy', 'pyzmq']
MYPY_CACHE_DIR = f"{os.getenv('BASE_ROOT_DIR', '')}/test/.mypy_cache"
# All .py files, except those in src/ (to exclude subtrees there)
FLAKE_FILES_ARGS = ['git', 'ls-files', '*.py', ':!:src/*.py']
# Only .py files in test/functional and contrib/devtools have type annotations
# enforced.
MYPY_FILES_ARGS = ['git', 'ls-files', 'test/functional/*.py', 'contrib/devtools/*.py']
ENABLED = (
'E101,' # indentation contains mixed spaces and tabs
'E112,' # expected an indented block
'E113,' # unexpected indentation
'E115,' # expected an indented block (comment)
'E116,' # unexpected indentation (comment)
'E125,' # continuation line with same indent as next logical line
'E129,' # visually indented line with same indent as next logical line
'E131,' # continuation line unaligned for hanging indent
'E133,' # closing bracket is missing indentation
'E223,' # tab before operator
'E224,' # tab after operator
'E242,' # tab after ','
'E266,' # too many leading '#' for block comment
'E271,' # multiple spaces after keyword
'E272,' # multiple spaces before keyword
'E273,' # tab after keyword
'E274,' # tab before keyword
'E275,' # missing whitespace after keyword
'E304,' # blank lines found after function decorator
'E306,' # expected 1 blank line before a nested definition
'E401,' # multiple imports on one line
'E402,' # module level import not at top of file
'E502,' # the backslash is redundant between brackets
'E701,' # multiple statements on one line (colon)
'E702,' # multiple statements on one line (semicolon)
'E703,' # statement ends with a semicolon
'E711,' # comparison to None should be 'if cond is None:'
'E714,' # test for object identity should be "is not"
'E721,' # do not compare types, use "isinstance()"
'E722,' # do not use bare 'except'
'E742,' # do not define classes named "l", "O", or "I"
'E743,' # do not define functions named "l", "O", or "I"
'E901,' # SyntaxError: invalid syntax
'E902,' # TokenError: EOF in multi-line string
'F401,' # module imported but unused
'F402,' # import module from line N shadowed by loop variable
'F403,' # 'from foo_module import *' used; unable to detect undefined names
'F404,' # future import(s) name after other statements
'F405,' # foo_function may be undefined, or defined from star imports: bar_module
'F406,' # "from module import *" only allowed at module level
'F407,' # an undefined __future__ feature name was imported
'F601,' # dictionary key name repeated with different values
'F602,' # dictionary key variable name repeated with different values
'F621,' # too many expressions in an assignment with star-unpacking
'F622,' # two or more starred expressions in an assignment (a, *b, *c = d)
'F631,' # assertion test is a tuple, which are always True
'F632,' # use ==/!= to compare str, bytes, and int literals
'F701,' # a break statement outside of a while or for loop
'F702,' # a continue statement outside of a while or for loop
'F703,' # a continue statement in a finally block in a loop
'F704,' # a yield or yield from statement outside of a function
'F705,' # a return statement with arguments inside a generator
'F706,' # a return statement outside of a function/method
'F707,' # an except: block as not the last exception handler
'F811,' # redefinition of unused name from line N
'F812,' # list comprehension redefines 'foo' from line N
'F821,' # undefined name 'Foo'
'F822,' # undefined name name in __all__
'F823,' # local variable name … referenced before assignment
'F831,' # duplicate argument name in function definition
'F841,' # local variable 'foo' is assigned to but never used
'W191,' # indentation contains tabs
'W291,' # trailing whitespace
'W292,' # no newline at end of file
'W293,' # blank line contains whitespace
'W601,' # .has_key() is deprecated, use "in"
'W602,' # deprecated form of raising exception
'W603,' # "<>" is deprecated, use "!="
'W604,' # backticks are deprecated, use "repr()"
'W605,' # invalid escape sequence "x"
'W606,' # 'async' and 'await' are reserved keywords starting with Python 3.7
)
def check_dependencies():
for dep in DEPS:
try:
metadata(dep)
except PackageNotFoundError:
print(f"Skipping Python linting since {dep} is not installed.")
exit(0)
def main():
check_dependencies()
if len(sys.argv) > 1:
flake8_files = sys.argv[1:]
else:
flake8_files = subprocess.check_output(FLAKE_FILES_ARGS).decode("utf-8").splitlines()
flake8_args = ['flake8', '--ignore=B,C,E,F,I,N,W', f'--select={ENABLED}'] + flake8_files
flake8_env = os.environ.copy()
flake8_env["PYTHONWARNINGS"] = "ignore"
try:
subprocess.check_call(flake8_args, env=flake8_env)
except subprocess.CalledProcessError:
exit(1)
mypy_files = subprocess.check_output(MYPY_FILES_ARGS).decode("utf-8").splitlines()
mypy_args = ['mypy', '--show-error-codes'] + mypy_files
try:
subprocess.check_call(mypy_args)
except subprocess.CalledProcessError:
exit(1)
if __name__ == "__main__":
main()
|
14af8a75316e98e2a4e1c841b0e81baa82cfedfe
|
9e1f60a867f66b1f4e4fc84fa4252c581e5e1a36
|
/Chapter06/descriptors_pythonic_1.py
|
67b266cae1968dbbd694c1aacfb85e2d7e0fd181
|
[
"MIT"
] |
permissive
|
PacktPublishing/Clean-Code-in-Python
|
c216e002485b8cd7736f97b59215a3930f35359a
|
7348d0f9f42871f499b352e0696e0cef51c4f8c6
|
refs/heads/master
| 2023-06-10T13:40:33.331115
| 2023-05-30T17:48:09
| 2023-05-30T17:48:09
| 145,072,942
| 523
| 181
|
MIT
| 2023-05-30T17:48:10
| 2018-08-17T04:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
descriptors_pythonic_1.py
|
"""Clean Code in Python - Chapter 6: Descriptors
> A Pythonic Implementation
"""
import time
class Traveller:
"""A person visiting several cities.
We wish to track the path of the traveller, as he or she is visiting each
new city.
>>> alice = Traveller("Alice", "Barcelona")
>>> alice.current_city = "Paris"
>>> alice.current_city = "Brussels"
>>> alice.current_city = "Amsterdam"
>>> alice.cities_visited
['Barcelona', 'Paris', 'Brussels', 'Amsterdam']
>>> alice.current_city
'Amsterdam'
>>> alice.current_city = "Amsterdam"
>>> alice.cities_visited
['Barcelona', 'Paris', 'Brussels', 'Amsterdam']
>>> bob = Traveller("Bob", "Rotterdam")
>>> bob.current_city = "Amsterdam"
>>> bob.current_city
'Amsterdam'
>>> bob.cities_visited
['Rotterdam', 'Amsterdam']
"""
def __init__(self, name, current_city):
self.name = name
self._current_city = current_city
self._cities_visited = [current_city]
@property
def current_city(self):
return self._current_city
@current_city.setter
def current_city(self, new_city):
if new_city != self._current_city:
self._cities_visited.append(new_city)
self._current_city = new_city
@property
def cities_visited(self):
return self._cities_visited
|
d295f90736a8801c5beefdf862949445c6d141db
|
540b4199dd80228f1d84c9b687e974cfa2c289a2
|
/【Python+Dash快速web应用开发】系列文章/18 项目结构篇/dash_demo_project/models/age.py
|
fabfaf20a2554db87a32eea207cf64d3b418d89f
|
[] |
no_license
|
CNFeffery/DataScienceStudyNotes
|
1186e26c88874b89b65f841af5f78dc49429e479
|
d45b42b49be04ba4add9cdd18b4787fb3c334b1f
|
refs/heads/master
| 2023-08-17T07:18:43.730916
| 2023-07-25T14:05:17
| 2023-07-25T14:05:17
| 206,516,448
| 1,141
| 485
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
age.py
|
from peewee import SqliteDatabase, Model
from peewee import CharField, FloatField
db = SqliteDatabase('models/age.db')
class Age(Model):
# region, unique
region = CharField(unique=True)
# proportion of population aged 0-14
prop_0_to_14 = FloatField()
# proportion of population aged 15-59
prop_15_59 = FloatField()
# proportion of population aged 60 and above
prop_60_above = FloatField()
# proportion of population aged 65 and above
prop_65_above = FloatField()
class Meta:
database = db
primary_key = False  # do not auto-generate a unique id column
@classmethod
def fetch_all(cls):
return list(cls.select().dicts())
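# Illustrative usage (not part of the original module): Age.fetch_all() returns plain dicts such
# as [{'region': ..., 'prop_0_to_14': ..., ...}], which can be consumed directly by Dash callbacks.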
|
cdbc6192e5c21788bf0aeee18ef1b43df9603d24
|
42965d9ad4214758fe25232c53b291099924f659
|
/lib/convert_files.py
|
6ebcc7db9f4e56e1a418c3c136862ad5aac59091
|
[
"MIT"
] |
permissive
|
chaosparrot/parrot.py
|
802a6af6f2c21d8e0d18d673fada93e4a4101144
|
c8e5562600612ad6fdb93c96781d7c23439f1cab
|
refs/heads/master
| 2023-08-03T12:27:09.166527
| 2023-04-10T08:34:56
| 2023-04-10T08:34:56
| 178,544,997
| 126
| 34
|
MIT
| 2023-03-14T18:42:52
| 2019-03-30T10:37:28
|
Python
|
UTF-8
|
Python
| false
| false
| 10,920
|
py
|
convert_files.py
|
from config.config import *
import os
import subprocess
import math
import time
import wave
import pyaudio  # explicit import: pyaudio.PyAudio() is used below to look up the sample width for FORMAT
from queue import *
import numpy as np
from scipy.fftpack import fft
from scipy.fftpack import fftfreq
from scipy.signal import blackmanharris
from lib.machinelearning import get_loudest_freq, get_recording_power
import audioop
def convert_files( with_intro ):
available_sounds = []
for fileindex, file in enumerate(os.listdir( RECORDINGS_FOLDER )):
if ( os.path.isdir(os.path.join(RECORDINGS_FOLDER, file)) and not file.startswith(".") ):
available_sounds.append( file )
try:
ffmpeg_configured = True
subprocess.call([PATH_TO_FFMPEG], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
except:
ffmpeg_configured = False
if( len( available_sounds ) == 0 ):
print( "It looks like you haven't recorded any sound yet..." )
print( "Please make sure to put your audio files inside a subfolder of " + RECORDINGS_FOLDER )
return
elif( with_intro ):
print("-------------------------")
print("File conversion possible on the following sounds: ")
print( ", ".join(available_sounds))
print("-------------------------")
if ( ffmpeg_configured ):
print(" - [W] for transforming .flac to .wav.")
print( " Wav files are required to do training or segmentation on." )
print( " Audio will be resampled automatically " )
print(" - [R] for resampling .wav files ( in case you changed the CHANNELS or RATE after recording audio )")
print(" - [F] for transforming .wav to .flac.")
else:
print( "")
print( "!! FFMPEG was not found at the configured path '" + PATH_TO_FFMPEG + "'. If you desire to convert file extensions install it and point the path to the program at PATH_TO_FFMPEG in the config" )
print(" - [S] for segmenting a directory of sound files into chunks trainable by Parrot.py")
print(" - [C] for subclassifying existing wav files using a classifier to provide advanced pruning options.")
print(" - [X] to exit conversion mode")
convert_or_segment_files( available_sounds, ffmpeg_configured )
def convert_or_segment_files( available_sounds, ffmpeg_configured ):
convert_or_segment = input( "" )
if( convert_or_segment.lower() == "w" and ffmpeg_configured ):
sounds = determine_sounds( available_sounds, "to convert to .wav" )
convert_audiofile_extension( sounds, ".flac", ".wav", "flac_to_wav", True)
elif( convert_or_segment.lower() == "f" and ffmpeg_configured ):
sounds = determine_sounds( available_sounds, "to convert to .flac" )
convert_audiofile_extension( sounds, ".wav", ".flac", "wav_to_flac" )
elif( convert_or_segment.lower() == "r" and ffmpeg_configured ):
sounds = determine_sounds( available_sounds, "to resample .wav to " + str(RATE) + " with " + str(CHANNELS) + " channels" )
convert_audiofile_extension( sounds, ".wav", ".wav", "resampling_rate_" + str(RATE) + "_channels_" + str(CHANNELS), True)
elif( convert_or_segment.lower() == "s" ):
sounds = determine_sounds( available_sounds, "to segment into " + str(int(RECORD_SECONDS * 1000)) + " chunks")
segment_audiofiles( sounds )
elif( convert_or_segment.lower() == "c" ):
print( "TODO!" )
elif( convert_or_segment.lower() == "x" ):
print("")
return
else:
convert_or_segment_files( available_sounds, ffmpeg_configured )
def convert_audiofile_extension( sound_directories, input_extension, output_extension, operation, resampling=False ):
operation_directory = CONVERSION_OUTPUT_FOLDER + "/" + str(int(time.time())) + "_" + operation
for index, directory in enumerate(sound_directories):
print( "Converting " + directory + "..." )
full_directory_path = RECORDINGS_FOLDER + "/" + directory
output_directory = operation_directory + "/" + directory
if not os.path.exists(output_directory):
os.makedirs(output_directory)
files_to_convert = []
for fileindex, file in enumerate(os.listdir(full_directory_path)):
if ( file.endswith(input_extension) ):
files_to_convert.append( file )
amount_files_to_convert = len(files_to_convert)
if ( amount_files_to_convert == 0 ):
print( "No " + input_extension + " files found to convert - skipping" )
else:
for convert_index, file in enumerate(files_to_convert):
input_file = full_directory_path + "/" + file
output_file = output_directory + "/" + file.replace(input_extension, output_extension)
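# ffmpeg infers the output format from the file extension; when resampling we additionally force
# the configured sample rate (-ar) and channel count (-ac) so recordings match the current config.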
if ( resampling == True ):
process_to_call = [PATH_TO_FFMPEG, "-i", input_file, "-ar", str(RATE), "-ac", str(CHANNELS), output_file ]
else:
process_to_call = [PATH_TO_FFMPEG, "-i", input_file, output_file ]
subprocess.call(process_to_call, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
print( str( math.floor(((convert_index + 1 ) / amount_files_to_convert ) * 100)) + "%", end="\r" )
print( "Placed output of conversion in " + output_directory )
def segment_audiofiles(sound_directories):
operation_directory = CONVERSION_OUTPUT_FOLDER + "/" + str(int(time.time())) + "_segmentation"
print( "-------------------------" )
print( "Set up the filtering levels to filter out silence or other unrelated sounds")
print( "-------------------------" )
threshold = 0
power_threshold = input("What signal power threshold do you need? " )
if( power_threshold == "" ):
power_threshold = 0
else:
power_threshold = int( power_threshold )
frequency_threshold = input("What frequency threshold do you need? " )
if( frequency_threshold == "" ):
frequency_threshold = 0
else:
frequency_threshold = int( frequency_threshold )
begin_threshold = 10000
for index, directory in enumerate(sound_directories):
print( "Segmenting " + directory + "..." )
full_directory_path = RECORDINGS_FOLDER + "/" + directory
# Use the source folder if it is available instead
if os.path.exists(full_directory_path + "/source"):
full_directory_path += "/source"
output_directory = operation_directory + "/" + directory
if not os.path.exists(output_directory):
os.makedirs(output_directory)
files_to_segment = []
for fileindex, file in enumerate(os.listdir(full_directory_path)):
if ( file.endswith(".wav") ):
files_to_segment.append( file )
amount_files_to_segment = len(files_to_segment)
if ( amount_files_to_segment == 0 ):
print("No .wav files found to segment - skipping" )
else:
for fileindex, file in enumerate(files_to_segment):
segment_input_file( threshold, power_threshold, frequency_threshold, begin_threshold, full_directory_path + "/" + file, output_directory + "/" + file.replace(".wav", "-"), ".wav" )
print( "Put all the segmented files inside " + operation_directory )
# Segments an existing wav file and saves the chunks onto a queue
# The queue will be used as a sliding window over the audio, where two chunks are combined into one audio file
def segment_input_file(threshold, power_threshold, frequency_threshold, begin_threshold, WAVE_INPUT_FILE, WAVE_OUTPUT_FILENAME, WAVE_OUTPUT_FILE_EXTENSION):
audioFrames = []
wf = wave.open(WAVE_INPUT_FILE, 'rb')
number_channels = wf.getnchannels()
total_frames = wf.getnframes()
frame_rate = wf.getframerate()
frames_to_read = round( frame_rate * RECORD_SECONDS / SLIDING_WINDOW_AMOUNT )
files_recorded = 0
delay_threshold = 0
if( begin_threshold < 0 ):
delay_threshold = begin_threshold * -1
begin_threshold = 1000
audio = pyaudio.PyAudio()
record_wave_file_count = 0
index = 0
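# Slide a window of SLIDING_WINDOW_AMOUNT chunks (RECORD_SECONDS seconds in total) over the wav
# file; the joined window is analysed for intensity, loudest frequency and power, and written out
# as one segment whenever it passes the configured thresholds.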
while( wf.tell() < total_frames ):
index = index + 1
raw_wav = wf.readframes(frames_to_read * number_channels)
# If our wav file is shorter than the amount of bytes ( assuming 16 bit ) times the frames, we discard it and assume we arrived at the end of the file
if (len(raw_wav) != 2 * frames_to_read * number_channels ):
break;
else:
audioFrames.append(raw_wav)
if( len( audioFrames ) >= SLIDING_WINDOW_AMOUNT ):
audioFrames = audioFrames[-SLIDING_WINDOW_AMOUNT:]
intensity = [
audioop.maxpp( audioFrames[0], 4 ) / 32767,
audioop.maxpp( audioFrames[1], 4 ) / 32767
]
highestintensity = np.amax( intensity )
byteString = b''.join(audioFrames)
fftData = np.frombuffer( byteString, dtype=np.int16 )
frequency = get_loudest_freq( fftData, RECORD_SECONDS )
power = get_recording_power( fftData, RECORD_SECONDS )
print( "Segmenting file " + WAVE_INPUT_FILE + ": " + str( math.ceil(wf.tell() / total_frames * 100) ) + "%" , end="\r" )
if( frequency > frequency_threshold and highestintensity > threshold and power > power_threshold ):
record_wave_file_count += 1
if( record_wave_file_count <= begin_threshold and record_wave_file_count > delay_threshold ):
files_recorded += 1
waveFile = wave.open(WAVE_OUTPUT_FILENAME + str(index) + WAVE_OUTPUT_FILE_EXTENSION, 'wb')
waveFile.setnchannels(number_channels)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(frame_rate)
waveFile.writeframes(byteString)
waveFile.close()
else:
record_wave_file_count = 0
print( "Extracted " + str(files_recorded) + " segmented files from " + WAVE_INPUT_FILE )
wf.close()
def determine_sounds( available_sounds, verb = "to process" ):
print( "Selecting sounds " + verb + "... ( [Y]es / [N]o / [S]kip )" )
filtered_sounds = []
for sound in available_sounds:
add = input(" - " + sound)
if( add == "" or add.strip().lower() == "y" ):
filtered_sounds.append( sound )
elif( add.strip().lower() == "s" ):
break
else:
print( "Disabled " + sound )
return filtered_sounds
|
f7eeb27ec5c0827ce700b9607e06dcd6ad8e0779
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-UserNotificationsUI/Lib/UserNotificationsUI/__init__.py
|
d966483872291daacc3faac5f5879b58f48bc244
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 798
|
py
|
__init__.py
|
"""
Python mapping for the UserNotificationsUI framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import sys
import Cocoa
import UserNotifications
import objc
from UserNotificationsUI import _metadata
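# Install a lazy module object in sys.modules so that the framework's classes
# and functions are only resolved from the bridge metadata on first access.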
sys.modules["UserNotificationsUI"] = mod = objc.ObjCLazyModule(
"UserNotificationsUI",
"com.apple.UserNotificationsUI",
objc.pathForFramework("/System/Library/Frameworks/UserNotificationsUI.framework"),
_metadata.__dict__,
None,
{
"__doc__": __doc__,
"objc": objc,
"__path__": __path__,
"__loader__": globals().get("__loader__", None),
},
(UserNotifications, Cocoa),
)
del sys.modules["UserNotificationsUI._metadata"]
|
96418dda8c0facb41e6e2a8fb0de3dc34961edbf
|
499f5402baed77d000c65f243b457c69dc3d2fe4
|
/pycatia/hybrid_shape_interfaces/hybrid_shape_plane_offset_pt.py
|
2c99634c7809c240e97cf444793341a3294f9bdc
|
[
"MIT"
] |
permissive
|
evereux/pycatia
|
416189b34f3c60effea8a76258e36ffc5ae86e22
|
5f5726d5dc66265b3eba8a01910c4aeae424365d
|
refs/heads/master
| 2023-08-21T10:03:41.660445
| 2023-08-09T16:21:10
| 2023-08-09T16:21:10
| 159,069,580
| 141
| 42
|
MIT
| 2023-08-09T11:15:27
| 2018-11-25T20:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 3,765
|
py
|
hybrid_shape_plane_offset_pt.py
|
#!/usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.plane import Plane
from pycatia.in_interfaces.reference import Reference
class HybridShapePlaneOffsetPt(Plane):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| CATGSMIDLItf.Plane
| HybridShapePlaneOffsetPt
|
| Offset plane with point reference.
| Role: Allows to access data of the plane feature parallel to another plane and
| passing through a Point.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_plane_offset_pt = com_object
@property
def ref_plane(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Plane() As Reference
|
| Role: Get the reference plane.
|
| Parameters:
|
| oPlane
| reference Plane.
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_plane_offset_pt.Plane)
@ref_plane.setter
def ref_plane(self, reference_plane: Reference):
"""
:param Reference reference_plane:
"""
self.hybrid_shape_plane_offset_pt.Plane = reference_plane.com_object
@property
def point(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Point() As Reference
|
| Role: Get the reference point.
|
| Parameters:
|
| oPoint
| reference point.
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_plane_offset_pt.Point)
@point.setter
def point(self, reference_point: Reference):
"""
:param Reference reference_point:
"""
self.hybrid_shape_plane_offset_pt.Point = reference_point.com_object
def __repr__(self):
return f'HybridShapePlaneOffsetPt(name="{self.name}")'
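A minimal usage sketch for the class above, assuming a running CATIA session; the factory method name add_new_plane_offset_pt and the plane_ref / point_ref references are assumptions, not part of this module:

from pycatia import catia

caa = catia()
part = caa.active_document.part
hsf = part.hybrid_shape_factory
offset_plane = hsf.add_new_plane_offset_pt(plane_ref, point_ref)  # assumed factory call
print(offset_plane.ref_plane, offset_plane.point)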
|
a084eb63619278a87d3a7cf077610872ca30ee66
|
a2b20597759990445081057d35d113434cfcf970
|
/source/interprocedural_analyses/taint/test/integration/long_access_path_taint.py
|
f80fb9de0fe26f828a16db5cffb84f443e38b47c
|
[
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
long_access_path_taint.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import _test_sink, _test_source
from typing import Any, Dict, List, Optional, Tuple
class C:
def __init__(
self, id: int, params: Dict[str, Any], kind: str, request: str
) -> None:
self.id = id
self.timestamp = params.get("timestamp") or 0
self.app_id = params.get("app_id")
self.kind = kind
self.request = request
@classmethod
async def async_create(
cls, id: int, params: Dict, request: Optional[str] = None
) -> "C":
kind = str(params)
if kind == "special_kind":
request = "get_current_request()"
else:
if not request:
request = str(params)
return cls(id, params, kind, request)
def test():
obj = C.async_create(1, {_test_source(): _test_source()}, "")
_test_sink(obj.id)
|
d30ca881b0d545e8dc77c7e8b36638343834375b
|
eeade6c1c22887dd87aeb4cdd5a61263ea5bf26f
|
/test_runner/blog/migrations/0001_initial.py
|
38c24edfc338d8f3f2753d9dee8b03634ea95e2b
|
[
"BSD-3-Clause"
] |
permissive
|
nyaruka/smartmin
|
ce396ee0a9d2a8eacde32568c9745c176e11925f
|
5c0d9fd189f7472c6568bd5cd3b55db4574a3d44
|
refs/heads/main
| 2023-09-05T10:15:48.963165
| 2023-08-15T15:02:01
| 2023-08-15T15:02:01
| 1,928,199
| 200
| 37
|
BSD-3-Clause
| 2023-08-15T15:00:59
| 2011-06-21T08:56:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,016
|
py
|
0001_initial.py
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Category",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"is_active",
models.BooleanField(
default=True, help_text="Whether this item is active, use this instead of deleting"
),
),
(
"created_on",
models.DateTimeField(help_text="When this item was originally created", auto_now_add=True),
),
("modified_on", models.DateTimeField(help_text="When this item was last modified", auto_now=True)),
("name", models.SlugField(help_text="The name of this category", unique=True, max_length=64)),
(
"created_by",
models.ForeignKey(
related_name="blog_category_creations",
to=settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
help_text="The user which originally created this item",
),
),
(
"modified_by",
models.ForeignKey(
related_name="blog_category_modifications",
to=settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
help_text="The user which last modified this item",
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Post",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"is_active",
models.BooleanField(
default=True, help_text="Whether this item is active, use this instead of deleting"
),
),
(
"created_on",
models.DateTimeField(help_text="When this item was originally created", auto_now_add=True),
),
("modified_on", models.DateTimeField(help_text="When this item was last modified", auto_now=True)),
("title", models.CharField(help_text="The title of this blog post, keep it relevant", max_length=128)),
("body", models.TextField(help_text="The body of the post, go crazy")),
(
"order",
models.IntegerField(help_text="The order for this post, posts with smaller orders come first"),
),
("tags", models.CharField(help_text="Any tags for this post", max_length=128)),
(
"created_by",
models.ForeignKey(
related_name="blog_post_creations",
to=settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
help_text="The user which originally created this item",
),
),
(
"modified_by",
models.ForeignKey(
related_name="blog_post_modifications",
to=settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
help_text="The user which last modified this item",
),
),
],
options={
"abstract": False,
},
),
]
|
86b53aa8b63bb76593e92e46ee319a44cba1240e
|
1ed25da5d1e27cd49fb4a02acfe99aadcf2fae57
|
/pygeoapi/models/config.py
|
148f78d77e697af8cd23b856c176b0b65d45aaa4
|
[
"MIT"
] |
permissive
|
geopython/pygeoapi
|
6d2a7b0e8fe75d0c454a0b2fc3599a0b88c7567f
|
2d3ec88320cf5e1ed47b4b794f40b453bad487e2
|
refs/heads/master
| 2023-09-04T04:30:59.768950
| 2023-09-03T02:00:23
| 2023-09-03T02:00:23
| 121,585,259
| 391
| 245
|
MIT
| 2023-09-13T18:13:00
| 2018-02-15T02:46:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,273
|
py
|
config.py
|
# ****************************** -*-
# flake8: noqa
# =================================================================
#
# Authors: Sander Schaminee <sander.schaminee@geocat.net>
#
# Copyright (c) 2023 Sander Schaminee
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from pydantic import BaseModel, Field
class APIRules(BaseModel):
""" Pydantic model for API design rules that must be adhered to. """
api_version: str = Field(regex=r'^\d+\.\d+\..+$',
description="Semantic API version number.")
url_prefix: str = Field(
"",
description="If set, pygeoapi routes will be prepended with the "
"given URL path prefix (e.g. '/v1'). "
"Defaults to an empty string (no prefix)."
)
version_header: str = Field(
"",
description="If set, pygeoapi will set a response header with this "
"name and its value will hold the API version. "
"Defaults to an empty string (i.e. no header). "
"Often 'API-Version' or 'X-API-Version' are used here."
)
strict_slashes: bool = Field(
False,
description="If False (default), URL trailing slashes are allowed. "
"If True, pygeoapi will return a 404."
)
@staticmethod
def create(**rules_config) -> 'APIRules':
""" Returns a new APIRules instance for the current API version
and configured rules. """
obj = {
k: v for k, v in rules_config.items() if k in APIRules.__fields__
}
# Validation will fail if required `api_version` is missing
# or if `api_version` is not a semantic version number
return APIRules.parse_obj(obj)
@property
def response_headers(self) -> dict:
""" Gets a dictionary of additional response headers for the current
API rules. Returns an empty dict if no rules apply. """
headers = {}
if self.version_header:
headers[self.version_header] = self.api_version
return headers
def get_url_prefix(self, style: str = None) -> str:
"""
Returns an API URL prefix to use in all paths.
May include a (partial) API version. See docs for syntax.
:param style: Set to 'django', 'flask' or 'starlette' to return a
specific prefix formatted for those frameworks.
If not set, only the prefix itself will be returned.
"""
if not self.url_prefix:
return ""
        # Split at most twice so that the build part may itself contain dots
        major, minor, build = self.api_version.split('.', 2)
prefix = self.url_prefix.format(
api_version=self.api_version,
api_major=major,
api_minor=minor,
api_build=build
).strip('/')
style = (style or '').lower()
if style == 'django':
# Django requires the slash at the end
return rf"^{prefix}/"
elif style in ('flask', 'starlette'):
# Flask and Starlette need the slash in front
return f"/{prefix}"
# If no format is specified, return only the bare prefix
return prefix
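A short self-contained sketch of how the model above behaves; the values are illustrative only:

rules = APIRules.create(
    api_version="1.2.3",
    url_prefix="v{api_major}",
    version_header="X-API-Version",
)
print(rules.get_url_prefix())           # "v1"
print(rules.get_url_prefix("flask"))    # "/v1"
print(rules.get_url_prefix("django"))   # "^v1/"
print(rules.response_headers)           # {"X-API-Version": "1.2.3"}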
|
a727f69b5acf7648d2566066fbde785f73d1168f
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/android/features/tab_ui/DEPS
|
8f96d0fdee2eeb3872acd71d572ff00868331842
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,140
|
DEPS
|
include_rules = [
"+chrome/browser/android/lifecycle",
"+chrome/browser/endpoint_fetcher",
"+chrome/browser/optimization_guide/android/java/src/org/chromium/chrome/browser/optimization_guide",
"+chrome/browser/profiles/android/java",
"+chrome/browser/tab/java",
"+chrome/browser/tabmodel/android/java",
"+chrome/browser/tabpersistence/android/java",
"+chrome/browser/ui/android/favicon/java",
"+chrome/browser/ui/android/theme/java",
"+chrome/browser/ui/android/toolbar/java",
"+chrome/browser/ui/messages/android/java",
"+components/browser_ui/notifications/android",
"+components/browser_ui/styles/android",
"+components/browser_ui/widget/android",
"+components/feature_engagement/public/android/java/src/org/chromium/components/feature_engagement",
"+components/payments/content/android/java/src/org/chromium/components/payments/CurrencyFormatter.java",
"+components/search_engines/android/java/src/org/chromium/components/search_engines",
"+components/module_installer",
"+content/public/android/java/src/org/chromium/content_public/browser",
"+content/public/android/java/src/org/chromium/content_public/common",
]
|
|
761b16461a9119b8d224dbc33659b7be9e3beb54
|
5fa348b53191cc8d8cb8df2d213d1a0d1a07a820
|
/hls4ml/converters/keras/reshape.py
|
bd9d519a2a33d6e88316a462979b18b19dd537a3
|
[
"Apache-2.0"
] |
permissive
|
fastmachinelearning/hls4ml
|
29496536ca5a63a51e009d246019680bd4950007
|
4b4b5a0cf0bc59801e94b2bb64d63d8734645c3b
|
refs/heads/main
| 2023-08-31T15:52:18.564650
| 2023-08-28T22:57:50
| 2023-08-28T23:25:36
| 108,329,371
| 562
| 199
|
Apache-2.0
| 2023-09-13T17:19:05
| 2017-10-25T21:43:56
|
C++
|
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
reshape.py
|
import numpy as np
from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
from hls4ml.converters.utils import parse_data_format
@keras_handler('Flatten')
def parse_flatten_layer(keras_layer, input_names, input_shapes, data_reader):
assert keras_layer["class_name"] == 'Flatten'
layer = parse_default_keras_layer(keras_layer, input_names)
layer['class_name'] = 'Reshape'
layer['target_shape'] = [input_shapes[0][0], np.prod(input_shapes[0][1:])]
output_shape = layer['target_shape']
return layer, output_shape
@keras_handler('Reshape')
def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader):
assert keras_layer["class_name"] == 'Reshape'
layer = parse_default_keras_layer(keras_layer, input_names)
layer['target_shape'] = keras_layer['config']['target_shape']
output_shape = input_shapes[0][:1] + keras_layer['config']['target_shape']
return layer, output_shape
@keras_handler('UpSampling1D')
def parse_upsampling1d_layer(keras_layer, input_names, input_shapes, data_reader):
assert 'UpSampling' in keras_layer['class_name']
layer = parse_default_keras_layer(keras_layer, input_names)
layer['in_height'] = 1
(layer['in_width'], layer['n_chan']) = parse_data_format(input_shapes[0], layer['data_format'])
layer['algorithm'] = 'nearest'
layer['width_factor'] = keras_layer['config']['size']
layer['out_height'] = 1
layer['out_width'] = layer['in_width'] * layer['width_factor']
if layer['data_format'] == 'channels_first':
output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_width']]
else:
output_shape = [input_shapes[0][0], layer['out_width'], layer['n_chan']]
return layer, output_shape
@keras_handler('UpSampling2D')
def parse_upsampling2d_layer(keras_layer, input_names, input_shapes, data_reader):
assert 'UpSampling2D' in keras_layer['class_name']
layer = parse_default_keras_layer(keras_layer, input_names)
(layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format(input_shapes[0], layer['data_format'])
layer['algorithm'] = keras_layer['config']['interpolation']
layer['height_factor'] = keras_layer['config']['size'][0]
layer['width_factor'] = keras_layer['config']['size'][1]
layer['out_height'] = layer['in_height'] * layer['height_factor']
layer['out_width'] = layer['in_width'] * layer['width_factor']
if layer['data_format'] == 'channels_first':
output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_height'], layer['out_width']]
else:
output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_chan']]
return layer, output_shape
@keras_handler('Permute')
def parse_permute_layer(keras_layer, input_names, input_shapes, data_reader):
assert keras_layer['class_name'] == 'Permute'
layer = parse_default_keras_layer(keras_layer, input_names)
layer['class_name'] = 'Transpose'
dims = keras_layer['config']['dims']
    layer['perm'] = [dim - 1 for dim in dims]
output_shape = [input_shapes[0][0]] + [input_shapes[0][s] for s in dims]
return layer, output_shape
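To make the Flatten conversion above concrete, the target-shape arithmetic in parse_flatten_layer reduces to this (the input shape is illustrative):

import numpy as np

input_shape = [None, 4, 4, 3]   # batch, height, width, channels
target_shape = [input_shape[0], np.prod(input_shape[1:])]
print(target_shape)             # [None, 48]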
|
b0deafe57a977920a200a5aff59dbe6dbb20ea81
|
c287f29b900b4fcc1396867d5f6b099830ff216b
|
/src/portal/puffer/migrations/0007_participate.py
|
0a4d110905f57b49da0ef1e52286070d4f41a2e0
|
[] |
no_license
|
StanfordSNR/puffer
|
5660fcd243606a4223bb6769b806a99eda073041
|
a8967b4a232efeeedae392cab39042ca805314d5
|
refs/heads/master
| 2023-09-01T11:07:41.256267
| 2023-08-07T17:18:45
| 2023-08-07T17:18:45
| 106,030,394
| 739
| 123
| null | 2022-12-28T21:01:35
| 2017-10-06T17:10:05
|
C++
|
UTF-8
|
Python
| false
| false
| 603
|
py
|
0007_participate.py
|
# Generated by Django 2.1.2 on 2018-10-29 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('puffer', '0006_grafanasnapshot'),
]
operations = [
migrations.CreateModel(
name='Participate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(max_length=255)),
('request_date', models.DateTimeField(verbose_name='date requested')),
],
),
]
|
c2d7013e7af4665b9d85e5b6d86bdb3203851e0b
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/airflow/providers/sftp/sensors/sftp.py
|
f6a1278c18a11e4b76db3672fa5686fe7a75293e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,400
|
py
|
sftp.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP sensor."""
from __future__ import annotations
import os
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Sequence
from paramiko.sftp import SFTP_NO_SUCH_FILE
from airflow.providers.sftp.hooks.sftp import SFTPHook
from airflow.sensors.base import BaseSensorOperator, PokeReturnValue
from airflow.utils.timezone import convert_to_utc
if TYPE_CHECKING:
from airflow.utils.context import Context
class SFTPSensor(BaseSensorOperator):
"""
Waits for a file or directory to be present on SFTP.
:param path: Remote file or directory path
:param file_pattern: The pattern that will be used to match the file (fnmatch format)
:param sftp_conn_id: The connection to run the sensor against
:param newer_than: DateTime for which the file or file path should be newer than, comparison is inclusive
"""
template_fields: Sequence[str] = (
"path",
"newer_than",
)
def __init__(
self,
*,
path: str,
file_pattern: str = "",
newer_than: datetime | None = None,
sftp_conn_id: str = "sftp_default",
python_callable: Callable | None = None,
op_args: list | None = None,
op_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.path = path
self.file_pattern = file_pattern
self.hook: SFTPHook | None = None
self.sftp_conn_id = sftp_conn_id
self.newer_than: datetime | None = newer_than
self.python_callable: Callable | None = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
def poke(self, context: Context) -> PokeReturnValue | bool:
self.hook = SFTPHook(self.sftp_conn_id)
self.log.info("Poking for %s, with pattern %s", self.path, self.file_pattern)
files_found = []
if self.file_pattern:
files_from_pattern = self.hook.get_files_by_pattern(self.path, self.file_pattern)
if files_from_pattern:
actual_files_to_check = [
os.path.join(self.path, file_from_pattern) for file_from_pattern in files_from_pattern
]
else:
return False
else:
actual_files_to_check = [self.path]
for actual_file_to_check in actual_files_to_check:
try:
mod_time = self.hook.get_mod_time(actual_file_to_check)
self.log.info("Found File %s last modified: %s", actual_file_to_check, mod_time)
            except OSError as e:
                if e.errno != SFTP_NO_SUCH_FILE:
                    raise
                continue
if self.newer_than:
_mod_time = convert_to_utc(datetime.strptime(mod_time, "%Y%m%d%H%M%S"))
_newer_than = convert_to_utc(self.newer_than)
if _newer_than <= _mod_time:
files_found.append(actual_file_to_check)
else:
files_found.append(actual_file_to_check)
self.hook.close_conn()
        if not files_found:
return False
if self.python_callable is not None:
if self.op_kwargs:
self.op_kwargs["files_found"] = files_found
callable_return = self.python_callable(*self.op_args, **self.op_kwargs)
return PokeReturnValue(
is_done=True,
xcom_value={"files_found": files_found, "decorator_return_value": callable_return},
)
return True
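An illustrative way to wire the sensor into a DAG; the connection id, path and pattern below are placeholders, not values from this module:

from airflow.providers.sftp.sensors.sftp import SFTPSensor

wait_for_upload = SFTPSensor(
    task_id="wait_for_upload",
    sftp_conn_id="sftp_default",   # placeholder connection id
    path="/incoming",              # placeholder remote directory
    file_pattern="*.csv",          # match any CSV in that directory
    poke_interval=60,              # standard BaseSensorOperator kwarg
)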
|
1c870e656e7e542579c960f7ef268a6fc88f9655
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/L1TMuonBarrel/test/kalmanTools/runCalibration.py
|
33f92da3dbe6ff1f66d3d085ca679e9b1256eec1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 8,063
|
py
|
runCalibration.py
|
import ROOT
import itertools
import math
from DataFormats.FWLite import Events, Handle
from array import array
import numpy
def median(lst):
return numpy.median(numpy.array(lst))
def fetchSegmentsPhi(event,ontime=True,twinMux=True):
phiSeg = Handle ('L1MuDTChambPhContainer')
if twinMux:
event.getByLabel('simTwinMuxDigis',phiSeg)
else:
event.getByLabel('simDtTriggerPrimitiveDigis',phiSeg)
if ontime:
        filtered=list(filter(lambda x: x.bxNum()==0, phiSeg.product().getContainer()))
return filtered
else:
return phiSeg.product().getContainer()
def fetchSegmentsEta(event,ontime=True):
thetaSeg = Handle ('L1MuDTChambThContainer')
event.getByLabel('dtTriggerPrimitiveDigis',thetaSeg)
if ontime:
        filtered=list(filter(lambda x: x.bxNum()==0, thetaSeg.product().getContainer()))
return filtered
else:
return thetaSeg.product().getContainer()
def fetchGEANT(event):
geantH = Handle ('vector<PSimHit>')
event.getByLabel('g4SimHits:MuonDTHits',geantH)
    geant=list(filter(lambda x: x.pabs()>0.5 and abs(x.particleType())==13,geantH.product()))
return geant
def fetchGEN(event,etaMax=1.2):
genH = Handle ('vector<reco::GenParticle>')
event.getByLabel('genParticles',genH)
    genMuons=list(filter(lambda x: abs(x.pdgId())==13 and x.status()==1 and abs(x.eta())<etaMax,genH.product()))
return genMuons
def segINT(seg,f1=1,f2=1):
return seg.phi()*f1,seg.phiB()*f2
def qPTInt(qPT,bits=14):
lsb = lsBIT(bits)
floatbinary = int(math.floor(abs(qPT)/lsb))
return int((qPT/abs(qPT))*floatbinary)
def lsBIT(bits=14):
maximum=1.25
    lsb = maximum/pow(2,bits-1)
return lsb
def getTrueCurvature(muon,geant,segments):
thisMuonGEANT = filter(lambda x: (muon.charge()>0 and x.particleType()==13) or ((muon.charge()<0) and x.particleType()==-13),geant)
energyInfo={1:[], 2:[],3:[],4:[]}
qInfo={1:0.0, 2:0.0,3:0.0,4:0.0}
qInfoINT={1:0, 2:0,3:0,4:0}
for p in thisMuonGEANT:
detid=ROOT.DTChamberId(p.detUnitId())
station = detid.station()
for s in segments:
if s.stNum()==detid.station() and s.whNum()==detid.wheel() and s.scNum()==detid.sector()-1:
energyInfo[station].append(p.pabs()*muon.pt()/muon.energy())
                break
for s in [1,2,3,4]:
if len(energyInfo[s])==0:
continue
p = median(energyInfo[s])
qInfo[s]=muon.charge()/p
qInfoINT[s] = qPTInt(qInfo[s])
return qInfo,qInfoINT
def matchTrack(muon,segments,geant):
thisMuonGEANT = filter(lambda x: (muon.charge()>0 and x.particleType()==13) or ((muon.charge()<0) and x.particleType()==-13),geant)
chambers=[]
for p in thisMuonGEANT:
detid=ROOT.DTChamberId(p.detUnitId())
chambers.append(p.detUnitId())
chambers=list(set(chambers))
assocSeg=[]
for s in segments:
for c in chambers:
detid=ROOT.DTChamberId(c)
if s.whNum()==detid.wheel() and s.stNum()==detid.station() and s.scNum()==detid.sector()-1:
if not (s in assocSeg):
assocSeg.append(s)
return assocSeg
events = Events([
'file:singleMuonOfficial.root',
]
)
stations=[1,2,3,4]
PHISCALE=pow(2,11)
PHIBSCALE=pow(2,9)
PHIFACTOR = 1
PHIBFACTOR =8
RELFACTOR = 1
DROR = {4:0.147*RELFACTOR,3:0.173*RELFACTOR,2:0.154*RELFACTOR}
DRORB = {4:(1+0.147),3:(1+0.173),2:(1+0.154)}
alpha = {4:-0.0523,3:-0.0793,2:-0.0619}
beta = {4:0.069,3:0.079,2:0.055}
DRORCHI = {4: (726.-433.)/433. ,
3: (619.-433.)/433. ,
2: (512.-433.)/433.}
binsk = 200
maxk=8192
histos={}
offset={1:0.156,2:0.138,3:0.775,4:0.0}
offsetINV={1:0.207,2:0.,3:0.,4:0.0}
histos['phiProp']={}
histos['phiPropChi']={}
histos['phiBProp']={}
histos['curvFromPhiB']={}
histos['curvFromDPhi']={}
histos['phiBFromCurv']={}
histos['phiPropChiV']={}
histos['deltaPhiVsPhiB']={}
for i,j in itertools.permutations([1,2,3,4],2):
if not (i in histos['deltaPhiVsPhiB'].keys()):
histos['deltaPhiVsPhiB'][i]={}
histos['deltaPhiVsPhiB'][i][j]=ROOT.TH2D("deltaPhiVsPhiB_"+str(i)+"_"+str(j),"",256,-511,512,2048,-2047,2048)
if not (i in histos['curvFromDPhi'].keys()):
histos['curvFromDPhi'][i]={}
histos['curvFromDPhi'][i][j]=ROOT.TH2D("curvFromDPhi_"+str(i)+"_"+str(j),"",512,-2047,2048,1024,-8192,8192)
for s in [1,2,3,4]:
histos['curvFromPhiB'][s]=ROOT.TH2D("curvFromPhiB_"+str(s),"",1024,-512,511,4*400,-8*400,8*400)
histos['phiBFromCurv'][s]=ROOT.TH2D("phiBFromCurv_"+str(s),"",256,-512,511,256,-511,512)
histos['phiProp'][s]=ROOT.TH2D("phiProp_"+str(s),"",binsk,-maxk,maxk,50,-200,200)
histos['phiPropChiV'][s]=ROOT.TH2D("phiPropChiV_"+str(s),"",binsk,-maxk,maxk,50,-200,200)
histos['phiPropChi'][s]=ROOT.TH2D("phiPropChi_"+str(s),"",binsk,-3000,3000,50,-200,200)
histos['phiBProp'][s]=ROOT.TH2D("phiBProp_"+str(s),"",binsk,-maxk,maxk,100,-2000,2000)
N=0
for event in events:
N=N+1
if N==100000:
        break
genMuons=fetchGEN(event)
segments=fetchSegmentsPhi(event)
segmentsTheta=fetchSegmentsEta(event)
geant=fetchGEANT(event)
segmentsTheta=sorted(segmentsTheta,key=lambda x: x.stNum())
for g in genMuons:
trueK,trueKINT = getTrueCurvature(g,geant,segments)
cotTheta = int(g.eta()/0.010875)
segTheta=matchTrack(g,segmentsTheta,geant)
seg=matchTrack(g,segments,geant)
for s in seg:
phi,phiB=segINT(s,PHIFACTOR,PHIBFACTOR)
histos['curvFromPhiB'][s.stNum()].Fill(s.phiB(),trueKINT[s.stNum()])
# histos['phiBFromCurv'][s.stNum()].Fill(trueKINT[s.stNum()]>>4,phiB)
histos['phiBFromCurv'][s.stNum()].Fill(qPTInt(g.charge()/g.pt())>>4,s.phiB())
for s1,s2 in itertools.permutations(seg,2):
phi1,phiB1=segINT(s1,PHIFACTOR,PHIBFACTOR)
phi2,phiB2 = segINT(s2,PHIFACTOR,PHIBFACTOR)
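            # Adjacent sectors: shift phi2 by one sector width (2144 units)
            # so both segments are expressed in the same local phi frame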
if (s2.scNum()==s1.scNum()+1) or (s1.scNum()==11 and s2.scNum()==0) :
phi2=phi2+2144
if (s2.scNum()==s1.scNum()-1) or (s1.scNum()==0 and s2.scNum()==11) :
phi2=phi2-2144
if s1.code()>4 and (s1.stNum()!=s2.stNum()):
histos['deltaPhiVsPhiB'][s1.stNum()][s2.stNum()].Fill(s1.phiB(),phi2-phi1)
histos['curvFromDPhi'][s1.stNum()][s2.stNum()].Fill(phi2-phi1,qPTInt(g.charge()/g.pt()))
if s1.stNum()+1==s2.stNum():
if s2.scNum()==s1.scNum()+1 or (s2.scNum()==0 and s1.scNum()==11):
phi2=phi2+2144
if s1.scNum()==s2.scNum()+1 or (s2.scNum()==11 and s1.scNum()==0):
phi2=phi2-2144
st=s2.stNum()
qPT=trueKINT[st]
propPhi = phi2-phiB2*DROR[st]+alpha[st]*qPT
propPhiB =DRORB[st]*phiB2+beta[st]*qPT
histos['phiProp'][s2.stNum()].Fill(trueKINT[s2.stNum()],(phi1-phi2)+DROR[s2.stNum()]*phiB2)
histos['phiBProp'][s2.stNum()].Fill(trueKINT[s2.stNum()],phiB1-DRORB[s2.stNum()]*phiB2)
            # for chi2, look only from station 1 -> 2,3,4
if s1.stNum()==1 and s2.stNum()!=1:
histos['phiPropChi'][s2.stNum()].Fill(trueKINT[s1.stNum()],(phi2-phi1)+DRORCHI[s2.stNum()]*phiB1)
histos['phiPropChiV'][s2.stNum()].Fill(qPTInt(g.charge()/g.pt()),(phi2-phi1))
f=ROOT.TFile("calibrationConstants.root","RECREATE")
for s in [4,3,2,1]:
histos['phiProp'][s].Write()
histos['phiPropChi'][s].Write()
histos['phiPropChiV'][s].Write()
histos['phiBProp'][s].Write()
histos['curvFromPhiB'][s].Write()
histos['phiBFromCurv'][s].Write()
for i,j in itertools.permutations([1,2,3,4],2):
histos['deltaPhiVsPhiB'][i][j].Write()
histos['curvFromDPhi'][i][j].Write()
f.Close()
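A quick numerical check of the fixed-point helpers above: with the default 14 bits the LSB is 1.25 / 2**13 ≈ 1.526e-4, so a curvature of 0.5 maps to floor(0.5 / lsb) = 3276 with the sign reattached:

print(lsBIT())        # 0.000152587890625
print(qPTInt(0.5))    # 3276
print(qPTInt(-0.5))   # -3276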
|